repo_name (string, lengths 6-130) | hexsha (list) | file_path (list) | code (list) | apis (list) | possible_versions (list) |
---|---|---|---|---|---|
Jerryxiaoyu/my_baselines | [
"c0163328e33dd05713e2139d2c1703fc5f661be3"
] | [
"my_envs/mujoco/cellrobotFull.py"
] | [
"import numpy as np\nfrom gym import utils\nfrom my_envs.mujoco import mujoco_env\nfrom math import pi,sin,cos\nfrom transformations import quaternion_inverse, quaternion_multiply, euler_from_quaternion\nimport os\n\nstate_M =np.array([[1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.],\n [0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.],\n [0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]])\n\nfrom my_envs.base.ExperienceDataset import DataBuffer\nfrom my_envs.base.command_generator import command_generator\nimport time\n\n\nclass CellRobotEnvFull(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self):\n\n num_joint =13\n policy_a_dim =13 # networt output\n self.command = command_generator(10000, 0.01, 2)\n self.c_index = 0\n self.c_index_max = 10000\n self.action_pre = 0\n dt = 0.01\n self.buffer_mode =1\n self.num_buffer = 0\n self.deta_vel = True\n self.goal_orien_yaw = 0\n\n self.command_vx_low = 0\n self.command_vx_high = 0.5\n self.command_vy_low = 0\n self.command_vy_high = 0\n self.command_wz_low = 0\n self.command_wz_high = 0\n\n self.command_max_step = 10000 # steps\n self.command_duration = 2 # second\n\n\n self.Calc_Reward = self.reward_fun1\n\n if self.buffer_mode == 1:\n self.size_buffer_data = num_joint * 2 + policy_a_dim\n\n elif self.buffer_mode ==2:\n self.size_buffer_data = num_joint * 2 + policy_a_dim +6\n elif self.buffer_mode ==3:\n self.size_buffer_data = num_joint * 2\n elif self.buffer_mode == 4:\n self.size_buffer_data = num_joint * 2 +6\n else:\n raise Exception(\"buffer_mode is not correct!\")\n\n\n self.history_buffer = DataBuffer(num_size_per=self.size_buffer_data, max_trajectory=self.num_buffer)\n\n mujoco_env.MujocoEnv.__init__(self, 'cellrobot/cellrobot_Quadruped_float_simple.xml', 1) #cellrobot_test_gen CR_quadruped_v1_A001 'cellrobot/cellrobot_test_gen.xml' Atlas_v5/atlas_v5.xml\n utils.EzPickle.__init__(self)\n\n\n \n def step(self, a):\n action = a\n\n v_commdand = self.command[self.c_index, :3]\n\n pose_pre = np.concatenate((self.get_body_com(\"torso\"), self.get_orien() ))\n\n\n self.do_simulation(action, self.frame_skip)\n\n\n\n obs = self._get_obs()\n\n state = self.state_concatenate(obs, pose_pre, self.history_buffer, self.command[self.c_index], num_buffer=self.num_buffer)\n\n\n if self.buffer_mode == 1:\n toStoreData = np.concatenate((obs[6:32], action), axis=0)\n elif self.buffer_mode ==2:\n toStoreData = np.concatenate((obs[0:32], action), axis=0)\n elif self.buffer_mode ==3:\n toStoreData = np.concatenate((obs[6:32] ), axis=0)\n elif self.buffer_mode == 4:\n toStoreData = np.concatenate((obs[0:32]), axis=0)\n else:\n raise Exception(\"buffer_mode is not correct!\")\n self.history_buffer.push(toStoreData)\n\n\n\n pose_post = obs[:6]\n velocity_base = (pose_post - pose_pre)/self.dt #dt 0.01\n\n reward, other_rewards = self.Calc_Reward(velocity_base, v_commdand, action, obs)\n\n self.action_pre = action\n self.c_index += 1\n\n # confirm if done\n q_state = self.state_vector()\n 
notdone = np.isfinite(q_state).all() \\\n and state[2] >= 0.1 and state[2] <= 0.6\n done = not notdone\n\n\n #print('t1 = {}, t2 ={}, t3 = {}', end_t1, end_t2, end_t3)\n\n return state, reward, done, dict(\n velocity_base = velocity_base,\n commands = v_commdand,\n rewards = other_rewards,\n obs=obs\n )\n\n def _get_obs(self):\n orien = self.get_orien()\n\n\n obs =np.concatenate([\n self.get_body_com(\"torso\").flat, # base x, y, z 0-3\n orien , # oren 3-6\n state_M.dot(self.sim.data.qpos[7:].reshape((-1, 1))).flat, # joint positon 6-19\n state_M.dot(self.sim.data.qvel[6:].reshape((-1, 1))).flat # joint velosity 19-32\n ])\n\n return obs\n\n\n\n def reset_model(self,command = None, reward_fun_choice = None):\n qpos = self.init_qpos # + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)\n qvel = self.init_qvel # + self.np_random.randn(self.model.nv) * .1\n self.set_state(qpos, qvel)\n self.goal_theta = pi / 4.0\n self.model.site_pos[1] = [cos(self.goal_theta), sin(self.goal_theta), 0]\n\n\n reward_choice = os.getenv('REWARD_CHOICE')\n if reward_choice is None:\n print('REWARD_CHOICE is not specified!')\n reward_fun_choice_env = 1\n else:\n reward_fun_choice_env = int(reward_choice)\n\n if command is None:\n self.command = command_generator(self.command_max_step, self.dt, self.command_duration,\n vx_range=(self.command_vx_low, self.command_vx_high),\n vy_range=(self.command_vy_low, self.command_vy_high),\n wyaw_range=(self.command_wz_low, self.command_wz_high))\n else:\n self.command = command\n\n if reward_fun_choice == 1:\n self.Calc_Reward = self.reward_fun1\n elif reward_fun_choice == 2:\n self.Calc_Reward = self.reward_fun2\n elif reward_fun_choice == 3:\n self.Calc_Reward = self.reward_fun3\n elif reward_fun_choice == 4:\n self.Calc_Reward = self.reward_fun4\n elif reward_fun_choice == 5:\n self.Calc_Reward = self.reward_fun5\n elif reward_fun_choice == 6:\n self.Calc_Reward = self.reward_fun6\n elif reward_fun_choice == 7:\n self.Calc_Reward = self.reward_fun7\n elif reward_fun_choice == 8:\n self.Calc_Reward = self.reward_fun8\n elif reward_fun_choice is None:\n self.Calc_Reward = self.reward_fun1\n reward_fun_choice = 1\n else:\n raise Exception('reward fun error!')\n\n self.goal_orien_yaw = 0\n # print('Reward function: ', reward_fun_choice)\n self.c_index = 0\n self.history_buffer = DataBuffer(num_size_per=self.size_buffer_data, max_trajectory=self.num_buffer)\n\n pre_pose = np.zeros(6)\n obs = self._get_obs()\n action = np.zeros(13)\n\n state = self.state_concatenate(obs, pre_pose, self.history_buffer, self.command[self.c_index], num_buffer=self.num_buffer)\n\n if self.buffer_mode == 1:\n toStoreData = np.concatenate((obs[6:32], action), axis=0)\n elif self.buffer_mode ==2:\n toStoreData = np.concatenate((obs[0:32], action), axis=0)\n elif self.buffer_mode ==3:\n toStoreData = np.concatenate((obs[6:32] ), axis=0)\n elif self.buffer_mode == 4:\n toStoreData = np.concatenate((obs[0:32]), axis=0)\n else:\n raise Exception(\"buffer_mode is not correct!\")\n self.history_buffer.push(toStoreData)\n\n\n return state\n\n def viewer_setup(self):\n self.viewer.cam.distance = self.model.stat.extent * 0.5\n \n def state_vector(self):\n return np.concatenate([\n self.sim.data.qpos.flat,\n self.sim.data.qvel.flat\n ])\n\n def get_pose(self):\n pos = self.sim.data.qpos[:3]\n\n q_g2 = self.sim.data.qpos[3:7]\n\n q = np.array([0.5000, 0.5000, 0.5000, -0.5000])\n\n R_q = quaternion_multiply(q_g2, quaternion_inverse(q))\n print(q_g2, q, R_q)\n\n orien = euler_from_quaternion(R_q, 
axes='sxyz')\n\n # # 以上计算效率不一定高\n # Tg2_e = quat2tform(q_g2)\n # Tg1 = Tg1 =\n #\n # 0.0000 1.0000 0 0\n # -0.0000 0.0000 -1.0000 0\n # -1.0000 0.0000 0.0000 0\n # 0 0 0 1.0000\n #\n # XYZ = tform2eul(Tg2_e * inv(Tg1), 'XYZ')\n\n pos = np.concatenate((pos, orien))\n return pos\n\n def get_orien(self):\n #pos = self.sim.data.qpos[:3]\n\n q_g2 = self.sim.data.qpos[3:7]\n\n q = np.array([0.5000, 0.5000, 0.5000, -0.5000])\n\n R_q = quaternion_multiply(q_g2, quaternion_inverse(q))\n #print(q_g2, q, R_q)\n\n orien = euler_from_quaternion(R_q, axes='sxyz')\n\n\n return orien\n\n def state_concatenate(self, obs ,pose_pre, history_buffer, command, num_buffer=2 ):\n \"\"\"\n\n :param obs:\n :param history_buffer:\n :param command:\n :return:\n \"\"\"\n\n data_tmp = history_buffer.pull().copy()[::-1] # reverse output\n data_size = history_buffer.num_size_per\n\n if len(data_tmp) == 0:\n data_history = np.zeros(data_size * num_buffer)\n else:\n for i in range(len(data_tmp)):\n if i == 0:\n data_history = data_tmp[0]\n else:\n data_history = np.append(data_history, data_tmp[i])\n if len(data_tmp) < num_buffer:\n for i in range(num_buffer - len(data_tmp)):\n data_history = np.append(data_history, np.zeros(data_size))\n\n state = obs\n if num_buffer >0:\n state = np.append(state, data_history.reshape((1, -1)))\n \n\n if self.deta_vel :\n vel = ( np.concatenate((obs[:2], obs[5:6])) - np.concatenate((pose_pre[:2], pose_pre[5:6])) )/self.dt\n v_e = vel - command\n\n state = np.append(state, v_e)\n else:\n state = np.append(state, command)\n\n return state\n\n def reward_fun1(self,velocity_base, v_commdand, action, obs):\n v_e = np.concatenate((velocity_base[:2], velocity_base[-1:])) - v_commdand # x, y, yaw\n vxy = v_commdand[:2]\n wyaw = v_commdand[2]\n\n c_f = -1\n c_f2 = -0.2\n forward_reward = c_f * np.linalg.norm(velocity_base[0:2] - vxy) + c_f2 * np.linalg.norm(velocity_base[2] - wyaw)\n\n\n ctrl_cost = -0.005 * np.square(action).sum()\n contact_cost = -0.5 * 1e-3 * np.sum(np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))\n survive_reward = 0.5\n reward = forward_reward + ctrl_cost + contact_cost + survive_reward\n other_rewards = np.array([reward, forward_reward, ctrl_cost, contact_cost, survive_reward])\n\n return reward, other_rewards\n\n def reward_fun2(self, velocity_base, v_commdand, action, obs):\n #print('reward2')\n v_e = np.concatenate((velocity_base[:2], velocity_base[-1:])) - v_commdand # x, y, yaw\n vxy = v_commdand[:2]\n wyaw = v_commdand[2]\n\n c_f = -1\n c_f2 = -0.2\n forward_reward = c_f * K_kernel2(velocity_base[0:2] - vxy) + c_f2 * K_kernel2(velocity_base[2] - wyaw)\n\n ctrl_cost = -0.005 * np.square(action).sum()\n contact_cost = -0.5 * 1e-3 * np.sum(np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))\n survive_reward = 0.2\n reward = forward_reward + ctrl_cost + contact_cost + survive_reward\n other_rewards = np.array([reward, forward_reward, ctrl_cost, contact_cost, survive_reward])\n\n return reward, other_rewards\n\n def reward_fun3(self, velocity_base, v_commdand, action, obs):\n # print('reward2')\n v_e = np.concatenate((velocity_base[:2], velocity_base[-1:])) - v_commdand # x, y, yaw\n vxy = v_commdand[:2]\n wyaw = v_commdand[2]\n\n c_f = -2\n c_f2 = -0.2\n forward_reward = c_f * K_kernel3(velocity_base[0:2] - vxy) + c_f2 * K_kernel3(velocity_base[2] - wyaw)\n\n ctrl_cost = -0.005 * np.square(action).sum()\n contact_cost = -0.5 * 1e-3 * np.sum(np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))\n survive_reward = 0.2\n reward = forward_reward + ctrl_cost + contact_cost + 
survive_reward\n other_rewards = np.array([reward, forward_reward, ctrl_cost, contact_cost, survive_reward])\n\n return reward, other_rewards\n def reward_fun4(self, velocity_base, v_commdand, action, obs):\n # print('reward2')\n v_e = np.concatenate((velocity_base[:2], velocity_base[-1:])) - v_commdand # x, y, yaw\n vxy = v_commdand[:2]\n wyaw = v_commdand[2]\n\n\n c_f = -30 * self.dt\n c_f2 = -6* self.dt\n forward_reward = c_f * K_kernel3(velocity_base[0:2] - vxy) + c_f2 * K_kernel3(velocity_base[2] - wyaw)\n\n ctrl_cost = -0.005 * np.square(action).sum()\n contact_cost = -0.5 * 1e-3 * np.sum(np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))\n survive_reward = 0.2\n reward = forward_reward + ctrl_cost + contact_cost + survive_reward\n other_rewards = np.array([reward, forward_reward, ctrl_cost, contact_cost, survive_reward])\n\n return reward, other_rewards\n\n def reward_fun5(self, velocity_base, v_commdand, action, obs):\n v_e = np.concatenate((velocity_base[:2], velocity_base[-1:])) - v_commdand # x, y, yaw\n vxy = v_commdand[:2]\n wyaw = v_commdand[2]\n q_vel = obs[19:32]\n orien = obs[3:6]\n\n vx = v_commdand[0]\n vy = v_commdand[1]\n\n # reward calculate\n kc = 1\n c_w = -2 * self.dt\n c_v1 = -10 * self.dt\n c_v2 = -1 * self.dt\n # lin_vel_cost = c_v1 * K_kernel(c_v2 * (velocity_base[:2] - vxy))\n lin_vel_reward = c_v1 *np.linalg.norm(velocity_base[0:2] - vxy) #np.linalg.norm(velocity_base[0] - vx) + np.linalg.norm(velocity_base[1] - vy) # c_v1 * (K_kernel3((velocity_base[0] - vx)) + K_kernel3((velocity_base[1] - vy)))\n ang_vel_reward = c_w * np.linalg.norm(velocity_base[-1] - wyaw)\n\n c_t = 0.0005 * self.dt\n torque_cost = -kc * c_t * np.square(action).sum()\n\n c_js = 0.03 * self.dt\n joint_speed_cost = -kc * c_js * np.square(q_vel).sum()\n\n c_0 = 0.4 * self.dt\n orientation_cost = 0\n # orientation_cost = kc * c_0 * np.sqrt([0,0,-1] - orien).sum() # TODO need to debug , otherwise output nan\n c_s = 0.5 * self.dt\n smoothness_cost = -kc * c_s * np.square(self.action_pre - action).sum()\n survive_reward = 0.2\n reward = lin_vel_reward + ang_vel_reward + torque_cost + joint_speed_cost + orientation_cost + smoothness_cost + survive_reward\n\n #reward = self.reward_fir.apply(reward)\n\n other_rewards = np.array([reward, lin_vel_reward, ang_vel_reward, torque_cost, joint_speed_cost,orientation_cost, smoothness_cost, survive_reward ])\n\n return reward, other_rewards\n\n def reward_fun6(self, velocity_base, v_commdand, action, obs):\n '''\n add orien\n :param velocity_base:\n :param v_commdand:\n :param action:\n :param obs:\n :return:\n '''\n v_e = np.concatenate((velocity_base[:2], velocity_base[-1:])) - v_commdand # x, y, yaw\n vxy = v_commdand[:2]\n wyaw = v_commdand[2]\n q_vel = obs[19:32]\n orien = obs[3:6]\n\n vx = v_commdand[0]\n vy = v_commdand[1]\n\n # reward calculate\n kc = 1\n c_w = -2 * self.dt\n c_v1 = -10 * self.dt\n c_v2 = -1 * self.dt\n # lin_vel_cost = c_v1 * K_kernel(c_v2 * (velocity_base[:2] - vxy))\n lin_vel_reward = c_v1 * np.linalg.norm(velocity_base[\n 0:2] - vxy) # np.linalg.norm(velocity_base[0] - vx) + np.linalg.norm(velocity_base[1] - vy) # c_v1 * (K_kernel3((velocity_base[0] - vx)) + K_kernel3((velocity_base[1] - vy)))\n ang_vel_reward = c_w * np.linalg.norm(velocity_base[-1] - wyaw)\n\n c_t = 0.0005 * self.dt\n torque_cost = -kc * c_t * np.square(action).sum()\n\n c_js = 0.03 * self.dt\n joint_speed_cost = -kc * c_js * np.square(q_vel).sum()\n\n c_0 = 0.4 * self.dt\n orientation_cost = 0\n orientation_cost = kc * c_0 * np.square([0,0] - 
orien[:2]).sum() # TODO need to debug , otherwise output nan\n c_s = 0.5 * self.dt\n smoothness_cost = -kc * c_s * np.square(self.action_pre - action).sum()\n survive_reward = 0.2\n reward = lin_vel_reward + ang_vel_reward + torque_cost + joint_speed_cost + orientation_cost + smoothness_cost + survive_reward\n\n # reward = self.reward_fir.apply(reward)\n\n other_rewards = np.array(\n [reward, lin_vel_reward, ang_vel_reward, torque_cost, joint_speed_cost, orientation_cost, smoothness_cost,\n survive_reward])\n\n return reward, other_rewards\n def reward_fun7(self, velocity_base, v_commdand, action, obs):\n '''\n integal orien\n :param velocity_base:\n :param v_commdand:\n :param action:\n :param obs:\n :return:\n '''\n v_e = np.concatenate((velocity_base[:2], velocity_base[-1:])) - v_commdand # x, y, yaw\n vxy = v_commdand[:2]\n wyaw = v_commdand[2]\n q_vel = obs[19:32]\n orien = obs[3:6]\n\n vx = v_commdand[0]\n vy = v_commdand[1]\n\n # reward calculate\n kc = 1\n c_w = -2 * self.dt\n c_v1 = -10 * self.dt\n c_v2 = -1 * self.dt\n # lin_vel_cost = c_v1 * K_kernel(c_v2 * (velocity_base[:2] - vxy))\n lin_vel_reward = c_v1 * np.linalg.norm(velocity_base[\n 0:2] - vxy) # np.linalg.norm(velocity_base[0] - vx) + np.linalg.norm(velocity_base[1] - vy) # c_v1 * (K_kernel3((velocity_base[0] - vx)) + K_kernel3((velocity_base[1] - vy)))\n ang_vel_reward = c_w * np.linalg.norm(velocity_base[-1] - wyaw)\n\n c_t = 0.0005 * self.dt\n torque_cost = -kc * c_t * np.square(action).sum()\n\n c_js = 0.03 * self.dt\n joint_speed_cost = -kc * c_js * np.square(q_vel).sum()\n\n c_0 = 0.4 * self.dt\n orientation_cost = 0\n orientation_cost = kc * c_0 * np.square([0, 0] - orien[:2]).sum() # TODO need to debug , otherwise output nan\n c_s = 0.5 * self.dt\n smoothness_cost = -kc * c_s * np.square(self.action_pre - action).sum()\n survive_reward = 0.2\n\n c_y = 5 * self.dt\n orien_yaw_cost = - c_y * np.linalg.norm(orien[-1]- self.goal_orien_yaw)\n reward = lin_vel_reward + ang_vel_reward + torque_cost + joint_speed_cost + orientation_cost + smoothness_cost + survive_reward + orien_yaw_cost\n\n # reward = self.reward_fir.apply(reward)\n\n other_rewards = np.array(\n [reward, lin_vel_reward, ang_vel_reward, torque_cost, joint_speed_cost, orientation_cost, smoothness_cost,\n survive_reward, orien_yaw_cost])\n\n return reward, other_rewards\n\n\ndef K_kernel(x):\n x = np.linalg.norm(x)\n K = -1 / (np.exp(x) + 2 + np.exp(-x))\n return K\n\n\ndef K_kernel2(x):\n x = np.linalg.norm(x)\n x = np.clip(x, -10, 10)\n K = -1 / (np.exp(x / 0.2) + np.exp(-x / 0.2))\n return K\n\ndef K_kernel3(x):\n x = np.linalg.norm(x)\n K = -1 / (np.exp(x/0.1) + 2 + np.exp(-x/0.1))\n return K\n\n\n\n\n\n\n"
] | [
[
"numpy.square",
"numpy.isfinite",
"numpy.clip",
"numpy.linalg.norm",
"numpy.concatenate",
"numpy.append",
"numpy.exp",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Astroua/M33_NOEMA | [
"872b72fc06804d3dbfcb45c6b927c1e34c55a667"
] | [
"analysis/flux_recovery.py"
] | [
"\n'''\nCompare the flux in the masked NOEMA and 30-m data.\n'''\n\nfrom spectral_cube import SpectralCube\nfrom astropy.io import fits\nimport numpy as np\nimport astropy.units as u\n\nfrom cube_analysis.feather_cubes import flux_recovery\n\nfrom paths import noema_co21_file_dict, iram_matched_data_path\nfrom constants import co21_freq\n\nnoema_cube = SpectralCube.read(noema_co21_file_dict['Cube'])\nnoema_mask = fits.open(noema_co21_file_dict['Source_Mask'])\nnoema_cube = noema_cube.with_mask(noema_mask[0].data > 0)\n\nnoema_cube.allow_huge_operations = True\n\niram_cube = SpectralCube.read(iram_matched_data_path(\"m33.co21_iram.noema_spatialregion.fits\"))\n\n# Remove padded area\niram_subcube = iram_cube.subcube(xlo=iram_cube.longitude_extrema[0],\n xhi=iram_cube.longitude_extrema[1],\n ylo=iram_cube.latitude_extrema[0],\n yhi=iram_cube.latitude_extrema[1])\n\n# iram_subcube = iram_subcube.spectral_interpolate(noema_cube.spectral_axis)\n\n# total_hires, total_lores = flux_recovery(noema_cube, iram_subcube,\n# frequency=co21_freq,\n# doplot=True)\n\niram_mom0 = iram_subcube.moment0()\nnoema_mom0 = noema_cube.moment0()\n\n# Adjust for difference in the pixel area\niram_total = np.nansum(iram_mom0) * (iram_mom0.header['CDELT2'] * u.deg)**2\nnoema_total = np.nansum(noema_mom0) * (noema_mom0.header['CDELT2'] * u.deg)**2\n\nprint(\"IRAM {0}; NOEMA {1}\".format(iram_total, noema_total))\n# IRAM 1.35010104124 deg2 K m / s; NOEMA 1.01810349853 deg2 K m / s\n\nprint(\"Flux recovered by NOEMA: {}\".format(noema_total / iram_total))\n# Flux recovered by NOEMA: 0.75409429919\n"
] | [
[
"numpy.nansum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
haiwangyang/PP | [
"0c470c39adc8453ecfb336e1374e2ac1fcc1ad80"
] | [
"Parse.Pico.Ribo.Reads.py"
] | [
"#!/usr/bin/env python\n'''\nParse PicoGreen/RiboGreen results from from Gemini XPS Microplate Reader\nConvert 384 well plate format to 96 well plate format\nWrite to different excel spreadsheets\n\nexample:\npython Parse.Pico.Ribo.Reads.py -f data/pico.reads.txt -s data/standards.txt -a1 1st_plate -a2 2nd_plate -b1 3rd_plate -b2 4th_plate\n'''\n\nfrom __future__ import print_function\nimport re\nimport argparse\nimport pandas as pd\nfrom numpy import array\nfrom numpy.random import rand, randn\nimport statsmodels.formula.api as sm\nimport matplotlib.pyplot as plt\n\n#############\n### functions\n#############\ndef parse_matrix_of_data_from_plate(df, letters, numbers):\n ''' obtain matrix of data from 384 plate\n\tletters are row IDs\n\tnumbers are column IDs\n '''\n matrix = []\n for letter in letters:\n row = []\n for number in numbers:\n row.append(df[number][letter])\n matrix.append(row)\n df2 = pd.DataFrame(array(matrix),index = abcdefgh, columns = n_1_to_12)\n return(df2)\n\ndef writeToExcel(df, sheet_name):\n ''' write data frame to excel spread sheets '''\n df.to_excel(writer, sheet_name=sheet_name, startrow=0 , startcol=0)\n df_stack = df.stack()\n df_stack_multipleindex = df_stack.reset_index()\n new_index = [ df_stack_multipleindex[\"level_0\"][i] + str(df_stack_multipleindex[\"level_1\"][i]) for i in range(len(df_stack_multipleindex[\"level_0\"])) ]\n df_stack_singleindex = pd.DataFrame(df_stack.as_matrix(), index = new_index, columns = [\"reads\"])\n df_stack_singleindex.to_excel(writer, sheet_name= sheet_name, startrow=0 , startcol=15)\n\n\n############################\n### obtain args from command\n############################\nparser = argparse.ArgumentParser(description='please provide filename for Pico/Ribo reads')\nparser.add_argument('-f', '--filename_result', type=str)\nparser.add_argument('-s', '--filename_standard', type=str) # filename of Pico/Ribo standard\n\n# four possible positions in the upper left corner of a 384 well plate\n\"\"\"\n a1 | a2\n---- ----\n b1 | b2\n\"\"\"\nparser.add_argument('-a1', '--a1_plate_name', type=str)\nparser.add_argument('-a2', '--a2_plate_name', type=str)\nparser.add_argument('-b1', '--b1_plate_name', type=str)\nparser.add_argument('-b2', '--b2_plate_name', type=str)\n\nargs = parser.parse_args()\nfilename_result = args.filename_result\nfilename_standard = args.filename_standard\na1_plate_name = args.a1_plate_name\na2_plate_name = args.a2_plate_name\nb1_plate_name = args.b1_plate_name\nb2_plate_name = args.b2_plate_name\n\n\n#######################################################################\n### define row IDs and column IDs of plates (both 384 well and 96 well)\n####################################################################### \n# define row IDs (letters)\na_to_p = [chr(i) for i in range(ord('a'),ord('p')+1)]\nacegikmo = a_to_p[::2]\nbdfhjlnp = a_to_p[1::2]\nabcdefgh = a_to_p[:8]\n\n# define column IDs (numbers)\nn_1_to_24 = list(range(1,25))\nn_odd = list(map(int,n_1_to_24[::2]))\nn_even = list(map(int, n_1_to_24[1::2]))\nn_1_to_12 = list(range(1,13))\n\n\n#################################\n### fetch data of Pico/Ribo reads\n#################################\n''' fetch Pico/Ribo reads of whole samples '''\nwholeMatrix = []\nfor line in open(filename_result,\"r\"):\n lst = line.rstrip().lstrip().split(\"\\t\")\n if len(lst) == 24:\n wholeMatrix.append(list(map(float,lst)))\n\ndf = pd.DataFrame(array(wholeMatrix),index = a_to_p, columns = n_1_to_24)\n\n\n#####################################\n### fetch data of Pico/Ribo 
standards\n#####################################\n''' get well IDs and corresponding concentrations'''\nstandardDict = {}\nfor line in open(filename_standard,\"r\"):\n if line.startswith(tuple(a_to_p)): # if startswith well ID\n lst = line.rstrip().split(\"\\t\")\n standardDict[lst[0]] = lst[1]\n\n''' fetch Pico/Ribo reads of standards '''\nstandardMatrix = [] \t\t\nfor well in sorted(standardDict):\n letter, number = well[:1], well[1:]\n concentration = standardDict[well]\n reads = df[int(number)][letter]\n standardMatrix.append([float(reads),float(concentration)])\n\ndf_std = pd.DataFrame(array(standardMatrix),columns = [\"reads\",\"concentration(ng/ul)\"]).sort(\"concentration(ng/ul)\")\n\n##############################################\n### parse data and write to excel spreadsheets\n##############################################\nwriter = pd.ExcelWriter(filename_result.replace(\"txt\", \"xlsx\"), engine='xlsxwriter')\n\n''' raw data in 384 well format '''\ndf.to_excel(writer, sheet_name='raw')\n\n''' reads of Pico/Ribo standards and their known concentration (ng/ul) '''\ndf_std.to_excel(writer, sheet_name='standard')\n\n''' write 96 well format for each position (if data is available)\n a1 | a2\n---- ----\n b1 | b2\n'''\nif a1_plate_name:\n a1_df = parse_matrix_of_data_from_plate(df, acegikmo, n_odd)\n writeToExcel(a1_df, a1_plate_name)\n\nif b1_plate_name:\n b1_df = parse_matrix_of_data_from_plate(df, bdfhjlnp, n_odd)\n writeToExcel(b1_df, b1_plate_name)\n\nif a2_plate_name:\n a2_df = parse_matrix_of_data_from_plate(df, acegikmo, n_even)\n writeToExcel(a2_df, a2_plate_name)\n\t\nif b2_plate_name:\n b2_df = parse_matrix_of_data_from_plate(df, bdfhjlnp, n_even)\n writeToExcel(b2_df, b2_plate_name)\n\nwriter.close()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AliengirlLiv/dreamer-pytorch-1 | [
"33979d7c61d0406d27ea46b9dcbbd823f765a518"
] | [
"utils.py"
] | [
"import os\nimport cv2\nimport numpy as np\nimport plotly\nfrom plotly.graph_objs import Scatter\nfrom plotly.graph_objs.scatter import Line\nimport torch\nfrom torch.nn import functional as F\nfrom typing import Iterable\nfrom torch.nn import Module\n\n\n# Plots min, max and mean + standard deviation bars of a population over time\ndef lineplot(xs, ys_population, title, path='', xaxis='episode'):\n max_colour, mean_colour, std_colour, transparent = 'rgb(0, 132, 180)', 'rgb(0, 172, 237)', 'rgba(29, 202, 255, 0.2)', 'rgba(0, 0, 0, 0)'\n\n if isinstance(ys_population[0], list) or isinstance(ys_population[0], tuple):\n ys = np.asarray(ys_population, dtype=np.float32)\n ys_min, ys_max, ys_mean, ys_std, ys_median = ys.min(1), ys.max(1), ys.mean(1), ys.std(1), np.median(ys, 1)\n ys_upper, ys_lower = ys_mean + ys_std, ys_mean - ys_std\n\n trace_max = Scatter(x=xs, y=ys_max, line=Line(color=max_colour, dash='dash'), name='Max')\n trace_upper = Scatter(x=xs, y=ys_upper, line=Line(color=transparent), name='+1 Std. Dev.', showlegend=False)\n trace_mean = Scatter(x=xs, y=ys_mean, fill='tonexty', fillcolor=std_colour, line=Line(color=mean_colour), name='Mean')\n trace_lower = Scatter(x=xs, y=ys_lower, fill='tonexty', fillcolor=std_colour, line=Line(color=transparent), name='-1 Std. Dev.', showlegend=False)\n trace_min = Scatter(x=xs, y=ys_min, line=Line(color=max_colour, dash='dash'), name='Min')\n trace_median = Scatter(x=xs, y=ys_median, line=Line(color=max_colour), name='Median')\n data = [trace_upper, trace_mean, trace_lower, trace_min, trace_max, trace_median]\n else:\n data = [Scatter(x=xs, y=ys_population, line=Line(color=mean_colour))]\n plotly.offline.plot({\n 'data': data,\n 'layout': dict(title=title, xaxis={'title': xaxis}, yaxis={'title': title})\n }, filename=os.path.join(path, title + '.html'), auto_open=False)\n\n\ndef write_video(frames, title, path=''):\n frames = np.multiply(np.stack(frames, axis=0).transpose(0, 2, 3, 1), 255).clip(0, 255).astype(np.uint8)[:, :, :, ::-1] # VideoWrite expects H x W x C in BGR\n _, H, W, _ = frames.shape\n writer = cv2.VideoWriter(os.path.join(path, '%s.mp4' % title), cv2.VideoWriter_fourcc(*'mp4v'), 30., (W, H), True)\n for frame in frames:\n writer.write(frame)\n writer.release()\n\ndef imagine_ahead(prev_state, prev_belief, policy, transition_model, planning_horizon=12):\n '''\n imagine_ahead is the function to draw the imaginary tracjectory using the dynamics model, actor, critic.\n Input: current state (posterior), current belief (hidden), policy, transition_model # torch.Size([50, 30]) torch.Size([50, 200]) \n Output: generated trajectory of features includes beliefs, prior_states, prior_means, prior_std_devs\n torch.Size([49, 50, 200]) torch.Size([49, 50, 30]) torch.Size([49, 50, 30]) torch.Size([49, 50, 30])\n '''\n flatten = lambda x: x.view([-1]+list(x.size()[2:]))\n prev_belief = flatten(prev_belief)\n prev_state = flatten(prev_state)\n \n # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)\n T = planning_horizon\n beliefs, prior_states, prior_means, prior_std_devs = [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T, [torch.empty(0)] * T\n beliefs[0], prior_states[0] = prev_belief, prev_state\n\n # Loop over time sequence\n for t in range(T - 1):\n _state = prior_states[t]\n actions = policy.get_action(beliefs[t].detach(),_state.detach())\n # Compute belief (deterministic hidden state)\n hidden = 
transition_model.act_fn(transition_model.fc_embed_state_action(torch.cat([_state, actions], dim=1)))\n beliefs[t + 1] = transition_model.rnn(hidden, beliefs[t])\n # Compute state prior by applying transition dynamics\n hidden = transition_model.act_fn(transition_model.fc_embed_belief_prior(beliefs[t + 1]))\n prior_means[t + 1], _prior_std_dev = torch.chunk(transition_model.fc_state_prior(hidden), 2, dim=1)\n prior_std_devs[t + 1] = F.softplus(_prior_std_dev) + transition_model.min_std_dev\n prior_states[t + 1] = prior_means[t + 1] + prior_std_devs[t + 1] * torch.randn_like(prior_means[t + 1]) \n # Return new hidden states\n # imagined_traj = [beliefs, prior_states, prior_means, prior_std_devs]\n imagined_traj = [torch.stack(beliefs[1:], dim=0), torch.stack(prior_states[1:], dim=0), torch.stack(prior_means[1:], dim=0), torch.stack(prior_std_devs[1:], dim=0)]\n return imagined_traj\n\ndef lambda_return(imged_reward, value_pred, bootstrap, discount=0.99, lambda_=0.95):\n # Setting lambda=1 gives a discounted Monte Carlo return.\n # Setting lambda=0 gives a fixed 1-step return.\n next_values = torch.cat([value_pred[1:], bootstrap[None]], 0)\n discount_tensor = discount * torch.ones_like(imged_reward) #pcont\n inputs = imged_reward + discount_tensor * next_values * (1 - lambda_)\n last = bootstrap\n indices = reversed(range(len(inputs)))\n outputs = []\n for index in indices:\n inp, disc = inputs[index], discount_tensor[index]\n last = inp + disc*lambda_*last\n outputs.append(last)\n outputs = list(reversed(outputs))\n outputs = torch.stack(outputs, 0)\n returns = outputs\n return returns\n\nclass ActivateParameters:\n def __init__(self, modules: Iterable[Module]):\n \"\"\"\n Context manager to locally Activate the gradients.\n example:\n ```\n with ActivateParameters([module]):\n output_tensor = module(input_tensor)\n ```\n :param modules: iterable of modules. used to call .parameters() to freeze gradients.\n \"\"\"\n self.modules = modules\n self.param_states = [p.requires_grad for p in get_parameters(self.modules)]\n\n def __enter__(self):\n for param in get_parameters(self.modules):\n # print(param.requires_grad)\n param.requires_grad = True\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n for i, param in enumerate(get_parameters(self.modules)):\n param.requires_grad = self.param_states[i]\n \n# \"get_parameters\" and \"FreezeParameters\" are from the following repo\n# https://github.com/juliusfrost/dreamer-pytorch\ndef get_parameters(modules: Iterable[Module]):\n \"\"\"\n Given a list of torch modules, returns a list of their parameters.\n :param modules: iterable of modules\n :returns: a list of parameters\n \"\"\"\n model_parameters = []\n for module in modules:\n model_parameters += list(module.parameters())\n return model_parameters\n\nclass FreezeParameters:\n def __init__(self, modules: Iterable[Module]):\n \"\"\"\n Context manager to locally freeze gradients.\n In some cases with can speed up computation because gradients aren't calculated for these listed modules.\n example:\n ```\n with FreezeParameters([module]):\n output_tensor = module(input_tensor)\n ```\n :param modules: iterable of modules. 
used to call .parameters() to freeze gradients.\n \"\"\"\n self.modules = modules\n self.param_states = [p.requires_grad for p in get_parameters(self.modules)]\n\n def __enter__(self):\n for param in get_parameters(self.modules):\n param.requires_grad = False\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n for i, param in enumerate(get_parameters(self.modules)):\n param.requires_grad = self.param_states[i]\n"
] | [
[
"torch.randn_like",
"torch.empty",
"torch.cat",
"numpy.asarray",
"numpy.median",
"numpy.stack",
"torch.nn.functional.softplus",
"torch.stack",
"torch.ones_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ArenBabikian/Scenic | [
"5687d9a70c0e6588ee0cda81b4a4a6731bcd2d91"
] | [
"src/scenic/formats/opendrive/xodr_parser.py"
] | [
"\"\"\"Parser for OpenDRIVE (.xodr) files.\"\"\"\n\nimport math\nimport itertools\nimport warnings\nimport xml.etree.ElementTree as ET\nimport numpy as np\nfrom scipy.integrate import quad\nfrom scipy.integrate import solve_ivp\nfrom pynverse import inversefunc\nfrom shapely.geometry import Polygon, MultiPolygon, GeometryCollection, Point, MultiPoint\nfrom shapely.ops import unary_union, snap\nimport abc\nfrom collections import defaultdict\n\nfrom scenic.core.regions import PolygonalRegion, PolylineRegion\nfrom scenic.core.geometry import (polygonUnion, cleanPolygon, cleanChain, plotPolygon,\n removeHoles, averageVectors)\nfrom scenic.core.vectors import Vector\nfrom scenic.domains.driving import roads as roadDomain\n\nclass OpenDriveWarning(UserWarning):\n pass\n\ndef warn(message):\n warnings.warn(message, OpenDriveWarning, stacklevel=2)\n\ndef buffer_union(polys, tolerance=0.01):\n return polygonUnion(polys, buf=tolerance, tolerance=tolerance)\n\nclass Poly3:\n '''Cubic polynomial.'''\n def __init__(self, a, b, c, d):\n self.a = a\n self.b = b\n self.c = c\n self.d = d\n\n def eval_at(self, x):\n return self.a + self.b * x + self.c * x ** 2 + self.d * x ** 3\n\n def grad_at(self, x):\n return self.b + 2 * self.c * x + 3 * self.d * x ** 2\n\n\nclass Curve:\n ''' Geometric elements which compose road reference lines.\n See the OpenDRIVE Format Specification for coordinate system details.'''\n def __init__(self, x0, y0, hdg, length):\n self.x0 = x0\n self.y0 = y0\n self.hdg = hdg # In radians counterclockwise, 0 at positive x-axis.\n self.cos_hdg, self.sin_hdg = math.cos(hdg), math.sin(hdg)\n self.length = length\n\n def to_points(self, num, extra_points=[]):\n '''Sample NUM evenly-spaced points from curve.\n\n Points are tuples of (x, y, s) with (x, y) absolute coordinates\n and s the arc length along the curve. Additional points at s values in\n extra_points are included if they are contained in the curve (unless\n they are extremely close to one of the equally-spaced points).\n '''\n s_vals = []\n extras = itertools.chain(extra_points, itertools.repeat(float('inf')))\n next_extra = next(extras)\n last_s = 0\n for s in np.linspace(0, self.length, num=num):\n while next_extra <= s:\n if last_s + 1e-6 < next_extra < s - 1e-6:\n s_vals.append(next_extra)\n next_extra = next(extras)\n s_vals.append(s)\n last_s = s\n return [self.point_at(s) for s in s_vals]\n\n @abc.abstractmethod\n def point_at(self, s):\n '''Get an (x, y, s) point along the curve at the given s coordinate.'''\n return\n\n def rel_to_abs(self, point):\n '''Convert from relative coordinates of curve to absolute coordinates.\n I.e. 
rotate counterclockwise by self.hdg and translate by (x0, x1).'''\n x, y, s = point\n return (self.x0 + self.cos_hdg * x - self.sin_hdg * y,\n self.y0 + self.sin_hdg * x + self.cos_hdg * y,\n s)\n\n\nclass Cubic(Curve):\n '''A curve defined by the cubic polynomial a + bu + cu^2 + du^3.\n The curve starts at (X0, Y0) in direction HDG, with length LENGTH.'''\n def __init__(self, x0, y0, hdg, length, a, b, c, d):\n super().__init__(x0, y0, hdg, length)\n self.poly = Poly3(a, b, c, d)\n\n def arclength(self, u):\n d_arc = lambda x: np.sqrt(1 + self.poly.grad_at(x) ** 2)\n return quad(d_arc, 0, u)[0]\n\n def point_at(self, s):\n u = float(inversefunc(self.arclength, s))\n pt = (s, self.poly.eval_at(u), s)\n return self.rel_to_abs(pt)\n\n\nclass ParamCubic(Curve):\n ''' A curve defined by the parametric equations\n u = a_u + b_up + c_up^2 + d_up^3,\n v = a_v + b_vp + c_vp^2 + d_up^3,\n with p in [0, p_range].\n The curve starts at (X0, Y0) in direction HDG, with length LENGTH.'''\n def __init__(self, x0, y0, hdg, length,\n au, bu, cu, du, av, bv, cv, dv, p_range=1):\n super().__init__(x0, y0, hdg, length)\n self.u_poly = Poly3(au, bu, cu, du)\n self.v_poly = Poly3(av, bv, cv, dv)\n self.p_range = p_range if p_range else 1\n\n def arclength(self, p):\n d_arc = lambda x: math.hypot(self.u_poly.grad_at(x),\n self.v_poly.grad_at(x))\n return quad(d_arc, 0, p)[0]\n\n def point_at(self, s):\n p = float(inversefunc(self.arclength, s))\n pt = (self.u_poly.eval_at(p), self.v_poly.eval_at(p), s)\n return self.rel_to_abs(pt)\n\n\nclass Clothoid(Curve):\n '''An Euler spiral with curvature varying linearly between CURV0 and CURV1.\n The spiral starts at (X0, Y0) in direction HDG, with length LENGTH.'''\n def __init__(self, x0, y0, hdg, length, curv0, curv1):\n super().__init__(x0, y0, hdg, length)\n # Initial and final curvature.\n self.curv0 = curv0\n self.curv1 = curv1\n self.curve_rate = (curv1 - curv0) / length if length != 0 else 0\n # ^ changed for Zalazone\n self.a = abs(curv0)\n self.r = 1 / self.a if curv0 != 0 else 1 # value not used if curv0 == 0\n self.ode_init = np.array([x0, y0, hdg])\n\n def point_at(self, s):\n # Generate a origin-centered clothoid with zero curvature at origin,\n # then translate/rotate the relevant segment.\n # Arcs are just a degenerate clothoid:\n if self.curv0 == self.curv1:\n if self.curv0 == 0:\n pt = (s, 0, s)\n else:\n r = self.r\n th = s * self.a\n if self.curv0 > 0:\n pt = (r * math.sin(th), r - r * math.cos(th), s)\n else:\n pt = (r * math.sin(th), -r + r * math.cos(th), s)\n return self.rel_to_abs(pt)\n else:\n def clothoid_ode(s, state):\n x, y, theta = state\n return np.array([math.cos(theta), math.sin(theta),\n self.curv0 + (self.curve_rate * s)])\n sol = solve_ivp(clothoid_ode, (0, s), self.ode_init)\n x, y, hdg = sol.y[:,-1]\n return (x, y, s)\n\nclass Line(Curve):\n '''A line segment between (x0, y0) and (x1, y1).'''\n def __init__(self, x0, y0, hdg, length):\n super().__init__(x0, y0, hdg, length)\n # Endpoints of line.\n self.x1 = x0 + length * math.cos(hdg)\n self.y1 = y0 + length * math.sin(hdg)\n\n def point_at(self, s):\n return self.rel_to_abs((s, 0, s))\n\n\nclass Lane():\n def __init__(self, id_, type_, pred=None, succ=None):\n self.id_ = id_\n self.width = [] # List of tuples (Poly3, int) for width and s-offset.\n self.type_ = type_\n self.pred = pred\n self.succ = succ\n self.left_bounds = [] # to be filled in later\n self.right_bounds = []\n self.centerline = []\n self.parent_lane_poly = None\n\n def width_at(self, s):\n # S here is relative to 
start of LaneSection this lane is in.\n ind = 0\n while ind + 1 < len(self.width) and self.width[ind + 1][1] <= s:\n ind += 1\n assert self.width[ind][1] <= s, 'No matching width entry found.'\n w_poly, s_off = self.width[ind]\n w = w_poly.eval_at(s - s_off)\n if w < -1e-6: # allow for numerical error\n return 0\n # ^ added for Zalazone, road 8863\n raise RuntimeError('OpenDRIVE lane has negative width')\n return max(w, 0)\n\n\nclass LaneSection():\n def __init__(self, s0, left_lanes={}, right_lanes={}):\n self.s0 = s0\n self.left_lanes = left_lanes\n self.right_lanes = right_lanes\n self.left_lane_ids = sorted(self.left_lanes.keys())\n self.right_lane_ids = sorted(self.right_lanes.keys(), reverse=True)\n self.lanes = dict(list(left_lanes.items()) + list(right_lanes.items()))\n\n def get_lane(self, id_):\n if id_ in self.left_lanes:\n return self.left_lanes[id_]\n elif id_ in self.right_lanes:\n return self.right_lanes[id_]\n elif id_ == 0:\n return Lane(0, 'none')\n else:\n raise RuntimeError('Lane with id', id_, 'not found')\n\n def get_offsets(self, s):\n '''Returns dict of lane id and offset from\n reference line of lane boundary at coordinate S along line.\n By convention, left lanes have positive width offset and right lanes\n have negative.'''\n assert s >= self.s0, 'Input s is before lane start position.'\n offsets = {}\n for lane_id in self.left_lane_ids:\n if lane_id - 1 in self.left_lane_ids:\n offsets[lane_id] = offsets[lane_id - 1] \\\n + self.left_lanes[lane_id].width_at(s - self.s0)\n else:\n offsets[lane_id] = self.left_lanes[lane_id].width_at(s - self.s0)\n for lane_id in self.right_lane_ids:\n if lane_id + 1 in self.right_lane_ids:\n offsets[lane_id] = offsets[lane_id + 1] \\\n - self.right_lanes[lane_id].width_at(s - self.s0)\n else:\n offsets[lane_id] = -self.right_lanes[lane_id].width_at(s - self.s0)\n return offsets\n\n\nclass RoadLink:\n '''Indicates Roads a and b, with ids id_a and id_b respectively, are connected.'''\n def __init__(self, id_a, id_b, contact_a, contact_b):\n self.id_a = id_a\n self.id_b = id_b\n # contact_a and contact_b should be of value \"start\" or \"end\"\n # and indicate which end of each road is connected to the other.\n self.contact_a = contact_a\n self.contact_b = contact_b\n\n\nclass Junction:\n class Connection:\n def __init__(self, incoming_id, connecting_id, connecting_contact, lane_links):\n self.incoming_id = incoming_id\n # id of connecting road\n self.connecting_id = connecting_id\n # contact point ('start' or 'end') on connecting road\n self.connecting_contact = connecting_contact\n # dict mapping incoming to connecting lane ids (empty = identity mapping)\n self.lane_links = lane_links\n\n def __init__(self, id_, name):\n self.id_ = id_\n self.name = name\n self.connections = []\n # Ids of roads that are paths within junction:\n self.paths = []\n self.poly = None\n\n def add_connection(self, incoming_id, connecting_id, connecting_contact, lane_links):\n conn = Junction.Connection(incoming_id, connecting_id, connecting_contact, lane_links)\n self.connections.append(conn)\n\nclass Road:\n def __init__(self, name, id_, length, junction, drive_on_right=True):\n self.name = name\n self.id_ = id_\n self.length = length\n self.junction = junction if junction != '-1' else None\n self.predecessor = None\n self.successor = None\n self.signals = [] # List of Signal objects.\n self.lane_secs = [] # List of LaneSection objects.\n self.ref_line = [] # List of Curve objects defining reference line.\n # NOTE: sec_points, sec_polys, sec_lane_polys 
should be ordered according to lane_secs.\n self.sec_points = [] # List of lists of points, one for each LaneSection.\n self.sec_polys = [] # List of Polygons, one for each LaneSections.\n self.sec_lane_polys = [] # List of dict of lane id to Polygon for each LaneSection.\n self.lane_polys = [] # List of lane polygons. Not a dict b/c lane id is not unique along road.\n # Each polygon in lane_polys is the union of connected lane section polygons.\n # lane_polys is currently not used.\n # Reference line offset:\n self.offset = [] # List of tuple (Poly3, s-coordinate).\n self.drive_on_right = drive_on_right\n # Used to fill in gaps between roads:\n self.start_bounds_left = {}\n self.start_bounds_right = {}\n self.end_bounds_left = {}\n self.end_bounds_right = {}\n\n self.remappedStartLanes = None # hack for handling spurious initial lane sections\n\n def get_lane(self, id_, s):\n '''Returns Lane object with id_ at coordinate S along line.'''\n ind = 0\n while ind + 1 < len(self.lane_secs) and self.lane_secs[ind + 1].s0 <= s:\n ind += 1\n assert self.lane_secs[ind].s0 <= s, 'No matching lane section found.'\n return self.lane_secs[ind].get_lane(id_)\n\n def get_ref_line_offset(self, s):\n if not self.offset:\n return 0\n ind = 0\n while ind + 1 < len(self.offset) and self.offset[ind + 1][1] <= s:\n ind += 1\n poly, s0 = self.offset[ind]\n assert s >= s0\n return poly.eval_at(s - s0)\n\n def get_ref_points(self, num):\n '''Returns list of list of points for each piece of ref_line.\n List of list structure necessary because each piece needs to be\n constructed into Polygon separately then unioned afterwards to avoid\n self-intersecting lines.'''\n ref_points = []\n transition_points = [sec.s0 for sec in self.lane_secs[1:]]\n last_s = 0\n for piece in self.ref_line:\n piece_points = piece.to_points(num, extra_points=transition_points)\n assert piece_points, 'Failed to get piece points'\n if ref_points:\n last_s = ref_points[-1][-1][2]\n piece_points = [(p[0], p[1], p[2] + last_s)\n for p in piece_points]\n ref_points.append(piece_points)\n transition_points = [s - last_s for s in transition_points if s > last_s]\n return ref_points\n\n def get_lane_offsets(self, s):\n '''Returns dict of lane id and offset from\n reference line of lane boundary at coordinate S along line.'''\n s = float(s)\n ind = 0\n while ind + 1 < len(self.lane_secs) and self.lane_secs[ind + 1].s0 <= s:\n ind += 1\n assert self.lane_secs[ind].s0 <= s, 'No matching lane section found.'\n offsets = self.lane_secs[ind].get_offsets(s)\n offsets[0] = 0 # Center lane has width 0 by convention.\n for id_ in offsets.keys():\n offsets[id_] += self.get_ref_line_offset(s)\n return offsets\n\n def heading_at(self, point):\n # Convert point to shapely Point.\n point = Point(point.x, point.y)\n for i in range(len(self.lane_secs)):\n ref_points = self.sec_points[i]\n poly = self.sec_polys[i]\n if point.within(poly.buffer(1)):\n lane_id = None\n for id_ in self.sec_lane_polys[i].keys():\n if point.within(self.sec_lane_polys[i][id_].buffer(1)):\n lane_id = id_\n break\n assert lane_id is not None, 'Point not found in sec_lane_polys.'\n min_dist = float('inf')\n for i in range(len(ref_points)):\n cur_point = Point(ref_points[i][0], ref_points[i][1])\n if point.distance(cur_point) < min_dist:\n closest_idx = i\n if closest_idx >= len(ref_points) - 1:\n closest_idx = len(ref_points) - 2\n dy = ref_points[closest_idx + 1][1] - ref_points[closest_idx][1]\n dx = ref_points[closest_idx + 1][0] - ref_points[closest_idx][0]\n heading = math.atan2(dy, 
dx)\n # Right lanes have negative lane_id.\n # Flip heading if drive_on_right XOR right lane.\n if self.drive_on_right != (lane_id < 0):\n heading += math.pi\n # Heading 0 is defined differently between OpenDrive and Scenic(?)\n heading -= math.pi / 2\n return (heading + math.pi) % (2 * math.pi) - math.pi\n\n raise RuntimeError('Point not found in piece_polys')\n\n def calc_geometry_for_type(self, lane_types, num, tolerance, calc_gap=False):\n '''Given a list of lane types, returns a tuple of:\n - List of lists of points along the reference line, with same indexing as self.lane_secs\n - List of region polygons, with same indexing as self.lane_secs\n - List of dictionary of lane id to polygon, with same indexing as self.lane_secs\n - List of polygons for each lane (not necessarily by id, but respecting lane successor/predecessor)\n - Polygon for entire region.\n If calc_gap=True, fills in gaps between connected roads. This is fairly expensive.'''\n road_polygons = []\n ref_points = self.get_ref_points(num)\n self.ref_line_points = list(itertools.chain.from_iterable(ref_points))\n cur_lane_polys = {}\n sec_points = []\n sec_polys = []\n sec_lane_polys = []\n lane_polys = []\n last_lefts = None\n last_rights = None\n cur_p = None\n \n for i in range(len(self.lane_secs)):\n cur_sec = self.lane_secs[i]\n cur_sec_points = []\n if i < len(self.lane_secs) - 1:\n next_sec = self.lane_secs[i + 1]\n s_stop = next_sec.s0\n else:\n s_stop = float('inf')\n left_bounds = defaultdict(list)\n right_bounds = defaultdict(list)\n cur_sec_lane_polys = defaultdict(list)\n cur_sec_polys = []\n end_of_sec = False\n\n while ref_points and not end_of_sec:\n if not ref_points[0]:\n ref_points.pop(0)\n if not ref_points or (cur_p and cur_p[2] >= s_stop):\n # Case 1: We have processed the entire reference line.\n # Case 2: The s-coordinate has exceeded s_stop, so we should move\n # onto the next LaneSection.\n # Either way, we collect all the bound points so far into polygons.\n end_of_sec = True\n cur_last_lefts = {}\n cur_last_rights = {}\n for id_ in left_bounds:\n # Polygon for piece of lane:\n left = left_bounds[id_]\n right = right_bounds[id_][::-1]\n bounds = left + right\n\n if len(bounds) < 3:\n continue\n poly = cleanPolygon(Polygon(bounds), tolerance)\n if not poly.is_empty:\n if poly.geom_type == 'MultiPolygon':\n poly = MultiPolygon([p for p in list(poly)\n if not p.is_empty and p.exterior])\n cur_sec_polys.extend(list(poly))\n else:\n cur_sec_polys.append(poly)\n cur_sec_lane_polys[id_].append(poly)\n cur_last_lefts[id_] = left_bounds[id_][-1]\n cur_last_rights[id_] = right_bounds[id_][-1]\n if i == 0 or not self.start_bounds_left:\n self.start_bounds_left[id_] = left_bounds[id_][0]\n self.start_bounds_right[id_] = right_bounds[id_][0]\n\n left_bounds = defaultdict(list)\n right_bounds = defaultdict(list)\n if cur_last_lefts and cur_last_rights:\n last_lefts = cur_last_lefts\n last_rights = cur_last_rights\n else:\n cur_p = ref_points[0][0]\n cur_sec_points.append(cur_p)\n s = min(max(cur_p[2], cur_sec.s0), s_stop - 1e-6)\n offsets = cur_sec.get_offsets(s)\n offsets[0] = 0\n for id_ in offsets:\n offsets[id_] += self.get_ref_line_offset(s)\n if len(ref_points[0]) > 1:\n next_p = ref_points[0][1]\n tan_vec = (next_p[0] - cur_p[0],\n next_p[1] - cur_p[1])\n else:\n if len(cur_sec_points) >= 2:\n prev_p = cur_sec_points[-2]\n else:\n assert len(sec_points) > 0\n if sec_points[-1]:\n assert sec_points[-1][-1] == cur_p\n prev_p = sec_points[-1][-2]\n else:\n prev_p = sec_points[-2][-2]\n\n tan_vec = (cur_p[0] 
- prev_p[0],\n cur_p[1] - prev_p[1])\n tan_norm = math.hypot(tan_vec[0], tan_vec[1])\n assert tan_norm > 1e-10\n normal_vec = (-tan_vec[1] / tan_norm, tan_vec[0] / tan_norm)\n if cur_p[2] < s_stop:\n # if at end of section, keep current point to be included in\n # the next section as well; otherwise remove it\n ref_points[0].pop(0)\n elif len(ref_points[0]) == 1 and len(ref_points) > 1:\n # also get rid of point if this is the last point of the current geometry and\n # and there is another geometry following\n ref_points[0].pop(0)\n for id_ in offsets:\n lane = cur_sec.get_lane(id_)\n if lane.type_ in lane_types:\n if id_ > 0:\n prev_id = id_ - 1\n else:\n prev_id = id_ + 1\n left_bound = [cur_p[0] + normal_vec[0] * offsets[id_],\n cur_p[1] + normal_vec[1] * offsets[id_]]\n right_bound = [cur_p[0] + normal_vec[0] * offsets[prev_id],\n cur_p[1] + normal_vec[1] * offsets[prev_id]]\n if id_ < 0:\n left_bound, right_bound = right_bound, left_bound\n halfway = (offsets[id_] + offsets[prev_id]) / 2\n centerline = [cur_p[0] + normal_vec[0] * halfway,\n cur_p[1] + normal_vec[1] * halfway]\n left_bounds[id_].append(left_bound)\n right_bounds[id_].append(right_bound)\n lane.left_bounds.append(left_bound)\n lane.right_bounds.append(right_bound)\n lane.centerline.append(centerline)\n assert len(cur_sec_points) >= 2, i\n sec_points.append(cur_sec_points)\n sec_polys.append(buffer_union(cur_sec_polys, tolerance=tolerance))\n for id_ in cur_sec_lane_polys:\n poly = buffer_union(cur_sec_lane_polys[id_], tolerance=tolerance)\n cur_sec_lane_polys[id_] = poly\n cur_sec.get_lane(id_).poly = poly\n sec_lane_polys.append(dict(cur_sec_lane_polys))\n next_lane_polys = {}\n for id_ in cur_sec_lane_polys:\n pred_id = cur_sec.get_lane(id_).pred\n if pred_id and pred_id in cur_lane_polys:\n next_lane_polys[id_] = cur_lane_polys.pop(pred_id) \\\n + [cur_sec_lane_polys[id_]]\n else:\n next_lane_polys[id_] = [cur_sec_lane_polys[id_]]\n for id_ in cur_lane_polys:\n poly = buffer_union(cur_lane_polys[id_], tolerance=tolerance)\n lane_polys.append(poly)\n self.lane_secs[i-1].get_lane(id_).parent_lane_poly = poly\n cur_lane_polys = next_lane_polys\n for id_ in cur_lane_polys:\n poly = buffer_union(cur_lane_polys[id_], tolerance=tolerance)\n lane_polys.append(poly)\n cur_sec.get_lane(id_).parent_lane_poly = poly\n union_poly = buffer_union(sec_polys, tolerance=tolerance)\n if last_lefts and last_rights:\n self.end_bounds_left.update(last_lefts)\n self.end_bounds_right.update(last_rights)\n return (sec_points, sec_polys, sec_lane_polys, lane_polys, union_poly)\n\n def calculate_geometry(self, num, tolerance, calc_gap, drivable_lane_types,\n sidewalk_lane_types, shoulder_lane_types):\n # Note: this also calculates self.start_bounds_left, self.start_bounds_right,\n # self.end_bounds_left, self.end_bounds_right\n (self.sec_points, self.sec_polys, self.sec_lane_polys,\n self.lane_polys, self.drivable_region) = self.calc_geometry_for_type(\n drivable_lane_types, num, tolerance, calc_gap=calc_gap)\n\n for i, sec in enumerate(self.lane_secs):\n sec.drivable_lanes = {}\n sec.sidewalk_lanes = {}\n sec.shoulder_lanes = {}\n for id_, lane in sec.lanes.items():\n ty = lane.type_\n if ty in drivable_lane_types:\n sec.drivable_lanes[id_] = lane\n elif ty in sidewalk_lane_types:\n sec.sidewalk_lanes[id_] = lane\n elif ty in shoulder_lane_types:\n sec.shoulder_lanes[id_] = lane\n if not sec.drivable_lanes:\n continue\n\n rightmost = None\n for id_ in itertools.chain(reversed(sec.right_lane_ids), sec.left_lane_ids):\n if id_ in 
sec.drivable_lanes:\n rightmost = sec.lanes[id_]\n break\n assert rightmost is not None, i\n leftmost = None\n for id_ in itertools.chain(reversed(sec.left_lane_ids), sec.right_lane_ids):\n if id_ in sec.drivable_lanes:\n leftmost = sec.lanes[id_]\n break\n assert leftmost is not None, i\n sec.left_edge = leftmost.left_bounds\n assert len(sec.left_edge) >= 2\n sec.right_edge = rightmost.right_bounds\n assert len(sec.right_edge) >= 2\n\n _, _, _, _, self.sidewalk_region = self.calc_geometry_for_type(\n sidewalk_lane_types, num, tolerance, calc_gap=calc_gap)\n\n _, _, _, _, self.shoulder_region = self.calc_geometry_for_type(\n shoulder_lane_types, num, tolerance, calc_gap=calc_gap)\n\n def toScenicRoad(self, tolerance):\n assert self.sec_points\n allElements = []\n # Create lane and road sections\n roadSections = []\n last_section = None\n sidewalkSections = defaultdict(list)\n shoulderSections = defaultdict(list)\n for sec, pts, sec_poly, lane_polys in zip(self.lane_secs, self.sec_points,\n self.sec_polys, self.sec_lane_polys):\n assert sec.drivable_lanes\n laneSections = {}\n for id_, lane in sec.drivable_lanes.items():\n succ = None # will set this later\n if last_section and lane.pred:\n if lane.pred in last_section.lanesByOpenDriveID:\n pred = last_section.lanesByOpenDriveID[lane.pred]\n else:\n warn(f'road {self.id_} section {len(roadSections)} '\n f'lane {id_} has a non-drivable predecessor')\n pred = None\n else:\n pred = lane.pred # will correct inter-road links later\n left, center, right = lane.left_bounds, lane.centerline, lane.right_bounds\n if id_ > 0: # backward lane\n left, center, right = right[::-1], center[::-1], left[::-1]\n succ, pred = pred, succ\n section = roadDomain.LaneSection(\n id=f'road{self.id_}_sec{len(roadSections)}_lane{id_}',\n polygon=lane_polys[id_],\n centerline=PolylineRegion(cleanChain(center)),\n leftEdge=PolylineRegion(cleanChain(left)),\n rightEdge=PolylineRegion(cleanChain(right)),\n successor=succ,\n predecessor=pred,\n lane=None, # will set these later\n group=None,\n road=None,\n openDriveID=id_,\n isForward=id_ < 0\n )\n section._original_lane = lane\n laneSections[id_] = section\n allElements.append(section)\n section = roadDomain.RoadSection(\n id=f'road{self.id_}_sec{len(roadSections)}',\n polygon=sec_poly,\n centerline=PolylineRegion(cleanChain(pts)),\n leftEdge=PolylineRegion(cleanChain(sec.left_edge)),\n rightEdge=PolylineRegion(cleanChain(sec.right_edge)),\n successor=None,\n predecessor=last_section,\n road=None, # will set later\n lanesByOpenDriveID=laneSections\n )\n roadSections.append(section)\n allElements.append(section)\n last_section = section\n\n for id_, lane in sec.sidewalk_lanes.items():\n sidewalkSections[id_].append(lane)\n for id_, lane in sec.shoulder_lanes.items():\n shoulderSections[id_].append(lane)\n\n # Build sidewalks and shoulders\n # TODO improve this!\n forwardSidewalks, backwardSidewalks = [], []\n forwardShoulders, backwardShoulders = [], []\n for id_ in sidewalkSections:\n (forwardSidewalks if id_ < 0 else backwardSidewalks).append(id_)\n for id_ in shoulderSections:\n (forwardShoulders if id_ < 0 else backwardShoulders).append(id_)\n\n def combineSections(laneIDs, sections, name):\n leftmost, rightmost = max(laneIDs), min(laneIDs)\n if len(laneIDs) != leftmost-rightmost+1:\n warn(f'ignoring {name} in the middle of road {self.id_}')\n leftPoints, rightPoints = [], []\n if leftmost < 0:\n leftmost = rightmost\n while leftmost+1 in laneIDs:\n leftmost = leftmost+1\n leftSecs, rightSecs = sections[leftmost], 
sections[rightmost]\n for leftSec, rightSec in zip(leftSecs, rightSecs):\n leftPoints.extend(leftSec.left_bounds)\n rightPoints.extend(rightSec.right_bounds)\n else:\n rightmost = leftmost\n while rightmost-1 in laneIDs:\n rightmost = rightmost-1\n leftSecs = reversed(sections[leftmost])\n rightSecs = reversed(sections[rightmost])\n for leftSec, rightSec in zip(leftSecs, rightSecs):\n leftPoints.extend(reversed(rightSec.right_bounds))\n rightPoints.extend(reversed(leftSec.left_bounds))\n leftEdge = PolylineRegion(cleanChain(leftPoints))\n rightEdge = PolylineRegion(cleanChain(rightPoints))\n\n # Heuristically create some kind of reasonable centerline\n if len(leftPoints) == len(rightPoints):\n centerPoints = list(averageVectors(l, r) for l, r in zip(leftPoints, rightPoints))\n else:\n num = max(len(leftPoints), len(rightPoints))\n centerPoints = []\n for d in np.linspace(0, 1, num):\n l = leftEdge.lineString.interpolate(d, normalized=True)\n r = rightEdge.lineString.interpolate(d, normalized=True)\n centerPoints.append(averageVectors(l.coords[0], r.coords[0]))\n centerline = PolylineRegion(cleanChain(centerPoints))\n allPolys = (sec.poly\n for id_ in range(rightmost, leftmost+1)\n for sec in sections[id_])\n union = buffer_union(allPolys, tolerance=tolerance)\n id_ = f'road{self.id_}_{name}({leftmost},{rightmost})'\n return id_, union, centerline, leftEdge, rightEdge\n\n def makeSidewalk(laneIDs):\n if not laneIDs:\n return None\n id_, union, centerline, leftEdge, rightEdge = combineSections(\n laneIDs, sidewalkSections, 'sidewalk')\n sidewalk = roadDomain.Sidewalk(\n id=id_,\n polygon=union,\n centerline=centerline,\n leftEdge=leftEdge,\n rightEdge=rightEdge,\n road=None,\n crossings=() # TODO add crosswalks\n )\n allElements.append(sidewalk)\n return sidewalk\n\n forwardSidewalk = makeSidewalk(forwardSidewalks)\n backwardSidewalk = makeSidewalk(backwardSidewalks)\n\n def makeShoulder(laneIDs):\n if not laneIDs:\n return None\n id_, union, centerline, leftEdge, rightEdge = combineSections(\n laneIDs, shoulderSections, 'shoulder')\n shoulder = roadDomain.Shoulder(\n id=id_,\n polygon=union,\n centerline=centerline,\n leftEdge=leftEdge,\n rightEdge=rightEdge,\n road=None,\n )\n allElements.append(shoulder)\n return shoulder\n\n forwardShoulder = makeShoulder(forwardShoulders)\n backwardShoulder = makeShoulder(backwardShoulders)\n\n # Connect sections to their successors\n next_section = None\n for sec, section in reversed(list(zip(self.lane_secs, roadSections))):\n if next_section is None:\n next_section = section\n for id_, lane in sec.drivable_lanes.items():\n newLane = section.lanesByOpenDriveID[id_]\n if newLane.isForward:\n newLane._successor = lane.succ # will correct inter-road links later\n else:\n newLane._predecessor = lane.succ\n continue\n section._successor = next_section\n for id_, lane in sec.drivable_lanes.items():\n newLane = section.lanesByOpenDriveID[id_]\n if newLane.isForward:\n newLane._successor = next_section.lanesByOpenDriveID.get(lane.succ)\n else:\n newLane._predecessor = next_section.lanesByOpenDriveID.get(lane.succ)\n next_section = section\n\n # Connect lane sections to adjacent lane sections\n for section in roadSections:\n lanes = section.lanesByOpenDriveID\n for id_, lane in lanes.items():\n if id_ < -1:\n leftID = id_ + 1\n elif id_ == -1:\n leftID = 1\n elif id_ == 1:\n leftID = -1\n else:\n leftID = id_ - 1\n rightID = id_ - 1 if id_ < 0 else id_ + 1\n lane._laneToLeft = lanes.get(leftID)\n lane._laneToRight = lanes.get(rightID)\n if 
self.drive_on_right:\n lane._fasterLane = lane._laneToLeft\n lane._slowerLane = lane._laneToRight\n else:\n lane._slowerLane = lane._laneToLeft\n lane._fasterLane = lane._laneToRight\n if lane._fasterLane and lane._fasterLane.isForward != lane.isForward:\n lane._fasterLane = None\n if lane._slowerLane and lane._slowerLane.isForward != lane.isForward:\n lane._slowerLane = None\n adj = []\n if lane._laneToLeft:\n adj.append(lane._laneToLeft)\n if lane._laneToRight:\n adj.append(lane._laneToRight)\n lane.adjacentLanes = tuple(adj)\n\n # Gather lane sections into lanes\n nextID = 0\n forwardLanes, backwardLanes = [], []\n for roadSection in roadSections:\n for laneSection in roadSection.lanes:\n laneSection._visited = False\n for roadSection, sec in zip(roadSections, self.lane_secs):\n for laneSection in roadSection.lanes:\n if not laneSection._visited: # start of new lane\n forward = laneSection.isForward\n sections = []\n while True:\n sections.append(laneSection)\n laneSection._visited = True\n assert laneSection.isForward == forward\n if forward:\n nextSection = laneSection._successor\n else:\n nextSection = laneSection._predecessor\n if (not nextSection\n or not isinstance(nextSection, roadDomain.LaneSection)\n or nextSection._visited):\n break\n laneSection = nextSection\n ls = laneSection._original_lane\n assert ls.parent_lane_poly\n\n if not forward:\n sections = tuple(reversed(sections))\n leftPoints, rightPoints, centerPoints = [], [], []\n for section in sections:\n leftPoints.extend(section.leftEdge.points)\n rightPoints.extend(section.rightEdge.points)\n centerPoints.extend(section.centerline.points)\n leftEdge = PolylineRegion(cleanChain(leftPoints))\n rightEdge = PolylineRegion(cleanChain(rightPoints))\n centerline = PolylineRegion(cleanChain(centerPoints))\n lane = roadDomain.Lane(\n id=f'road{self.id_}_lane{nextID}',\n polygon=ls.parent_lane_poly,\n centerline=centerline,\n leftEdge=leftEdge,\n rightEdge=rightEdge,\n group=None,\n road=None,\n sections=tuple(sections)\n )\n nextID += 1\n for section in sections:\n section.lane = lane\n (forwardLanes if forward else backwardLanes).append(lane)\n allElements.append(lane)\n lanes = forwardLanes + backwardLanes\n assert lanes\n\n # Compute lane adjacencies\n for lane in lanes:\n adj = []\n for section in lane.sections:\n adj.extend(sec.lane for sec in section.adjacentLanes)\n lane.adjacentLanes = tuple(adj)\n\n # Create lane groups\n def getEdges(forward):\n if forward:\n sec = roadSections[0]\n startLanes = sec.forwardLanes\n else:\n sec = roadSections[-1]\n startLanes = sec.backwardLanes\n leftPoints = []\n current = startLanes[-1] # get leftmost lane of the first section\n while current and isinstance(current, roadDomain.LaneSection):\n if current._laneToLeft and current._laneToLeft.isForward == forward:\n current = current._laneToLeft\n leftPoints.extend(current.leftEdge.points)\n current = current._successor\n leftEdge = PolylineRegion(cleanChain(leftPoints))\n rightPoints = []\n current = startLanes[0] # get rightmost lane of the first section\n while current and isinstance(current, roadDomain.LaneSection):\n if current._laneToRight and current._laneToRight.isForward == forward:\n current = current._laneToRight\n rightPoints.extend(current.rightEdge.points)\n current = current._successor\n rightEdge = PolylineRegion(cleanChain(rightPoints))\n middleLane = startLanes[len(startLanes)//2].lane # rather arbitrary\n return leftEdge, middleLane.centerline, rightEdge\n\n if forwardLanes:\n leftEdge, centerline, rightEdge = 
getEdges(forward=True)\n forwardGroup = roadDomain.LaneGroup(\n id=f'road{self.id_}_forward',\n polygon=buffer_union((lane.polygon for lane in forwardLanes),\n tolerance=tolerance),\n centerline=centerline,\n leftEdge=leftEdge,\n rightEdge=rightEdge,\n road=None,\n lanes=tuple(forwardLanes),\n curb=(forwardShoulder.rightEdge if forwardShoulder else rightEdge),\n sidewalk=forwardSidewalk,\n bikeLane=None,\n shoulder=forwardShoulder,\n opposite=None,\n )\n allElements.append(forwardGroup)\n else:\n forwardGroup = None\n if backwardLanes:\n leftEdge, centerline, rightEdge = getEdges(forward=False)\n backwardGroup = roadDomain.LaneGroup(\n id=f'road{self.id_}_backward',\n polygon=buffer_union((lane.polygon for lane in backwardLanes),\n tolerance=tolerance),\n centerline=centerline,\n leftEdge=leftEdge,\n rightEdge=rightEdge,\n road=None,\n lanes=tuple(backwardLanes),\n curb=(backwardShoulder.rightEdge if backwardShoulder else rightEdge),\n sidewalk=backwardSidewalk,\n bikeLane=None,\n shoulder=backwardShoulder,\n opposite=forwardGroup,\n )\n allElements.append(backwardGroup)\n if forwardGroup:\n forwardGroup._opposite = backwardGroup\n else:\n backwardGroup = None\n\n # Create signal\n roadSignals = []\n for i, signal_ in enumerate(self.signals):\n signal = roadDomain.Signal(\n uid=f'signal{signal_.id_}_{self.id_}_{i}',\n openDriveID=signal_.id_,\n country=signal_.country,\n type=signal_.type_\n )\n roadSignals.append(signal)\n allElements.append(signal)\n\n # Create road\n assert forwardGroup or backwardGroup\n if forwardGroup:\n rightEdge = forwardGroup.rightEdge\n else:\n rightEdge = backwardGroup.leftEdge\n if backwardGroup:\n leftEdge = backwardGroup.rightEdge\n else:\n leftEdge = forwardGroup.leftEdge\n centerline = PolylineRegion(tuple(pt[:2] for pt in self.ref_line_points))\n road = roadDomain.Road(\n name=self.name,\n uid=f'road{self.id_}', # need prefix to prevent collisions with intersections\n id=self.id_,\n polygon=self.drivable_region,\n centerline=centerline,\n leftEdge=leftEdge,\n rightEdge=rightEdge,\n lanes=lanes,\n forwardLanes=forwardGroup,\n backwardLanes=backwardGroup,\n sections=roadSections,\n signals=tuple(roadSignals),\n crossings=(), # TODO add these!\n )\n allElements.append(road)\n\n # Set up parent references\n if forwardGroup:\n forwardGroup.road = road\n if forwardGroup._sidewalk:\n forwardGroup._sidewalk.road = road\n if forwardGroup._shoulder:\n forwardGroup._shoulder.road = road\n forwardGroup._shoulder.group = forwardGroup\n if backwardGroup:\n backwardGroup.road = road\n if backwardGroup._sidewalk:\n backwardGroup._sidewalk.road = road\n if backwardGroup._shoulder:\n backwardGroup._shoulder.road = road\n backwardGroup._shoulder.group = backwardGroup\n for section in roadSections:\n section.road = road\n for lane in forwardLanes:\n lane.group = forwardGroup\n lane.road = road\n for sec in lane.sections:\n sec.group = forwardGroup\n sec.road = road\n del sec._original_lane\n for lane in backwardLanes:\n lane.group = backwardGroup\n lane.road = road\n for sec in lane.sections:\n sec.group = backwardGroup\n sec.road = road\n del sec._original_lane\n\n return road, allElements\n\nclass Signal:\n '''Traffic lights, stop signs, etc.'''\n def __init__(self, id_, country, type_, subtype, orientation, validity=None):\n self.id_ = id_\n self.country = country\n self.type_ = type_\n self.subtype = subtype\n self.orientation = orientation\n self.validity = validity\n\n def is_valid(self):\n return self.validity is None or self.validity != [0, 0]\n\nclass 
SignalReference:\n def __init__(self, id_, orientation, validity=None):\n self.id_ = id_\n self.validity = validity\n self.orientation = orientation\n\n def is_valid(self):\n return self.validity is None or self.validity != [0, 0]\n\nclass RoadMap:\n defaultTolerance = 0.05\n\n def __init__(self, tolerance=None, fill_intersections=True,\n drivable_lane_types=('driving', 'entry', 'exit', 'offRamp', 'onRamp',\n 'connectingRamp'),\n sidewalk_lane_types=('sidewalk',),\n shoulder_lane_types=('shoulder', 'parking', 'stop', 'border'),\n elide_short_roads=False):\n self.tolerance = self.defaultTolerance if tolerance is None else tolerance\n self.roads = {}\n self.road_links = []\n self.junctions = {}\n self.sec_lane_polys = []\n self.lane_polys = []\n self.intersection_region = None\n self.fill_intersections = fill_intersections\n self.drivable_lane_types = drivable_lane_types\n self.sidewalk_lane_types = sidewalk_lane_types\n self.shoulder_lane_types = shoulder_lane_types\n self.elide_short_roads = elide_short_roads\n\n def calculate_geometry(self, num, calc_gap=False, calc_intersect=True):\n # If calc_gap=True, fills in gaps between connected roads.\n # If calc_intersect=True, calculates intersection regions.\n # These are fairly expensive.\n for road in self.roads.values():\n road.calculate_geometry(num, calc_gap=calc_gap, tolerance=self.tolerance,\n drivable_lane_types=self.drivable_lane_types,\n sidewalk_lane_types=self.sidewalk_lane_types,\n shoulder_lane_types=self.shoulder_lane_types)\n self.sec_lane_polys.extend(road.sec_lane_polys)\n self.lane_polys.extend(road.lane_polys)\n\n if calc_gap:\n drivable_polys = []\n sidewalk_polys = []\n shoulder_polys = []\n for road in self.roads.values():\n drivable_poly = road.drivable_region\n sidewalk_poly = road.sidewalk_region\n shoulder_poly = road.shoulder_region\n if not (drivable_poly is None or drivable_poly.is_empty):\n drivable_polys.append(drivable_poly)\n if not (sidewalk_poly is None or sidewalk_poly.is_empty):\n sidewalk_polys.append(sidewalk_poly)\n if not (shoulder_poly is None or shoulder_poly.is_empty):\n shoulder_polys.append(shoulder_poly)\n\n for link in self.road_links:\n road_a = self.roads[link.id_a]\n road_b = self.roads[link.id_b]\n assert link.contact_a in ['start', 'end'], 'Invalid link record.'\n assert link.contact_b in ['start', 'end'], 'Invalid link record.'\n if link.contact_a == 'start':\n a_sec = road_a.lane_secs[0]\n a_bounds_left = road_a.start_bounds_left\n a_bounds_right = road_a.start_bounds_right\n else:\n a_sec = road_a.lane_secs[-1]\n a_bounds_left = road_a.end_bounds_left\n a_bounds_right = road_a.end_bounds_right\n if link.contact_b == 'start':\n b_bounds_left = road_b.start_bounds_left\n b_bounds_right = road_b.start_bounds_right\n else:\n b_bounds_left = road_b.end_bounds_left\n b_bounds_right = road_b.end_bounds_right\n\n for id_, lane in a_sec.lanes.items():\n if link.contact_a == 'start':\n other_id = lane.pred\n else:\n other_id = lane.succ\n if other_id not in b_bounds_left or other_id not in b_bounds_right:\n continue\n if id_ not in a_bounds_left or id_ not in a_bounds_right:\n continue\n\n gap_poly = MultiPoint([\n a_bounds_left[id_], a_bounds_right[id_],\n b_bounds_left[other_id], b_bounds_right[other_id]\n ]).convex_hull\n if not gap_poly.is_valid:\n continue\n if gap_poly.geom_type == 'Polygon' and not gap_poly.is_empty:\n if lane.type_ in self.drivable_lane_types:\n drivable_polys.append(gap_poly)\n elif lane.type_ in self.sidewalk_lane_types:\n sidewalk_polys.append(gap_poly)\n elif lane.type_ 
in self.shoulder_lane_types:\n shoulder_polys.append(gap_poly)\n else:\n drivable_polys = [road.drivable_region for road in self.roads.values()]\n sidewalk_polys = [road.sidewalk_region for road in self.roads.values()]\n shoulder_polys = [road.shoulder_region for road in self.roads.values()]\n\n self.drivable_region = buffer_union(drivable_polys, tolerance=self.tolerance)\n self.sidewalk_region = buffer_union(sidewalk_polys, tolerance=self.tolerance)\n self.shoulder_region = buffer_union(shoulder_polys, tolerance=self.tolerance)\n\n if calc_intersect:\n self.calculate_intersections()\n\n def calculate_intersections(self):\n intersect_polys = []\n for junc in self.junctions.values():\n junc_polys = [self.roads[i].drivable_region for i in junc.paths]\n assert junc_polys, junc\n union = buffer_union(junc_polys, tolerance=self.tolerance)\n if self.fill_intersections:\n union = removeHoles(union)\n assert union.is_valid\n junc.poly = union\n intersect_polys.append(union)\n self.intersection_region = buffer_union(intersect_polys, tolerance=self.tolerance)\n\n def heading_at(self, point):\n '''Return the road heading at point.'''\n # Convert point to shapely Point.\n point = Point(point.x, point.y)\n for road in self.roads.values():\n if point.within(road.drivable_region.buffer(1)):\n return road.heading_at(point)\n #raise RuntimeError('Point not in RoadMap: ', point)\n return 0\n\n def plot_line(self, plt, num=500):\n '''Plot center line of road map for sanity check.'''\n for road in self.roads.values():\n for piece in road.ref_line:\n points = piece.to_points(num)\n x = [p[0] for p in points]\n y = [p[1] for p in points]\n plt.plot(x, y, 'b')\n plt.show()\n\n def plot_lanes(self, plt, num=500):\n '''Plot lane boundaries of road map for sanity check.'''\n bounds_x =[]\n bounds_y = []\n for road in self.roads.values():\n for piece in road.ref_line:\n ref_points = piece.to_points(num)\n for i in range(len(ref_points) - 1):\n offsets = road.get_lane_offsets(ref_points[i][2])\n tan_vec = (ref_points[i + 1][0] - ref_points[i][0],\n ref_points[i + 1][1] - ref_points[i][1])\n tan_norm = np.sqrt(tan_vec[0] ** 2 + tan_vec[1] ** 2)\n normal_vec = (-tan_vec[1] / tan_norm, tan_vec[0] / tan_norm)\n # ortho_line_x = []\n # ortho_line_y = []\n for id_ in offsets.keys():\n if road.get_lane(id_, ref_points[i][2]).type_ == 'driving':\n bounds_x.append(ref_points[i][0] + normal_vec[0] * offsets[id_])\n bounds_y.append(ref_points[i][1] + normal_vec[1] * offsets[id_])\n plt.scatter(bounds_x, bounds_y, c='r', s=2)\n plt.show()\n\n def __parse_lanes(self, lanes_elem):\n '''Lanes_elem should be <left> or <right> element.\n Returns dict of lane ids and Lane objects.'''\n lanes = {}\n for l in lanes_elem.iter('lane'):\n id_ = int(l.get('id'))\n type_ = l.get('type')\n link = l.find('link')\n pred = None\n succ = None\n if link is not None:\n pred_elem = link.find('predecessor')\n succ_elem = link.find('successor')\n if pred_elem is not None:\n pred = int(pred_elem.get('id'))\n if succ_elem is not None:\n succ = int(succ_elem.get('id'))\n lane = Lane(id_, type_, pred, succ)\n for w in l.iter('width'):\n w_poly = Poly3(float(w.get('a')),\n float(w.get('b')),\n float(w.get('c')),\n float(w.get('d')))\n lane.width.append((w_poly, float(w.get('sOffset'))))\n lanes[id_] = lane\n return lanes\n\n def __parse_link(self, link_elem, road, contact):\n if link_elem is None:\n return\n road_id = road.id_\n if link_elem.get('elementType') == 'road':\n id_b = int(link_elem.get('elementId'))\n contact_b = link_elem.get('contactPoint')\n 
link = RoadLink(road_id, id_b, contact, contact_b)\n self.road_links.append(link)\n return link\n else:\n assert link_elem.get('elementType') == 'junction', 'Unknown link type'\n junction = int(link_elem.get('elementId'))\n if junction not in self.junctions:\n return # junction had no connecting roads, so we skipped it\n if contact == 'start':\n road.predecessor = junction\n else:\n road.successor = junction\n connections = self.junctions[junction].connections\n for c in connections:\n if c.incoming_id == road_id:\n self.road_links.append(RoadLink(road_id,\n c.connecting_id,\n contact,\n c.connecting_contact))\n\n def __parse_signal_validity(self, validity_elem):\n if validity_elem is None:\n return None\n return [int(validity_elem.get('fromLane')), int(validity_elem.get('toLane'))]\n\n def __parse_signal(self, signal_elem):\n return Signal(\n signal_elem.get('id'),\n signal_elem.get('country'),\n signal_elem.get('type'),\n signal_elem.get('subtype'),\n signal_elem.get('orientation'),\n self.__parse_signal_validity(signal_elem.find('validity'))\n )\n\n def __parse_signal_reference(self, signal_reference_elem):\n return SignalReference(\n signal_reference_elem.get('id'),\n signal_reference_elem.get('orientation'),\n self.__parse_signal_validity(signal_reference_elem.find('validity'))\n )\n\n def parse(self, path):\n tree = ET.parse(path)\n root = tree.getroot()\n if root.tag != 'OpenDRIVE':\n raise RuntimeError(f'{path} does not appear to be an OpenDRIVE file')\n\n # parse junctions\n for j in root.iter('junction'):\n junction = Junction(int(j.get('id')), j.get('name'))\n for c in j.iter('connection'):\n ty = c.get('type', 'default')\n if ty != 'default':\n raise RuntimeError(f'unhandled \"{ty}\" type of junction connection')\n lane_links = {}\n for l in c.iter('laneLink'):\n lane_links[int(l.get('from'))] = int(l.get('to'))\n junction.add_connection(int(c.get('incomingRoad')),\n int(c.get('connectingRoad')),\n c.get('contactPoint'),\n lane_links)\n junction.paths.append(int(c.get('connectingRoad')))\n if not junction.paths:\n warn(f'junction {junction.id_} has no connecting roads; skipping it')\n continue\n self.junctions[junction.id_] = junction\n\n # Creating temporal signals container to resolve referenced signals.\n _temp_signals = {}\n for r in root.iter('road'):\n signals = r.find('signals')\n if signals is not None:\n for s in signals.iter('signal'):\n signal = self.__parse_signal(s)\n _temp_signals[signal.id_] = signal\n\n # parse roads\n self.elidedRoads = {}\n for r in root.iter('road'):\n road = Road(r.get('name'), int(r.get('id')), float(r.get('length')),\n r.get('junction'))\n link = r.find('link')\n if link is not None:\n pred_elem = link.find('predecessor')\n succ_elem = link.find('successor')\n pred_link = self.__parse_link(pred_elem, road, 'start')\n succ_link = self.__parse_link(succ_elem, road, 'end')\n else:\n pred_link = succ_link = None\n\n if road.length < self.tolerance:\n warn(f'road {road.id_} has length shorter than tolerance;'\n ' geometry may contain artifacts')\n if self.elide_short_roads:\n warn(f'attempting to elide road {road.id_} of length {road.length}')\n assert road.junction is None\n self.elidedRoads[road.id_] = road\n if pred_link:\n road.predecessor = pred_link.id_b\n road.predecessorContact = pred_link.contact_b\n else:\n road.predecessorContact = None\n if succ_link:\n road.successor = succ_link.id_b\n road.successorContact = succ_link.contact_b\n else:\n road.successorContact = None\n continue\n\n # Parse planView:\n plan_view = 
r.find('planView')\n curves = []\n for geom in plan_view.iter('geometry'):\n x0 = float(geom.get('x'))\n y0 = float(geom.get('y'))\n s0 = float(geom.get('s'))\n hdg = float(geom.get('hdg'))\n length = float(geom.get('length'))\n curve_elem = geom[0]\n curve = None\n if curve_elem.tag == 'line':\n curve = Line(x0, y0, hdg, length)\n elif curve_elem.tag == 'arc':\n # Arc is clothoid of constant curvature.\n curv = float(curve_elem.get('curvature'))\n curve = Clothoid(x0, y0, hdg, length, curv, curv)\n elif curve_elem.tag == 'spiral':\n curv0 = float(curve_elem.get('curvStart'))\n curv1 = float(curve_elem.get('curvEnd'))\n curve = Clothoid(x0, y0, hdg, length, curv0, curv1)\n elif curve_elem.tag == 'poly3':\n a, b, c, d = cubic_elem.get('a'), \\\n float(curve_elem.get('b')), \\\n float(curve_elem.get('c')), \\\n float(curve_elem.get('d'))\n curve = Cubic(x0, y0, hdg, length, a, b, c, d)\n elif curve_elem.tag == 'paramPoly3':\n au, bu, cu, du, av, bv, cv, dv = \\\n float(curve_elem.get('aU')), \\\n float(curve_elem.get('bU')), \\\n float(curve_elem.get('cU')), \\\n float(curve_elem.get('dU')), \\\n float(curve_elem.get('aV')), \\\n float(curve_elem.get('bV')), \\\n float(curve_elem.get('cV')), \\\n float(curve_elem.get('dV'))\n p_range = curve_elem.get('pRange')\n if p_range and p_range != 'normalized':\n # TODO support arcLength\n raise RuntimeError('unsupported pRange for paramPoly3')\n else:\n p_range = 1\n curve = ParamCubic(x0, y0, hdg, length,\n au, bu, cu, du, av, bv,\n cv, dv, p_range)\n curves.append((s0, curve))\n if not curves:\n raise RuntimeError(f'road {road.id_} has an empty planView')\n if not curves[0][0] == 0:\n raise RuntimeError(f'reference line of road {road.id_} does not start at s=0')\n lastS = 0\n lastCurve = curves[0][1]\n refLine = []\n for s0, curve in curves[1:]:\n l = s0 - lastS\n if abs(lastCurve.length - l) > 1e-4:\n raise RuntimeError(f'planView of road {road.id_} has inconsistent length')\n if l < 0:\n raise RuntimeError(f'planView of road {road.id_} is not in order')\n elif l < 1e-6:\n warn(f'road {road.id_} reference line has a geometry of '\n f'length {l}; skipping it')\n else:\n refLine.append(lastCurve)\n lastS = s0\n lastCurve = curve\n if refLine and lastCurve.length < 1e-6:\n warn(f'road {road.id_} reference line has a geometry of '\n f'length {lastCurve.length}; skipping it')\n else:\n # even if the last curve is shorter than the threshold, we'll keep it if\n # it is the only curve; getting rid of the road entirely is handled by\n # road elision above\n refLine.append(lastCurve)\n assert refLine\n road.ref_line = refLine\n\n # Parse lanes:\n lanes = r.find('lanes')\n for offset in lanes.iter('laneOffset'):\n road.offset.append((Poly3(float(offset.get('a')),\n float(offset.get('b')),\n float(offset.get('c')),\n float(offset.get('d'))),\n float(offset.get('s'))))\n\n def popLastSectionIfShort(l):\n if l < 1e-6:\n warn(f'road {road.id_} has a lane section of length {l}; skipping it')\n\n # delete the length-0 section and re-link lanes appropriately\n badSec = road.lane_secs.pop()\n if road.lane_secs:\n prev = road.lane_secs[-1]\n for id_, lane in prev.lanes.items():\n if lane.succ is not None:\n lane.succ = badSec.lanes[lane.succ].succ\n else:\n if road.remappedStartLanes is None:\n road.remappedStartLanes = { l: l for l in badSec.lanes }\n for start, current in road.remappedStartLanes.items():\n road.remappedStartLanes[start] = badSec.lanes[current].succ\n return badSec\n else:\n return None\n\n last_s = float('-inf')\n for ls_elem in 
lanes.iter('laneSection'):\n s = float(ls_elem.get('s'))\n l = s - last_s\n assert l >= 0\n badSec = popLastSectionIfShort(l)\n\n last_s = s\n left = ls_elem.find('left')\n right = ls_elem.find('right')\n left_lanes = {}\n right_lanes = {}\n\n if left is not None:\n left_lanes = self.__parse_lanes(left)\n\n if right is not None:\n right_lanes = self.__parse_lanes(right)\n\n lane_sec = LaneSection(s, left_lanes, right_lanes)\n\n if badSec is not None: # finish re-linking lanes across deleted section\n for id_, lane in lane_sec.lanes.items():\n if lane.pred is not None:\n lane.pred = badSec.lanes[lane.pred].pred\n\n road.lane_secs.append(lane_sec)\n\n # parse signals\n signals = r.find('signals')\n if signals is not None:\n for signal_elem in signals.iter('signal'):\n signal = self.__parse_signal(signal_elem)\n if signal.is_valid():\n road.signals.append(signal)\n\n for signal_ref_elem in signals.iter('signalReference'):\n signalReference = self.__parse_signal_reference(signal_ref_elem)\n if signalReference.is_valid():\n referencedSignal = _temp_signals[signalReference.id_]\n signal = Signal(\n referencedSignal.id_,\n referencedSignal.country,\n referencedSignal.type_,\n referencedSignal.subtype,\n signalReference.orientation,\n signalReference.validity\n )\n road.signals.append(signal)\n\n if len(road.lane_secs) > 1:\n popLastSectionIfShort(road.length - s)\n assert road.lane_secs\n self.roads[road.id_] = road\n\n # Handle links to/from elided roads\n new_links = []\n for link in self.road_links:\n if link.id_a in self.elidedRoads:\n continue\n if link.id_b in self.elidedRoads:\n elided = self.elidedRoads[link.id_b]\n if link.contact_b == 'start':\n link.id_b = elided.successor\n link.contact_b = elided.successorContact\n else:\n link.id_b = elided.predecessor\n link.contact_b = elided.predecessorContact\n if link.contact_b is None:\n continue # link to intersection\n new_links.append(link)\n self.road_links = new_links\n\n def toScenicNetwork(self):\n assert self.intersection_region is not None\n\n # Prepare registry of network elements\n allElements = {}\n def register(element):\n assert element.uid is not None\n assert element.uid not in allElements, element.uid\n allElements[element.uid] = element\n def registerAll(elements):\n for elt in elements:\n register(elt)\n\n # Convert roads\n mainRoads, connectingRoads, roads = {}, {}, {}\n for id_, road in self.roads.items():\n if road.drivable_region.is_empty:\n continue # not actually a road you can drive on\n newRoad, elts = road.toScenicRoad(tolerance=self.tolerance)\n registerAll(elts)\n (connectingRoads if road.junction else mainRoads)[id_] = newRoad\n roads[id_] = newRoad\n\n # Hook up inter-road links\n for link in self.road_links:\n if link.id_b in connectingRoads:\n continue # actually a road-to-junction link; handled later\n if link.id_a not in roads or link.id_b not in roads:\n continue # may link non-drivable roads we haven't parsed; ignore it\n\n # Work out connectivity of roads and adjacent sections\n roadA, roadB = roads[link.id_a], roads[link.id_b]\n if link.contact_a == 'start':\n secA = roadA.sections[0]\n roadA._predecessor = roadB\n forwardA = True\n else:\n secA = roadA.sections[-1]\n roadA._successor = roadB\n forwardA = False\n if link.contact_b == 'start':\n secB = roadB.sections[0]\n else:\n secB = roadB.sections[-1]\n\n # Connect corresponding lanes\n lanesB = secB.lanesByOpenDriveID\n for laneA in secA.lanes:\n if laneA.isForward == forwardA:\n pred = laneA._predecessor\n if pred is None:\n continue\n # assert pred 
in lanesB\n # ^ changed for Zalazone\n if pred not in lanesB:\n continue\n # ^ added for Zalazone\n laneB = lanesB[pred]\n laneA._predecessor = laneB\n laneA.lane._predecessor = laneB.lane\n laneA.lane.group._predecessor = laneB.lane.group\n else:\n succ = laneA._successor\n if succ is None:\n continue\n if succ not in lanesB:\n continue\n # ^ added for Zalazone\n assert succ in lanesB\n laneB = lanesB[succ]\n laneA._successor = laneB\n laneA.lane._successor = laneB.lane\n laneA.lane.group._successor = laneB.lane.group\n\n # Hook up connecting road links and create intersections\n intersections = {}\n for jid, junction in self.junctions.items():\n if not junction.connections:\n continue\n assert junction.poly is not None\n if junction.poly.is_empty:\n warn(f'skipping empty junction {jid}')\n continue\n\n # Gather all lanes involved in the junction's connections\n allIncomingLanes, allOutgoingLanes = [], []\n allRoads, seenRoads = [], set()\n allSignals, seenSignals = [], set()\n maneuversForLane = defaultdict(list)\n for connection in junction.connections:\n incomingID = connection.incoming_id\n incomingRoad = mainRoads.get(incomingID)\n if not incomingRoad:\n continue # incoming road has no drivable lanes; skip it\n\n connectingID = connection.connecting_id\n connectingRoad = connectingRoads.get(connectingID)\n if not connectingRoad:\n continue # connecting road has no drivable lanes; skip it\n\n for signal in connectingRoad.signals:\n if signal.openDriveID not in seenSignals:\n allSignals.append(signal)\n seenSignals.add(signal.openDriveID)\n\n # Find possible incoming lanes for this connection\n if incomingID not in seenRoads:\n allRoads.append(incomingRoad)\n seenRoads.add(incomingID)\n oldRoad = self.roads[incomingID]\n incomingSection = None\n if oldRoad.predecessor == jid:\n incomingSection = incomingRoad.sections[0]\n remapping = self.roads[incomingID].remappedStartLanes # could be None\n if oldRoad.successor == jid:\n assert incomingSection is None\n incomingSection = incomingRoad.sections[-1]\n remapping = None\n assert incomingSection is not None\n if remapping is None:\n incomingLaneIDs = incomingSection.lanesByOpenDriveID\n else:\n incomingLaneIDs = {}\n newIDs = incomingSection.lanesByOpenDriveID\n for start, remapped in remapping.items():\n if remapped in newIDs:\n incomingLaneIDs[start] = newIDs[remapped]\n assert len(incomingLaneIDs) == len(newIDs)\n\n # Connect incoming lanes to connecting road\n if connection.connecting_contact == 'start':\n connectingSection = connectingRoad.sections[0]\n remapping = self.roads[connectingID].remappedStartLanes # could be None\n else:\n connectingSection = connectingRoad.sections[-1]\n remapping = None\n if remapping is None:\n connectingLaneIDs = connectingSection.lanesByOpenDriveID\n else:\n connectingLaneIDs = {}\n newIDs = connectingSection.lanesByOpenDriveID\n for start, remapped in remapping.items():\n if remapped in newIDs:\n connectingLaneIDs[start] = newIDs[remapped]\n assert len(connectingLaneIDs) == len(newIDs)\n lane_links = connection.lane_links\n if not lane_links: # all lanes connect to that with the same id\n lane_links = { l: l for l in incomingLaneIDs }\n for fromID, fromLane in incomingLaneIDs.items():\n # Link incoming lane to connecting road\n # (we only handle lanes in incomingLaneIDs, thus skipping non-drivable lanes)\n if fromID not in lane_links:\n continue # lane not linked by this connection\n toID = lane_links[fromID]\n toLane = connectingLaneIDs[toID]\n if fromLane.lane not in allIncomingLanes:\n 
allIncomingLanes.append(fromLane.lane)\n fromLane._successor = toLane\n fromLane.lane._successor = toLane.lane\n toLane._predecessor = fromLane\n toLane.lane._predecessor = fromLane.lane\n\n # Collect outgoing lane and road\n # TODO why is it allowed for this not to exist?\n outgoingLane = toLane.lane._successor\n if outgoingLane is None:\n warn(f'connecting road {connectingID} lane {toID} has no successor lane')\n else:\n if outgoingLane not in allOutgoingLanes:\n allOutgoingLanes.append(outgoingLane)\n outgoingRoad = outgoingLane.road\n if outgoingRoad.id not in seenRoads:\n allRoads.append(outgoingRoad)\n seenRoads.add(outgoingRoad.id)\n\n # TODO future OpenDRIVE extension annotating left/right turns?\n maneuver = roadDomain.Maneuver(\n startLane=fromLane.lane,\n connectingLane=toLane.lane,\n endLane=outgoingLane,\n intersection=None # will be patched once the Intersection is created\n )\n maneuversForLane[fromLane.lane].append(maneuver)\n\n # Gather maneuvers\n allManeuvers = []\n for lane, maneuvers in maneuversForLane.items():\n # assert lane.maneuvers == ()\n # ^ changed for Zalazone\n lane.maneuvers = tuple(maneuvers)\n allManeuvers.extend(maneuvers)\n\n # Order connected roads and lanes by adjacency\n def cyclicOrder(elements, contactStart=None):\n points = []\n for element in elements:\n if contactStart is None:\n old = self.roads[element.id]\n assert old.predecessor == jid or old.successor == jid\n contactStart = (old.predecessor == jid)\n point = element.centerline[0 if contactStart else -1]\n points.append(point)\n centroid = sum(points, Vector(0, 0)) / len(points)\n pairs = sorted(zip(elements, points), key=lambda pair: centroid.angleTo(pair[1]))\n return tuple(elem for elem, pt in pairs)\n\n # Create intersection\n intersection = roadDomain.Intersection(\n polygon=junction.poly,\n name=junction.name,\n uid=f'intersection{jid}', # need prefix to prevent collisions with roads\n id=jid,\n roads=cyclicOrder(allRoads),\n incomingLanes=cyclicOrder(allIncomingLanes, contactStart=False),\n outgoingLanes=cyclicOrder(allOutgoingLanes, contactStart=True),\n maneuvers=tuple(allManeuvers),\n signals=tuple(allSignals),\n crossings=(), # TODO add these\n )\n register(intersection)\n intersections[jid] = intersection\n for maneuver in allManeuvers:\n object.__setattr__(maneuver, 'intersection', intersection)\n\n # Hook up road-intersection links\n for rid, oldRoad in self.roads.items():\n if rid not in roads:\n continue # road does not have any drivable lanes, so we skipped it\n newRoad = roads[rid]\n if oldRoad.predecessor:\n intersection = intersections[oldRoad.predecessor]\n newRoad._predecessor = intersection\n newRoad.sections[0]._predecessor = intersection\n if newRoad.backwardLanes:\n newRoad.backwardLanes._successor = intersection\n if oldRoad.successor:\n intersection = intersections[oldRoad.successor]\n newRoad._successor = intersection\n newRoad.sections[-1]._successor = intersection\n if newRoad.forwardLanes:\n newRoad.forwardLanes._successor = intersection\n\n # Gather all network elements\n roads = tuple(mainRoads.values())\n connectingRoads = tuple(connectingRoads.values())\n allRoads = roads + connectingRoads\n groups = []\n for road in allRoads:\n if road.forwardLanes:\n groups.append(road.forwardLanes)\n if road.backwardLanes:\n groups.append(road.backwardLanes)\n lanes = [lane for road in allRoads for lane in road.lanes]\n intersections = tuple(intersections.values())\n crossings = () # TODO add these\n sidewalks, shoulders = [], []\n for group in groups:\n sidewalk = 
group._sidewalk\n if sidewalk:\n sidewalks.append(sidewalk)\n shoulder = group._shoulder\n if shoulder:\n shoulders.append(shoulder)\n\n # Add dummy maneuvers for lanes which merge/turn into another lane\n for lane in lanes:\n if not lane.maneuvers and lane._successor:\n maneuver = roadDomain.Maneuver(type=roadDomain.ManeuverType.STRAIGHT,\n startLane=lane, endLane=lane._successor)\n lane.maneuvers = (maneuver,)\n\n def combine(regions):\n return PolygonalRegion.unionAll(regions, buf=self.tolerance)\n\n return roadDomain.Network(\n elements=allElements,\n roads=roads,\n connectingRoads=connectingRoads,\n laneGroups=tuple(groups),\n lanes=lanes,\n intersections=intersections,\n crossings=crossings,\n sidewalks=tuple(sidewalks),\n shoulders=tuple(shoulders),\n tolerance=self.tolerance,\n roadRegion=combine(roads),\n laneRegion=combine(lanes),\n intersectionRegion=combine(intersections),\n crossingRegion=combine(crossings),\n sidewalkRegion=combine(sidewalks)\n )\n"
] | [
[
"numpy.sqrt",
"numpy.linspace",
"scipy.integrate.solve_ivp",
"scipy.integrate.quad",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"1.5",
"1.7",
"1.0",
"1.2",
"1.8"
],
"tensorflow": []
}
] |
velvetThunder25/Feature-based-Monocular-Visual-Odometry | [
"a7602edab934419c1ec73618ee655e18026f834f",
"a7602edab934419c1ec73618ee655e18026f834f"
] | [
"pyslam/keyframe.py",
"pyslam/feature_manager_adaptors.py"
] | [
"\"\"\"\n* This file is part of PYSLAM \n*\n* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com> \n*\n* PYSLAM is free software: you can redistribute it and/or modify\n* it under the terms of the GNU General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n*\n* PYSLAM is distributed in the hope that it will be useful,\n* but WITHOUT ANY WARRANTY; without even the implied warranty of\n* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n* GNU General Public License for more details.\n*\n* You should have received a copy of the GNU General Public License\n* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\n\nimport cv2\nimport numpy as np\n\nfrom scipy.spatial import cKDTree\n\nfrom threading import RLock\n\nfrom parameters import Parameters \nfrom utils_sys import Printer\nfrom collections import defaultdict, OrderedDict, Counter\n\nfrom frame import Frame \nfrom camera_pose import CameraPose\n\n\nclass KeyFrameGraph(object):\n def __init__(self):\n self._lock_connections = RLock() \n # spanning tree\n self.init_parent = False # is parent initialized? \n self.parent = None \n self.children = set()\n # loop edges \n self.loop_edges = set() \n self.not_to_erase = False # if there is a loop edge then you cannot erase this keyframe \n # covisibility graph \n self.connected_keyframes_weights = Counter() #defaultdict(int) \n self.ordered_keyframes_weights = OrderedDict() # ordered list of connected keyframes (on the basis of the number of map points with this keyframe)\n # \n self.is_first_connection=True \n \n # =============================== \n # spanning tree \n def add_child(self, keyframe):\n with self._lock_connections:\n self.children.add(keyframe)\n \n def erase_child(self, keyframe):\n with self._lock_connections:\n try: \n self.children.remove(keyframe) \n except:\n pass \n \n def set_parent(self, keyframe):\n with self._lock_connections:\n if self == keyframe: \n if __debug__:\n Printer.orange('KeyFrameGraph.set_parent - trying to set self as parent')\n return \n self.parent = keyframe \n keyframe.add_child(self)\n \n def get_children(self):\n with self._lock_connections:\n return self.children.copy()\n \n def get_parent(self):\n with self._lock_connections:\n return self.parent \n \n def has_child(self, keyframe): \n with self._lock_connections:\n return keyframe in self.children \n \n \n # =============================== \n # loop edges \n def add_loop_edge(self, keyframe):\n with self._lock_connections:\n self.not_to_erase = True \n self.loop_edges.add(keyframe)\n \n def get_loop_edges(self):\n with self._lock_connections: \n return self.loop_edges.copy()\n \n \n # =============================== \n # covisibility \n \n def reset_covisibility(self): \n self.connected_keyframes_weights = Counter() \n self.ordered_keyframes_weights = OrderedDict() \n \n def add_connection(self, keyframe, weigth):\n with self._lock_connections: \n self.connected_keyframes_weights[keyframe]=weigth\n self.update_best_covisibles()\n \n def erase_connection(self, keyframe):\n with self._lock_connections: \n try:\n del self.connected_keyframes_weights[keyframe] \n self.update_best_covisibles()\n except: \n pass \n \n def update_best_covisibles(self):\n with self._lock_connections: \n self.ordered_keyframes_weights = OrderedDict(sorted(self.connected_keyframes_weights.items(), key=lambda x: x[1], reverse=True)) # order by value (decreasing order)\n \n # get a 
list of all the keyframe that shares points \n def get_connected_keyframes(self): \n with self._lock_connections: \n return list(self.connected_keyframes_weights.keys()) # returns a copy \n \n # get an ordered list of covisible keyframes \n def get_covisible_keyframes(self):\n with self._lock_connections: \n return list(self.ordered_keyframes_weights.keys()) # returns a copy \n \n # get an ordered list of covisible keyframes \n def get_best_covisible_keyframes(self,N):\n with self._lock_connections: \n return list(self.ordered_keyframes_weights.keys())[:N] # returns a copy \n \n def get_covisible_by_weight(self,weight): \n with self._lock_connections: \n return [kf for kf,w in self.ordered_keyframes_weights.items() if w > weight]\n \n def get_weight(self,keyframe): \n with self._lock_connections: \n return self.connected_keyframes_weights[keyframe] \n \n\nclass KeyFrame(Frame,KeyFrameGraph):\n def __init__(self, frame, img=None):\n KeyFrameGraph.__init__(self)\n Frame.__init__(self, img=None, camera=frame.camera, pose=frame.pose, id=frame.id, timestamp=frame.timestamp) # here we MUST have img=None in order to avoid recomputing keypoint info\n \n if frame.img is not None: \n self.img = frame.img # this is already a copy of an image \n else:\n if img is not None: \n self.img = img.copy()\n \n self.map = None \n \n self.is_keyframe = True \n self.kid = None # keyframe id \n \n self._is_bad = False \n self.to_be_erased = False \n \n # pose relative to parent (this is computed when bad flag is activated)\n self._pose_Tcp = CameraPose() \n\n # share keypoints info with frame (these are computed once for all on frame initialization and they are not changed anymore)\n self.kps = frame.kps # keypoint coordinates [Nx2]\n self.kpsu = frame.kpsu # [u]ndistorted keypoint coordinates [Nx2]\n self.kpsn = frame.kpsn # [n]ormalized keypoint coordinates [Nx2] (Kinv * [kp,1]) \n self.octaves = frame.octaves # keypoint octaves [Nx1]\n self.sizes = frame.sizes # keypoint sizes [Nx1] \n self.angles = frame.angles # keypoint angles [Nx1] \n self.des = frame.des # keypoint descriptors [NxD] where D is the descriptor length \n \n if hasattr(frame, '_kd'): \n self._kd = frame._kd \n else: \n Printer.orange('KeyFrame %d computing kdtree for input frame %d'%(self.id,frame.id))\n self._kd = cKDTree(self.kpsu)\n \n # map points information arrays (copy points coming from frame)\n self.points = frame.get_points() # map points => self.points[idx] is the map point matched with self.kps[idx] (if is not None)\n self.outliers = np.full(self.kpsu.shape[0], False, dtype=bool) # used just in propagate_map_point_matches() \n \n # associate matched map points to observations \n def init_observations(self):\n with self._lock_features: \n for idx,p in enumerate(self.points):\n if p is not None and not p.is_bad: \n if p.add_observation(self, idx):\n p.update_info() \n \n def update_connections(self):\n # for all map points of this keyframe check in which other keyframes they are seen\n # build a counter for these other keyframes \n points = self.get_matched_good_points()\n assert len(points) > 0\n viewing_keyframes = [kf for p in points for kf in p.keyframes() if kf.kid != self.kid] # exclude this keyframe \n viewing_keyframes = Counter(viewing_keyframes) \n if not viewing_keyframes: # if empty (https://www.pythoncentral.io/how-to-check-if-a-list-tuple-or-dictionary-is-empty-in-python/)\n return \n # order the keyframes \n covisible_keyframes = viewing_keyframes.most_common() \n #print('covisible_keyframes: ', covisible_keyframes)\n 
# get keyframe that shares most points \n kf_max, w_max = covisible_keyframes[0]\n # if the counter is greater than threshold add connection\n # otherwise add the one with maximum counter \n with self._lock_connections: \n if w_max >= Parameters.kMinNumOfCovisiblePointsForCreatingConnection:\n self.connected_keyframes_weights = viewing_keyframes \n self.ordered_keyframes_weights = OrderedDict()\n for kf,w in covisible_keyframes:\n if w >= Parameters.kMinNumOfCovisiblePointsForCreatingConnection:\n kf.add_connection(self,w)\n self.ordered_keyframes_weights[kf] = w\n else:\n break \n else:\n self.connected_keyframes_weights = Counter({kf_max,w_max}) \n self.ordered_keyframes_weights = OrderedDict({kf_max,w_max}) \n kf_max.add_connection(self,w_max) \n # update spanning tree \n if self.is_first_connection and self.kid!=0: \n self.set_parent(kf_max)\n self.is_first_connection = False \n #print('ordered_keyframes_weights: ', self.ordered_keyframes_weights) \n \n @property \n def is_bad(self): \n with self._lock_connections: \n return self._is_bad \n\n def set_not_erase(self): \n with self._lock_connections: \n not_to_erase = True \n \n def set_erase(self): \n with self._lock_connections: \n if len(self.loop_edges)==0: \n self.not_to_erase = False \n if self.to_be_erased: \n self.set_bad() \n\n def set_bad(self): \n with self._lock_connections: \n if self.kid == 0: \n return \n if self.not_to_erase: \n self.to_be_erased = True \n return\n \n # update covisibility graph \n for kf_connected in list(self.connected_keyframes_weights.keys()): \n kf_connected.erase_connection(self) \n \n for idx,p in enumerate(self.points): \n if p is not None: \n p.remove_observation(self,idx)\n \n self.reset_covisibility()\n \n # update spanning tree: each children must be connected to a new parent \n \n # build a set of parent candidates for the children \n parent_candidates = set() \n assert(self.parent is not None)\n parent_candidates.add(self.parent)\n \n # each child must be connected to a new parent (the candidate parent with highest covisibility weight)\n # once a child is connected to a new parent, include the child as new parent candidate for the rest \n while not len(self.children)==0: \n w_max = 0\n child_to_connect = None \n parent_to_connect = None \n found_connection = False \n for kf_child in self.children: \n if kf_child.is_bad:\n continue\n # check if a candidate parent is connected to kf_child and compute the candidate parent with highest covisibility weight \n covisible_keyframes = kf_child.get_covisible_keyframes()\n for candidate_parent in parent_candidates: \n if candidate_parent in covisible_keyframes:\n w = kf_child.get_weight(candidate_parent)\n if w > w_max: \n w_max = w \n child_to_connect = kf_child\n parent_to_connect = candidate_parent \n found_connection = True \n if found_connection: \n child_to_connect.set_parent(parent_to_connect)\n parent_candidates.add(child_to_connect)\n self.children.remove(child_to_connect)\n else: \n break # stop since there is no connection with covisibility weight>0\n\n # if a child has no covisibility connections with any parent candidate, connect it with the original parent of this keyframe\n if not len(self.children)==0:\n for kf_child in self.children: \n kf_child.set_parent(self.parent)\n \n self.parent.erase_child(self)\n self._pose_Tcp.update(self.Tcw @ self.parent.Twc)\n self._is_bad = True \n \n if self.map is not None:\n self.map.remove_keyframe(self)\n",
"\"\"\"\n* This file is part of PYSLAM \n*\n* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com> \n*\n* PYSLAM is free software: you can redistribute it and/or modify\n* it under the terms of the GNU General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n*\n* PYSLAM is distributed in the hope that it will be useful,\n* but WITHOUT ANY WARRANTY; without even the implied warranty of\n* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n* GNU General Public License for more details.\n*\n* You should have received a copy of the GNU General Public License\n* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\n\nimport math\nfrom enum import Enum\n \nimport numpy as np \nimport cv2\n\nfrom concurrent.futures import ThreadPoolExecutor, as_completed, wait\n\nfrom utils_img import img_blocks, img_mask_blocks\nfrom utils_features import sat_num_features\nfrom utils_sys import Printer \nfrom pyramid import Pyramid, PyramidType\n\n\nkVerbose = False \n\nkAdaptorNumRowDivs = 5 #2, 3\nkAdaptorNumColDivs = 5 #2, 3\n\nkNumLevelsInitSigma = 20\n\nkBlockAdaptorUseParallelComputations = True \nkBlockAdaptorMaxNumWorkers = 4\n\nkPyramidAdaptorUseParallelComputations = True \nkPyramidAdaptorMaxNumWorkers = 4\n\n\nif not kVerbose:\n def print(*args, **kwargs):\n pass \n\n\n# BlockAdaptor divides the image in row_divs x col_divs cells and extracts features in each of these cells\nclass BlockAdaptor(object): \n def __init__(self, \n detector, \n descriptor = None, \n row_divs = kAdaptorNumRowDivs, \n col_divs = kAdaptorNumColDivs,\n do_parallel = kBlockAdaptorUseParallelComputations): \n self.detector = detector \n self.descriptor = descriptor \n self.row_divs = row_divs\n self.col_divs = col_divs \n self.do_parallel = do_parallel # do parallel computations \n self.is_detector_equal_to_descriptor = (self.detector == self.descriptor)\n\n\n def detect(self, frame, mask=None):\n if self.row_divs == 1 and self.col_divs == 1: \n return self.detector.detect(frame, mask)\n else: \n if kVerbose: \n print('BlockAdaptor ', self.row_divs, 'x', self.col_divs)\n block_generator = img_mask_blocks(frame, mask, self.row_divs, self.col_divs)\n kps_all = [] # list are thread-safe \n \n def detect_block(b_m_i_j): \n b, m, i, j = b_m_i_j\n if kVerbose and False: \n print('BlockAdaptor in block (',i,',',j,')') \n kps = self.detector.detect(b, mask=m)\n #print('adaptor: detected #features: ', len(kps), ' in block (',i,',',j,')') \n for kp in kps:\n #print('kp.pt before: ', kp.pt)\n kp.pt = (kp.pt[0] + j, kp.pt[1] + i) \n #print('kp.pt after: ', kp.pt) \n kps_all.extend(kps) \n \n if not self.do_parallel:\n # process the blocks sequentially \n for b, m, i, j in block_generator:\n detect_block((b,m,i,j))\n else: \n with ThreadPoolExecutor(max_workers = 4) as executor:\n executor.map(detect_block, block_generator) # automatic join() at the end of the `width` block \n return np.array(kps_all)\n \n \n def detectAndCompute(self, frame, mask=None):\n if self.row_divs == 1 and self.col_divs == 1: \n return self.detector.detectAndCompute(frame, mask)\n else: \n if kVerbose: \n print('BlockAdaptor ', self.row_divs, 'x', self.col_divs)\n block_generator = img_mask_blocks(frame, mask, self.row_divs, self.col_divs)\n kps_all = []\n des_all = []\n kps_des_map = {} # (i,j) -> (kps,des) \n \n def detect_and_compute_block(b_m_i_j): \n b, m, i, j = b_m_i_j\n if kVerbose and False: \n print('BlockAdaptor in 
block (',i,',',j,')') \n if self.is_detector_equal_to_descriptor: \n kps, des = self.detector.detectAndCompute(b, mask=m)\n else:\n kps = self.detector.detect(b, mask=m) \n kps, des = self.descriptor.compute(b, kps) \n #print('adaptor: detected #features: ', len(kps), ' in block (',i,',',j,')') \n # transform the points \n for kp in kps:\n #print('kp.pt before: ', kp.pt)\n kp.pt = (kp.pt[0] + j, kp.pt[1] + i) \n #print('kp.pt after: ', kp.pt) \n kps_des_map[(i,j)] = (kps,des) \n \n if not self.do_parallel:\n # process the blocks sequentially \n for b, m, i, j in block_generator:\n detect_and_compute_block((b, m, i, j)) \n else: \n with ThreadPoolExecutor(max_workers = kBlockAdaptorMaxNumWorkers) as executor:\n executor.map(detect_and_compute_block, block_generator) # automatic join() at the end of the `width` block \n \n # now merge the computed results \n for ij,(kps,des) in kps_des_map.items():\n kps_all.extend(kps) \n if des is not None and len(des)>0: \n if len(des_all)>0:\n des_all = np.vstack([des_all, des]) \n else: \n des_all = des \n return np.array(kps_all), np.array(des_all) \n\n\n# PyramidAdaptor generate a pyramid of num_levels images and extracts features in each of these images\n# TODO: check if a point on one level 'overlaps' with a point on other levels or add such option (DONE by FeatureManager.kdt_nms() )\nclass PyramidAdaptor(object): \n def __init__(self, \n detector, \n descriptor=None,\n num_features=2000, \n num_levels=4, \n scale_factor=1.2, \n sigma0=1.0, # N.B.: SIFT use 1.6 for this value\n first_level=0, \n pyramid_type=PyramidType.RESIZE, \n use_block_adaptor=False,\n do_parallel = kPyramidAdaptorUseParallelComputations,\n do_sat_features_per_level = False): \n self.detector = detector \n self.descriptor = descriptor \n self.num_features = num_features\n self.is_detector_equal_to_descriptor = (self.detector == self.descriptor) \n self.num_levels = num_levels\n self.scale_factor = scale_factor \n self.inv_scale_factor = 1./scale_factor \n self.sigma0 = sigma0 \n self.first_level = first_level\n self.pyramid_type = pyramid_type\n self.use_block_adaptor = use_block_adaptor\n self.do_parallel = do_parallel # do parallel computations \n self.do_sat_features_per_level = do_sat_features_per_level # saturate number of features for each level \n \n self.pyramid = Pyramid(num_levels=num_levels, \n scale_factor=scale_factor, \n sigma0=sigma0,\n first_level=first_level,\n pyramid_type=pyramid_type)\n self.initSigmaLevels()\n \n self.block_adaptor = None \n if self.use_block_adaptor:\n self.block_adaptor = BlockAdaptor(self.detector, self.descriptor, row_divs = kAdaptorNumRowDivs, col_divs = kAdaptorNumColDivs, do_parallel=False) \n\n\n def initSigmaLevels(self): \n num_levels = max(kNumLevelsInitSigma, self.num_levels)\n self.scale_factors = np.zeros(num_levels)\n self.inv_scale_factors = np.zeros(num_levels)\n self.scale_factors[0]=1.0 \n \n # compute desired number of features per level (by using the scale factor)\n self.num_features_per_level = np.zeros(num_levels,dtype=np.int)\n num_desired_features_per_level = self.num_features*(1 - self.inv_scale_factor)/(1 - math.pow(self.inv_scale_factor, self.num_levels))\n sum_num_features = 0\n for level in range(self.num_levels-1):\n self.num_features_per_level[level] = int(round(num_desired_features_per_level))\n sum_num_features += self.num_features_per_level[level];\n num_desired_features_per_level *= self.inv_scale_factor\n self.num_features_per_level[self.num_levels-1] = max(self.num_features - sum_num_features, 0) \n 
#print('num_features_per_level:',self.num_features_per_level)\n \n if self.first_level==-1:\n self.scale_factors[0]=1.0/self.scale_factor \n self.inv_scale_factors[0]=1.0/self.scale_factors[0] \n for i in range(1,num_levels):\n self.scale_factors[i]=self.scale_factors[i-1]*self.scale_factor\n self.inv_scale_factors[i]=1.0/self.scale_factors[i]\n #print('self.inv_scale_factors: ', self.inv_scale_factors) \n \n \n # detect on 'unfiltered' pyramid images ('unfiltered' meanining depends on the selected pyramid type) \n def detect(self, frame, mask=None): \n if self.num_levels == 1: \n return self.detector.detect(frame, mask)\n else: \n #TODO: manage mask \n if kVerbose: \n print('PyramidAdaptor #levels:', self.num_levels,'(from',self.first_level,'), scale_factor:', self.scale_factor,', sigma0:', self.sigma0,', type:', self.pyramid_type.name)\n self.pyramid.compute(frame)\n kps_all = [] # list are thread-safe \n \n def detect_level(scale,pyr_cur,i):\n kps = [] \n if self.block_adaptor is None: \n kps = self.detector.detect(pyr_cur) \n else:\n kps = self.block_adaptor.detect(pyr_cur)\n if kVerbose and False: \n print(\"PyramidAdaptor - level\", i, \", shape: \", pyr_cur.shape) \n for kp in kps:\n #print('kp.pt before: ', kp.pt)\n kp.pt = (kp.pt[0]*scale, kp.pt[1]*scale) \n kp.size = kp.size*scale \n kp.octave = i \n #print('kp: ', kp.pt, kp.octave) \n if self.do_sat_features_per_level: \n kps, _ = sat_num_features(kps, None, self.num_features_per_level[i]) # experimental \n kps_all.extend(kps)\n \n if not self.do_parallel:\n #print('sequential computations') \n # process the blocks sequentially \n for i in range(0,self.num_levels): \n scale = self.scale_factors[i]\n pyr_cur = self.pyramid.imgs[i] \n detect_level(scale,pyr_cur,i) \n else: \n #print('parallel computations') \n futures = []\n with ThreadPoolExecutor(max_workers = 4) as executor:\n for i in range(0,self.num_levels): \n scale = self.scale_factors[i]\n pyr_cur = self.pyramid.imgs[i] \n futures.append(executor.submit(detect_level, scale, pyr_cur, i))\n wait(futures) # wait all the task are completed \n \n return np.array(kps_all) \n \n \n # detect on 'unfiltered' pyramid images ('unfiltered' meanining depends on the selected pyramid type) \n # compute descriptors on 'filtered' pyramid images ('filtered' meanining depends on the selected pyramid type) \n def detectAndCompute(self, frame, mask=None): \n if self.num_levels == 1: \n return self.detector.detectAndCompute(frame, mask)\n else: \n if kVerbose: \n print('PyramidAdaptor [dc] #levels:', self.num_levels,'(from',self.first_level,'), scale_factor:', self.scale_factor,', sigma0:', self.sigma0,', type:', self.pyramid_type.name)\n self.pyramid.compute(frame)\n kps_all = []\n des_all = [] \n kps_des_map = {} # i -> (kps,des)\n \n def detect_and_compute_level(scale, pyr_cur, pyr_cur_filtered, N, i): \n kps = [] \n if self.block_adaptor is None: \n #kps, des = self.detector.detectAndCompute(pyr_cur)\n if self.is_detector_equal_to_descriptor: \n kps, des = self.detector.detectAndCompute(pyr_cur)\n else:\n kps = self.detector.detect(pyr_cur) \n #print('description of filtered')\n kps, des = self.descriptor.compute(pyr_cur_filtered, kps) \n else:\n kps, des = self.block_adaptor.detectAndCompute(pyr_cur)\n if kVerbose and False: \n print(\"PyramidAdaptor - level\", i, \", shape: \", pyr_cur.shape) \n for kp in kps:\n #print('before: kp.pt:', kp.pt,', size:',kp.size,', octave:',kp.octave,', angle:',kp.angle) \n kp.pt = (kp.pt[0]*scale, kp.pt[1]*scale) \n kp.size = kp.size*scale \n kp.octave = 
i \n #print('after: kp.pt:', kp.pt,', size:',kp.size,', octave:',kp.octave,', angle:',kp.angle) \n if self.do_sat_features_per_level: \n kps, des = sat_num_features(kps, des, N) # experimental \n kps_des_map[i] = (kps,des) \n \n if not self.do_parallel:\n #print('sequential computations') \n # process the blocks sequentially \n for i in range(0,self.num_levels): \n scale = self.scale_factors[i]\n pyr_cur = self.pyramid.imgs[i] \n pyr_cur_filtered = self.pyramid.imgs_filtered[i] \n detect_and_compute_level(scale, pyr_cur, pyr_cur_filtered, self.num_features_per_level[i], i)\n else: \n #print('parallel computations') \n futures = []\n with ThreadPoolExecutor(max_workers = 4) as executor:\n for i in range(0,self.num_levels): \n scale = self.scale_factors[i]\n pyr_cur = self.pyramid.imgs[i] \n pyr_cur_filtered = self.pyramid.imgs_filtered[i] \n futures.append(executor.submit(detect_and_compute_level, scale, pyr_cur, pyr_cur_filtered, self.num_features_per_level[i], i))\n wait(futures) # wait all the task are completed \n \n # now merge the computed results \n for i,(kps,des) in kps_des_map.items():\n kps_all.extend(kps) \n if des is not None and len(des)>0: \n if len(des_all)>0:\n des_all = np.vstack([des_all, des]) \n else: \n des_all = des \n return np.array(kps_all), np.array(des_all) \n \n\n"
] | [
[
"scipy.spatial.cKDTree",
"numpy.full"
],
[
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
christina-aigner/midnite | [
"a042ec775dc66ca68291ebaf0eb1a3be9ef28cf6"
] | [
"tests/unit/compound_methods_test.py"
] | [
"\"\"\"Unit test for the compound methods.\"\"\"\nimport pytest\nimport torch\nfrom assertpy import assert_that\nfrom numpy.testing import assert_array_equal\nfrom torch.nn import functional\nfrom torch.nn import Module\n\nfrom midnite.visualization import compound_methods\nfrom midnite.visualization.compound_methods import _prepare_input\nfrom midnite.visualization.compound_methods import _top_k_mask\nfrom midnite.visualization.compound_methods import _top_k_selector\nfrom midnite.visualization.compound_methods import _upscale\nfrom midnite.visualization.compound_methods import guided_gradcam\n\n\[email protected]\ndef img(mocker):\n img = torch.zeros((1, 3, 4, 4))\n img.detach = mocker.Mock(return_value=img)\n img.to = mocker.Mock(return_value=img)\n return img\n\n\ndef test_prepare_input(img, mocker):\n \"\"\"Test image preparations.\"\"\"\n img_clone = torch.tensor((1, 3, 4, 4))\n img_clone.clone = mocker.Mock(return_value=img)\n out = _prepare_input(img_clone)\n\n img_clone.clone.assert_called_once()\n img.detach.assert_called_once()\n img.to.assert_called_once()\n assert_that(out).is_same_as(img)\n\n\ndef test_prepare_invalid_sizes():\n \"\"\"Test errors for input that are not images.\"\"\"\n with pytest.raises(ValueError):\n _prepare_input(torch.zeros((2, 4, 4)))\n with pytest.raises(ValueError):\n _prepare_input(torch.zeros(1, 2, 4, 4))\n with pytest.raises(ValueError):\n _prepare_input(torch.zeros(2, 3))\n\n\ndef test_top_k_mask():\n \"\"\"Test top-k masking.\"\"\"\n out = torch.tensor([0, 3, 0, 2, 1, 0])\n mask = _top_k_mask(out, 2)\n assert_array_equal(mask, [0, 1, 0, 1, 0, 0])\n\n\ndef test_top_k_selector(mocker):\n \"\"\"Test that the selector predicts and selects\"\"\"\n out = mocker.Mock(spec=torch.Tensor)\n out.squeeze = mocker.Mock(return_value=out)\n net = mocker.Mock(spec=Module, return_value=out)\n net.to = mocker.Mock(return_value=net)\n net.eval = mocker.Mock(return_value=net)\n mask = torch.tensor((0, 1, 0))\n mocker.patch(\n \"midnite.visualization.compound_methods._top_k_mask\", return_value=mask\n )\n\n sel = _top_k_selector(net, mocker.Mock(spec=torch.Tensor), 5)\n\n compound_methods._top_k_mask.assert_called_with(out, 5)\n assert_that(sel.get_mask([3])).is_same_as(mask)\n\n\ndef test_upscale(mocker):\n \"\"\"Check upscale wiring.\"\"\"\n scaled = torch.zeros((4, 4))\n scaled.squeeze = mocker.Mock(return_value=scaled)\n mocker.patch(\"torch.nn.functional.interpolate\", return_value=scaled)\n img = torch.zeros(2, 2)\n img.unsqueeze = mocker.Mock(return_value=img)\n\n res = _upscale(img, (4, 4))\n\n functional.interpolate.assert_called_with(\n img, size=(4, 4), mode=\"bilinear\", align_corners=True\n )\n assert_that(res).is_same_as(scaled)\n assert_that(scaled.squeeze.call_count).is_equal_to(2)\n assert_that(img.unsqueeze.call_count).is_equal_to(2)\n\n\ndef test_guided_gradcam(mocker):\n \"\"\"Check the guided gradcam wiring.\"\"\"\n input_ = torch.zeros((3, 5, 5))\n gradcam_out = torch.ones((5, 5)).mul_(2)\n backprop_out = torch.ones((5, 5)).mul_(3)\n mocker.patch(\n \"midnite.visualization.compound_methods.gradcam\", return_value=gradcam_out\n )\n mocker.patch(\n \"midnite.visualization.compound_methods.guided_backpropagation\",\n return_value=backprop_out,\n )\n\n res = guided_gradcam([mocker.Mock(spec=Module)], [mocker.Mock(spec=Module)], input_)\n\n compound_methods.gradcam.assert_called_once()\n compound_methods.gradcam.assert_called_once()\n assert_that(res.size()).is_equal_to((5, 5))\n assert_that(res.sum()).is_equal_to(5 * 5 * 6)\n"
] | [
[
"torch.ones",
"torch.zeros",
"torch.tensor",
"numpy.testing.assert_array_equal",
"torch.nn.functional.interpolate.assert_called_with"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Calychas/peap-project | [
"14eb75cdaa60842dd9b530c10c47985aa77f8ff7"
] | [
"src/visualization/tweets_data_visualizer.py"
] | [
"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nimport sys\n\n\ndef plot_tweets_counts(accounts: pd.DataFrame) -> None:\n plt.figure(figsize=(12, 12))\n sns.histplot(data=accounts, x=\"tweets_count\", binwidth=500)\n plt.title(\"Twitter activity (tweets, retweets, replies) for all accounts\")\n plt.xlabel(\"Number of tweets for a user\")\n plt.ylabel(\"Number of accounts\")\n plt.savefig(\"reports/plots/all_replies_count_hist_full.png\")\n plt.close()\n\n accounts_with_less_than_500_tweets = accounts[accounts['tweets_count'] <= 500]\n\n plt.figure(figsize=(12, 12))\n sns.histplot(data=accounts_with_less_than_500_tweets, x=\"tweets_count\", binwidth=20)\n plt.title(\"Twitter activity (tweets, retweets, replies) for accounts with less than 500 posts\")\n plt.xlabel(\"Number of tweets for a user\")\n plt.ylabel(\"Number of accounts\")\n plt.savefig(\"reports/plots/all_replies_count_less_than_500_hist.png\")\n plt.close()\n\n\nif __name__ == '__main__':\n accounts_processed_file_path = sys.argv[1]\n accounts_df = pd.read_csv(accounts_processed_file_path)\n plot_tweets_counts(accounts_df)\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
vsocrates/medtype | [
"16c6f39d38a73c4c44258bbdf78074a81e07b1c7"
] | [
"medtype-trainer/medtype.py"
] | [
"from helper import *\nfrom models import BertPlain, BertCombined\nfrom dataloader import MedTypeDataset\n\nfrom torch.utils.data import DataLoader\nfrom transformers.optimization import AdamW\nfrom transformers import get_linear_schedule_with_warmup, BertTokenizer\nfrom sklearn.metrics import average_precision_score\n\nclass MedType(object):\n\n\tdef load_data(self):\n\t\t\"\"\"\n\t\tReads in the data for training MedType.\n\n\t\tParameters\n\t\t----------\n\t\tself.p.data: \tTakes in the name of the dataset (FB15k-237, WN18RR, YAGO3-10)\n\t\t\n\t\tReturns\n\t\t-------\n\t\tself.type2id: Semantic Type to unique identifier mapping\n\t\tself.data['train']: Stores the training split of the dataset\n\t\tself.data['valid']: Stores the validation split of the dataset\n\t\tself.data['test']: Stores the test split of the dataset\n\t\tself.data_iter:\t\tThe dataloader for different data splits\n\t\t\"\"\"\n\n\t\tself.type2id\t= json.load(open('{}/type2id.json'.format(self.p.config_dir)))\n\t\tself.num_class\t= len(self.type2id)\n\t\ttype_remap \t= json.load(open('{}/type_remap.json'.format(self.p.config_dir)))\n\t\tself.data \t= {'train': [], 'test': [], 'valid': []}\n\n\t\tif self.p.data == 'pubmed':\n\t\t\tfor root, dirs, files in os.walk('./{}/pubmed_processed'.format(self.p.data_dir)):\n\n\t\t\t\tfor file in tqdm(files):\n\t\t\t\t\tfname = os.path.join(root, file)\n\t\t\t\t\tfor line in open(fname):\n\t\t\t\t\t\tdoc \t\t= json.loads(line.strip())\n\t\t\t\t\t\tdoc['label'] \t= list(set([self.type2id[type_remap[x]] for x in doc['label']]))\n\t\t\t\t\t\tdel doc['prev_toks'], doc['after_toks']\n\t\t\t\t\t\tself.data['train'].append(doc)\n\t\t\t\n\t\t\t# In case of PubMedDS, test and valid split of Medmentions datasets is used.\n\t\t\tdata = load_pickle('{}/medmentions.pkl'.format(self.p.data_dir))\n\n\t\t\tfor doc in data:\n\t\t\t\tif doc['split'] in ['valid', 'test']:\n\t\t\t\t\tdoc['label'] = list(set([self.type2id[type_remap[x]] for x in doc['label']]))\n\t\t\t\t\tdel doc['prev_toks'], doc['after_toks']\n\t\t\t\t\tself.data[doc['split']].append(doc)\n\n\t\telse:\n\t\t\tdata = load_pickle('{}/{}.pkl'.format(self.p.data_dir, self.p.data))\n\n\t\t\tfor doc in data:\n\t\t\t\tdoc['label'] = list(set([self.type2id[type_remap[x]] for x in doc['label']]))\n\t\t\t\tself.data[doc.get('split', 'train')].append(doc)\n\n\t\tself.logger.info('\\nDataset size -- Train: {}, Valid: {}, Test:{}'.format(len(self.data['train']), len(self.data['valid']), len(self.data['test'])))\n\n\t\tself.tokenizer \t= BertTokenizer.from_pretrained(self.p.bert_model)\n\t\tself.tokenizer.add_tokens(['[MENTION]', '[/MENTION]'])\n\n\t\tdef get_data_loader(split, shuffle=True):\n\t\t\tdataset\t= MedTypeDataset(self.data[split], self.num_class, self.tokenizer, self.p)\n\t\t\treturn DataLoader(\n\t\t\t\t\tdataset,\n\t\t\t\t\tbatch_size = self.p.batch_size * self.p.batch_factor,\n\t\t\t\t\tshuffle = shuffle,\n\t\t\t\t\tnum_workers = self.p.num_workers,\n\t\t\t\t\tcollate_fn = dataset.collate_fn\n\t\t\t\t)\n\n\t\tself.data_iter = {\n\t\t\t'train'\t: get_data_loader('train'),\n\t\t\t'valid'\t: get_data_loader('valid', shuffle=False),\n\t\t\t'test'\t: get_data_loader('test', shuffle=False),\n\t\t}\n\n\tdef add_model(self):\n\t\t\"\"\"\n\t\tCreates the computational graph\n\t\tParameters\n\t\t----------\n\t\t\n\t\tReturns\n\t\t-------\n\t\tCreates the computational graph for model and initializes it\n\t\t\n\t\t\"\"\"\n\t\tif \tself.p.model == 'bert_plain': \t\tmodel = BertPlain(self.p, len(self.tokenizer), self.num_class)\n\t\telif 
\tself.p.model == 'bert_combined': \tmodel = BertCombined(self.p, len(self.tokenizer), self.num_class)\n\t\telse:\traise NotImplementedError\n\n\t\tmodel = model.to(self.device)\n\n\t\tif len(self.gpu_list) > 1:\n\t\t\tprint ('Using multiple GPUs ', self.p.gpu)\n\t\t\tmodel = nn.DataParallel(model, device_ids = list(range(len(self.p.gpu.split(',')))))\n\t\t\ttorch.backends.cudnn.benchmark = True\n\n\t\treturn model\n\n\tdef add_optimizer(self, model, train_dataset_length):\n\t\t\"\"\"\n\t\tCreates an optimizer for training the parameters\n\t\tParameters\n\t\t----------\n\t\tparameters: The parameters of the model\n\t\t\n\t\tReturns\n\t\t-------\n\t\tReturns an optimizer and scheduler for learning the parameters of the model\n\t\t\n\t\t\"\"\"\n\t\twarmup_proportion \t= 0.1\n\t\tn_train_steps\t\t= int(train_dataset_length / self.p.batch_size ) * self.p.max_epochs\n\t\tnum_warmup_steps\t= int(float(warmup_proportion) * float(n_train_steps))\n\t\tparam_optimizer\t\t= list(model.named_parameters())\n\n\t\t# Keeping bert params fixed for bert_combined model\n\t\tif self.p.model == 'bert_combined':\n\t\t\tparam_optimizer = [x for x in param_optimizer if 'bert' not in x[0]]\n\n\t\tparam_optimizer\t= [n for n in param_optimizer if 'pooler' not in n[0]]\n\t\tno_decay\t= ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n\n\t\toptimizer_grouped_parameters = [\n\t\t\t{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n\t\t\t{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n\t\t]\n\n\t\toptimizer = AdamW(optimizer_grouped_parameters, lr=self.p.lr)\n\t\tscheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=n_train_steps)\n\t\treturn optimizer, scheduler\n\n\tdef __init__(self, params):\n\t\t\"\"\"\n\t\tConstructor of the runner class\n\t\tParameters\n\t\t----------\n\t\tparams: List of hyper-parameters of the model\n\t\t\n\t\tReturns\n\t\t-------\n\t\tCreates computational graph and optimizer\n\t\t\n\t\t\"\"\"\n\t\tself.p = params\n\n\t\tif not os.path.exists(self.p.log_dir): os.system('mkdir -p {}'.format(self.p.log_dir))\t\t# Create log directory if doesn't exist\n\t\tif not os.path.exists(self.p.model_dir): os.system('mkdir -p {}'.format(self.p.model_dir))\t\t# Create model directory if doesn't exist\n\n\t\t# Get Logger\n\t\tself.logger\t= get_logger(self.p.name, self.p.log_dir, self.p.config_dir)\n\t\tself.logger.info(vars(self.p)); pprint(vars(self.p))\n\n\t\tself.gpu_list = self.p.gpu.split(',')\n\t\tif self.p.gpu != '-1' and torch.cuda.is_available():\n\t\t\tself.device = torch.device('cuda')\n\t\t\ttorch.cuda.set_rng_state(torch.cuda.get_rng_state())\n\t\t\ttorch.backends.cudnn.deterministic = True\n\t\telse:\n\t\t\tself.device = torch.device('cpu')\n\n\t\tself.load_data()\n\t\tself.model\t\t\t= self.add_model()\n\t\tself.optimizer,self.scheduler\t= self.add_optimizer(self.model, len(self.data['train']))\n\n\tdef load_model(self, load_path):\n\t\t\"\"\"\n\t\tFunction to load a saved model\n\t\tParameters\n\t\t----------\n\t\tload_path: path to the saved model\n\t\t\n\t\tReturns\n\t\t-------\n\t\t\"\"\"\n\t\tstate = torch.load('{}/{}'.format(load_path, self.p.name))\n\t\tself.best_val\t\t= 0.0\n\t\tself.best_test\t\t= 0.0\n\t\tself.best_epoch\t\t= 0\n\n\t\tif len(self.gpu_list) > 1:\n\t\t\tstate_dict \t= state['state_dict']\n\t\t\tnew_state_dict = OrderedDict()\n\n\t\t\tfor k, v in state_dict.items():\n\t\t\t\tif 'module' not in k: \tk = 
'module.' + k\n\t\t\t\telse: \t\t\tk = k.replace('features.module.', 'module.features.')\n\t\t\t\tnew_state_dict[k] = v\n\n\t\t\tself.model.load_state_dict(new_state_dict)\n\t\telse:\n\t\t\tstate_dict \t= state['state_dict']\n\t\t\tnew_state_dict = OrderedDict()\n\n\t\t\tfor k, v in state_dict.items():\n\t\t\t\tif 'module' in k:\n\t\t\t\t\tk = k.replace('module.', '')\n\n\t\t\t\tnew_state_dict[k] = v\n\n\t\t\tself.model.load_state_dict(new_state_dict)\n\n\t\tif self.p.restore_opt:\n\t\t\tself.optimizer.load_state_dict(state['optimizer'])\n\t\t\tself.best_test\t= state['best_test']\n\t\t\tself.best_val\t= state['best_val']\n\t\t\tself.best_epoch\t= state['best_epoch']\n\n\tdef save_model(self, save_path):\n\t\t\"\"\"\n\t\tFunction to save a model. It saves the model parameters, best validation scores,\n\t\tbest epoch corresponding to best validation, state of the optimizer and all arguments for the run.\n\t\tParameters\n\t\t----------\n\t\tsave_path: path where the model is saved\n\t\t\n\t\tReturns\n\t\t-------\n\t\t\"\"\"\n\t\tstate = {\n\t\t\t'state_dict'\t: self.model.state_dict(),\n\t\t\t'best_test'\t: self.best_test,\n\t\t\t'best_val'\t: self.best_val,\n\t\t\t'best_epoch'\t: self.best_epoch,\n\t\t\t'optimizer'\t: self.optimizer.state_dict(),\n\t\t\t'args'\t\t: vars(self.p)\n\t\t}\n\t\ttorch.save(state, '{}/{}'.format(save_path, self.p.name))\n\n\tdef evaluate(self, logits, labels):\n\t\t\"\"\"\n\t\tFunction to evaluate the model on validation or test set\n\n\t\tParameters\n\t\t----------\n\t\tlogits: Predictions by the model\n\t\tlabels: Ground truth labels\n\t\t\n\t\tReturns\n\t\t-------\n\t\tArea under PR-curve\n\t\t\"\"\"\n\t\tall_logits = np.concatenate(logits, axis=0)\n\t\tall_labels = np.concatenate(labels, axis=0)\n\t\tresult = np.round(average_precision_score(all_labels.reshape(-1), all_logits.reshape(-1)), 3)\n\t\treturn result\n\n\tdef execute(self, batch):\n\t\tbatch\t\t= to_gpu(batch, self.device)\n\t\tloss, logits \t= self.model(\n\t\t\t\t\tinput_ids\t= batch['tok_pad'], \n\t\t\t\t\tattention_mask\t= batch['tok_mask'], \n\t\t\t\t\tmention_pos_idx\t= batch['men_pos'],\n\t\t\t\t\tlabels\t\t= batch['labels']\n\t\t\t\t)\n\n\t\tif len(self.gpu_list) > 1:\n\t\t\tloss = loss.mean()\n\n\t\treturn loss, logits\n\n\tdef predict(self, epoch, split, return_extra=False):\n\t\t\"\"\"\n\t\tFunction \n\n\t\tParameters\n\t\t----------\n\t\tsplit: (string) \tIf split == 'valid' then evaluate on the validation set, else the test set\n\t\t\n\t\tReturns\n\t\t-------\n\t\tLoss and performance on the split\n\t\t\"\"\"\n\t\tself.model.eval()\n\n\t\tall_eval_loss, all_logits, all_labels, all_rest, cnt = [], [], [], [], 0\n\n\t\twith torch.no_grad():\n\t\t\tfor batches in self.data_iter[split]:\n\t\t\t\tfor k, batch in enumerate(batches):\n\t\t\t\t\teval_loss, logits = self.execute(batch)\n\n\t\t\t\t\tif (k+1) % self.p.log_freq == 0:\n\t\t\t\t\t\teval_res = self.evaluate(all_logits, all_labels)\n\t\t\t\t\t\tself.logger.info('[E: {}] | {:.3}% | {} | Eval {} --> Loss: {:.3}, Eval Acc: {}'.format(epoch, \\\n\t\t\t\t\t\t\t100*cnt/len(self.data[split]), self.p.name, split, np.mean(all_eval_loss), eval_res))\n\n\t\t\t\t\tall_eval_loss.append(eval_loss.item())\n\t\t\t\t\tall_logits.append(logits.cpu().numpy())\n\t\t\t\t\tall_labels.append(batch['labels'].cpu().numpy())\n\n\t\t\t\t\tif return_extra: all_rest.append(batch['_rest'])\n\n\t\t\t\t\tcnt += batch['tok_len'].shape[0]\n\n\t\teval_res = self.evaluate(all_logits, all_labels)\n\n\t\tif return_extra: return np.mean(all_eval_loss), eval_res, all_logits, 
all_labels, all_rest\n\t\telse: \t\t return np.mean(all_eval_loss), eval_res\n\n\tdef check_and_save(self, epoch):\n\t\tvalid_loss, valid_acc = self.predict(epoch, 'valid')\n\n\t\tif valid_acc > self.best_val:\n\t\t\tself.best_val\t\t= valid_acc\n\t\t\t_, self.best_test\t= self.predict(epoch, 'test')\n\t\t\tself.best_epoch\t\t= epoch\n\t\t\tself.save_model(self.p.model_dir)\n\t\t\treturn True\n\t\n\t\treturn False\n\n\n\tdef run_epoch(self, epoch, shuffle=True):\n\t\t\"\"\"\n\t\tFunction to run one epoch of training\n\t\tParameters\n\t\t----------\n\t\tepoch: current epoch count\n\t\t\n\t\tReturns\n\t\t-------\n\t\tloss: The loss value after the completion of one epoch\n\t\t\"\"\"\n\t\t\n\t\tself.model.train()\n\n\t\tall_train_loss, all_score, cnt = [], [], 0\n\n\t\tfor batches in self.data_iter['train']:\n\t\t\tfor k, batch in enumerate(batches):\n\t\t\t\tself.optimizer.zero_grad()\n\n\t\t\t\ttrain_loss, logits = self.execute(batch)\n\n\t\t\t\tif (k+1) % self.p.log_freq == 0:\n\t\t\t\t\teval_res = np.round(np.mean(all_score), 3)\n\n\t\t\t\t\tself.logger.info('[E: {}] | {:.3}% | {} | L: {:.3}, T: {}, B-V:{}'.format(epoch, \\\n\t\t\t\t\t\t100*cnt/len(self.data['train']), self.p.name, np.mean(all_train_loss), eval_res, self.best_val))\n\n\n\t\t\t\tall_train_loss.append(train_loss.item())\n\t\t\t\tall_score.append(self.evaluate([logits.detach().cpu().numpy()], [batch['labels'].cpu().numpy()]))\n\n\t\t\t\ttrain_loss.backward()\n\t\t\t\tself.optimizer.step()\n\t\t\t\tself.scheduler.step()\n\n\t\t\t\tcnt += batch['tok_len'].shape[0]\n\t\t\t\t\n\t\teval_res = np.round(np.mean(all_score), 3)\n\n\t\treturn np.mean(all_train_loss), eval_res\n\n\n\tdef fit(self):\n\t\t\"\"\"\n\t\tFunction to run training and evaluation of model\n\t\tParameters\n\t\t----------\n\t\t\n\t\tReturns\n\t\t-------\n\t\t\"\"\"\n\n\t\tself.best_val, self.best_test, self.best_epoch = 0.0, 0.0, 0\n\n\t\tif self.p.restore:\n\t\t\tself.load_model(self.p.model_dir)\n\n\t\t\tif self.p.dump_only:\n\t\t\t\tall_logits, all_labels, all_rest = [], [], []\n\n\t\t\t\tfor split in ['test', 'valid']:\n\t\t\t\t\tloss, acc, logits, labels, rest = self.predict(0, split, return_extra=True)\n\t\t\t\t\tprint('Score: Loss: {}, Acc:{}'.format(loss, acc))\n\n\t\t\t\t\tall_logits\t+= logits\n\t\t\t\t\tall_labels\t+= labels\n\t\t\t\t\tall_rest\t+= rest\n\n\t\t\t\tdump_dir = './predictions/{}'.format(self.p.data); make_dir(dump_dir)\n\t\t\t\tdump_pickle({\n\t\t\t\t\t'logits': all_logits,\n\t\t\t\t\t'labels': all_labels,\n\t\t\t\t\t'others': all_rest\n\t\t\t\t}, '{}/{}'.format(dump_dir, self.p.name))\n\n\t\t\t\texit(0)\n\n\t\tearly_stop = 0\n\t\tfor epoch in range(self.p.max_epochs):\n\t\t\ttrain_loss, train_acc\t= self.run_epoch(epoch)\n\n\t\t\tif self.check_and_save(epoch): \n\t\t\t\tearly_stop = 0\n\t\t\telse:\n\t\t\t\tearly_stop += 1\n\t\t\t\tif early_stop > self.p.early_stop:\n\t\t\t\t\tself.logger.info('Early Stopping!')\n\t\t\t\t\tbreak\n\n\t\t\tself.logger.info('Train loss: {:3}, Valid Perf: {:.3}'.format(train_loss, self.best_val))\n\n\t\tself.logger.info('Best Performance: {}'.format(self.best_test)) \n\nif __name__== \"__main__\":\n\n\tparser = argparse.ArgumentParser(description='MedType Model Trainer')\n\n\tparser.add_argument('--gpu', \tdefault='0', \t\t\t\thelp='GPU to use')\n\tparser.add_argument(\"--model\", \t\tdefault='bert_plain', \ttype=str, \t\t\thelp='Type of model architecture. 
Options: `bert_plain` and `bert_combined`')\n\n\t# Model Specific\n\tparser.add_argument('--max_seq_len', \tdefault=128, \t\ttype=int, \t\t\thelp='Max allowed length of utt')\n\tparser.add_argument('--bert_model', \tdefault='bert-base-cased', \t\ttype=str, \thelp='Which Bert model')\n\tparser.add_argument('--data', \t \tdefault='medmentions', \t\t\ttype=str, \thelp='Which data')\n\tparser.add_argument('--model_wiki', \tdefault=None, \t\t\t\ttype=str, \thelp='Application when model == bert_combined | BERT model trained on WikiMed ')\n\tparser.add_argument('--model_pubmed', \tdefault=None, \t\t\t\ttype=str, \thelp='Application when model == bert_combined | BERT model trained on PubMedDS')\n\n\tparser.add_argument('--early_stop', \tdest='early_stop',\tdefault=5, \ttype=int, help='Early Stop Count')\n\tparser.add_argument('--epoch', \tdest='max_epochs',\tdefault=100, type=int, help='Max epochs')\n\tparser.add_argument('--batch', \tdest='batch_size',\tdefault=16, type=int, \thelp='Batch size')\n\tparser.add_argument('--batch_factor', dest='batch_factor',\tdefault=50, type=int, \thelp='Number of batches to generate at one time')\n\tparser.add_argument('--num_workers',\ttype=int,\t\tdefault=2, \thelp='Number of cores used for preprocessing data')\n\tparser.add_argument('--lr', \t \tdefault=1e-3, \t\ttype=float, \t\t\thelp='The initial learning rate for Adam.')\n\tparser.add_argument('--l2', \t \tdefault=0.0, \t\ttype=float, \t\t\thelp='The initial learning rate for Adam.')\n\tparser.add_argument('--drop', \t \tdefault=0.1, \t\ttype=float, \t\t\thelp='The initial learning rate for Adam.')\n\n\tparser.add_argument('--seed', \tdefault=1234, \ttype=int, \t\thelp='Seed for randomization')\n\tparser.add_argument('--log_freq', \tdefault=10, \t\ttype=int, \t\t\thelp='Display performance after these number of batches')\n\tparser.add_argument('--name', \tdefault='test', \t\t\t\thelp='Name of the run')\n\tparser.add_argument('--restore', \t\t\t\taction='store_true', \thelp='Restore from the previous best saved model')\n\tparser.add_argument('--restore_opt', \t\t\t\taction='store_true', \thelp='Restore Optimizer from the previous best saved model')\n\tparser.add_argument('--dump_only', \t\t\t\taction='store_true', \thelp='Dumps predictions for Entity Linking')\n\n\tparser.add_argument('--config_dir', \tdefault='../config', \t\t\t\thelp='Config directory')\n\tparser.add_argument('--data_dir', \tdefault='./data', \t\t\t\thelp='Config directory')\n\tparser.add_argument('--model_dir', \tdefault='./models', \t\t\t\thelp='Model directory')\n\tparser.add_argument('--log_dir', \tdefault='./logs', \t \t\t\t\thelp='Log directory')\n\n\targs = parser.parse_args()\n\tset_gpu(args.gpu)\n\n\t# Set seed\n\tnp.random.seed(args.seed)\n\trandom.seed(args.seed)\n\ttorch.manual_seed(args.seed)\n\n\t# Create Model\n\tmodel = MedType(args)\n\tmodel.fit()\n\tprint('Model Trained Successfully!!')"
] | [
[
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ekmixon/gamechanger-ml | [
"e7967261a4b2f21b06347020cd7e6a010538eb8f",
"e7967261a4b2f21b06347020cd7e6a010538eb8f"
] | [
"gamechangerml/src/search/evaluation/evaltool.py",
"gamechangerml/src/utilities/np_utils.py"
] | [
"import os\nimport json\nimport logging\nimport mlflow\n\nimport matplotlib.pyplot as plt\n\nimport argparse\n\nlogger = logging.getLogger(__name__)\n\n\nclass EvalTool(object):\n \"\"\"\n Object class that holds all functions for evaluating and\n plotting score metrics for ranked retrieval.\n\n The predictions are expected to come in this format:\n\n {\n \"query_id_1\": {\n \"document_id_1\": 1,\n \"document_id_2\": 2,\n \"document_id_3\": 3\n }\n }\n\n Args:\n prediction (string): File path of JSON or dictionary\n containing the model predictions\n ground_truth (string): File path of JSON or dictionary\n containing the ground truth\n k_s (list): List of values for k with which the model is\n evaluated\n params (dict): Dictionary of parameters used for the model\n \"\"\"\n\n def __init__(self, prediction, ground_truth, k_s=None, params=None):\n\n if os.path.isfile(prediction) and prediction.endswith(\"json\"):\n self.prediction = self._load_json(prediction)\n else:\n raise FileNotFoundError(\"Prediction file was not found. Please make sure you are pointing to the correct JSON file...\")\n\n if os.path.isfile(ground_truth) and ground_truth.endswith(\"json\"):\n self.ground_truth = self._load_json(ground_truth)\n else:\n raise FileNotFoundError(\"Ground truth file was not found. Please makue sure you are point to the correct JSON file...\")\n\n if k_s is None:\n self.k_s = [1] + [i * 5 for i in range(1, 21)]\n else:\n self.k_s = k_s\n\n self.params = params\n self.metrics_at_k = None\n\n def _load_json(self, json_path):\n \"\"\"\n Load a JSON file\n\n Args:\n json_path (string): File path of JSON file to be loaded\n\n Returns:\n json_dict (dict): Dictionary of loaded JSON file\n \"\"\"\n with open(json_path, \"r\") as fp:\n json_dict = json.load(fp)\n\n return json_dict\n\n def _score_prediction(self, predicted_ranking, relevant_documents):\n \"\"\"\n Evaluate and retrieve scores from a ranked dictionary of predicted\n relevant documents and a list of relevant documents\n\n Args:\n predicted_ranking (dict): Dictionary containing document ids\n ranked based on relevance\n relevant_documents(list): List of documents ids that are considered\n relevant. 
Relevant documents are ranked\n as 1 regardless of their number\n\n Returns:\n precision (float): Precision score\n recall (float): Recall score\n best_rank (int): Best rank for any of the relevant documents\n \"\"\"\n\n prediction_count = len(predicted_ranking)\n\n TP_count = 0\n FP_count = 0\n FN_count = 0\n\n best_rank = 1_000_000\n\n for relevant_doc in relevant_documents:\n if relevant_doc in predicted_ranking:\n TP_count += 1\n rank = predicted_ranking[relevant_doc]\n\n if best_rank > rank:\n best_rank = rank\n else:\n FN_count += 1\n FP_count = prediction_count - TP_count\n\n precision = TP_count / (TP_count + FP_count)\n recall = TP_count / (TP_count + FN_count)\n\n return precision, recall, best_rank\n\n def _filter_predictions(self, predictions, k=100):\n \"\"\"\n Filter predictions to only ones that are k or better.\n\n Args:\n predictions (dict): Dictionary containing document ids\n ranked based on relevance\n k (int): Minimum rank for a prediction to be considered\n\n Returns:\n sub_predictions (dict): Dictionary containing document ids\n ranked based on relevance filtered\n \"\"\"\n sub_predictions = {}\n\n for query_id, document_rank in predictions.items():\n subset_document_rank = {}\n\n for doc_id, rank in document_rank.items():\n if rank <= k:\n subset_document_rank[doc_id] = rank\n\n sub_predictions[query_id] = subset_document_rank\n\n return sub_predictions\n\n def evaluate(self, get_plot=True):\n \"\"\"\n Evaluates the entire prediction dictionary with the ground data.\n A `metrics.json` file is generated in the same directory as the\n prediction file. If `get_plot` is True, a graph of the precision,\n recall, and MRR is plotted and saved in the same directory.\n\n Args:\n get_plot (bool): If true, a graph is generated\n\n Returns:\n metrics_at_k (dict): Dictionary of k's and metrics at each k.\n \"\"\"\n metrics_at_k = {}\n\n for k in self.k_s:\n subset_prediction = self._filter_predictions(self.prediction, k=k)\n precision_scores = []\n recall_scores = []\n best_ranks = []\n\n for query_id in self.prediction:\n if query_id in subset_prediction:\n try:\n prediction_ranks = subset_prediction[query_id]\n relevant_docs = self.ground_truth[query_id]\n precision, recall, best_rank = self._score_prediction(\n prediction_ranks, relevant_docs\n )\n except:\n pass\n else:\n precision, recall, best_rank = 0.0, 0.0, 1_000_000\n\n precision_scores.append(precision)\n recall_scores.append(recall)\n best_ranks.append(best_rank)\n\n precision_at_k = sum(precision_scores) / len(precision_scores)\n recall_at_k = sum(recall_scores) / len(recall_scores)\n\n reciprocal_ranks = [1.0 / rank for rank in best_ranks]\n mrr_at_k = sum(reciprocal_ranks) / len(reciprocal_ranks)\n\n metrics_at_k[k] = {\n \"precision\": round(precision_at_k, 6),\n \"recall\": round(recall_at_k, 6),\n \"mrr_at_k\": round(mrr_at_k, 6),\n }\n\n self.metrics_at_k = metrics_at_k\n\n return metrics_at_k\n\n def plot_metrics(self, folder_path):\n \"\"\"\n Save precision, recall, and MRR plots of the Evaluation\n\n Args:\n folder_path (str): Folder location where the results will be stored\n\n \"\"\"\n\n # Precision\n precision_path = os.path.join(folder_path, \"precision.png\")\n plt.figure(figsize = (8,6))\n\n k_values = []\n precision_scores = []\n for k, scores in self.metrics_at_k.items():\n precision = scores[\"precision\"]\n k_values.append(k)\n precision_scores.append(precision)\n \n plt.plot(k_values, precision_scores, label = \"Precision\")\n plt.xlabel(\"k values\")\n plt.ylabel(\"Precision\")\n plt.legend()\n 
plt.grid()\n plt.ylim(0, 1)\n plt.tight_layout()\n plt.savefig(precision_path)\n plt.clf()\n\n # Recall\n recall_path = os.path.join(folder_path, \"recall.png\")\n plt.figure(figsize = (8,6))\n\n k_values = []\n recall_scores = []\n for k, scores in self.metrics_at_k.items():\n recall = scores[\"recall\"]\n k_values.append(k)\n recall_scores.append(recall)\n \n plt.plot(k_values, recall_scores, label = \"Recall\")\n plt.xlabel(\"k values\")\n plt.ylabel(\"Recall\")\n plt.legend()\n plt.grid()\n plt.ylim(0, 1)\n plt.tight_layout()\n plt.savefig(recall_path)\n plt.clf()\n\n # MRR\n mrr_path = os.path.join(folder_path, \"mrr.png\")\n plt.figure(figsize = (8,6))\n\n k_values = []\n mrr_scores = []\n for k, scores in self.metrics_at_k.items():\n mrr = scores[\"mrr_at_k\"]\n k_values.append(k)\n mrr_scores.append(mrr)\n \n plt.plot(k_values, mrr_scores, label = \"MRR\")\n plt.xlabel(\"k values\")\n plt.ylabel(\"MRR\")\n plt.legend()\n plt.grid()\n plt.ylim(0, 1)\n plt.tight_layout()\n plt.savefig(mrr_path)\n plt.clf()\n\n def log_mflow(self, experiment_name=\"default\", tracking_uri=None):\n \"\"\"\n Log model parameters and metrics stored in EvalTool to an MLFlow\n server\n\n Args:\n experiment_name (str): Name of experiment to be stored\n tracking_uri (str): MLFlow server location where parameters\n and metrics will be logged\n \"\"\"\n\n # Connect to MLFlow Server\n if tracking_uri is not None:\n try:\n mlflow.set_tracking_uri(tracking_uri)\n logger.info(f\"Connected to {tracking_uri}\")\n except mlflow.exceptions.MlflowException as e:\n logger.error(\"Error accessing tracking uri\")\n raise e\n\n # Create or connect to existing experiment\n try:\n mlflow_id = mlflow.create_experiment(name=experiment_name)\n except (\n mlflow.exceptions.RestException,\n mlflow.exceptions.MlflowException,\n ) as e:\n mlflow_id = mlflow.get_experiment_by_name(experiment_name).experiment_id\n logger.info(f\"Experiment exists: {mlflow_id}\")\n\n # Log parameters and metrics\n if self.metrics_at_k is None:\n logger.info(\"Nothing to log\")\n return None\n\n metric_head = {}\n\n for k, value in self.metrics_at_k.items():\n for metric, score in value.items():\n metric_head[f\"{metric}_at_{k}\"] = value[metric]\n\n with mlflow.start_run(experiment_id=mlflow_id):\n mlflow.log_metrics(metric_head)\n\n if self.params is not None:\n mlflow.log_params(self.params)\n\n return None\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-p\",\n \"--prediction_path\",\n dest=\"prediction_path\",\n required=True,\n type=str,\n help=\"File path to JSON file with predictions of ranked retrieval\",\n )\n parser.add_argument(\n \"-g\",\n \"--ground_truth_path\",\n dest=\"ground_truth_path\",\n required=True,\n type=str,\n help=\"File path to JSON file with ground truth of ranked retrieval\",\n )\n parser.add_argument(\n \"-m\",\n \"--metrics-path\",\n dest=\"metrics_path\",\n required=True,\n type=str,\n help=\"Path to store metrics of evaluation to a JSON file\",\n )\n args = parser.parse_args()\n\n ev = EvalTool(args.prediction_path, args.ground_truth_path)\n metrics = ev.evaluate()\n\n ev.plot_metrics(args.metrics_path)\n\n metrics_json = os.path.join(args.metrics_path, \"metrics.json\")\n with open(metrics_json, \"w\") as fp:\n json.dump(metrics, fp)\n",
"\"\"\"\nA collection of `numpy` utilities.\n\"\"\"\nimport logging\n\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\n\ndef is_zero_vector(v):\n \"\"\"\n Tests if a vector is a zero vector.\n\n Args:\n v (numpy.ndarray): vector\n\n Returns:\n boolean: True if every element is zero\n \"\"\"\n return np.all(v == 0.0)\n\n\ndef l2_norm_by_row(matrix):\n \"\"\"\n Row by row l2 norm of a matrix using Einstein summation.\n\n Args:\n matrix (numpy.ndarray): the matrix\n\n Returns:\n numpy.ndarray\n\n \"\"\"\n return np.sqrt(np.einsum(\"ij,ij->i\", matrix, matrix))\n\n\ndef l2_normed_matrix(matrix):\n \"\"\"\n Normalizes a matrix using the `l2` norm.\n\n Args:\n matrix (numpy.ndarray): the matrix\n\n Returns:\n numpy.ndarray\n \"\"\"\n l2 = l2_norm_by_row(matrix)\n return matrix / l2[:, None]\n\n\ndef l2_norm_vector(vector):\n if not np.isfinite(vector).all() or is_zero_vector(vector):\n logger.warning(\"invalid vector\")\n if is_zero_vector(vector):\n logger.warning(\"zero vector\")\n norm_ = np.linalg.norm(vector)\n # logger.info(\"{} {}\".format(vector.shape, norm_))\n return np.true_divide(vector, norm_)\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
],
[
"numpy.true_divide",
"numpy.isfinite",
"numpy.einsum",
"numpy.linalg.norm",
"numpy.all"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shan18/Caption-Generation-from-Images | [
"6917ddfcac661684171663f6cb06b17e5b858def"
] | [
"capsnet_create_dataset.py"
] | [
"import os\nimport random\nimport h5py\nimport pickle\nimport argparse\nimport numpy as np\n\nfrom utils import load_image, print_progress_bar\n\n\ndef load_data(input_path):\n \"\"\" Load coco dataset \"\"\"\n with open(input_path, 'rb') as file:\n coco_raw = pickle.load(file)\n image_categories = coco_raw['image_categories']\n image_file = coco_raw['image_file']\n category_id = coco_raw['category_id']\n\n return image_categories, image_file, category_id\n\n\ndef encode_images(image_ids, image_file, params):\n \"\"\" Store images in a numpy array \"\"\"\n\n images = []\n\n # Initial call to print 0% progress\n print_progress_bar_counter = 0\n print_progress_bar(print_progress_bar_counter, params['dataset_size'], prefix = 'Progress:', suffix = 'Complete', length = 50)\n \n for image_id in image_ids:\n img_array = load_image(\n os.path.join(params['input_images'], image_file[image_id]),\n size=(params['image_size'], params['image_size']),\n grayscale=params['grayscale']\n )\n images.append(img_array)\n\n # Update Progress Bar\n print_progress_bar_counter += 1\n print_progress_bar(print_progress_bar_counter, params['dataset_size'], prefix = 'Progress:', suffix = 'Complete', length = 50)\n \n return np.array(images, dtype=np.float32)\n\n\ndef encode_categories(image_ids, image_categories, category_id, params):\n \"\"\" Replace all category names with their respective IDs and\n store them in a numpy array as a multi-hot vector.\n \"\"\"\n\n categories = []\n\n # Initial call to print 0% progress\n print_progress_bar_counter = 0\n print_progress_bar(print_progress_bar_counter, params['dataset_size'], prefix = 'Progress:', suffix = 'Complete', length = 50)\n\n for image_id in image_ids:\n one_hot = [0] * len(category_id)\n if params['single_label']:\n one_hot[category_id[random.choice(image_categories[image_id])]] = 1\n else:\n for category in image_categories[image_id]:\n one_hot[category_id[category]] = 1\n categories.append(one_hot)\n\n # Update Progress Bar\n print_progress_bar_counter += 1\n print_progress_bar(print_progress_bar_counter, params['dataset_size'], prefix = 'Progress:', suffix = 'Complete', length = 50)\n\n return np.array(categories, dtype=np.float32)\n\n\ndef save_dataset(x, y, out_path):\n \"\"\" Save dataset in a '.h5' file \"\"\"\n\n path = '{}/capsnet_train_data.h5'.format(out_path)\n h5f = h5py.File(path, 'w')\n h5f.create_dataset('x', data=x)\n h5f.create_dataset('y', data=y)\n h5f.close()\n\n print('Done.')\n print('Data saved to:', path)\n\n\ndef create_dataset(image_categories, image_file, category_id, params):\n \"\"\" Create training dataset \"\"\"\n\n image_ids = list(image_categories.keys())\n random.shuffle(image_ids)\n image_ids = image_ids[:params['dataset_size']]\n\n # encode images\n print('\\nEncoding images...')\n x = encode_images(image_ids, image_file, params)\n print('Done.')\n\n # encode categories\n print('\\nEncoding categories...')\n y = encode_categories(image_ids, image_categories, category_id, params)\n print('Done.')\n\n # save dataset\n print('\\nSaving dataset...')\n save_dataset(x, y, params['output'])\n\n\ndef main(params):\n image_categories, image_file, category_id = load_data(params['input_raw'])\n\n if len(image_categories) < params['dataset_size']:\n print('Invalid dataset size')\n return\n\n print('\\nCreating and saving dataset...')\n # create and save dataset\n create_dataset(image_categories, image_file, category_id, params)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Create dataset for training 
the Capsule Network Model')\n parser.add_argument(\n '--input_raw',\n default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dataset/coco_raw.pickle'),\n help='Path to file containing the raw data'\n )\n parser.add_argument(\n '--input_images',\n default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dataset'),\n help='Root directory containing the folders having images'\n )\n parser.add_argument(\n '--output',\n default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dataset'),\n help='Path to store the dataset'\n )\n parser.add_argument('--dataset_size', default=12500, type=int, help='Size of dataset')\n parser.add_argument('--image_size', default=250, type=int, help='Image size to use in dataset')\n parser.add_argument('--single_label', action='store_true', help='Image label will store only one label per image')\n parser.add_argument('--grayscale', action='store_true', help='Images will be stored in grayscale')\n args = parser.parse_args()\n \n params = vars(args) # convert to dictionary\n main(params)\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
UKA-SHA/3D_Appearance_SR | [
"0e4d3a4476afe1ccf16f0e8fee3d697c0d204395",
"0e4d3a4476afe1ccf16f0e8fee3d697c0d204395"
] | [
"code/utils/myssim.py",
"code/data/srdata.py"
] | [
"from __future__ import division, absolute_import, print_function\n\n\n\nimport numpy as np\nfrom numpy.lib.arraypad import _validate_lengths\nfrom scipy.ndimage import uniform_filter, gaussian_filter\n\ndtype_range = {np.bool_: (False, True),\n np.bool8: (False, True),\n np.uint8: (0, 255),\n np.uint16: (0, 65535),\n np.uint32: (0, 2**32 - 1),\n np.uint64: (0, 2**64 - 1),\n np.int8: (-128, 127),\n np.int16: (-32768, 32767),\n np.int32: (-2**31, 2**31 - 1),\n np.int64: (-2**63, 2**63 - 1),\n np.float16: (-1, 1),\n np.float32: (-1, 1),\n np.float64: (-1, 1)}\n\n\ndef crop(ar, crop_width, copy=False, order='K'):\n \"\"\"Crop array `ar` by `crop_width` along each dimension.\n Parameters\n ----------\n ar : array-like of rank N\n Input array.\n crop_width : {sequence, int}\n Number of values to remove from the edges of each axis.\n ``((before_1, after_1),`` ... ``(before_N, after_N))`` specifies\n unique crop widths at the start and end of each axis.\n ``((before, after),)`` specifies a fixed start and end crop\n for every axis.\n ``(n,)`` or ``n`` for integer ``n`` is a shortcut for\n before = after = ``n`` for all axes.\n copy : bool, optional\n If `True`, ensure the returned array is a contiguous copy. Normally,\n a crop operation will return a discontiguous view of the underlying\n input array.\n order : {'C', 'F', 'A', 'K'}, optional\n If ``copy==True``, control the memory layout of the copy. See\n ``np.copy``.\n Returns\n -------\n cropped : array\n The cropped array. If ``copy=False`` (default), this is a sliced\n view of the input array.\n \"\"\"\n ar = np.array(ar, copy=False)\n crops = _validate_lengths(ar, crop_width)\n slices = [slice(a, ar.shape[i] - b) for i, (a, b) in enumerate(crops)]\n if copy:\n cropped = np.array(ar[slices], order=order, copy=True)\n else:\n cropped = ar[slices]\n return cropped\n\ndef compare_ssim(X, Y, win_size=None, gradient=False,\n data_range=None, multichannel=False, gaussian_weights=False,\n full=False, dynamic_range=None, **kwargs):\n \"\"\"Compute the mean structural similarity index between two images.\n Parameters\n ----------\n X, Y : ndarray\n Image. Any dimensionality.\n win_size : int or None\n The side-length of the sliding window used in comparison. Must be an\n odd value. If `gaussian_weights` is True, this is ignored and the\n window size will depend on `sigma`.\n gradient : bool, optional\n If True, also return the gradient.\n data_range : int, optional\n The data range of the input image (distance between minimum and\n maximum possible values). By default, this is estimated from the image\n data-type.\n multichannel : bool, optional\n If True, treat the last dimension of the array as channels. 
Similarity\n calculations are done independently for each channel then averaged.\n gaussian_weights : bool, optional\n If True, each patch has its mean and variance spatially weighted by a\n normalized Gaussian kernel of width sigma=1.5.\n full : bool, optional\n If True, return the full structural similarity image instead of the\n mean value.\n Other Parameters\n ----------------\n use_sample_covariance : bool\n if True, normalize covariances by N-1 rather than, N where N is the\n number of pixels within the sliding window.\n K1 : float\n algorithm parameter, K1 (small constant, see [1]_)\n K2 : float\n algorithm parameter, K2 (small constant, see [1]_)\n sigma : float\n sigma for the Gaussian when `gaussian_weights` is True.\n Returns\n -------\n mssim : float\n The mean structural similarity over the image.\n grad : ndarray\n The gradient of the structural similarity index between X and Y [2]_.\n This is only returned if `gradient` is set to True.\n S : ndarray\n The full SSIM image. This is only returned if `full` is set to True.\n Notes\n -----\n To match the implementation of Wang et. al. [1]_, set `gaussian_weights`\n to True, `sigma` to 1.5, and `use_sample_covariance` to False.\n References\n ----------\n .. [1] Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P.\n (2004). Image quality assessment: From error visibility to\n structural similarity. IEEE Transactions on Image Processing,\n 13, 600-612.\n https://ece.uwaterloo.ca/~z70wang/publications/ssim.pdf,\n DOI:10.1.1.11.2477\n .. [2] Avanaki, A. N. (2009). Exact global histogram specification\n optimized for structural similarity. Optical Review, 16, 613-621.\n http://arxiv.org/abs/0901.0065,\n DOI:10.1007/s10043-009-0119-z\n \"\"\"\n if not X.dtype == Y.dtype:\n raise ValueError('Input images must have the same dtype.')\n\n if not X.shape == Y.shape:\n raise ValueError('Input images must have the same dimensions.')\n\n if dynamic_range is not None:\n #warn('`dynamic_range` has been deprecated in favor of '\n # '`data_range`. The `dynamic_range` keyword argument '\n # 'will be removed in v0.14', skimage_deprecation)\n data_range = dynamic_range\n\n if multichannel:\n # loop over channels\n args = dict(win_size=win_size,\n gradient=gradient,\n data_range=data_range,\n multichannel=False,\n gaussian_weights=gaussian_weights,\n full=full)\n args.update(kwargs)\n nch = X.shape[-1]\n mssim = np.empty(nch)\n if gradient:\n G = np.empty(X.shape)\n if full:\n S = np.empty(X.shape)\n for ch in range(nch):\n ch_result = compare_ssim(X[..., ch], Y[..., ch], **args)\n if gradient and full:\n mssim[..., ch], G[..., ch], S[..., ch] = ch_result\n elif gradient:\n mssim[..., ch], G[..., ch] = ch_result\n elif full:\n mssim[..., ch], S[..., ch] = ch_result\n else:\n mssim[..., ch] = ch_result\n mssim = mssim.mean()\n if gradient and full:\n return mssim, G, S\n elif gradient:\n return mssim, G\n elif full:\n return mssim, S\n else:\n return mssim\n\n K1 = kwargs.pop('K1', 0.01)\n K2 = kwargs.pop('K2', 0.03)\n sigma = kwargs.pop('sigma', 1.5)\n if K1 < 0:\n raise ValueError(\"K1 must be positive\")\n if K2 < 0:\n raise ValueError(\"K2 must be positive\")\n if sigma < 0:\n raise ValueError(\"sigma must be positive\")\n use_sample_covariance = kwargs.pop('use_sample_covariance', True)\n\n if win_size is None:\n if gaussian_weights:\n win_size = 11 # 11 to match Wang et. al. 2004\n else:\n win_size = 7 # backwards compatibility\n\n if np.any((np.asarray(X.shape) - win_size) < 0):\n raise ValueError(\n \"win_size exceeds image extent. 
If the input is a multichannel \"\n \"(color) image, set multichannel=True.\")\n\n if not (win_size % 2 == 1):\n raise ValueError('Window size must be odd.')\n\n if data_range is None:\n dmin, dmax = dtype_range[X.dtype.type]\n data_range = dmax - dmin\n\n ndim = X.ndim\n\n if gaussian_weights:\n # sigma = 1.5 to approximately match filter in Wang et. al. 2004\n # this ends up giving a 13-tap rather than 11-tap Gaussian\n filter_func = gaussian_filter\n filter_args = {'sigma': sigma}\n\n else:\n filter_func = uniform_filter\n filter_args = {'size': win_size}\n\n # ndimage filters need floating point data\n X = X.astype(np.float64)\n Y = Y.astype(np.float64)\n\n NP = win_size ** ndim\n\n # filter has already normalized by NP\n if use_sample_covariance:\n cov_norm = NP / (NP - 1) # sample covariance\n else:\n cov_norm = 1.0 # population covariance to match Wang et. al. 2004\n\n # compute (weighted) means\n ux = filter_func(X, **filter_args)\n uy = filter_func(Y, **filter_args)\n\n # compute (weighted) variances and covariances\n uxx = filter_func(X * X, **filter_args)\n uyy = filter_func(Y * Y, **filter_args)\n uxy = filter_func(X * Y, **filter_args)\n vx = cov_norm * (uxx - ux * ux)\n vy = cov_norm * (uyy - uy * uy)\n vxy = cov_norm * (uxy - ux * uy)\n\n R = data_range\n C1 = (K1 * R) ** 2\n C2 = (K2 * R) ** 2\n\n A1, A2, B1, B2 = ((2 * ux * uy + C1,\n 2 * vxy + C2,\n ux ** 2 + uy ** 2 + C1,\n vx + vy + C2))\n D = B1 * B2\n S = (A1 * A2) / D\n\n # to avoid edge effects will ignore filter radius strip around edges\n pad = (win_size - 1) // 2\n\n # compute (weighted) mean of ssim\n mssim = crop(S, pad).mean()\n\n if gradient:\n # The following is Eqs. 7-8 of Avanaki 2009.\n grad = filter_func(A1 / D, **filter_args) * X\n grad += filter_func(-S / B2, **filter_args) * Y\n grad += filter_func((ux * (A2 - A1) - uy * (B2 - B1) * S) / D,\n **filter_args)\n grad *= (2 / X.size)\n\n if full:\n return mssim, grad, S\n else:\n return mssim, grad\n else:\n if full:\n return mssim, S\n else:\n return mssim\n\n",
"import os\n\nfrom data import common\n\nimport numpy as np\nimport scipy.misc as misc\n\nimport torch\nimport torch.utils.data as data\n\nclass SRData(data.Dataset):\n def __init__(self, args, train=True, benchmark=False):\n self.args = args\n self.train = train\n self.split = 'train' if train else 'test'\n self.benchmark = benchmark\n self.scale = args.scale\n self.idx_scale = 0\n self.color = 'RGB' if args.n_colors == 3 else 'Y'\n\n self._set_filesystem(args.dir_data)\n\n def _load_bin():\n self.images_hr = np.load(self._name_hrbin())\n self.images_lr = [\n np.load(self._name_lrbin(s)) for s in self.scale\n ]\n\n if args.ext == 'img' or benchmark:\n self.images_hr, self.images_lr = self._scan()\n elif args.ext.find('sep') >= 0:\n self.images_hr, self.images_lr = self._scan()\n if args.ext.find('reset') >= 0:\n print('Preparing seperated binary files')\n for v in self.images_hr:\n \n hr = misc.imread(v)\n name_sep = v.replace(self.ext, '.npy')\n np.save(name_sep, hr)\n # from IPython import embed; embed(); exit()\n for si, s in enumerate(self.scale):\n for v in self.images_lr[si]:\n lr = misc.imread(v)\n name_sep = v.replace(self.ext, '.npy')\n np.save(name_sep, lr)\n\n self.images_hr = [\n v.replace(self.ext, '.npy') for v in self.images_hr\n ]\n self.images_lr = [\n [v.replace(self.ext, '.npy') for v in self.images_lr[i]]\n for i in range(len(self.scale))\n ]\n # from IPython import embed; embed(); exit()\n elif args.ext.find('bin') >= 0:\n try:\n if args.ext.find('reset') >= 0:\n raise IOError\n print('Loading a binary file')\n _load_bin()\n except:\n print('Preparing a binary file')\n bin_path = os.path.join(self.apath, 'bin')\n if not os.path.isdir(bin_path):\n os.mkdir(bin_path)\n\n list_hr, list_lr = self._scan()\n hr = [misc.imread(f) for f in list_hr]\n np.save(self._name_hrbin(), hr)\n del hr\n for si, s in enumerate(self.scale):\n lr_scale = [misc.imread(f) for f in list_lr[si]]\n np.save(self._name_lrbin(s), lr_scale)\n del lr_scale\n _load_bin()\n else:\n print('Please define data type')\n\n def _scan(self):\n raise NotImplementedError\n\n def _set_filesystem(self, dir_data):\n raise NotImplementedError\n\n def _name_hrbin(self):\n raise NotImplementedError\n\n def _name_lrbin(self, scale):\n raise NotImplementedError\n\n def __getitem__(self, idx):\n lr, hr, filename = self._load_file(idx)\n lr, hr = self._get_patch(lr, hr)\n lr, hr = common.set_channel([lr, hr], self.args.n_colors)\n lr_tensor, hr_tensor = common.np2Tensor([lr, hr], self.args.rgb_range)\n return lr_tensor, hr_tensor, filename\n\n def __len__(self):\n return len(self.images_hr) if not self.benchmark else len(self.images_hr[0])\n\n def _get_index(self, idx):\n return idx\n\n def _load_file(self, idx):\n idx = self._get_index(idx)\n # from IPython import embed; embed()\n lr = self.images_lr[self.idx_scale][idx]\n hr = self.images_hr[idx] if not self.benchmark else self.images_hr[self.idx_scale][idx]\n if self.args.ext == 'img' or self.benchmark:\n filename = hr\n lr = misc.imread(lr)\n hr = misc.imread(hr)\n elif self.args.ext.find('sep') >= 0:\n filename = hr\n lr = np.load(lr)\n hr = np.load(hr)\n else:\n filename = str(idx + 1)\n\n filename = os.path.splitext(os.path.split(filename)[-1])[0]\n\n return lr, hr, filename\n\n def _get_patch(self, lr, hr):\n patch_size = self.args.patch_size\n scale = self.scale[self.idx_scale]\n multi_scale = len(self.scale) > 1\n if self.train:\n #from IPython import embed; embed(); exit()\n lr, hr = common.get_patch(\n lr, hr, patch_size, scale, multi_scale=multi_scale\n )\n 
lr, hr = common.augment([lr, hr])\n lr = common.add_noise(lr, self.args.noise)\n else:\n ih, iw = lr.shape[0:2]\n hr = hr[0:ih * scale, 0:iw * scale]\n\n return lr, hr\n\n def set_scale(self, idx_scale):\n self.idx_scale = idx_scale\n\n"
] | [
[
"numpy.asarray",
"numpy.array",
"numpy.empty",
"numpy.lib.arraypad._validate_lengths"
],
[
"numpy.load",
"scipy.misc.imread",
"numpy.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"1.0",
"0.19",
"0.18",
"1.2",
"0.12",
"0.10",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
elhananby/flydra | [
"09b86859b1863700cdea0bbcdd4758da6c83930b",
"09b86859b1863700cdea0bbcdd4758da6c83930b"
] | [
"flydra_analysis/flydra_analysis/a2/flydra_textlog2csv.py",
"flydra_analysis/flydra_analysis/analysis/reconstruct_orientation.py"
] | [
"from __future__ import print_function\n\nif 1:\n # deal with old files, forcing to numpy\n import tables.flavor\n\n tables.flavor.restrict_flavors(keep=[\"numpy\"])\n\nimport numpy\nimport sys, os, time\nfrom optparse import OptionParser\nimport tables\nimport matplotlib.mlab as mlab\n\n\ndef convert(\n infilename, outfilename,\n):\n\n results = tables.open_file(infilename, mode=\"r\")\n ra = results.root.textlog[:]\n results.close()\n mlab.rec2csv(ra, outfilename)\n\n\ndef main():\n usage = \"%prog FILE [options]\"\n parser = OptionParser(usage)\n (options, args) = parser.parse_args()\n\n if len(args) > 1:\n print(\"arguments interpreted as FILE supplied more than once\", file=sys.stderr)\n parser.print_help()\n return\n\n if len(args) < 1:\n parser.print_help()\n return\n\n infilename = args[0]\n outfilename = os.path.splitext(infilename)[0] + \".textlog\"\n convert(infilename, outfilename)\n\n\nif __name__ == \"__main__\":\n main()\n",
"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport tables as PT\nimport numpy\nfrom numpy import nan\nfrom . import result_utils\nimport flydra_core.reconstruct\nimport flydra_core._reconstruct_utils as ru\n\n\ndef reconstruct_line_3ds(kresults, recon2, use_obj_id, return_fXl=False):\n\n data2d = kresults.root.data2d_distorted # make sure we have 2d data table\n camn2cam_id, cam_id2camns = result_utils.get_caminfo_dicts(kresults)\n kobs_2d = kresults.root.ML_estimates_2d_idxs\n\n obj_ids = kresults.root.kalman_estimates.read(field=\"obj_id\", flavor=\"numpy\")\n\n if PT.__version__ <= \"1.3.3\":\n obj_id_find = int(use_obj_id)\n else:\n obj_id_find = use_obj_id\n\n observation_frame_idxs = kresults.root.ML_estimates.get_where_list(\n kresults.root.ML_estimates.cols.obj_id == obj_id_find, flavor=\"numpy\"\n )\n\n observation_frames = kresults.root.ML_estimates.read_coordinates(\n observation_frame_idxs, field=\"frame\", flavor=\"numpy\"\n )\n observation_xs = kresults.root.ML_estimates.read_coordinates(\n observation_frame_idxs, field=\"x\", flavor=\"numpy\"\n )\n observation_ys = kresults.root.ML_estimates.read_coordinates(\n observation_frame_idxs, field=\"y\", flavor=\"numpy\"\n )\n observation_zs = kresults.root.ML_estimates.read_coordinates(\n observation_frame_idxs, field=\"z\", flavor=\"numpy\"\n )\n obs_2d_idxs = kresults.root.ML_estimates.read_coordinates(\n observation_frame_idxs, field=\"obs_2d_idx\", flavor=\"numpy\"\n )\n\n line3d_by_frame = {}\n if return_fXl:\n X_by_frame = {}\n\n for frame_i, (kframe, obs_2d_idx) in enumerate(\n zip(observation_frames, obs_2d_idxs)\n ):\n ## if frame_i >= 10:\n ## break\n if frame_i % 100 == 0:\n print(\"frame %d of %d\" % (frame_i, len(observation_frames)))\n\n if PT.__version__ <= \"1.3.3\":\n obs_2d_idx_find = int(obs_2d_idx)\n kframe_find = int(kframe)\n else:\n obs_2d_idx_find = obs_2d_idx\n kframe_find = kframe\n\n kobs_2d_data = kobs_2d.read(start=obs_2d_idx_find, stop=obs_2d_idx_find + 1)\n\n assert len(kobs_2d_data) == 1\n\n kobs_2d_data = kobs_2d_data[0]\n this_camns = kobs_2d_data[0::2]\n this_camn_idxs = kobs_2d_data[1::2]\n\n # print\n print(\"kframe\", kframe)\n\n # print ' this_camns',this_camns\n # print ' this_camn_idxs',this_camn_idxs\n\n done_frame = True\n\n # Really, I want to iterate through this_camns, but this\n # (iterating through pytables using a condition) will be much\n # faster.\n\n by_this_camns = {}\n\n for row in data2d.where(data2d.cols.frame == kframe_find):\n # print '*',row\n\n camn = row[\"camn\"]\n done = False\n\n if camn not in this_camns:\n continue\n\n want_pt_idx = this_camn_idxs[this_camns == camn]\n\n frame_pt_idx = row[\"frame_pt_idx\"]\n\n if want_pt_idx != frame_pt_idx:\n continue\n\n varnames = \"x\", \"y\", \"eccentricity\", \"p1\", \"p2\", \"p3\", \"p4\", \"area\", \"slope\"\n by_this_camns[camn] = {}\n for varname in varnames:\n by_this_camns[camn][varname] = row[varname]\n\n # by_this_camns[camn] = row['x'], row['y'], row['eccentricity'], row['p1'], row['p2'], row['p3'], row['p4']\n # print '-> usign previous row'\n cam_id = camn2cam_id[camn]\n\n if len(by_this_camns) < len(this_camns):\n print(\"WARNING: missing data.\")\n continue\n\n d2 = {}\n for camn, row in by_this_camns.items():\n cam_id = camn2cam_id[camn]\n rx = row[\"x\"]\n ry = row[\"y\"]\n # rx,ry=reconstructor.undistort(cam_id,(rx,ry))\n rx, ry = recon2.undistort(cam_id, (rx, ry))\n d2[cam_id] = (\n rx,\n ry,\n row[\"area\"],\n row[\"slope\"],\n 
row[\"eccentricity\"],\n row[\"p1\"],\n row[\"p2\"],\n row[\"p3\"],\n row[\"p4\"],\n )\n\n (\n X,\n line3d,\n cam_ids_used,\n # mean_dist) = ru.find_best_3d(reconstructor,d2)\n mean_dist,\n ) = ru.find_best_3d(recon2, d2)\n\n try:\n # make sure reconstructed 3D point matches original\n X_orig = numpy.array(\n (\n observation_xs[frame_i],\n observation_ys[frame_i],\n observation_zs[frame_i],\n )\n )\n assert numpy.allclose(X, X_orig)\n except AssertionError as err:\n print(\"*\" * 80)\n print(\"*\" * 80)\n print()\n print(\"WARNING: 3D positions and original 3D positions not the same!\")\n print(\"X\", X)\n print(\"X_orig\", X_orig)\n print()\n print(\"*\" * 80)\n print(\"*\" * 80)\n\n if return_fXl:\n X_by_frame[int(kframe)] = X\n line3d_by_frame[int(kframe)] = line3d\n\n if return_fXl:\n frames = X_by_frame.keys()\n frames.sort()\n fXl = []\n for frame in frames:\n\n X_by_frame[frame]\n\n line3d_by_frame[frame]\n\n list(X_by_frame[frame])\n\n # print 'line3d_by_frame[frame]',line3d_by_frame[frame]\n\n L = line3d_by_frame[frame]\n if L is None:\n L = (nan, nan, nan, nan, nan, nan)\n\n fXl.append([frame] + list(X_by_frame[frame]) + list(L))\n fXl = numpy.array(fXl, dtype=numpy.float)\n return fXl\n else:\n return line3d_by_frame\n\n\nif __name__ == \"__main__\":\n import sys\n\n filename = sys.argv[1]\n use_obj_id = int(sys.argv[2])\n\n kresults = result_utils.get_results(filename, mode=\"r+\")\n reconstructor = flydra_core.reconstruct.Reconstructor(kresults)\n recon2 = reconstructor.get_scaled(reconstructor.scale_factor)\n\n fXl = reconstruct_line_3ds(kresults, recon2, use_obj_id, return_fXl=True)\n import pickle\n\n fd = open(\"fXl.pkl\", mode=\"wb\")\n pickle.dump(fXl, fd)\n fd.close()\n"
] | [
[
"matplotlib.mlab.rec2csv"
],
[
"numpy.array",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zxhhh97/GLM | [
"2c35d245d76dd01c854a34004172d5aa8bcb26a1"
] | [
"tasks/seq2seq/finetune.py"
] | [
"# coding=utf-8\n# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Race.\"\"\"\nimport torch\nimport mpu\nimport functools\nfrom tasks.eval_utils import accuracy_func_provider\nfrom finetune_glm import finetune\nfrom pretrain_glm import get_batch\nfrom collections import OrderedDict\nfrom tasks.seq2seq.dataset import Seq2SeqDataset, BlankLMDataset\nfrom tasks.seq2seq.evaluate import rouge_metric, DecoderEvaluater, BlankLMEvaluater\n\nglobal_tokenizer = None\n\n\ndef seq2seq_forward_step(data, model, args, timers, mems):\n \"\"\"Forward step.\"\"\"\n\n # Get the batch.\n if timers is not None:\n timers('batch generator').start()\n tokens, labels, loss_mask, attention_mask, position_ids = get_batch(data, args)\n if timers is not None:\n timers('batch generator').stop()\n # Forward model.\n logits, *mems = model(tokens, position_ids, attention_mask, *mems)\n logits, loss_mask = logits[:, args.src_seq_length:], loss_mask[:, args.src_seq_length:]\n labels = labels[:, args.src_seq_length:]\n losses = mpu.vocab_parallel_cross_entropy(logits.contiguous().float(), labels)\n if args.label_smoothing > 0.0:\n epsilon = args.label_smoothing\n smooth_loss = -torch.nn.functional.log_softmax(logits, dim=-1).mean(dim=-1)\n losses = (1 - epsilon) * losses + epsilon * smooth_loss\n loss_mask = loss_mask.reshape(-1)\n # The loss is not normalized for fair comparison\n loss = torch.sum(losses.reshape(-1) * loss_mask) / loss_mask.sum()\n return loss, mems, 'bert'\n\n\ndef train_valid_datasets_provider(args, tokenizer):\n \"\"\"Provide train and validation datasets.\"\"\"\n if args.task.lower() == 'blank':\n train_dataset = BlankLMDataset(args, split='train', tokenizer=tokenizer)\n valid_dataset = None\n else:\n train_dataset = Seq2SeqDataset(args, split='train', tokenizer=tokenizer)\n valid_dataset = None\n global global_tokenizer\n global_tokenizer = tokenizer\n return train_dataset, valid_dataset\n\n\ndef metrics_func_provider(args, tokenizer, is_test):\n \"\"\"Privde metrics callback function.\"\"\"\n\n def single_dataset_provider(split):\n if args.task.lower() == 'blank':\n return BlankLMDataset(args, split=split, tokenizer=tokenizer)\n else:\n return Seq2SeqDataset(args, split=split, tokenizer=tokenizer)\n\n if args.task.lower() == 'blank':\n evaluater = BlankLMEvaluater(args, tokenizer)\n eval_func = evaluater.evaluate\n metric_dict = {}\n else:\n evaluater = DecoderEvaluater(args, tokenizer)\n eval_func = evaluater.evaluate\n if args.tokenizer_type == \"BertWordPieceTokenizer\":\n dataset = 'cnn_dm'\n elif args.task.lower() == 'gigaword':\n dataset = 'gigaword'\n else:\n dataset = 'cnn_dm_org'\n metric_dict = OrderedDict({\"rouge-1\": functools.partial(rouge_metric, metric=\"rouge-1\", dataset=dataset),\n \"rouge-2\": functools.partial(rouge_metric, metric=\"rouge-2\", dataset=dataset),\n \"rouge-l\": functools.partial(rouge_metric, metric=\"rouge-l\", dataset=dataset)})\n\n def output_func(predictions, examples, output_file):\n with 
open(output_file + \".hyps\", \"w\", encoding='utf-8') as output:\n for prediction in predictions:\n output.write(prediction)\n output.write(\"\\n\")\n with open(output_file + \".refs\", \"w\", encoding='utf-8') as output:\n for example in examples:\n output.write(example.meta[\"ref\"])\n output.write(\"\\n\")\n if args.task.lower() == 'squad_generation':\n with open(output_file + \".source\", \"w\", encoding='utf-8') as output:\n for example in examples:\n output.write(example.text_a.replace(\"\\n\", \" \") + \" Answer: \" + example.meta[\"answer\"])\n output.write(\"\\n\")\n\n return accuracy_func_provider(single_dataset_provider, metric_dict, args, is_test=is_test, eval_func=eval_func,\n output_func=output_func, only_rank0=False)\n\n\ndef main(args):\n if args.src_seq_length > args.max_position_embeddings:\n args.max_position_embeddings = args.src_seq_length\n if args.task.lower() in ['cnn_dm', 'cnn_dm_original', 'gigaword', 'blank', 'squad_generation', 'xsum']:\n finetune(args, train_valid_datasets_provider, {}, end_of_epoch_callback_provider=metrics_func_provider,\n forward_step=seq2seq_forward_step)\n else:\n raise NotImplementedError(args.task)\n"
] | [
[
"torch.nn.functional.log_softmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ubco-mds-2020-labs/WineVison | [
"93778bf055c3c59913718f802a652c47ec6a849a"
] | [
"pages/Wine_type.py"
] | [
"import numpy as np\nimport pandas as pd\nimport altair as alt\n\nimport dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output\nimport dash_bootstrap_components as dbc\n\nfrom utils import Header, make_dash_table,Header2\n\n\n# Allow large data set\nalt.data_transformers.disable_max_rows()\n\n# Get data\nwine = pd.read_csv(\"data/processed/wine_quality.csv\")\ncorr_df = pd.read_csv(\"data/processed/correlation.csv\")\n\n\n# Get a list of unique column names\nvariables = corr_df[\"level_0\"].unique()\nvariables = np.delete(variables, np.argwhere(variables == \"Quality Factor\"))\n\n# Don't want this as an option in scatterplot\nvariables = np.delete(variables, np.argwhere(\n variables == \"Quality Factor Numeric\"))\n\n# Setup app\ndef create_layout(app):\n # Page layouts\n return html.Div(\n [Header(app),\n dbc.Container([\n dbc.Row([\n dbc.Col([\n html.Iframe(\n id = \"matrix\",\n style={'border-width': '0', 'width': '500px', 'height': '500px'}),\n\n html.H5(\"Wine Type\"),\n\n dcc.Checklist(\n id = \"winetype\",\n options = [\n {\"label\": \"White Wines\", \"value\": \"white\"},\n {\"label\": \"Red Wines\", \"value\": \"red\"}\n ],\n value = [\"red\", \"white\"],\n labelStyle={\"display\": \"inline-block\"}\n ),\n\n html.H5(\"Quality\"),\n\n dcc.Slider(\n id = \"quality\",\n min=0,\n max=3,\n step=1,\n value = 1,\n marks={\n 0: \"below average\",\n 1: \"average\",\n 2: \"above average\",\n 3: \"any\"\n }\n )\n\n ]),\n dbc.Col([\n html.Iframe(\n id = \"scatter\",\n style={'border-width': '0', 'width': '500px', 'height': '500px'}),\n \n html.H5(\"x-axis:\"),\n\n dcc.Dropdown(\n id = \"x-axis\",\n options=[{\"label\": i, \"value\": i} for i in variables],\n value = \"Alcohol (%)\",\n clearable = False\n ),\n\n html.H5(\"y-axis\"),\n\n dcc.Dropdown(\n id = \"y-axis\",\n options=[{\"label\": i, \"value\": i} for i in variables],\n value = \"Chlorides (g/dm^3)\",\n clearable = False),\n \n ])\n ]),\n dbc.Row([\n html.Iframe(\n id = \"densityplot\",\n style={'border-width': '0', 'width': '1200px', 'height': '400px'}\n ),\n ]),\n\n dbc.Row([html.H5(\"\\t Density Plot Variable\")]),\n\n dcc.Dropdown(\n id = \"densvalue\",\n options=[{\"label\": i, \"value\": i} for i in variables],\n value = \"Chlorides (g/dm^3)\",\n clearable = False)\n\n \n])\n\n\n ])\n \n \ndef create_layout_fullview(app):\n # Page layouts\n return html.Div(\n [html.Div(\n html.H1(\"Graphics\")\n ),\n dbc.Container([\n dbc.Row([\n dbc.Col([\n html.Iframe(\n id = \"matrix\",\n style={'border-width': '0', 'width': '500px', 'height': '500px'}),\n\n html.H5(\"Wine Type\"),\n\n dcc.Checklist(\n id = \"winetype\",\n options = [\n {\"label\": \"White Wines\", \"value\": \"white\"},\n {\"label\": \"Red Wines\", \"value\": \"red\"}\n ],\n value = [\"red\", \"white\"],\n labelStyle={\"display\": \"inline-block\"}\n ),\n\n html.H5(\"Quality\"),\n\n dcc.Slider(\n id = \"quality\",\n min=0,\n max=3,\n step=1,\n value = 1,\n marks={\n 0: \"below average\",\n 1: \"average\",\n 2: \"above average\",\n 3: \"any\"\n }\n )\n\n ]),\n dbc.Col([\n html.Iframe(\n id = \"scatter\",\n style={'border-width': '0', 'width': '500px', 'height': '500px'}),\n \n html.H5(\"x-axis:\"),\n\n dcc.Dropdown(\n id = \"x-axis\",\n options=[{\"label\": i, \"value\": i} for i in variables],\n value = \"Alcohol (%)\",\n clearable = False\n ),\n\n html.H5(\"y-axis\"),\n\n dcc.Dropdown(\n id = \"y-axis\",\n options=[{\"label\": i, \"value\": i} for i in variables],\n value = \"Chlorides (g/dm^3)\",\n 
clearable = False),\n \n ])\n ]),\n dbc.Row([\n html.Iframe(\n id = \"densityplot\",\n style={'border-width': '0', 'width': '1200px', 'height': '400px'}\n ),\n ]),\n\n dbc.Row([html.H5(\"\\t Density Plot Variable\")]),\n\n dcc.Dropdown(\n id = \"densvalue\",\n options=[{\"label\": i, \"value\": i} for i in variables],\n value = \"Chlorides (g/dm^3)\",\n clearable = False)\n\n \n])\n\n\n ])"
] | [
[
"pandas.read_csv",
"numpy.argwhere"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
poodarchu/SelfSup | [
"29f7d338bef20f927bb0898f2c98da4f14b60ab1"
] | [
"examples/barlowtwins/BarlowTwins.res50.imagenet.256bs.224size.300e/net.py"
] | [
"from torch import nn\n\nfrom cvpods.layers import ShapeSpec\nfrom cvpods.modeling.backbone import Backbone\nfrom cvpods.modeling.backbone import build_resnet_backbone\n\nfrom cvpods.utils import comm\n\nfrom barlow_twins import BarlowTwins\n\n\ndef build_backbone(cfg, input_shape=None):\n \"\"\"\n Build a backbone from `cfg.MODEL.BACKBONE.NAME`.\n\n Returns:\n an instance of :class:`Backbone`\n \"\"\"\n if input_shape is None:\n input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))\n\n backbone = build_resnet_backbone(cfg, input_shape)\n assert isinstance(backbone, Backbone)\n return backbone\n\n\ndef build_model(cfg):\n\n cfg.build_backbone = build_backbone\n\n model = BarlowTwins(cfg)\n if comm.get_world_size() > 1:\n model = nn.SyncBatchNorm.convert_sync_batchnorm(model)\n\n return model\n"
] | [
[
"torch.nn.SyncBatchNorm.convert_sync_batchnorm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LTTM/LSR | [
"ab204895a86160a5d278fe3cee14c11532251218"
] | [
"datasets/crosscity_Dataset.py"
] | [
"# -*- coding: utf-8 -*-\nimport random\nimport scipy.io\nfrom PIL import Image, ImageOps, ImageFilter, ImageFile\nimport numpy as np\nimport copy\nimport os\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as ttransforms\n\nfrom datasets.cityscapes_Dataset import City_Dataset, City_DataLoader\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nclass CrossCity_Dataset(City_Dataset):\n def __init__(self,\n args,\n data_root_path='./datasets/NTHU_Datasets/Rio',\n list_path='./datasets/NTHU_list/Rio/List',\n split='train',\n base_size=769,\n crop_size=769,\n training=True,\n class_13=False,\n is_source=False):\n\n # setup attributes\n self.args = args\n self.data_path=data_root_path\n self.list_path=list_path\n self.split=split\n self.base_size = base_size if isinstance(base_size, tuple) else (base_size, base_size)\n self.crop_size = crop_size if isinstance(crop_size, tuple) else (crop_size, crop_size)\n self.training = training\n self.is_source = is_source\n \n # crosscity is never a source dataset!\n self.use_weights = False\n\n # compute the lower limit for the rescaling process\n # relevant only when using random rescaling\n self.min_ratio = min(self.crop_size[0]/self.base_size[0], self.crop_size[1]/self.base_size[1]) # round to 3 decimal digits by excess\n self.min_ratio = max(self.min_ratio, 0.5)\n\n self.random_mirror = args.random_mirror\n self.random_crop = args.random_crop\n self.resize = args.resize\n self.gaussian_blur = args.gaussian_blur\n\n if self.split == 'train':\n item_list_filepath = os.path.join(self.list_path, \"train.txt\")\n self.image_filepath = os.path.join(self.data_path, \"Images/Train\")\n self.gt_filepath = os.path.join(self.data_path, \"Labels/Train\")\n elif self.split == 'val':\n item_list_filepath = os.path.join(self.list_path, \"test.txt\")\n self.image_filepath = os.path.join(self.data_path, \"Images/Test\")\n self.gt_filepath = os.path.join(self.data_path, \"Labels/Test\")\n else:\n raise Warning(\"split must be train/val\")\n\n self.items = [id.strip() for id in open(item_list_filepath)]\n\n ignore_label = -1\n self.id_to_trainid = {7: 0, 8: 1, 11: 2, 12: 3, 13: 4, 17: 5,\n 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12,\n 26: 13, 27: 14, 28: 15, 31: 16, 32: 17, 33: 18}\n self.class_16 = False\n # only consider 13 shared classes\n self.class_13 = self.args.class_13 or class_13\n synthia_set_13 = [0, 1, 2, 6, 7, 8, 10, 11, 12, 13, 15, 17, 18]\n self.trainid_to_13id = {id:i for i,id in enumerate(synthia_set_13)}\n\n print(\"{} num images in City {} set have been loaded.\".format(len(self.items), self.split))\n\n # override get item, during training we don't have labels\n def __getitem__(self, item):\n id = self.items[item]\n\n image_path = os.path.join(self.image_filepath, \"{}.jpg\".format(id))\n if not os.path.exists(image_path):\n image_path = os.path.join(self.image_filepath, \"{}.png\".format(id))\n image = Image.open(image_path).convert(\"RGB\")\n\n if self.split == \"train\" and self.training:\n image = self._train_sync_transform(image, None)\n return image, image, image, item\n else:\n gt_image_path = os.path.join(self.gt_filepath, \"{}_eval.png\".format(id))\n gt_image = Image.open(gt_image_path)\n image, gt_image, gt_down = self._val_sync_transform(image, gt_image)\n\n return image, gt_image, gt_down, item\n\nclass CrossCity_DataLoader():\n def __init__(self, args, training=True, **kwargs):\n\n self.args = args\n\n data_set = CrossCity_Dataset(args, \n data_root_path=args.data_root_path,\n list_path=args.list_path,\n 
split=args.split,\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=training)\n self.len = len(data_set)\n\n if self.args.split == \"train\":\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=True,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n else:\n raise Warning(\"split must be train\")\n\n val_split = 'val'\n val_set = CrossCity_Dataset(args, \n data_root_path=args.data_root_path,\n list_path=args.list_path,\n split=val_split,\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=False)\n self.val_loader = data.DataLoader(val_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n \n self.valid_iterations = (len(val_set) + self.args.batch_size) // self.args.batch_size\n self.num_iterations = (len(data_set) + self.args.batch_size) // self.args.batch_size\n \n def __len__(self):\n return self.len"
] | [
[
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mikita95/tensor2tensor | [
"7bb67a18e1e4a0cddd1d61c65c937f14c1c124e3",
"7bb67a18e1e4a0cddd1d61c65c937f14c1c124e3",
"7bb67a18e1e4a0cddd1d61c65c937f14c1c124e3"
] | [
"tensor2tensor/data_generators/gym_problems_test.py",
"tensor2tensor/data_generators/text_encoder_test.py",
"tensor2tensor/utils/trainer_lib_test.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Gym generators tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\n\nfrom tensor2tensor.data_generators import gym_problems\n\nimport tensorflow as tf\n\n\nclass GymProblemsTest(tf.test.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.tmp_dir = tf.test.get_temp_dir()\n shutil.rmtree(cls.tmp_dir)\n os.mkdir(cls.tmp_dir)\n\n def testGymAtariBoots(self):\n problem = gym_problems.GymPongRandom()\n self.assertEqual(210, problem.frame_height)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tensor2tensor.data_generators.text_encoder.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport collections\nimport io\nimport os\nimport random\nimport shutil\nimport string\n\nimport mock\nimport six\nfrom six.moves import range # pylint: disable=redefined-builtin\n\nfrom tensor2tensor.data_generators import text_encoder\nimport tensorflow as tf\n\n\nclass NativeToUnicodeTest(tf.test.TestCase):\n\n def test_native_to_unicode(self):\n s = r\"foo bar\"\n s_unicode = text_encoder.native_to_unicode(s)\n if six.PY2:\n self.assertIsInstance(s_unicode, unicode)\n self.assertEqual(s_unicode, u\"foo bar\")\n\n\nclass EscapeUnescapeTokenTest(tf.test.TestCase):\n\n def test_escape_token(self):\n escaped = text_encoder._escape_token(\n \"Foo! Bar.\\nunder_score back\\\\slash\",\n set(\"abcdefghijklmnopqrstuvwxyz .\\n\") | text_encoder._ESCAPE_CHARS)\n\n self.assertEqual(\n \"\\\\70;oo\\\\33; \\\\66;ar.\\\\10;under\\\\uscore back\\\\\\\\slash_\", escaped)\n\n def test_unescape_token(self):\n unescaped = text_encoder._unescape_token(\n \"\\\\70;oo\\\\33; \\\\66;ar.\\\\10;under\\\\uscore back\\\\\\\\slash_\")\n\n self.assertEqual(\n \"Foo! 
Bar.\\nunder_score back\\\\slash\", unescaped)\n\n\nclass TokenTextEncoderTest(tf.test.TestCase):\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Make sure the test dir exists and is empty.\"\"\"\n cls.test_temp_dir = os.path.join(tf.test.get_temp_dir(), \"encoder_test\")\n shutil.rmtree(cls.test_temp_dir, ignore_errors=True)\n tf.gfile.MakeDirs(cls.test_temp_dir)\n\n def test_save_and_reload(self):\n \"\"\"Test that saving and reloading doesn't change the vocab.\n\n Note that this test reads and writes to the filesystem, which necessitates\n that this test size be \"large\".\n \"\"\"\n\n corpus = \"A B C D E F G H I J K L M N O P Q R S T U V W X Y Z\"\n vocab_filename = os.path.join(self.test_temp_dir, \"abc.vocab\")\n\n # Make text encoder from a list and store vocab to fake filesystem.\n encoder = text_encoder.TokenTextEncoder(None, vocab_list=corpus.split())\n encoder.store_to_file(vocab_filename)\n\n # Load back the saved vocab file from the fake_filesystem.\n new_encoder = text_encoder.TokenTextEncoder(vocab_filename)\n\n self.assertEqual(encoder._id_to_token, new_encoder._id_to_token)\n self.assertEqual(encoder._token_to_id, new_encoder._token_to_id)\n\n def test_reserved_tokens_in_corpus(self):\n \"\"\"Test that we handle reserved tokens appearing in the corpus.\"\"\"\n corpus = \"A B {} D E F {} G {}\".format(text_encoder.EOS,\n text_encoder.EOS,\n text_encoder.PAD)\n\n encoder = text_encoder.TokenTextEncoder(None, vocab_list=corpus.split())\n\n all_tokens = encoder._id_to_token.values()\n\n # If reserved tokens are removed correctly, then the set of tokens will\n # be unique.\n self.assertEqual(len(all_tokens), len(set(all_tokens)))\n\n\nclass SubwordTextEncoderTest(tf.test.TestCase):\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Make sure the test dir exists and is empty.\"\"\"\n cls.test_temp_dir = os.path.join(tf.test.get_temp_dir(), \"encoder_test\")\n shutil.rmtree(cls.test_temp_dir, ignore_errors=True)\n tf.gfile.MakeDirs(cls.test_temp_dir)\n\n def test_encode_decode(self):\n corpus = (\n \"This is a corpus of text that provides a bunch of tokens from which \"\n \"to build a vocabulary. It will be used when strings are encoded \"\n \"with a TextEncoder subclass. The encoder was coded by a coder.\")\n token_counts = collections.Counter(corpus.split(\" \"))\n alphabet = set(corpus) - {\" \"}\n\n original = \"This is a coded sentence encoded by the SubwordTextEncoder.\"\n token_counts.update(original.split(\" \"))\n\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size(\n 100, token_counts, 2, 10)\n\n # Encoding should be reversible.\n encoded = encoder.encode(original)\n decoded = encoder.decode(encoded)\n self.assertEqual(original, decoded)\n\n # The substrings coded and coder are frequent enough in the corpus that\n # they should appear in the vocabulary even though they are substrings\n # of other included strings.\n subtoken_strings = {encoder.all_subtoken_strings[i] for i in encoded}\n self.assertIn(\"encoded_\", subtoken_strings)\n self.assertIn(\"coded_\", subtoken_strings)\n self.assertIn(\"TextEncoder\", encoder.all_subtoken_strings)\n self.assertIn(\"coder\", encoder.all_subtoken_strings)\n\n # Every character in the corpus should be in the encoders alphabet and\n # its subtoken vocabulary.\n self.assertTrue(alphabet.issubset(encoder._alphabet))\n for a in alphabet:\n self.assertIn(a, encoder.all_subtoken_strings)\n\n def test_unicode(self):\n corpus = \"Cat emoticons. 
\\U0001F638 \\U0001F639 \\U0001F63A \\U0001F63B\"\n token_counts = collections.Counter(corpus.split(\" \"))\n\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size(\n 100, token_counts, 2, 10)\n\n self.assertIn(\"\\U0001F638\", encoder._alphabet)\n self.assertIn(\"\\U0001F63B\", encoder.all_subtoken_strings)\n\n def test_small_vocab(self):\n corpus = \"The quick brown fox jumps over the lazy dog\"\n token_counts = collections.Counter(corpus.split(\" \"))\n alphabet = set(corpus) - {\" \"}\n\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size(\n 10, token_counts, 2, 10)\n\n # All vocabulary elements are in the alphabet and subtoken strings even\n # if we requested a smaller vocabulary to assure all expected strings\n # are encodable.\n self.assertTrue(alphabet.issubset(encoder._alphabet))\n for a in alphabet:\n self.assertIn(a, encoder.all_subtoken_strings)\n\n def test_long_tokens(self):\n \"\"\"Subword tokenization should still run efficiently with long tokens.\n\n To make it run efficiently, we need to use the `max_subtoken_length`\n argument when calling SubwordTextEncoder.build_to_target_size.\n \"\"\"\n token_length = 4000\n num_tokens = 50\n target_vocab_size = 600\n max_subtoken_length = 10 # Set this to `None` to get problems.\n max_count = 500\n\n # Generate some long random strings.\n random.seed(0)\n long_tokens = []\n for _ in range(num_tokens):\n long_token = \"\".join([random.choice(string.ascii_uppercase)\n for _ in range(token_length)])\n long_tokens.append(long_token)\n\n corpus = \" \".join(long_tokens)\n token_counts = collections.Counter(corpus.split(\" \"))\n alphabet = set(corpus) - {\" \"}\n\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size(\n target_vocab_size, token_counts, 1, max_count, num_iterations=1,\n max_subtoken_length=max_subtoken_length)\n\n # All vocabulary elements are in the alphabet and subtoken strings even\n # if we requested a smaller vocabulary to assure all expected strings\n # are encodable.\n self.assertTrue(alphabet.issubset(encoder._alphabet))\n for a in alphabet:\n self.assertIn(a, encoder.all_subtoken_strings)\n\n def test_custom_reserved_tokens(self):\n \"\"\"Test that we can pass custom reserved tokens to SubwordTextEncoder.\"\"\"\n corpus = \"The quick brown fox jumps over the lazy dog\"\n token_counts = collections.Counter(corpus.split(\" \"))\n\n start_symbol = \"<S>\"\n end_symbol = \"<E>\"\n reserved_tokens = text_encoder.RESERVED_TOKENS + [start_symbol,\n end_symbol]\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size(\n 10, token_counts, 2, 10, reserved_tokens=reserved_tokens)\n\n # Make sure that reserved tokens appear in the right places.\n start_id = encoder._subtoken_string_to_id[start_symbol]\n end_id = encoder._subtoken_string_to_id[end_symbol]\n self.assertEqual(start_id, 2)\n self.assertEqual(end_id, 3)\n\n # Make sure that we haven't messed up the ability to reconstruct.\n reconstructed_corpus = encoder.decode(encoder.encode(corpus))\n self.assertEqual(corpus, reconstructed_corpus)\n\n def test_encodable_when_not_in_alphabet(self):\n corpus = \"the quick brown fox jumps over the lazy dog\"\n token_counts = collections.Counter(corpus.split(\" \"))\n\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size(\n 100, token_counts, 2, 10)\n original = \"This has UPPER CASE letters that are out of alphabet\"\n\n # Early versions could have an infinite loop when breaking into subtokens\n # if there was any out-of-alphabet characters in the encoded string.\n encoded = 
encoder.encode(original)\n decoded = encoder.decode(encoded)\n\n self.assertEqual(original, decoded)\n encoded_str = \"\".join(encoder.all_subtoken_strings[i] for i in encoded)\n self.assertIn(\"\\\\84;\", encoded_str)\n\n @mock.patch.object(text_encoder, \"_ESCAPE_CHARS\", new=set(\"\\\\_;13579\"))\n def test_raises_exception_when_not_encodable(self):\n corpus = \"the quick brown fox jumps over the lazy dog\"\n token_counts = collections.Counter(corpus.split(\" \"))\n\n # Deliberately exclude some required encoding chars from the alphabet\n # and token list, making some strings unencodable.\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size(\n 100, token_counts, 2, 10)\n original = \"This has UPPER CASE letters that are out of alphabet\"\n\n # Previously there was a bug which produced an infinite loop in this case.\n with self.assertRaises(AssertionError):\n encoder.encode(original)\n\n def test_load_from_file(self):\n # Test a vocab file with words not wrapped with single quotes\n encoder = text_encoder.SubwordTextEncoder()\n correct_vocab = [\"the\", \"and\", \"of\"]\n vocab = io.StringIO(\"the\\n\"\n \"and\\n\"\n \"of\\n\")\n encoder._load_from_file_object(vocab)\n self.assertAllEqual(encoder.all_subtoken_strings, correct_vocab)\n\n # Test a vocab file with words wrapped in single quotes\n encoder = text_encoder.SubwordTextEncoder()\n vocab = io.StringIO(\"\\\"the\\\"\\n\"\n \"\\\"and\\\"\\n\"\n \"\\\"of\\\"\\n\")\n encoder._load_from_file_object(vocab)\n self.assertAllEqual(encoder.all_subtoken_strings, correct_vocab)\n\n def test_reserved_token_chars_not_in_alphabet(self):\n corpus = \"dog\"\n token_counts = collections.Counter(corpus.split(\" \"))\n encoder1 = text_encoder.SubwordTextEncoder.build_to_target_size(\n 100, token_counts, 2, 100)\n filename = os.path.join(self.test_temp_dir, \"out.voc\")\n encoder1.store_to_file(filename)\n encoder2 = text_encoder.SubwordTextEncoder(filename=filename)\n\n self.assertEqual(encoder1._alphabet, encoder2._alphabet)\n\n for t in text_encoder.RESERVED_TOKENS:\n for c in t:\n # Verify that encoders can encode all reserved token chars.\n encoder1.encode(c)\n encoder2.encode(c)\n\n def test_save_and_reload(self):\n corpus = \"the quick brown fox jumps over the lazy dog\"\n token_counts = collections.Counter(corpus.split(\" \"))\n\n # Deliberately exclude some required encoding chars from the alphabet\n # and token list, making some strings unencodable.\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size(\n 100, token_counts, 2, 10)\n\n filename = os.path.join(self.test_temp_dir, \"out.voc\")\n encoder.store_to_file(filename)\n new_encoder = text_encoder.SubwordTextEncoder(filename)\n\n self.assertEqual(encoder._alphabet, new_encoder._alphabet)\n self.assertEqual(encoder.all_subtoken_strings,\n new_encoder.all_subtoken_strings)\n self.assertEqual(encoder._subtoken_string_to_id,\n new_encoder._subtoken_string_to_id)\n self.assertEqual(encoder._max_subtoken_len, new_encoder._max_subtoken_len)\n\n def test_save_and_reload_no_single_quotes(self):\n corpus = \"the quick brown fox jumps over the lazy dog\"\n token_counts = collections.Counter(corpus.split(\" \"))\n\n # Deliberately exclude some required encoding chars from the alphabet\n # and token list, making some strings unencodable.\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size(\n 100, token_counts, 2, 10)\n\n filename = os.path.join(self.test_temp_dir, \"out.voc\")\n encoder.store_to_file(filename, add_single_quotes=False)\n new_encoder = 
text_encoder.SubwordTextEncoder(filename)\n\n self.assertEqual(encoder._alphabet, new_encoder._alphabet)\n self.assertEqual(encoder.all_subtoken_strings,\n new_encoder.all_subtoken_strings)\n self.assertEqual(encoder._subtoken_string_to_id,\n new_encoder._subtoken_string_to_id)\n self.assertEqual(encoder._max_subtoken_len, new_encoder._max_subtoken_len)\n\n def test_build_from_generator(self):\n\n corpus = \"The quick brown fox jumps over the lazy dog\"\n\n def gen():\n for _ in range(3):\n yield corpus\n\n start_symbol = \"<S>\"\n end_symbol = \"<E>\"\n reserved_tokens = text_encoder.RESERVED_TOKENS + [start_symbol,\n end_symbol]\n encoder = text_encoder.SubwordTextEncoder.build_from_generator(\n gen(), 10, reserved_tokens=reserved_tokens)\n\n # Make sure that reserved tokens appear in the right places.\n start_id = encoder._subtoken_string_to_id[start_symbol]\n end_id = encoder._subtoken_string_to_id[end_symbol]\n self.assertEqual(start_id, 2)\n self.assertEqual(end_id, 3)\n\n self.assertEqual(\"hi%s\" % start_symbol,\n encoder.decode(encoder.encode(\"hi\") + [2]))\n\n # Make sure that we haven't messed up the ability to reconstruct.\n reconstructed_corpus = encoder.decode(encoder.encode(corpus))\n self.assertEqual(corpus, reconstructed_corpus)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for trainer_lib.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\nfrom tensor2tensor import models # pylint: disable=unused-import\nfrom tensor2tensor.data_generators import algorithmic\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem as problem_lib\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import trainer_lib\n\nimport tensorflow as tf\n\n\[email protected]_problem\nclass TinyAlgo(algorithmic.AlgorithmicIdentityBinary40):\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n del tmp_dir, task_id\n identity_problem = algorithmic.AlgorithmicIdentityBinary40()\n generator_utils.generate_files(\n identity_problem.generator(self.num_symbols, 40, 100000),\n self.training_filepaths(data_dir, 1, shuffled=True), 100)\n generator_utils.generate_files(\n identity_problem.generator(self.num_symbols, 400, 10000),\n self.dev_filepaths(data_dir, 1, shuffled=True), 100)\n\n\nclass TrainerLibTest(tf.test.TestCase):\n\n @classmethod\n def setUpClass(cls):\n tmp_dir = tf.test.get_temp_dir()\n shutil.rmtree(tmp_dir)\n os.mkdir(tmp_dir)\n cls.data_dir = tmp_dir\n\n # Generate a small test dataset\n registry.problem(\"tiny_algo\").generate_data(cls.data_dir, None)\n\n def testExperiment(self):\n exp_fn = trainer_lib.create_experiment_fn(\n \"transformer\",\n \"tiny_algo\",\n self.data_dir,\n train_steps=1,\n eval_steps=1,\n min_eval_frequency=1,\n use_tpu=False)\n run_config = trainer_lib.create_run_config(\n model_dir=self.data_dir, num_gpus=0, use_tpu=False)\n hparams = registry.hparams(\"transformer_tiny_tpu\")\n exp = exp_fn(run_config, hparams)\n exp.test()\n\n def testModel(self):\n # HParams\n hparams = trainer_lib.create_hparams(\n \"transformer_tiny\", data_dir=self.data_dir, problem_name=\"tiny_algo\")\n\n # Dataset\n problem = hparams.problem\n dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, self.data_dir)\n dataset = dataset.repeat(None).padded_batch(10, dataset.output_shapes)\n features = dataset.make_one_shot_iterator().get_next()\n features = problem_lib.standardize_shapes(features)\n\n # Model\n model = registry.model(\"transformer\")(hparams, tf.estimator.ModeKeys.TRAIN)\n logits, losses = model(features)\n\n self.assertTrue(\"training\" in losses)\n loss = losses[\"training\"]\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n logits_val, loss_val = sess.run([logits, loss])\n logits_shape = list(logits_val.shape)\n logits_shape[1] = None\n self.assertAllEqual(logits_shape, [10, None, 1, 1, 4])\n self.assertEqual(loss_val.shape, tuple())\n\n def testMultipleTargetModalities(self):\n # HParams\n hparams = trainer_lib.create_hparams(\n \"transformer_tiny\", data_dir=self.data_dir, problem_name=\"tiny_algo\")\n tm = 
hparams.problem.get_hparams().target_modality\n hparams.problem.get_hparams().target_modality = {\n \"targets\": tm,\n \"A\": tm,\n \"B\": tm\n }\n\n # Dataset\n problem = hparams.problem\n dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, self.data_dir)\n dataset = dataset.repeat(None).padded_batch(10, dataset.output_shapes)\n features = dataset.make_one_shot_iterator().get_next()\n features = problem_lib.standardize_shapes(features)\n features[\"A\"] = features[\"B\"] = features[\"targets\"]\n\n # Model\n model = registry.model(\"transformer\")(hparams, tf.estimator.ModeKeys.TRAIN)\n\n def body(args, mb=model.body):\n out = mb(args)\n return {\"targets\": out, \"A\": out, \"B\": out}\n\n model.body = body\n\n logits, losses = model(features)\n\n self.assertTrue(\"training\" in losses)\n loss = losses[\"training\"]\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run([logits, loss])\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.test.main",
"tensorflow.test.get_temp_dir"
],
[
"tensorflow.gfile.MakeDirs",
"tensorflow.test.main",
"tensorflow.test.get_temp_dir"
],
[
"tensorflow.global_variables_initializer",
"tensorflow.test.main",
"tensorflow.test.get_temp_dir"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jvanhoefer/PEtab | [
"3f1d7be6402a889b255036ad158e38b0d5f78c5e",
"3f1d7be6402a889b255036ad158e38b0d5f78c5e"
] | [
"petab/parameters.py",
"petab/visualize/helper_functions.py"
] | [
"\"\"\"Functions operating on the PEtab parameter table\"\"\"\n\nimport numbers\nimport pandas as pd\nimport numpy as np\nfrom collections import OrderedDict\nfrom typing import Iterable, Set, List, Tuple, Dict, Union\n\nimport libsbml\n\nfrom . import lint, core, measurements, conditions, observables\nfrom .C import * # noqa: F403\n\n\ndef get_parameter_df(\n parameter_file: Union[str, List[str], pd.DataFrame, None]\n) -> pd.DataFrame:\n \"\"\"\n Read the provided parameter file into a ``pandas.Dataframe``.\n\n Arguments:\n parameter_file: Name of the file to read from or pandas.Dataframe.\n\n Returns:\n Parameter DataFrame\n \"\"\"\n if parameter_file is None:\n return parameter_file\n\n parameter_df = None\n\n if isinstance(parameter_file, pd.DataFrame):\n parameter_df = parameter_file\n\n if isinstance(parameter_file, str):\n parameter_df = pd.read_csv(parameter_file, sep='\\t')\n\n if isinstance(parameter_file, list):\n parameter_df = pd.concat([pd.read_csv(subset_file, sep='\\t')\n for subset_file in parameter_file])\n # Remove identical parameter definitions\n parameter_df.drop_duplicates(inplace=True, ignore_index=True)\n # Check for contradicting parameter definitions\n parameter_duplicates = set(parameter_df[PARAMETER_ID].loc[\n parameter_df[PARAMETER_ID].duplicated()])\n if parameter_duplicates:\n raise ValueError(\n f'The values of {PARAMETER_ID} must be unique or'\n ' identical between all parameter subset files. The'\n ' following duplicates were found:\\n'\n f'{parameter_duplicates}'\n )\n\n lint.assert_no_leading_trailing_whitespace(\n parameter_df.columns.values, \"parameter\")\n\n if not isinstance(parameter_df.index, pd.RangeIndex):\n parameter_df.reset_index(inplace=True)\n\n try:\n parameter_df.set_index([PARAMETER_ID], inplace=True)\n except KeyError:\n raise KeyError(\n f\"Parameter table missing mandatory field {PARAMETER_ID}.\")\n\n return parameter_df\n\n\ndef write_parameter_df(df: pd.DataFrame, filename: str) -> None:\n \"\"\"Write PEtab parameter table\n\n Arguments:\n df: PEtab parameter table\n filename: Destination file name\n \"\"\"\n with open(filename, 'w') as fh:\n df.to_csv(fh, sep='\\t', index=True)\n\n\ndef get_optimization_parameters(parameter_df: pd.DataFrame) -> List[str]:\n \"\"\"\n Get list of optimization parameter IDs from parameter table.\n\n Arguments:\n parameter_df: PEtab parameter DataFrame\n\n Returns:\n List of IDs of parameters selected for optimization.\n \"\"\"\n return list(parameter_df.index[parameter_df[ESTIMATE] == 1])\n\n\ndef get_optimization_parameter_scaling(\n parameter_df: pd.DataFrame) -> Dict[str, str]:\n \"\"\"\n Get Dictionary with optimization parameter IDs mapped to parameter scaling\n strings.\n\n Arguments:\n parameter_df: PEtab parameter DataFrame\n\n Returns:\n Dictionary with optimization parameter IDs mapped to parameter scaling\n strings.\n \"\"\"\n estimated_df = parameter_df.loc[parameter_df[ESTIMATE] == 1]\n return dict(zip(estimated_df.index, estimated_df[PARAMETER_SCALE]))\n\n\ndef create_parameter_df(sbml_model: libsbml.Model,\n condition_df: pd.DataFrame,\n observable_df: pd.DataFrame,\n measurement_df: pd.DataFrame,\n include_optional: bool = False,\n parameter_scale: str = LOG10,\n lower_bound: Iterable = None,\n upper_bound: Iterable = None) -> pd.DataFrame:\n \"\"\"Create a new PEtab parameter table\n\n All table entries can be provided as string or list-like with length\n matching the number of parameters\n\n Arguments:\n sbml_model: SBML Model\n condition_df: PEtab condition DataFrame\n 
measurement_df: PEtab measurement DataFrame\n include_optional: By default this only returns parameters that are\n required to be present in the parameter table. If set to True,\n this returns all parameters that are allowed to be present in the\n parameter table (i.e. also including parameters specified in the\n SBML model).\n parameter_scale: parameter scaling\n lower_bound: lower bound for parameter value\n upper_bound: upper bound for parameter value\n\n Returns:\n The created parameter DataFrame\n \"\"\"\n\n if include_optional:\n parameter_ids = list(get_valid_parameters_for_parameter_table(\n sbml_model=sbml_model, condition_df=condition_df,\n observable_df=observable_df, measurement_df=measurement_df))\n else:\n parameter_ids = list(get_required_parameters_for_parameter_table(\n sbml_model=sbml_model, condition_df=condition_df,\n observable_df=observable_df, measurement_df=measurement_df))\n\n df = pd.DataFrame(\n data={\n PARAMETER_ID: parameter_ids,\n PARAMETER_NAME: parameter_ids,\n PARAMETER_SCALE: parameter_scale,\n LOWER_BOUND: lower_bound,\n UPPER_BOUND: upper_bound,\n NOMINAL_VALUE: np.nan,\n ESTIMATE: 1,\n INITIALIZATION_PRIOR_TYPE: '',\n INITIALIZATION_PRIOR_PARAMETERS: '',\n OBJECTIVE_PRIOR_TYPE: '',\n OBJECTIVE_PRIOR_PARAMETERS: '',\n })\n df.set_index([PARAMETER_ID], inplace=True)\n\n # For SBML model parameters, set nominal values as defined in the model\n for parameter_id in df.index:\n try:\n parameter = sbml_model.getParameter(parameter_id)\n if parameter:\n df.loc[parameter_id, NOMINAL_VALUE] = parameter.getValue()\n except ValueError:\n # parameter was introduced as condition-specific override and\n # is potentially not present in the model\n pass\n return df\n\n\ndef get_required_parameters_for_parameter_table(\n sbml_model: libsbml.Model,\n condition_df: pd.DataFrame,\n observable_df: pd.DataFrame,\n measurement_df: pd.DataFrame) -> Set[str]:\n \"\"\"\n Get set of parameters which need to go into the parameter table\n\n Arguments:\n sbml_model: PEtab SBML model\n condition_df: PEtab condition table\n observable_df: PEtab observable table\n measurement_df: PEtab measurement table\n\n Returns:\n Set of parameter IDs which PEtab requires to be present in the\n parameter table. 
That is all {observable,noise}Parameters from the\n measurement table as well as all parametric condition table overrides\n that are not defined in the SBML model.\n \"\"\"\n\n # use ordered dict as proxy for ordered set\n parameter_ids = OrderedDict()\n\n # Add parameters from measurement table, unless they are fixed parameters\n def append_overrides(overrides):\n for p in overrides:\n if isinstance(p, str) and p not in condition_df.columns:\n parameter_ids[p] = None\n\n for _, row in measurement_df.iterrows():\n # we trust that the number of overrides matches\n append_overrides(measurements.split_parameter_replacement_list(\n row.get(OBSERVABLE_PARAMETERS, None)))\n append_overrides(measurements.split_parameter_replacement_list(\n row.get(NOISE_PARAMETERS, None)))\n\n # Add output parameters except for placeholders\n output_parameters = observables.get_output_parameters(\n observable_df, sbml_model)\n placeholders = observables.get_placeholders(observable_df)\n for p in output_parameters:\n if p not in placeholders and sbml_model.getParameter(p) is None:\n parameter_ids[p] = None\n\n # Add condition table parametric overrides unless already defined in the\n # SBML model\n for p in conditions.get_parametric_overrides(condition_df):\n if sbml_model.getParameter(p) is None:\n parameter_ids[p] = None\n\n return parameter_ids.keys()\n\n\ndef get_valid_parameters_for_parameter_table(\n sbml_model: libsbml.Model,\n condition_df: pd.DataFrame,\n observable_df: pd.DataFrame,\n measurement_df: pd.DataFrame) -> Set[str]:\n \"\"\"\n Get set of parameters which may be present inside the parameter table\n\n Arguments:\n sbml_model: PEtab SBML model\n condition_df: PEtab condition table\n observable_df: PEtab observable table\n measurement_df: PEtab measurement table\n\n Returns:\n Set of parameter IDs which PEtab allows to be present in the\n parameter table.\n \"\"\"\n\n # - grab all model parameters\n # - grab all output parameters defined in {observable,noise}Formula\n # - grab all parameters from measurement table\n # - grab all parametric overrides from condition table\n # - remove parameters for which condition table columns exist\n # - remove observables assigment targets\n # - remove sigma assignment targets\n # - remove placeholder parameters\n # (only partial overrides are not supported)\n\n placeholders = set(observables.get_placeholders(observable_df))\n\n # exclude rule targets\n assignment_targets = {ar.getVariable()\n for ar in sbml_model.getListOfRules()}\n\n # must not go into parameter table\n blackset = set()\n # collect assignment targets\n blackset |= placeholders\n blackset |= assignment_targets\n blackset |= set(condition_df.columns.values) - {CONDITION_NAME}\n\n # use ordered dict as proxy for ordered set\n parameter_ids = OrderedDict.fromkeys(\n p.getId() for p in sbml_model.getListOfParameters()\n if p.getId() not in blackset)\n\n # add output parameters from observables table\n output_parameters = observables.get_output_parameters(\n observable_df, sbml_model)\n for p in output_parameters:\n if p not in blackset:\n parameter_ids[p] = None\n\n # Append parameters from measurement table, unless they occur as condition\n # table columns\n def append_overrides(overrides):\n for p in overrides:\n if isinstance(p, str) and p not in blackset:\n parameter_ids[p] = None\n\n for _, row in measurement_df.iterrows():\n # we trust that the number of overrides matches\n append_overrides(measurements.split_parameter_replacement_list(\n row.get(OBSERVABLE_PARAMETERS, None)))\n 
append_overrides(measurements.split_parameter_replacement_list(\n row.get(NOISE_PARAMETERS, None)))\n\n # Append parameter overrides from condition table\n for p in conditions.get_parametric_overrides(condition_df):\n parameter_ids[p] = None\n\n return parameter_ids.keys()\n\n\ndef get_priors_from_df(parameter_df: pd.DataFrame,\n mode: str) -> List[Tuple]:\n \"\"\"Create list with information about the parameter priors\n\n Arguments:\n parameter_df: PEtab parameter table\n mode: 'initialization' or 'objective'\n\n Returns:\n List with prior information.\n \"\"\"\n\n # get types and parameters of priors from dataframe\n par_to_estimate = parameter_df.loc[parameter_df[ESTIMATE] == 1]\n\n prior_list = []\n for _, row in par_to_estimate.iterrows():\n # retrieve info about type\n prior_type = str(row.get(f'{mode}PriorType', ''))\n if core.is_empty(prior_type):\n prior_type = PARAMETER_SCALE_UNIFORM\n\n # retrieve info about parameters of priors, make it a tuple of floats\n pars_str = str(row.get(f'{mode}PriorParameters', ''))\n if core.is_empty(pars_str):\n lb, ub = map_scale([row[LOWER_BOUND], row[UPPER_BOUND]],\n [row[PARAMETER_SCALE]] * 2)\n pars_str = f'{lb};{ub}'\n prior_pars = tuple([float(entry) for entry in pars_str.split(';')])\n\n # add parameter scale and bounds, as this may be needed\n par_scale = row[PARAMETER_SCALE]\n par_bounds = (row[LOWER_BOUND], row[UPPER_BOUND])\n\n # if no prior is specified, we assume a non-informative (uniform) one\n if prior_type == 'nan':\n prior_type = PARAMETER_SCALE_UNIFORM\n prior_pars = (row[LOWER_BOUND], row[UPPER_BOUND])\n\n prior_list.append((prior_type, prior_pars, par_scale, par_bounds))\n\n return prior_list\n\n\ndef scale(parameter: numbers.Number, scale_str: 'str') -> numbers.Number:\n \"\"\"Scale parameter according to scale_str\n\n Arguments:\n parameter:\n Parameter to be scaled.\n scale_str:\n One of 'lin' (synonymous with ''), 'log', 'log10'.\n\n Returns:\n parameter:\n The scaled parameter.\n \"\"\"\n\n if scale_str == LIN or not scale_str:\n return parameter\n if scale_str == LOG:\n return np.log(parameter)\n if scale_str == LOG10:\n return np.log10(parameter)\n raise ValueError(\"Invalid parameter scaling: \" + scale_str)\n\n\ndef unscale(parameter: numbers.Number, scale_str: 'str') -> numbers.Number:\n \"\"\"Unscale parameter according to scale_str\n\n Arguments:\n parameter:\n Parameter to be unscaled.\n scale_str:\n One of 'lin' (synonymous with ''), 'log', 'log10'.\n\n Returns:\n parameter:\n The unscaled parameter.\n \"\"\"\n\n if scale_str == LIN or not scale_str:\n return parameter\n if scale_str == LOG:\n return np.exp(parameter)\n if scale_str == LOG10:\n return 10**parameter\n raise ValueError(\"Invalid parameter scaling: \" + scale_str)\n\n\ndef map_scale(parameters: Iterable[numbers.Number],\n scale_strs: Iterable[str]) -> Iterable[numbers.Number]:\n \"\"\"As scale(), but for Iterables\"\"\"\n return map(lambda x: scale(x[0], x[1]), zip(parameters, scale_strs))\n\n\ndef normalize_parameter_df(parameter_df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Add missing columns and fill in default values.\"\"\"\n df = parameter_df.copy(deep=True)\n\n if PARAMETER_NAME not in df:\n df[PARAMETER_NAME] = df.reset_index()[PARAMETER_ID]\n\n prior_type_cols = [INITIALIZATION_PRIOR_TYPE,\n OBJECTIVE_PRIOR_TYPE]\n prior_par_cols = [INITIALIZATION_PRIOR_PARAMETERS,\n OBJECTIVE_PRIOR_PARAMETERS]\n # iterate over initialization and objective priors\n for prior_type_col, prior_par_col in zip(prior_type_cols, prior_par_cols):\n # fill in default 
values for prior type\n if prior_type_col not in df:\n df[prior_type_col] = PARAMETER_SCALE_UNIFORM\n else:\n for irow, row in df.iterrows():\n if core.is_empty(row[prior_type_col]):\n df.loc[irow, prior_type_col] = PARAMETER_SCALE_UNIFORM\n if prior_par_col not in df:\n df[prior_par_col] = None\n for irow, row in df.iterrows():\n if core.is_empty(row[prior_par_col]) \\\n and row[prior_type_col] == PARAMETER_SCALE_UNIFORM:\n lb, ub = map_scale([row[LOWER_BOUND], row[UPPER_BOUND]],\n [row[PARAMETER_SCALE]] * 2)\n df.loc[irow, prior_par_col] = f'{lb};{ub}'\n\n return df\n",
"\"\"\"\nThis file should contain the functions, which PEtab internally needs for\nplotting, but which are not meant to be used by non-developers and should\nhence not be directly visible/usable when using `import petab.visualize`.\n\"\"\"\n\nimport functools\nimport warnings\nfrom numbers import Number\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport petab\nimport seaborn as sns\n\nfrom .plotting_config import plot_lowlevel\nfrom ..C import *\n\nfrom typing import Dict, List, Optional, Tuple, Union\n\nsns.set()\n\n# for typehints\nIdsList = List[str]\nNumList = List[int]\n\n\ndef import_from_files(\n data_file_path: str,\n condition_file_path: str,\n simulation_file_path: str,\n dataset_id_list: List[IdsList],\n sim_cond_id_list: List[IdsList],\n sim_cond_num_list: List[NumList],\n observable_id_list: List[IdsList],\n observable_num_list: List[NumList],\n plotted_noise: str,\n visualization_file_path: str = None\n) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n \"\"\"\n Helper function for plotting data and simulations, which imports data\n from PEtab files. If `visualization_file_path` is not provided, the\n visualisation specification DataFrame will be generated automatically.\n\n For documentation, see main function plot_data_and_simulation()\n\n Returns:\n A tuple of experimental data, experimental conditions,\n visualization specification and simulation data DataFrames.\n \"\"\"\n\n # import measurement data and experimental condition\n exp_data = petab.get_measurement_df(data_file_path)\n exp_conditions = petab.get_condition_df(condition_file_path)\n\n # import visualization specification, if file was specified\n if visualization_file_path:\n vis_spec = petab.get_visualization_df(visualization_file_path)\n else:\n # create them based on simulation conditions\n vis_spec, exp_data = get_default_vis_specs(exp_data,\n exp_conditions,\n dataset_id_list,\n sim_cond_id_list,\n sim_cond_num_list,\n observable_id_list,\n observable_num_list,\n plotted_noise)\n\n # import simulation file, if file was specified\n if simulation_file_path != '':\n sim_data = petab.get_simulation_df(simulation_file_path)\n else:\n sim_data = None\n\n return exp_data, exp_conditions, vis_spec, sim_data\n\n\ndef check_vis_spec_consistency(\n exp_data: pd.DataFrame,\n dataset_id_list: Optional[List[IdsList]] = None,\n sim_cond_id_list: Optional[List[IdsList]] = None,\n sim_cond_num_list: Optional[List[NumList]] = None,\n observable_id_list: Optional[List[IdsList]] = None,\n observable_num_list: Optional[List[NumList]] = None) -> str:\n \"\"\"\n Helper function for plotting data and simulations, which checks the\n visualization setting, if no visualization specification file is provided.\n\n For documentation, see main function plot_data_and_simulation()\n\n Returns:\n group_by:\n Specifies the grouping of data to plot.\n \"\"\"\n\n # We have no vis_spec file. Check how data should be grouped\n group_by = ''\n if dataset_id_list is not None:\n group_by += 'dataset'\n\n # check whether grouping by simulation condition should be done\n if sim_cond_id_list is not None and sim_cond_num_list is not None:\n raise NotImplementedError(\n \"Either specify a list of simulation condition IDs or a list of \"\n \"simulation condition numbers, but not both. 
Stopping.\")\n if sim_cond_id_list is not None or sim_cond_num_list is not None:\n group_by += 'simulation'\n\n # check whether grouping by observable should be done\n if observable_id_list is not None and observable_num_list is not None:\n raise NotImplementedError(\n \"Either specify a list of observable IDs or a list \"\n \"of observable numbers, but not both. Stopping.\")\n if observable_id_list is not None or observable_num_list is not None:\n group_by += 'observable'\n # consistency check. Warn or error, if grouping not clear\n if group_by == 'datasetsimulation':\n warnings.warn(\"Found grouping by datasetId and simulation condition. \"\n \"Using datasetId, omitting simulation condition.\")\n group_by = 'dataset'\n\n elif group_by == 'datasetobservable':\n warnings.warn(\"Found grouping by datasetId and observable. \"\n \"Using datasetId, omitting observable.\")\n group_by = 'dataset'\n\n elif group_by == 'datasetsimulationobservable':\n warnings.warn(\"Found grouping by datasetId, simulation condition, and \"\n \"observable. Using datasetId, omitting simulation \"\n \"condition and observable.\")\n group_by = 'dataset'\n\n elif group_by == 'simulationobservable':\n raise NotImplementedError(\n \"Plotting without visualization specification file and datasetId \"\n \"can be performed via grouping by simulation conditions OR \"\n \"observables, but not both. Stopping.\")\n elif group_by in ['simulation', 'observable', 'dataset']:\n pass\n # if group_by is still empty (if visuSpec file is available but datasetId\n # is not available), default: observables\n elif group_by == '':\n group_by = 'observable'\n warnings.warn('Default plotting: grouping by observable. If you want '\n 'to specify another grouping option, please add '\n '\\'datasetId\\' columns.')\n else:\n raise NotImplementedError(\n \"No information provided, how to plot data. Stopping.\")\n\n if group_by != 'dataset':\n # group plots not by dataset. Check, whether such a column would\n # have been available (and give a warning, if so)\n if 'datasetId' in exp_data.columns:\n warnings.warn(\"DatasetIds would have been available, but other \"\n \"grouping was requested. Consider using datasetId.\")\n else:\n if 'datasetId' not in exp_data.columns:\n raise NotImplementedError(\n \"Data should be grouped by datasetId, but no datasetId is \"\n \"given in the measurement file. 
Stopping.\")\n\n return group_by\n\n\ndef create_dataset_id_list(\n simcond_id_list: List[IdsList],\n simcond_num_list: List[NumList],\n observable_id_list: List[IdsList],\n observable_num_list: List[NumList],\n exp_data: pd.DataFrame,\n exp_conditions: pd.DataFrame,\n group_by: str) -> Tuple[pd.DataFrame, List[IdsList], Dict, Dict]:\n \"\"\"\n Create dataset id list and corresponding plot legends.\n Additionally, update/create DATASET_ID column of exp_data\n\n Parameters:\n group_by: defines grouping of data to plot\n\n Returns:\n A tuple of experimental DataFrame, list of datasetIds and\n dictionary of plot legends, corresponding to the datasetIds\n\n For additional documentation, see main function plot_data_and_simulation()\n \"\"\"\n # create a column of dummy datasetIDs and legend entries: preallocate\n dataset_id_column = []\n legend_dict = {}\n yvalues_dict = {}\n\n # loop over experimental data table, create datasetId for each entry\n tmp_simcond = list(exp_data[SIMULATION_CONDITION_ID])\n tmp_obs = list(exp_data[OBSERVABLE_ID])\n for ind, cond_id in enumerate(tmp_simcond):\n # create and add dummy datasetID\n dataset_id = tmp_simcond[ind] + '_' + tmp_obs[ind]\n dataset_id_column.append(dataset_id)\n\n # create nicer legend entries from condition names instead of IDs\n if dataset_id not in legend_dict.keys():\n tmp = exp_conditions.loc[exp_conditions.index == cond_id]\n if CONDITION_NAME not in tmp.columns or tmp[\n CONDITION_NAME].isna().any():\n tmp.loc[:, CONDITION_NAME] = tmp.index.tolist()\n legend_dict[dataset_id] = tmp[CONDITION_NAME][0] + ' - ' + \\\n tmp_obs[ind]\n yvalues_dict[dataset_id] = tmp_obs[ind]\n\n # add these column to the measurement table (possibly overwrite)\n if DATASET_ID in exp_data.columns:\n exp_data = exp_data.drop(DATASET_ID, axis=1)\n exp_data.insert(loc=exp_data.columns.size, column=DATASET_ID,\n value=dataset_id_column)\n\n # make dummy dataset names unique and iterable\n unique_dataset_list = functools.reduce(\n lambda tmp, x: tmp.append(x) or tmp if x not in tmp else tmp,\n list(exp_data[DATASET_ID]), [])\n unique_simcond_list = functools.reduce(\n lambda tmp, x: tmp.append(x) or tmp if x not in tmp else tmp,\n list(exp_data[SIMULATION_CONDITION_ID]), [])\n unique_obs_list = functools.reduce(\n lambda tmp, x: tmp.append(x) or tmp if x not in tmp else tmp,\n list(exp_data[OBSERVABLE_ID]), [])\n\n # we will need a dictionary for mapping simulation conditions\n # /observables to datasets\n ds_dict = {}\n dataset_id_list = []\n if group_by == 'simulation':\n if simcond_id_list is None:\n simcond_id_list = [[unique_simcond_list[i_cond] for i_cond in\n i_cond_list] for i_cond_list in\n simcond_num_list]\n for simcond in unique_simcond_list:\n # ds_dict[simcond] = [ds for ds in unique_dataset_list if ds[\n # 0:len(simcond)+3] == simcond + ' - ']\n # ds_dict[simcond] = [ds for ds in unique_dataset_list if ds[\n # 0:len(simcond) + 3] == simcond + '_']\n ds_dict[simcond] = [ds for ds in unique_dataset_list if ds[\n 0:len(simcond)] == simcond]\n grouped_list = simcond_id_list\n\n elif group_by == 'observable':\n if not observable_id_list and not observable_num_list:\n observable_id_list = [unique_obs_list]\n if observable_id_list is None:\n observable_id_list = [[unique_obs_list[i_obs] for i_obs in\n i_obs_list] for i_obs_list in\n observable_num_list]\n for observable in unique_obs_list:\n # ds_dict[observable] = [ds for ds in unique_dataset_list if ds[\n # -len(observable)-3:] == ' - ' + observable]\n ds_dict[observable] = [ds for ds in unique_dataset_list 
if ds[\n -len(observable) - 1:] == '_' + observable]\n grouped_list = observable_id_list\n\n else:\n raise NotImplementedError(\n \"Very, very weird error. Should not have happened. Something \"\n \"went wrong in how datasets should be grouped. Very weird...\")\n\n for sublist in grouped_list:\n datasets_for_this_plot = [dset for sublist_entry in sublist\n for dset in ds_dict[sublist_entry]]\n dataset_id_list.append(datasets_for_this_plot)\n\n return exp_data, dataset_id_list, legend_dict, yvalues_dict\n\n\ndef create_figure(\n uni_plot_ids: np.ndarray,\n plots_to_file: bool) -> Tuple[plt.Figure,\n Union[Dict[str, plt.Subplot],\n 'np.ndarray[plt.Subplot]']]:\n \"\"\"\n Helper function for plotting data and simulations, open figure and axes\n\n Parameters\n ----------\n uni_plot_ids:\n Array with unique plot indices\n plots_to_file:\n Indicator if plots are saved to file\n\n Returns\n -------\n fig: Figure object of the created plot.\n ax: Axis object of the created plot.\n \"\"\"\n\n # Set Options for plots\n # possible options: see: plt.rcParams.keys()\n plt.rcParams['font.size'] = 10\n plt.rcParams['axes.titlesize'] = 10\n plt.rcParams['figure.figsize'] = [20, 10]\n plt.rcParams['errorbar.capsize'] = 2\n\n # Set Colormap\n sns.set(style=\"ticks\", palette=\"colorblind\")\n\n # Check if plots are saved to file and return single subplot axis\n if plots_to_file:\n num_subplot = 1\n else:\n num_subplot = len(uni_plot_ids)\n\n # compute, how many rows and columns we need for the subplots\n num_row = int(np.round(np.sqrt(num_subplot)))\n num_col = int(np.ceil(num_subplot / num_row))\n\n fig, axes = plt.subplots(num_row, num_col, squeeze=False)\n\n if not plots_to_file:\n for ax in axes.flat[num_subplot:]:\n ax.remove()\n\n axes = dict(zip(uni_plot_ids, axes.flat))\n\n return fig, axes\n\n\ndef get_default_vis_specs(\n exp_data: pd.DataFrame,\n exp_conditions: pd.DataFrame,\n dataset_id_list: Optional[List[IdsList]] = None,\n sim_cond_id_list: Optional[List[IdsList]] = None,\n sim_cond_num_list: Optional[List[NumList]] = None,\n observable_id_list: Optional[List[IdsList]] = None,\n observable_num_list: Optional[List[NumList]] = None,\n plotted_noise: Optional[str] = MEAN_AND_SD\n) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"\n Helper function for plotting data and simulations, which creates a\n default visualization table and updates/creates DATASET_ID column of\n exp_data\n\n Returns:\n A tuple of visualization specification DataFrame and experimental\n DataFrame.\n\n For documentation, see main function plot_data_and_simulation()\n \"\"\"\n warnings.warn(\"This function will be removed in future releases. 
\",\n DeprecationWarning)\n\n # check consistency of settings\n group_by = check_vis_spec_consistency(\n exp_data, dataset_id_list, sim_cond_id_list, sim_cond_num_list,\n observable_id_list, observable_num_list)\n\n if group_by != 'dataset':\n # datasetId_list will be created (possibly overwriting previous list\n # - only in the local variable, not in the tsv-file)\n exp_data, dataset_id_list, legend_dict, _ = \\\n create_dataset_id_list(sim_cond_id_list, sim_cond_num_list,\n observable_id_list, observable_num_list,\n exp_data, exp_conditions, group_by)\n\n dataset_id_column = [i_dataset for sublist in dataset_id_list\n for i_dataset in sublist]\n if group_by != 'dataset':\n dataset_label_column = [legend_dict[i_dataset] for sublist in\n dataset_id_list for i_dataset in sublist]\n else:\n dataset_label_column = dataset_id_column\n\n # get number of plots and create plotId-lists\n plot_id_list = ['plot%s' % str(ind + 1) for ind, inner_list in enumerate(\n dataset_id_list) for _ in inner_list]\n\n # create dataframe\n vis_spec = pd.DataFrame({PLOT_ID: plot_id_list,\n DATASET_ID: dataset_id_column,\n LEGEND_ENTRY: dataset_label_column})\n\n # fill columns with default values\n fill_vis_spec = ((2, Y_LABEL, 'value'),\n (2, Y_OFFSET, 0),\n (2, Y_VALUES, ''),\n (2, X_LABEL, 'time'),\n (2, X_OFFSET, 0),\n (2, X_VALUES, 'time'),\n (1, Y_SCALE, LIN),\n (1, X_SCALE, LIN),\n (0, PLOT_TYPE_DATA, plotted_noise),\n (0, PLOT_TYPE_SIMULATION, LINE_PLOT),\n (0, PLOT_NAME, ''))\n for pos, col, val in fill_vis_spec:\n vis_spec.insert(loc=pos, column=col, value=val)\n\n return vis_spec, exp_data\n\n\ndef get_vis_spec_dependent_columns_dict(\n exp_data: pd.DataFrame,\n exp_conditions: pd.DataFrame,\n dataset_id_list: Optional[List[IdsList]] = None,\n sim_cond_id_list: Optional[List[IdsList]] = None,\n sim_cond_num_list: Optional[List[NumList]] = None,\n observable_id_list: Optional[List[IdsList]] = None,\n observable_num_list: Optional[List[NumList]] = None\n) -> Tuple[pd.DataFrame, Dict]:\n \"\"\"\n Helper function for creating values for columns PLOT_ID, DATASET_ID,\n LEGEND_ENTRY, Y_VALUES for visualization specification file.\n DATASET_ID column of exp_data is updated accordingly.\n\n Returns:\n A tuple of experimental DataFrame and a dictionary with values for\n columns PLOT_ID, DATASET_ID, LEGEND_ENTRY, Y_VALUES for visualization\n specification file.\n \"\"\"\n\n # check consistency of settings\n group_by = check_vis_spec_consistency(\n exp_data, dataset_id_list, sim_cond_id_list, sim_cond_num_list,\n observable_id_list, observable_num_list)\n\n if group_by != 'dataset':\n # datasetId_list will be created (possibly overwriting previous list\n # - only in the local variable, not in the tsv-file)\n exp_data, dataset_id_list, legend_dict, yvalues_dict = \\\n create_dataset_id_list(sim_cond_id_list, sim_cond_num_list,\n observable_id_list, observable_num_list,\n exp_data, exp_conditions, group_by)\n\n dataset_id_column = [i_dataset for sublist in dataset_id_list\n for i_dataset in sublist]\n\n if group_by != 'dataset':\n dataset_label_column = [legend_dict[i_dataset] for sublist in\n dataset_id_list for i_dataset in sublist]\n yvalues_column = [yvalues_dict[i_dataset] for sublist in\n dataset_id_list for i_dataset in sublist]\n else:\n dataset_label_column = dataset_id_column\n yvalues_column = ['']*len(dataset_id_column)\n\n # get number of plots and create plotId-lists\n plot_id_column = ['plot%s' % str(ind + 1) for ind, inner_list in enumerate(\n dataset_id_list) for _ in inner_list]\n\n columns_dict = 
{PLOT_ID: plot_id_column,\n DATASET_ID: dataset_id_column,\n LEGEND_ENTRY: dataset_label_column,\n Y_VALUES: yvalues_column}\n return exp_data, columns_dict\n\n\ndef expand_vis_spec_settings(vis_spec, columns_dict):\n \"\"\"\n only makes sense if DATASET_ID is not in vis_spec.columns?\n\n Returns:\n A visualization specification DataFrame\n \"\"\"\n columns_to_expand = [PLOT_NAME, PLOT_TYPE_SIMULATION, PLOT_TYPE_DATA,\n X_VALUES, X_OFFSET, X_LABEL, X_SCALE, Y_OFFSET,\n Y_LABEL, Y_SCALE, LEGEND_ENTRY]\n\n for column in vis_spec.columns:\n if column in columns_to_expand:\n column_entries = []\n if Y_VALUES in vis_spec.columns:\n for i, plot_id in enumerate(columns_dict[PLOT_ID]):\n select_conditions = (vis_spec[PLOT_ID] == plot_id) & (\n vis_spec[Y_VALUES] == columns_dict[Y_VALUES][i])\n column_entries.append(\n vis_spec[select_conditions].loc[:, column].values[0])\n else:\n for plot_id in columns_dict[PLOT_ID]:\n select_conditions = vis_spec[PLOT_ID] == plot_id\n column_entries.append(\n vis_spec[select_conditions].loc[:, column].values[0])\n columns_dict[column] = column_entries\n vis_spec = pd.DataFrame(columns_dict)\n return vis_spec\n\n\ndef create_or_update_vis_spec(\n exp_data: pd.DataFrame,\n exp_conditions: pd.DataFrame,\n vis_spec: Optional[pd.DataFrame] = None,\n dataset_id_list: Optional[List[IdsList]] = None,\n sim_cond_id_list: Optional[List[IdsList]] = None,\n sim_cond_num_list: Optional[List[NumList]] = None,\n observable_id_list: Optional[List[IdsList]] = None,\n observable_num_list: Optional[List[NumList]] = None,\n plotted_noise: Optional[str] = MEAN_AND_SD):\n \"\"\"\n Helper function for plotting data and simulations, which updates vis_spec\n file if necessary or creates a default visualization table and\n updates/creates DATASET_ID column of exp_data. 
As a result, a visualization\n specification file exists with columns PLOT_ID, DATASET_ID, Y_VALUES and\n LEGEND_ENTRY\n\n Returns:\n A tuple of visualization specification DataFrame and experimental\n DataFrame.\n \"\"\"\n if vis_spec is None:\n # create dataframe\n exp_data, columns_dict = \\\n get_vis_spec_dependent_columns_dict(exp_data,\n exp_conditions,\n dataset_id_list,\n sim_cond_id_list,\n sim_cond_num_list,\n observable_id_list,\n observable_num_list)\n vis_spec = pd.DataFrame(columns_dict)\n else:\n # TODO: do validation issue #190\n # so, plotid is definitely there\n if DATASET_ID not in vis_spec.columns:\n if Y_VALUES in vis_spec.columns:\n plot_id_list = np.unique(vis_spec[PLOT_ID])\n\n observable_id_list = [vis_spec[vis_spec[PLOT_ID] ==\n plot_id].loc[:, Y_VALUES].values\n for plot_id in plot_id_list]\n exp_data, columns_dict = \\\n get_vis_spec_dependent_columns_dict(\n exp_data,\n exp_conditions,\n observable_id_list=observable_id_list)\n else:\n # PLOT_ID is there, but NOT DATASET_ID and not Y_VALUES,\n # but potentially some settings.\n # TODO: multiple plotids with diff settings\n exp_data, columns_dict = \\\n get_vis_spec_dependent_columns_dict(\n exp_data,\n exp_conditions)\n # get other settings that could have potentially been there\n # and expand according to plot_id_column\n vis_spec = expand_vis_spec_settings(vis_spec, columns_dict)\n\n # if dataset_id is there, then nothing to expand?\n vis_spec[PLOT_TYPE_DATA] = plotted_noise\n\n # check columns, and add non-mandatory default columns\n vis_spec = check_ex_visu_columns(vis_spec)\n return exp_data, vis_spec\n\n\ndef check_ex_visu_columns(vis_spec: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Check the columns in Visu_Spec file, if non-mandotory columns does not\n exist, create default columns\n\n Returns:\n Updated visualization specification DataFrame\n \"\"\"\n if PLOT_NAME not in vis_spec.columns:\n vis_spec[PLOT_NAME] = ''\n if PLOT_TYPE_SIMULATION not in vis_spec.columns:\n vis_spec[PLOT_TYPE_SIMULATION] = LINE_PLOT\n if PLOT_TYPE_DATA not in vis_spec.columns:\n vis_spec[PLOT_TYPE_DATA] = MEAN_AND_SD\n if X_VALUES not in vis_spec.columns:\n vis_spec[X_VALUES] = 'time'\n if X_OFFSET not in vis_spec.columns:\n vis_spec[X_OFFSET] = 0\n if X_LABEL not in vis_spec.columns:\n vis_spec[X_LABEL] = 'time'\n if X_SCALE not in vis_spec.columns:\n vis_spec[X_SCALE] = LIN\n if Y_VALUES not in vis_spec.columns:\n vis_spec[Y_VALUES] = ''\n if Y_OFFSET not in vis_spec.columns:\n vis_spec[Y_OFFSET] = 0\n if Y_LABEL not in vis_spec.columns:\n vis_spec[Y_LABEL] = 'value'\n if Y_SCALE not in vis_spec.columns:\n vis_spec[Y_SCALE] = LIN\n if LEGEND_ENTRY not in vis_spec.columns:\n vis_spec[LEGEND_ENTRY] = vis_spec[DATASET_ID]\n\n return vis_spec\n\n\ndef check_ex_exp_columns(\n exp_data: pd.DataFrame,\n dataset_id_list: List[IdsList],\n sim_cond_id_list: List[IdsList],\n sim_cond_num_list: List[NumList],\n observable_id_list: List[IdsList],\n observable_num_list: List[NumList],\n exp_conditions: pd.DataFrame,\n sim: Optional[bool] = False\n) -> Tuple[pd.DataFrame, List[IdsList], Dict]:\n \"\"\"\n Check the columns in measurement file, if non-mandotory columns does not\n exist, create default columns\n\n Returns:\n A tuple of experimental DataFrame, list of datasetIds and\n dictionary of plot legends, corresponding to the datasetIds\n \"\"\"\n data_type = MEASUREMENT\n if sim:\n data_type = SIMULATION\n # mandatory columns\n if OBSERVABLE_ID not in exp_data.columns:\n raise NotImplementedError(\n f\"Column \\'observableId\\' is 
missing in {data_type} file. \")\n if SIMULATION_CONDITION_ID not in exp_data.columns:\n raise NotImplementedError(\n f\"Column \\'simulationConditionId\\' is missing in {data_type} \"\n f\"file. \")\n if data_type not in exp_data.columns:\n raise NotImplementedError(\n f\"Column \\'{data_type}\\' is missing in {data_type} \"\n f\"file. \")\n if TIME not in exp_data.columns:\n raise NotImplementedError(\n f\"Column \\'time\\' is missing in {data_type} \"\n f\"file. \")\n # non-mandatory columns\n if PREEQUILIBRATION_CONDITION_ID not in exp_data.columns:\n exp_data.insert(loc=1, column=PREEQUILIBRATION_CONDITION_ID,\n value='')\n if OBSERVABLE_PARAMETERS not in exp_data.columns:\n exp_data.insert(loc=4, column=OBSERVABLE_PARAMETERS,\n value='')\n if NOISE_PARAMETERS not in exp_data.columns:\n exp_data.insert(loc=4, column=NOISE_PARAMETERS,\n value=0)\n if REPLICATE_ID not in exp_data.columns:\n exp_data.insert(loc=4, column=REPLICATE_ID,\n value='')\n legend_dict = {}\n if DATASET_ID not in exp_data.columns:\n if dataset_id_list is not None:\n exp_data.insert(loc=4, column=DATASET_ID,\n value=dataset_id_list)\n else:\n # datasetId_list will be created (possibly overwriting previous\n # list - only in the local variable, not in the tsv-file)\n # check consistency of settings\n group_by = check_vis_spec_consistency(exp_data,\n dataset_id_list,\n sim_cond_id_list,\n sim_cond_num_list,\n observable_id_list,\n observable_num_list)\n observable_id_list = \\\n [[el] for el in exp_data.observableId.unique()]\n\n exp_data, dataset_id_list, legend_dict, _ = create_dataset_id_list(\n sim_cond_id_list, sim_cond_num_list, observable_id_list,\n observable_num_list, exp_data, exp_conditions, group_by)\n\n return exp_data, dataset_id_list, legend_dict\n\n\ndef handle_dataset_plot(plot_spec: pd.Series,\n ax: plt.Axes,\n exp_data: pd.DataFrame,\n exp_conditions: pd.DataFrame,\n sim_data: pd.DataFrame):\n \"\"\"\n Handle dataset plot\n \"\"\"\n # get datasetID and independent variable of first entry of plot1\n dataset_id = plot_spec[DATASET_ID]\n indep_var = plot_spec[X_VALUES]\n\n # define index to reduce exp_data to data linked to datasetId\n ind_dataset = exp_data[DATASET_ID] == dataset_id\n\n # gather simulationConditionIds belonging to datasetId\n uni_condition_id, uind = np.unique(\n exp_data[ind_dataset][SIMULATION_CONDITION_ID],\n return_index=True)\n # keep the ordering which was given by user from top to bottom\n # (avoid ordering by names '1','10','11','2',...)'\n uni_condition_id = uni_condition_id[np.argsort(uind)]\n col_name_unique = SIMULATION_CONDITION_ID\n\n # Case separation of independent parameter: condition, time or custom\n if indep_var == TIME:\n # obtain unique observation times\n uni_condition_id = np.unique(exp_data[ind_dataset][TIME])\n col_name_unique = TIME\n conditions = uni_condition_id\n elif indep_var == 'condition':\n conditions = None\n else:\n # extract conditions (plot input) from condition file\n ind_cond = exp_conditions.index.isin(uni_condition_id)\n conditions = exp_conditions[ind_cond][indep_var]\n\n # retrieve measurements from dataframes\n measurement_to_plot = get_data_to_plot(plot_spec, exp_data, sim_data,\n uni_condition_id, col_name_unique)\n\n # check, whether simulation should be plotted\n plot_sim = sim_data is not None\n\n # plot data\n nan_set = all([np.isnan(val) for val in measurement_to_plot['mean']])\n if not nan_set:\n plot_lowlevel(plot_spec, ax, conditions, measurement_to_plot, plot_sim)\n\n # Beautify plots\n ax.set_xlabel(\n plot_spec.xLabel)\n 
ax.set_ylabel(\n plot_spec.yLabel)\n\n\ndef matches_plot_spec(df: pd.DataFrame,\n col_id: str,\n x_value: Union[float, str],\n plot_spec: pd.Series) -> pd.Series:\n \"\"\"\n constructs an index for subsetting of the dataframe according to what is\n specified in plot_spec.\n\n Parameters:\n df:\n pandas data frame to subset, can be from measurement file or\n simulation file\n col_id:\n name of the column that will be used for indexing in x variable\n x_value:\n subsetted x value\n plot_spec:\n visualization spec from the visualization file\n\n Returns:\n index:\n Boolean series that can be used for subsetting of the passed\n dataframe\n \"\"\"\n subset = (\n (df[col_id] == x_value) &\n (df[DATASET_ID] == plot_spec[DATASET_ID])\n )\n if plot_spec[Y_VALUES] == '':\n if len(df.loc[subset, OBSERVABLE_ID].unique()) > 1:\n ValueError(\n f'{Y_VALUES} must be specified in visualization table if '\n f'multiple different observables are available.'\n )\n else:\n subset &= (df[OBSERVABLE_ID] == plot_spec[Y_VALUES])\n return subset\n\n\ndef get_data_to_plot(plot_spec: pd.Series,\n m_data: pd.DataFrame,\n simulation_data: pd.DataFrame,\n condition_ids: np.ndarray,\n col_id: str,\n simulation_field: str = SIMULATION) -> pd.DataFrame:\n \"\"\"\n Group the data, which should be plotted and return it as dataframe.\n\n Parameters:\n plot_spec:\n information about contains defined data format (visualization file)\n m_data:\n contains defined data format (measurement file)\n simulation_data:\n contains defined data format (simulation file)\n condition_ids:\n contains all unique condition IDs which should be\n plotted in one figure (can be found in measurementData file,\n column simulationConditionId)\n col_id:\n the name of the column in visualization file, whose entries\n should be unique (depends on condition in column\n xValues)\n simulation_field:\n Column name in ``simulation_data`` that contains the actual\n simulation result.\n\n Returns:\n data_to_plot:\n Contains the data which should be plotted\n (Mean and Std)\n \"\"\"\n\n # create empty dataframe for means and SDs\n data_to_plot = pd.DataFrame(\n columns=['mean', 'noise_model', 'sd', 'sem', 'repl', 'sim'],\n index=condition_ids\n )\n\n for var_cond_id in condition_ids:\n\n # TODO (#117): Here not the case: So, if entries in measurement file:\n # preequCondId, time, observableParams, noiseParams,\n # are not the same, then -> differ these data into\n # different groups!\n # now: go in simulationConditionId, search group of unique\n # simulationConditionId e.g. rows 0,6,12,18 share the same\n # simulationCondId, then check if other column entries are the same\n # (now: they are), then take intersection of rows 0,6,12,18 and checked\n # other same columns (-> now: 0,6,12,18) and then go on with code.\n # if there is at some point a difference in other columns, say e.g.\n # row 12,18 have different noiseParams than rows 0,6, the actual code\n # would take rows 0,6 and forget about rows 12,18\n\n # compute mean and standard deviation across replicates\n subset = matches_plot_spec(m_data, col_id, var_cond_id, plot_spec)\n data_measurements = m_data.loc[\n subset,\n MEASUREMENT\n ]\n\n data_to_plot.at[var_cond_id, 'mean'] = np.mean(data_measurements)\n data_to_plot.at[var_cond_id, 'sd'] = np.std(data_measurements)\n\n if (plot_spec.plotTypeData == PROVIDED) & sum(subset):\n if len(m_data.loc[subset, NOISE_PARAMETERS].unique()) > 1:\n raise NotImplementedError(\n f\"Datapoints with inconsistent {NOISE_PARAMETERS} is \"\n f\"currently not implemented. 
Stopping.\")\n tmp_noise = m_data.loc[subset, NOISE_PARAMETERS].values[0]\n if isinstance(tmp_noise, str):\n raise NotImplementedError(\n \"No numerical noise values provided in the measurement \"\n \"table. Stopping.\")\n if isinstance(tmp_noise, Number) or tmp_noise.dtype == 'float64':\n data_to_plot.at[var_cond_id, 'noise_model'] = tmp_noise\n\n # standard error of mean\n data_to_plot.at[var_cond_id, 'sem'] = \\\n np.std(data_measurements) / np.sqrt(len(data_measurements))\n\n # single replicates\n data_to_plot.at[var_cond_id, 'repl'] = \\\n data_measurements\n\n if simulation_data is not None:\n simulation_measurements = simulation_data.loc[\n matches_plot_spec(simulation_data, col_id, var_cond_id,\n plot_spec),\n simulation_field\n ]\n data_to_plot.at[var_cond_id, 'sim'] = np.mean(\n simulation_measurements\n )\n\n return data_to_plot\n"
] | [
[
"numpy.log",
"pandas.read_csv",
"pandas.DataFrame",
"numpy.log10",
"numpy.exp"
],
[
"numpy.sqrt",
"numpy.unique",
"numpy.isnan",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.ceil",
"numpy.std",
"numpy.mean",
"numpy.argsort"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
G-AshwinKumar/experiment-notebook | [
"aae1c5fb9ef8f84dce5d75989ed8975797282f37"
] | [
"plugins/plugin_mcalic/mcalic_codecs.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Wrappers for E. Maglis's M-CALIC implementation\n\"\"\"\n__author__ = \"Miguel Hernández Cabronero <[email protected]>\"\n__date__ = \"30/04/2020\"\n\nimport os\nimport sortedcontainers\nimport tempfile\nimport numpy as np\nimport glob\nimport copy\nimport subprocess\nimport shutil\nimport random\n\nfrom enb import icompression\nfrom enb.config import get_options\nfrom enb import isets\nfrom enb import tarlite\n\noptions = get_options()\n\n\nclass MCALIC_Magli(icompression.LosslessCodec, icompression.NearLosslessCodec, icompression.WrapperCodec):\n FORMAT_BSQ, FORMAT_BIL = range(2)\n default_format = FORMAT_BSQ\n\n max_tested_spatial_size = 17418240\n max_dimension_size = 2500\n split_height_count = 3\n split_width_count = 3\n\n def __init__(self, max_error=0, bin_dir=None, data_format=None, output_invocation_dir=None):\n \"\"\"\n :param max_error: maximum pixelwise error allowed. Use 0 for lossless\n compression\n :param bin_dir: path to the directory that contains the\n ldc_encoder, ldc_decoder and ldc_header_tool binaries. If it is None,\n options.external_bin_base_dir is None. If this is None as well, the\n same directory of this script is used by default.\n :param data_format: bsq/bil format of the expected data.\n If none, the default (BSQ) is used\n \"\"\"\n bin_dir = bin_dir if bin_dir is not None else options.external_bin_base_dir\n bin_dir = bin_dir if bin_dir is not None else os.path.dirname(__file__)\n assert os.path.isdir(bin_dir), f\"Invalid binary dir {bin_dir}.\"\n\n param_dict = sortedcontainers.SortedDict()\n max_error = int(max_error)\n assert max_error >= 0, f\"Invalid max_error {max_error}\"\n param_dict[\"max_error\"] = max_error\n data_format = data_format if data_format is not None else self.default_format\n assert data_format in [self.FORMAT_BSQ, self.FORMAT_BIL], f\"Invalid data format {data_format}\"\n param_dict[\"data_format\"] = data_format\n icompression.WrapperCodec.__init__(\n self, compressor_path=os.path.join(bin_dir, \"Mcalic_enc_nl\"),\n decompressor_path=os.path.join(bin_dir, \"Mcalic_dec_nl\"),\n param_dict=param_dict, output_invocation_dir=output_invocation_dir)\n\n def compress(self, original_path: str, compressed_path: str, original_file_info=None):\n # Tested limit: self.max_tested_spatial_size\n if original_file_info[\"width\"] <= self.max_dimension_size \\\n and original_file_info[\"height\"] <= self.max_dimension_size:\n return self.compress_one(original_path=original_path,\n compressed_path=compressed_path,\n original_file_info=original_file_info)\n else:\n tl_writer = tarlite.TarliteWriter()\n img = isets.load_array_bsq(file_or_path=original_path, image_properties_row=original_file_info)\n with tempfile.TemporaryDirectory(dir=options.base_tmp_dir) as tmp_dir:\n compound_size = 0\n total_compression_time = 0\n for y in range(self.split_height_count):\n for x in range(self.split_width_count):\n small_array = \\\n img[x * (original_file_info[\"width\"] // self.split_width_count):\n (((x + 1) * (original_file_info[\n \"width\"] // self.split_width_count)) if x < self.split_width_count - 1 else\n original_file_info[\"width\"]),\n y * (original_file_info[\"height\"] // self.split_height_count):\n (((y + 1) * (original_file_info[\n \"height\"] // self.split_height_count)) if y < self.split_height_count - 1 else\n original_file_info[\"height\"]),\n :]\n small_path = os.path.join(tmp_dir, f\"{x}_{y}.raw\")\n small_compressed_path = os.path.join(tmp_dir, f\"{x}_{y}.mcalic\")\n 
isets.dump_array_bsq(small_array, small_path)\n small_file_info = copy.copy(original_file_info)\n small_file_info[\"width\"], small_file_info[\"height\"], small_file_info[\n \"component_count\"] = small_array.shape\n compression_results = self.compress_one(\n original_path=small_path, compressed_path=small_compressed_path,\n original_file_info=small_file_info)\n total_compression_time += compression_results.compression_time_seconds\n tl_writer.add_file(small_compressed_path)\n os.remove(small_path)\n compound_size += small_array.size\n\n assert compound_size == original_file_info[\n \"samples\"], f\"compound_size = {compound_size} != {original_file_info['samples']} = original samples\"\n tl_writer.write(output_path=compressed_path)\n\n compression_results = self.compression_results_from_paths(original_path=original_path,\n compressed_path=compressed_path)\n compression_results.compression_time_seconds = total_compression_time\n return compression_results\n\n def compress_one(self, original_path: str, compressed_path: str, original_file_info=None):\n \"\"\"Compress one image tile with M-CALIC.\n \"\"\"\n assert original_file_info[\"bytes_per_sample\"] == 2, \\\n f\"This implementation of M-CALIC ({self.compressor_path}) only supports 16bpp\"\n assert original_file_info[\"component_count\"] > 1, \\\n f\"This implementation of M-CALIC ({self.compressor_path}) only supports images with more than one component\"\n\n\n with tempfile.NamedTemporaryFile(\n dir=options.base_tmp_dir, prefix=f\"bil_le_{os.path.basename(original_path)}\") as bil_le_file:\n # M-Calic implementation requires little endian, unsigned 16bpp BIL format\n original_dtype = isets.iproperties_row_to_numpy_dtype(image_properties_row=original_file_info)\n img = np.fromfile(original_path, dtype=original_dtype).reshape(\n original_file_info[\"component_count\"], original_file_info[\"height\"], original_file_info[\"width\"])\n \n\n offset = None\n if original_file_info[\"signed\"]:\n offset, original_max = int(img.min()), int(img.max())\n offset = min(offset, 0)\n assert original_max - offset <= 2 ** 15 - 1, \\\n f\"Invalid dynamic range of signed image ({offset}, {original_max})\"\n img = (img.astype(\"i4\") - offset).astype(original_dtype.replace(\"i\", \"u\"))\n\n if original_file_info[\"big_endian\"]:\n img = img.astype(original_dtype.replace(\">\", \"<\").replace(\"i\", \"u\"))\n img.swapaxes(0, 1).tofile(bil_le_file.name)\n\n if original_file_info[\"signed\"]:\n with tempfile.NamedTemporaryFile(dir=options.base_tmp_dir,\n prefix=f\"bil_le_{os.path.basename(original_path)}\",\n suffix=\".mcalic\") as tmp_compressed_file, \\\n tempfile.NamedTemporaryFile(dir=options.base_tmp_dir,\n prefix=f\"side_info_{os.path.basename(original_path)}\",\n suffix=\".txt\", mode=\"w\") as si_file:\n si_file.write(f\"{abs(offset):d}\")\n si_file.flush()\n compression_results = super().compress(original_path=bil_le_file.name,\n compressed_path=tmp_compressed_file.name,\n original_file_info=original_file_info)\n tarlite.TarliteWriter(initial_input_paths=[si_file.name, tmp_compressed_file.name]).write(\n compressed_path)\n compression_results.original_path = original_path\n compression_results.compressed_path = compressed_path\n return compression_results\n else:\n compression_results = super().compress(\n original_path=bil_le_file.name, compressed_path=compressed_path,\n original_file_info=original_file_info)\n compression_results.original_path = original_path\n return compression_results\n\n def get_compression_params(self, original_path, 
compressed_path, original_file_info):\n s = f\"{original_path} {compressed_path} \" \\\n f\"{original_file_info['component_count']} {original_file_info['height']} {original_file_info['width']} \" \\\n f\"{8 * original_file_info['bytes_per_sample']} {self.param_dict['max_error']} {self.param_dict['data_format']}\"\n return s\n\n def decompress(self, compressed_path, reconstructed_path, original_file_info=None):\n if original_file_info[\"width\"] <= self.max_dimension_size and original_file_info[\n \"height\"] <= self.max_dimension_size:\n return self.decompress_one(compressed_path=compressed_path,\n reconstructed_path=reconstructed_path,\n original_file_info=original_file_info)\n else:\n tl_reader = tarlite.TarliteReader(tarlite_path=compressed_path)\n img = np.zeros(\n (original_file_info[\"width\"], original_file_info[\"height\"], original_file_info[\"component_count\"]),\n dtype=isets.iproperties_row_to_numpy_dtype(image_properties_row=original_file_info))\n total_decompression_time = 0\n with tempfile.TemporaryDirectory(dir=options.base_tmp_dir) as tmp_dir:\n tl_reader.extract_all(output_dir_path=tmp_dir)\n invocation = f\"ls -lah {tmp_dir}\"\n status, output = subprocess.getstatusoutput(invocation)\n if status != 0:\n raise Exception(\"Status = {} != 0.\\nInput=[{}].\\nOutput=[{}]\".format(\n status, invocation, output))\n\n for y in range(self.split_height_count):\n for x in range(self.split_width_count):\n small_compressed_path = os.path.join(tmp_dir, f\"{x}_{y}.mcalic\")\n assert os.path.exists(small_compressed_path)\n small_path = os.path.join(tmp_dir, f\"{x}_{y}.raw\")\n small_file_info = copy.copy(original_file_info)\n\n small_file_info[\"height\"] = original_file_info[\"height\"] // self.split_height_count \\\n if y < self.split_height_count - 1 \\\n else original_file_info[\"height\"] - (self.split_height_count - 1) * (\n original_file_info[\"height\"] // self.split_height_count)\n\n small_file_info[\"width\"] = original_file_info[\"width\"] // self.split_width_count \\\n if x < self.split_width_count - 1 \\\n else original_file_info[\"width\"] - (self.split_width_count - 1) * (\n original_file_info[\"width\"] // self.split_width_count)\n\n dr = self.decompress_one(compressed_path=small_compressed_path, reconstructed_path=small_path,\n original_file_info=small_file_info)\n total_decompression_time += dr.decompression_time_seconds\n small_array = isets.load_array_bsq(file_or_path=small_path,\n image_properties_row=small_file_info)\n img[x * (original_file_info[\"width\"] // self.split_width_count):\n (((x + 1) * (original_file_info[\n \"width\"] // self.split_width_count)) if x < self.split_width_count - 1 else\n original_file_info[\"width\"]),\n y * (original_file_info[\"height\"] // self.split_height_count):\n (((y + 1) * (original_file_info[\n \"height\"] // self.split_height_count)) if y < self.split_height_count - 1 else\n original_file_info[\"height\"]),\n :] = small_array\n isets.dump_array_bsq(array=img, file_or_path=reconstructed_path)\n\n decompression_results = self.decompression_results_from_paths(\n compressed_path=compressed_path, reconstructed_path=reconstructed_path)\n decompression_results.decompression_time_seconds = total_decompression_time\n return decompression_results\n\n def decompress_one(self, compressed_path, reconstructed_path, original_file_info=None):\n total_decompression_time = 0\n with tempfile.NamedTemporaryFile(\n dir=options.base_tmp_dir,\n prefix=f\"reconstructed_{os.path.basename(reconstructed_path)}\",\n suffix=\".raw\") as bil_le_file:\n 
offset = 0\n if original_file_info[\"signed\"]:\n reader = tarlite.TarliteReader(compressed_path)\n with tempfile.TemporaryDirectory(dir=options.base_tmp_dir) as tmp_extract_dir:\n reader.extract_all(tmp_extract_dir)\n\n import subprocess\n invocation = f\"ls -lah {tmp_extract_dir}\"\n status, output = subprocess.getstatusoutput(invocation)\n if status != 0:\n raise Exception(\"Status = {} != 0.\\nInput=[{}].\\nOutput=[{}]\".format(\n status, invocation, output))\n\n with open(glob.glob(os.path.join(tmp_extract_dir, \"*.txt\"))[0]) as si_file:\n offset = int(si_file.read())\n assert offset >= 0\n\n os.path.getsize(tmp_extract_dir)\n inner_compressed_path = glob.glob(os.path.join(tmp_extract_dir, \"*.mcalic\"))[0]\n\n dr = self.decompress_short_names(compressed_path=inner_compressed_path,\n reconstructed_path=bil_le_file.name,\n original_file_info=original_file_info)\n total_decompression_time += dr.decompression_time_seconds\n else:\n dr = self.decompress_short_names(compressed_path=compressed_path, reconstructed_path=bil_le_file.name,\n original_file_info=original_file_info)\n total_decompression_time += dr.decompression_time_seconds\n\n original_dtype = isets.iproperties_row_to_numpy_dtype(image_properties_row=original_file_info)\n\n img = np.fromfile(bil_le_file.name, dtype=original_dtype.replace(\">\", \"<\").replace(\"i\", \"u\")).reshape(\n original_file_info[\"height\"], original_file_info[\"component_count\"], original_file_info[\"width\"])\n\n if original_file_info[\"signed\"]:\n if offset != 0:\n img = (img.astype(\"i4\") - offset).astype(original_dtype)\n else:\n img = img.astype(original_dtype)\n\n img = img.swapaxes(0, 1)\n if original_file_info[\"big_endian\"] and not original_file_info[\"signed\"]:\n # Signed file are already converted back to big_endian if necessary in the previous call to astype()\n img = img.astype(original_dtype)\n img.tofile(reconstructed_path)\n\n try:\n # The decoder always produces this file\n os.remove(\"seqRec\")\n except FileNotFoundError:\n pass\n\n decompression_results = self.decompression_results_from_paths(compressed_path=compressed_path,\n reconstructed_path=reconstructed_path)\n decompression_results.decompression_time_seconds = total_decompression_time\n return decompression_results\n\n def decompress_short_names(self, compressed_path, reconstructed_path, original_file_info):\n \"\"\"Binary seems to have problems with too long file names\n \"\"\"\n c_path = f\"{random.randint(0, 10000000)}.raw\"\n r_path = f\"{random.randint(0, 10000000)}.raw\"\n try:\n shutil.copyfile(compressed_path, c_path)\n decompression_results = super().decompress(compressed_path=c_path,\n reconstructed_path=r_path,\n original_file_info=original_file_info)\n shutil.copyfile(r_path, reconstructed_path)\n decompression_results.compressed_path = compressed_path\n decompression_results.reconstructed_path = reconstructed_path\n return decompression_results\n finally:\n for p in (c_path, r_path):\n if os.path.exists(p):\n os.remove(p)\n\n def get_decompression_params(self, compressed_path, reconstructed_path, original_file_info):\n return f\"{reconstructed_path} {compressed_path} \" \\\n f\"{original_file_info['component_count']} {original_file_info['height']} {original_file_info['width']} \" \\\n f\"{8*original_file_info['bytes_per_sample']} {self.param_dict['max_error']} {self.param_dict['data_format']}\"\n\n @property\n def label(self):\n return \"M-CALIC\"\n"
] | [
[
"numpy.fromfile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
luizsantos-1/tf-keras-vis | [
"4d26dc9c65270f738987b653d5956ea082c35c2b",
"4d26dc9c65270f738987b653d5956ea082c35c2b"
] | [
"tf_keras_vis/utils/regularizers.py",
"tf_keras_vis/utils/model_modifiers.py"
] | [
"import warnings\nfrom abc import ABC, abstractmethod\n\nimport numpy as np\nimport tensorflow as tf\nfrom deprecated import deprecated\n\nwarnings.warn(('`tf_keras_vis.utils.regularizers` module is deprecated. '\n 'Please use `tf_keras_vis.activation_maximization.regularizers` instead.'),\n DeprecationWarning)\n\n\n@deprecated(version='0.7.0',\n reason=\"Please use `tf_keras_vis.activation_maximization.regularizers.Regularizer`\"\n \" class instead of this.\")\nclass LegacyRegularizer(ABC):\n def __init__(self, name):\n self.name = name\n\n @abstractmethod\n def __call__(self, inputs):\n raise NotImplementedError()\n\n\nRegularizer = LegacyRegularizer\n\n\n@deprecated(version='0.7.0',\n reason=\"The class has a bug that the calculated value is incorrect (too small) \"\n \"when the `batch_size` is greater than one. So please use \"\n \"`tf_keras_vis.activation_maximization.regularizers.TotalVariation2D`\"\n \" class instead of this.\")\nclass TotalVariation2D(LegacyRegularizer):\n def __init__(self, weight=10., name='TotalVariation2D'):\n super().__init__(name)\n self.weight = weight\n\n def __call__(self, overall_inputs):\n tv = 0.\n for X in overall_inputs:\n tv += tf.image.total_variation(X) / np.prod(X.shape)\n return self.weight * tv\n\n\n@deprecated(\n version='0.6.0',\n reason=\"Please use `tf_keras_vis.activation_maximization.regularizers.TotalVariation2D`\"\n \" class instead of this.\")\nclass TotalVariation(TotalVariation2D):\n def __init__(self, weight=10.):\n super().__init__(weight=weight, name='TotalVariation') # pragma: no cover\n\n\n@deprecated(version='0.7.0',\n reason=\"The class has a bug that the calculated value is incorrect (too small). \"\n \"So please use `tf_keras_vis.activation_maximization.regularizers.Norm`\"\n \" class instead of this.\")\nclass Norm(LegacyRegularizer):\n def __init__(self, weight=10., p=2, name='Norm'):\n super().__init__(name)\n self.weight = weight\n self.p = p\n\n def __call__(self, overall_inputs):\n norm = 0.\n for X in overall_inputs:\n X = tf.reshape(X, (X.shape[0], -1))\n norm += tf.norm(X, ord=self.p, axis=-1) / X.shape[1]\n return self.weight * norm\n\n\n@deprecated(version='0.6.0',\n reason=\"Please use `tf_keras_vis.activation_maximization.regularizers.Norm`\"\n \" class instead of this.\")\nclass L2Norm(Norm):\n def __init__(self, weight=10.):\n super().__init__(weight=weight, p=2, name='L2Norm') # pragma: no cover\n",
"from abc import ABC, abstractmethod\nfrom typing import Union\n\nimport tensorflow as tf\nfrom packaging.version import parse as version\n\nif version(tf.version.VERSION) < version(\"2.6.0rc0\"):\n from tensorflow.python.keras.layers.convolutional import Conv\nelse:\n from keras.layers.convolutional import Conv\n\nfrom . import find_layer\n\n\nclass ModelModifier(ABC):\n \"\"\"Abstract class for defining a model modifier.\n \"\"\"\n @abstractmethod\n def __call__(self, model) -> Union[None, tf.keras.Model]:\n \"\"\"Implement modification to the model before processing gradient descent.\n\n Args:\n model: A model instance.\n\n Raises:\n NotImplementedError: This method must be overwritten.\n\n Returns: Modified model or None.\n \"\"\"\n raise NotImplementedError()\n\n\nclass ReplaceToLinear(ModelModifier):\n \"\"\"A model modifier that replaces the activation functions of all output layers to\n `tf.keras.activations.linear`.\n\n Please note that this modifier must be set the end of modifiers list\n that is passed to `ModelVisualization#__init__()`. For example::\n\n # When visualizing `intermediate-1` layer.\n ActivationMaximization(YOUR_MODEL,\n model_modifier=[ExtractIntermediateLayer(\"intermediate-1\"),\n ReplaceToLinear()])\n \"\"\"\n def __call__(self, model) -> None:\n layers = (model.get_layer(name=name) for name in model.output_names)\n for layer in layers:\n layer.activation = tf.keras.activations.linear\n\n\nclass ExtractIntermediateLayer(ModelModifier):\n \"\"\"A model modifier that constructs new model instance\n whose output layer is an intermediate layer of `model`.\n\n This modifier will be used to visualize the features of the model layer.\n \"\"\"\n def __init__(self, index_or_name) -> None:\n if not isinstance(index_or_name, (str, int)):\n raise TypeError(\"The type of `index_or_name` must be a object of string or integer.\"\n f\"index_or_name: {index_or_name}\")\n self.index_or_name = index_or_name\n\n def __call__(self, model) -> tf.keras.Model:\n if isinstance(self.index_or_name, int):\n target_layer = model.get_layer(index=self.index_or_name)\n if isinstance(self.index_or_name, str):\n target_layer = model.get_layer(name=self.index_or_name)\n return tf.keras.Model(inputs=model.inputs, outputs=target_layer.output)\n\n\nclass GuidedBackpropagation(ModelModifier):\n \"\"\"A model modifier that replaces the gradient calculation of activation functions to\n Guided calculation.\n\n For details on Guided back propagation, see the papers:\n\n References:\n * String For Simplicity: The All Convolutional Net (https://arxiv.org/pdf/1412.6806.pdf)\n * Grad-CAM: Why did you say that? Visual Explanations from Deep Networks via\n Gradient-based Localization (https://arxiv.org/pdf/1610.02391v1.pdf)\n\n Warnings:\n Please note that there is a discussion that Guided Backpropagation is not working well as\n model explanations.\n\n * Sanity Checks for Saliency Maps (https://arxiv.org/pdf/1810.03292.pdf)\n * Guided Grad-CAM is Broken! 
Sanity Checks for Saliency Maps\n (https://glassboxmedicine.com/2019/10/12/guided-grad-cam-is-broken-sanity-checks-for-saliency-maps/)\n \"\"\"\n def __init__(self, target_activations=[tf.keras.activations.relu]) -> None:\n self.target_activations = target_activations\n\n def _get_guided_activation(self, activation):\n @tf.custom_gradient\n def guided_activation(x):\n def grad(dy):\n return tf.cast(dy > 0, dy.dtype) * tf.cast(x > 0, dy.dtype) * dy\n\n return activation(x), grad\n\n return guided_activation\n\n def __call__(self, model) -> None:\n for layer in (layer for layer in model.layers if hasattr(layer, \"activation\")):\n if layer.activation in self.target_activations:\n layer.activation = self._get_guided_activation(layer.activation)\n\n\nclass ExtractIntermediateLayerForGradcam(ModelModifier):\n def __init__(self, penultimate_layer=None, seek_conv_layer=True, include_model_outputs=True):\n self.penultimate_layer = penultimate_layer\n self.seek_conv_layer = seek_conv_layer\n self.include_model_outputs = include_model_outputs\n\n def __call__(self, model):\n _layer = self.penultimate_layer\n if not isinstance(_layer, tf.keras.layers.Layer):\n if _layer is None:\n _layer = -1\n if isinstance(_layer, int) and _layer < len(model.layers):\n _layer = model.layers[_layer]\n elif isinstance(_layer, str):\n _layer = find_layer(model, lambda l: l.name == _layer)\n else:\n raise ValueError(f\"Invalid argument. `penultimate_layer`={self.penultimate_layer}\")\n if _layer is not None and self.seek_conv_layer:\n _layer = find_layer(model, lambda l: isinstance(l, Conv), offset=_layer)\n if _layer is None:\n raise ValueError(\"Unable to determine penultimate `Conv` layer. \"\n f\"`penultimate_layer`={self.penultimate_layer}\")\n penultimate_output = _layer.output\n if len(penultimate_output.shape) < 3:\n raise ValueError(\n \"Penultimate layer's output tensor MUST have \"\n f\"samples, spaces and channels dimensions. [{penultimate_output.shape}]\")\n outputs = [penultimate_output]\n if self.include_model_outputs:\n outputs = model.outputs + outputs\n return tf.keras.Model(inputs=model.inputs, outputs=outputs)\n"
] | [
[
"tensorflow.norm",
"tensorflow.reshape",
"numpy.prod",
"tensorflow.image.total_variation"
],
[
"tensorflow.cast",
"tensorflow.keras.Model"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
chalmersplasmatheory/DREAM | [
"715637ada94f5e35db16f23c2fd49bb7401f4a27"
] | [
"py/DREAM/Settings/Equations/DistributionFunction.py"
] | [
"\nimport numpy as np\nfrom . EquationException import EquationException\nfrom . UnknownQuantity import UnknownQuantity\nfrom .. import AdvectionInterpolation\nfrom .. TransportSettings import TransportSettings\n\n\n# BOUNDARY CONDITIONS (WHEN f_re IS DISABLED)\nBC_F_0 = 1\nBC_PHI_CONST = 2\nBC_DPHI_CONST = 3\n\n# Interpolation methods for advection term in kinetic equation\n# (we keep these for backwards compatibility)\nAD_INTERP_CENTRED = AdvectionInterpolation.AD_INTERP_CENTRED\nAD_INTERP_UPWIND = AdvectionInterpolation.AD_INTERP_UPWIND\nAD_INTERP_UPWIND_2ND_ORDER = AdvectionInterpolation.AD_INTERP_UPWIND_2ND_ORDER\nAD_INTERP_DOWNWIND = AdvectionInterpolation.AD_INTERP_DOWNWIND\nAD_INTERP_QUICK = AdvectionInterpolation.AD_INTERP_QUICK \nAD_INTERP_SMART = AdvectionInterpolation.AD_INTERP_SMART \nAD_INTERP_MUSCL = AdvectionInterpolation.AD_INTERP_MUSCL \nAD_INTERP_OSPRE = AdvectionInterpolation.AD_INTERP_OSPRE \nAD_INTERP_TCDF = AdvectionInterpolation.AD_INTERP_TCDF \n\nAD_INTERP_JACOBIAN_LINEAR = AdvectionInterpolation.AD_INTERP_JACOBIAN_LINEAR\nAD_INTERP_JACOBIAN_FULL = AdvectionInterpolation.AD_INTERP_JACOBIAN_FULL \nAD_INTERP_JACOBIAN_UPWIND = AdvectionInterpolation.AD_INTERP_JACOBIAN_UPWIND\n\nSYNCHROTRON_MODE_NEGLECT = 1\nSYNCHROTRON_MODE_INCLUDE = 2\n\nRIPPLE_MODE_NEGLECT = 1\nRIPPLE_MODE_BOX = 2\nRIPPLE_MODE_GAUSSIAN = 3\n\nDISTRIBUTION_MODE_NUMERICAL = 1\nDISTRIBUTION_MODE_ANALYTICAL = 2\n\nclass DistributionFunction(UnknownQuantity):\n \n\n def __init__(self, settings, name, grid,\n f=[0], initr=[0], initp=[0], initxi=[0],\n initppar=None, initpperp=None,\n rn0=None, n0=None, rT0=None, T0=None, bc=BC_PHI_CONST,\n ad_int_r=AD_INTERP_CENTRED, ad_int_p1=AD_INTERP_CENTRED,\n ad_int_p2=AD_INTERP_CENTRED, ad_jac_r=AD_INTERP_JACOBIAN_FULL,\n ad_jac_p1=AD_INTERP_JACOBIAN_FULL, ad_jac_p2=AD_INTERP_JACOBIAN_FULL,\n mode = DISTRIBUTION_MODE_NUMERICAL, fluxlimiterdamping=1.0):\n \"\"\"\n Constructor.\n \"\"\"\n super().__init__(settings=settings)\n\n self.name = name\n self.grid = grid\n\n self.boundarycondition = bc\n \n self.mode = mode\n self.ripplemode = RIPPLE_MODE_NEGLECT\n self.synchrotronmode = SYNCHROTRON_MODE_NEGLECT\n self.transport = TransportSettings(kinetic=True)\n self.fullIonJacobian = True\n\n self.advectionInterpolation = AdvectionInterpolation.AdvectionInterpolation(\n kinetic=True,\n ad_int_r=ad_int_r, ad_int_p1=ad_int_p1, ad_int_p2=ad_int_p2,\n ad_jac_r=ad_jac_r, ad_jac_p1=ad_jac_p1, ad_jac_p2=ad_jac_p2,\n fluxlimiterdamping=fluxlimiterdamping)\n\n self.n0 = rn0\n self.rn0 = n0\n\n self.T0 = rT0\n self.rT0 = T0\n\n self.init = None\n\n if f is not None:\n self.setInitialValue(f, r=initr, p=initp, xi=initxi, ppar=initppar, pperp=initpperp)\n elif n0 is not None:\n self.setInitialProfiles(rn0=rn0, n0=n0, rT0=rT0, T0=T0)\n\n\n def setBoundaryCondition(self, bc):\n \"\"\"\n Sets the boundary condition at p=pmax. For 'f_hot', this boundary\n condition is only used when 'f_re' is disabled.\n\n :param int bc: Flag specifying which boundary condition to use.\n \"\"\"\n self.boundarycondition = bc\n\n def setAdvectionInterpolationMethod(self,ad_int=None, ad_int_r=AD_INTERP_CENTRED,\n ad_int_p1=AD_INTERP_CENTRED, ad_int_p2=AD_INTERP_CENTRED, ad_jac=None, \n ad_jac_r=AD_INTERP_JACOBIAN_FULL, ad_jac_p1=AD_INTERP_JACOBIAN_FULL,\n ad_jac_p2=AD_INTERP_JACOBIAN_FULL, fluxlimiterdamping=1.0):\n \"\"\"\n Sets the interpolation method that is used in the advection terms of\n the kinetic equation. 
To set all three components, provide ad_int and/or ad_jac.\n Otherwise the three components can use separate interpolation methods.\n \n :param int ad_int: Interpolation method to use for all coordinates.\n :param int ad_int_r: Interpolation method to use for the radial coordinate.\n :param int ad_int_p1: Interpolation method to use for the first momentum coordinate.\n :param int ad_int_p2: Interpolation method to use for the second momentum coordinate.\n :param int ad_jac: Jacobian interpolation mode to use for all coordinates.\n :param int ad_jac_r: Jacobian interpolation mode to use for the radial coordinate.\n :param int ad_jac_p1: Jacobian interpolation mode to use for the first momentum coordinate.\n :param int ad_jac_p2: Jacobian interpolation mode to use for the second momentum coordinate.\n :param float fluxlimiterdamping: Damping parameter used to under-relax the interpolation coefficients during non-linear iterations (should be between 0 and 1).\n \"\"\"\n self.advectionInterpolation.setMethod(ad_int=ad_int, ad_int_r=ad_int_r,\n ad_int_p1=ad_int_p1, ad_int_p2=ad_int_p2, ad_jac=ad_jac, \n ad_jac_r=ad_jac_r, ad_jac_p1=ad_jac_p1,\n ad_jac_p2=ad_jac_p2, fluxlimiterdamping=fluxlimiterdamping)\n\n\n def setInitialProfiles(self, n0, T0, rn0=None, rT0=None):\n \"\"\"\n Sets the initial density and temperature profiles of the electron\n population.\n\n :param rn0: Radial grid on which the density is given.\n :param n0: Electron density profile.\n :param rT0: Radial grid on which the temperature is given.\n :param T0: Electron temperature profile.\n \"\"\"\n if rn0 is not None:\n self.rn0 = np.asarray(rn0)\n else:\n if not np.isscalar(n0):\n raise EquationException(\"{}: Non-scalar initial density profile given, but no radial grid specified.\".format(self.name))\n self.rn0 = np.array([0])\n\n if rT0 is not None:\n self.rT0 = np.asarray(rT0)\n else:\n if not np.isscalar(T0):\n raise EquationException(\"{}: Non-scalar initial temperature profile given, but no radial grid specified.\".format(self.name))\n self.rT0 = np.array([0])\n\n self.n0 = np.asarray(n0)\n self.T0 = np.asarray(T0)\n\n if self.rn0.ndim == 0: self.rn0 = np.asarray([self.rn0])\n if self.n0.ndim == 0: self.n0 = np.asarray([self.n0])\n if self.rT0.ndim == 0: self.rT0 = np.asarray([self.rT0])\n if self.T0.ndim == 0: self.T0 = np.asarray([self.T0])\n\n # Reset numerically provided distribution (if any)\n self.init = None\n\n self.verifyInitialProfiles()\n\n\n def setInitialValue(self, f, r, p=None, xi=None, ppar=None, pperp=None):\n \"\"\"\n Set the initial value of this electron distribution function. 
Only one\n of the pairs (p, xi) and (ppar, pperp) of momentum grids need to be\n given.\n\n :param f: Array representing the distribution function value on the grid (must have size (nr, nxi, np) or (nr, npperp, nppar))\n :param r: Radial grid on which the initial distribution is given.\n :param p: Momentum grid.\n :param xi: Pitch grid.\n :param ppar: Parallel momentum grid.\n :param pperp: Perpendicular momentum grid.\n \"\"\"\n self.init = {}\n\n def conv(v):\n if type(v) == list:\n return np.array(v)\n elif type(v) == float or type(v) == int:\n return np.array([float(v)])\n else:\n return v\n\n ff = conv(f)\n self.init['r'] = conv(r)\n\n if p is not None and xi is not None:\n self.init['p'] = conv(p)\n self.init['xi'] = conv(xi)\n self.init['ppar'] = np.array([])\n self.init['pperp'] = np.array([])\n\n if ff.size == 1:\n ff = ff * np.ones((self.init['r'].size, self.init['xi'].size, self.init['p'].size))\n elif ppar is not None and pperp is not None:\n self.init['ppar'] = conv(ppar)\n self.init['pperp'] = conv(pperp)\n self.init['p'] = np.array([])\n self.init['xi'] = np.array([])\n\n if ff.size == 1:\n ff = ff * np.ones((self.init['r'].size, self.init['pperp'].size, self.init['ppar'].size))\n else:\n raise EquationException(\"{}: No momentum grid given for initial value.\".format(self.name))\n\n self.init['x'] = ff\n\n # Reset initial profiles (if any)\n self.rn0 = self.rT0 = None\n self.n0 = self.T0 = None\n\n self.verifyInitialDistribution()\n\n\n def enableAnalyticalDistribution(self, mode=True):\n \"\"\"\n Enables/disables the use of an analytical distribution\n function to represent the electron population\n \"\"\"\n if mode:\n self.mode = DISTRIBUTION_MODE_ANALYTICAL\n else:\n self.mode = DISTRIBUTION_MODE_NUMERICAL\n\n\n def setRippleMode(self, mode):\n \"\"\"\n Enables/disables inclusion of pitch scattering due to the magnetic ripple.\n\n :param int mode: Flag indicating whether or not to include magnetic ripple effects.\n \"\"\"\n if type(mode) == bool:\n self.ripplemode = RIPPLE_MODE_BOX if mode else RIPPLE_MODE_NEGLECT\n else:\n self.ripplemode = int(mode)\n\n\n def setSynchrotronMode(self, mode):\n \"\"\"\n Sets the type of synchrotron losses to have (either enabled or disabled).\n\n :param int mode: Flag indicating whether or not to enable synchrotron losses (may be bool).\n \"\"\"\n if type(mode) == bool:\n self.synchrotronmode = SYNCHROTRON_MODE_INCLUDE if mode else SYNCHROTRON_MODE_NEGLECT\n else:\n self.synchrotronmode = int(mode)\n\n def enableIonJacobian(self, includeJacobian):\n \"\"\"\n Enables/disables the ion jacobian in the kinetic equation.\n\n :param bool includeJacobian: Flag indicating whether the ion jacobian will be added. 
True by default, False to disable.\n \"\"\"\n self.fullIonJacobian = includeJacobian\n\n def fromdict(self, data):\n \"\"\"\n Load data for this object from the given dictionary.\n\n :param dict data: Dictionary to load distribution function from.\n \"\"\"\n def scal(v):\n if type(v) == np.ndarray: return v[0]\n else: return v\n\n if 'mode' in data:\n self.mode = data['mode']\n if 'boundarycondition' in data:\n self.boundarycondition = data['boundarycondition']\n\n if 'adv_interp' in data:\n self.advectionInterpolation.fromdict(data['adv_interp'])\n if 'init' in data:\n self.init = data['init']\n elif ('n0' in data) and ('T0' in data):\n self.rn0 = data['n0']['r']\n self.n0 = data['n0']['x']\n self.rT0 = data['T0']['r']\n self.T0 = data['T0']['x']\n elif self.grid.enabled:\n raise EquationException(\"{}: Unrecognized specification of initial distribution function.\".format(self.name))\n\n if 'ripplemode' in data:\n self.ripplemode = int(scal(data['ripplemode']))\n\n if 'synchrotronmode' in data:\n self.synchrotronmode = data['synchrotronmode']\n if type(self.synchrotronmode) != int:\n self.synchrotronmode = int(self.synchrotronmode[0])\n\n if 'transport' in data:\n self.transport.fromdict(data['transport'])\n\n if 'fullIonJacobian' in data:\n self.fullIonJacobian = bool(data['fullIonJacobian'])\n\n self.verifySettings()\n\n\n def todict(self):\n \"\"\"\n Returns a Python dictionary containing all settings of this\n DistributionFunction object.\n\n :return: a dictionary, containing all settings of this object, which can be directly given to DREAM.\n \"\"\"\n data = {}\n data['mode'] = self.mode\n if self.grid.enabled:\n data = {'boundarycondition': self.boundarycondition}\n\n # Advection interpolation\n data['adv_interp'] = self.advectionInterpolation.todict()\n\n if self.init is not None:\n data['init'] = {}\n data['init']['x'] = self.init['x']\n data['init']['r'] = self.init['r']\n\n if self.init['p'].size > 0 and self.init['xi'].size > 0:\n data['init']['p'] = self.init['p']\n data['init']['xi'] = self.init['xi']\n elif self.init['ppar'].size > 0 and self.init['pperp'].size > 0:\n data['init']['ppar'] = self.init['ppar']\n data['init']['pperp'] = self.init['pperp']\n elif self.n0 is not None:\n data['n0'] = { 'r': self.rn0, 'x': self.n0 }\n data['T0'] = { 'r': self.rT0, 'x': self.T0 }\n \n data['ripplemode'] = self.ripplemode\n data['synchrotronmode'] = self.synchrotronmode\n data['transport'] = self.transport.todict()\n data['fullIonJacobian'] = self.fullIonJacobian\n\n if self.mode != DISTRIBUTION_MODE_NUMERICAL:\n data['n0'] = { 'r': self.rn0, 'x': self.n0 }\n data['T0'] = { 'r': self.rT0, 'x': self.T0 }\n\n\n return data\n\n\n def verifySettings(self):\n \"\"\"\n Verify that the settings of this unknown are correctly set.\n \"\"\"\n if self.grid.enabled:\n if self.mode != DISTRIBUTION_MODE_NUMERICAL:\n raise EquationException(\"{}: Invalid mode set. 
Must be 'NUMERICAL' when the grid is 'enabled'.\".format(self.name))\n bc = self.boundarycondition\n if (bc != BC_F_0) and (bc != BC_PHI_CONST) and (bc != BC_DPHI_CONST):\n raise EquationException(\"{}: Invalid external boundary condition set: {}.\".format(self.name, bc))\n if self.init is not None:\n self.verifyInitialDistribution()\n elif (self.n0 is not None) or (self.T0 is not None):\n self.verifyInitialProfiles()\n else:\n raise EquationException(\"{}: Invalid/no initial condition set for the distribution function.\".format(self.name))\n\n self.advectionInterpolation.verifySettings()\n\n if type(self.ripplemode) == bool:\n self.setRippleMode(self.ripplemode)\n elif type(self.ripplemode) != int:\n raise EquationException(\"{}: Invalid type of ripple mode option: {}\".format(self.name, type(self.ripplemode)))\n else:\n opt = [RIPPLE_MODE_NEGLECT, RIPPLE_MODE_BOX, RIPPLE_MODE_GAUSSIAN]\n if self.ripplemode not in opt:\n raise EquationException(\"{}: Invalid option for ripple mode.\".format(self.name, self.ripplemode))\n \n if type(self.synchrotronmode) == bool:\n self.setSynchrotronMode(self.synchrotronmode)\n elif type(self.synchrotronmode) != int:\n raise EquationException(\"{}: Invalid type of synchrotron mode option: {}\".format(self.name, type(self.synchrotronmode)))\n else:\n opt = [SYNCHROTRON_MODE_NEGLECT, SYNCHROTRON_MODE_INCLUDE]\n if self.synchrotronmode not in opt:\n raise EquationException(\"{}: Invalid option for synchrotron mode.\".format(self.name, self.synchrotronmode))\n\n self.transport.verifySettings()\n elif self.mode != DISTRIBUTION_MODE_NUMERICAL:\n # if fluid mode and analytical distribution,\n # initial profiles must be provided:\n self.verifyInitialProfiles()\n\n\n def verifyInitialDistribution(self):\n \"\"\"\n Verifies that the initial distribution function has\n been set correctly and consistently.\n \"\"\"\n if self.init is None:\n raise EquationException(\"{}: No initial distribution function specified.\".format(self.name))\n\n nr = self.init['r'].size\n p1, p2 = None, None\n p1name, p2name = None, None\n np1, np2 = 0, 0\n\n if self.init['p'].size > 0 and self.init['xi'].size > 0:\n p1name = 'p'\n p2name = 'xi'\n elif self.init['ppar'].size > 0 and self.init['pperp'].size > 0:\n p1name = 'ppar'\n p2name = 'pperp'\n else:\n raise EquationException(\"{}: No momentum grid given for initial value.\".format(self.name))\n\n p1 = self.init[p1name]\n p2 = self.init[p2name]\n\n if len(p1.shape) != 1:\n raise EquationException(\"{}: Invalid dimensions of momentum grid '{}'. Must be 1D array.\".format(self.name, p1name))\n elif len(p2.shape) != 1:\n raise EquationException(\"{}: Invalid dimensions of momentum grid '{}'. Must be 1D array.\".format(self.name, p2name))\n\n np1 = p1.size\n np2 = p2.size\n\n if self.init['x'].shape != (nr, np2, np1):\n raise EquationException(\"{}: Invalid size of initial distribution function: {}. 
Expected: {}.\".format(self.name, self.init['x'].shape, (nr, np2, np1)))\n\n\n def verifyInitialProfiles(self):\n \"\"\"\n Verifies that the initial density and temperature profiles\n are set correctly.\n \"\"\"\n if (self.n0 is None) or (self.T0 is None):\n raise EquationException(\"{}: No initial density and/or temperature profiles specified.\".format(self.name))\n if (self.rn0 is None) or (self.rT0 is None):\n raise EquationException(\"{}: No radial grids specified for the density and/or temperature profiles.\".format(self.name))\n\n if (self.n0.ndim != 1) or (self.rn0.ndim != 1) or (self.n0.size != self.rn0.size):\n raise EquationException(\"{}: Invalid number of elements of density profile: {}. Corresponding radial grid has {} elements.\"\n .format(self.name, self.n0.size, self.rn0.size))\n if (self.T0.ndim != 1) or (self.rT0.ndim != 1) or (self.T0.size != self.rT0.size):\n raise EquationException(\"{}: Invalid number of elements of temperature profile: {}. Corresponding radial grid has {} elements.\"\n .format(self.name, self.T0.size, self.rT0.size))\n\n\n"
] | [
[
"numpy.asarray",
"numpy.array",
"numpy.isscalar",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
janilbols-w/examples | [
"8df8e747857261ea481e0b2492413d52bf7cc3a8"
] | [
"super_resolution/model.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\n\n\nclass Net(nn.Module):\n def __init__(self, upscale_factor):\n super(Net, self).__init__()\n\n self.relu = nn.ReLU()\n self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))\n self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))\n self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))\n self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1))\n self.pixel_shuffle = nn.PixelShuffle(upscale_factor)\n\n self._initialize_weights()\n\n def forward(self, x):\n x = self.relu(self.conv1(x))\n x = self.relu(self.conv2(x))\n x = self.relu(self.conv3(x))\n x = self.pixel_shuffle(self.conv4(x))\n return x\n\n def _initialize_weights(self):\n init.orthogonal_(self.conv1.weight, init.calculate_gain('relu'))\n init.orthogonal_(self.conv2.weight, init.calculate_gain('relu'))\n init.orthogonal_(self.conv3.weight, init.calculate_gain('relu'))\n init.orthogonal_(self.conv4.weight)\n"
] | [
[
"torch.nn.init.calculate_gain",
"torch.nn.Conv2d",
"torch.nn.PixelShuffle",
"torch.nn.init.orthogonal_",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
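A quick, hypothetical smoke test for the Net class in the record above (the import path assumes super_resolution/model.py is importable as `model`): conv1 through conv4 keep the spatial size, conv4 emits upscale_factor**2 channels, and PixelShuffle rearranges them into a single channel upscaled by upscale_factor.

    import torch
    from model import Net  # assumed import path for super_resolution/model.py

    net = Net(upscale_factor=3)
    x = torch.randn(1, 1, 32, 32)    # one single-channel (luminance) patch
    with torch.no_grad():
        y = net(x)
    print(y.shape)                   # torch.Size([1, 1, 96, 96]): spatial size x3, back to 1 channel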
vfonsecad/transfertools | [
"df2b25cd59c0cc55b0d41304ed346ba7607506b3"
] | [
"transfertools/models/locit.py"
] | [
"# -*- coding: UTF-8 -*-\n\"\"\"\n\nLocalized instance transfer.\n\nReference:\n V. Vercruyssen, W. Meert, J. Davis.\n Transfer Learning for Anomaly Detection through Localized and Unsupervised Instance Selection.\n In AAAI Conference on Artificial Intelligence, New York, 2020.\n\n:author: Vincent Vercruyssen (2019)\n:license: Apache License, Version 2.0, see LICENSE for details.\n\"\"\"\n\nimport numpy as np\nimport scipy as sp\n\nfrom sklearn.base import BaseEstimator\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import BallTree\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\n\nfrom .base import BaseDetector\nfrom ..utils.preprocessing import TransferScaler\n\n\n# ----------------------------------------------------------------------------\n# LocIT class\n# ----------------------------------------------------------------------------\n\nclass LocIT(BaseEstimator, BaseDetector):\n \"\"\" Localized instance transfer algorithm.\n\n Parameters\n ----------\n psi : int (default=10)\n Neighborhood size.\n\n transfer_threshold : float in [0.0, 1.0], optional (default=0.5)\n Threshold for the classifier that predicts whether a source\n instance will be transferred or not. The higher, the stricter.\n A threshold of 0.0 (1.0) means all (no) instances are transferred.\n\n train_selection : str (default='random')\n How to select the negative training instances:\n 'farthest' --> select the farthest instance\n 'random' --> random instance selected\n 'edge' --> select the (psi+1)'th instance\n\n scaling : str (default='standard')\n Scale the source and target domain before transfer.\n\n Attributes\n ----------\n type_ : str\n The type of transfer learning (e.g., domain adaptation).\n\n X_trans_ : np.array of shape (<= n_samples,)\n The (transformed) source instances that are transferred.\n\n Ixs_trans_ : np.array of shape (n_samples, n_features)\n The indices of the instances selected for transfer.\n \"\"\"\n\n def __init__(self,\n psi=10,\n transfer_threshold=0.5,\n train_selection='random',\n scaling='standard',\n tol=1e-8,\n verbose=False):\n super().__init__(\n scaling=scaling,\n tol=tol,\n verbose=verbose)\n \n # initialize parameters\n self.psi = int(psi)\n self.transfer_threshold = float(transfer_threshold)\n self.train_selection = str(train_selection).lower()\n\n # type\n self.type_ = 'instance_selection'\n\n def fit(self, Xs=None, Xt=None, ys=None, yt=None):\n \"\"\" Fit the model on data X.\n\n Parameters\n ----------\n Xs : np.array of shape (n_samples, n_features), optional (default=None)\n The source instances.\n Xt : np.array of shape (n_samples, n_features), optional (default=None)\n The target instances.\n ys : np.array of shape (n_samples,), optional (default=None)\n The ground truth of the source instances.\n yt : np.array of shape (n_samples,), optional (default=None)\n The ground truth of the target instances.\n\n Returns\n -------\n self : object\n \"\"\"\n\n # check all inputs\n Xs, Xt, ys, yt = self._check_all_inputs(Xs, Xt, ys, yt)\n\n ns, _ = Xs.shape\n nt, _ = Xt.shape\n\n # align means: feature normalization/standardization!\n self.target_scaler_ = TransferScaler(self.scaling)\n self.source_scaler_ = TransferScaler(self.scaling)\n Xt = self.target_scaler_.fit_transform(Xt)\n Xs = self.source_scaler_.fit_transform(Xs)\n\n self.Xt_trans_ = Xt\n\n # fit classifier on the target domain\n self._fit_transfer_classifier(Xt)\n\n # transferred instances\n self.Ixs_trans_ = self._predict_transfer_classifier(Xs)\n self.Xs_trans_ 
= Xs[self.Ixs_trans_, :]\n \n return self\n\n def transfer(self, Xs, ys=None, return_indices=False):\n \"\"\" Apply transfer to the source instances.\n \n Parameters\n ----------\n Xs : np.array of shape (n_samples, n_features)\n The source instances.\n ys : np.array of shape (n_samples,), optional (default=None)\n The ground truth of the source instances.\n return_indices : bool, optional (default=False)\n Also return the indices of the source instances\n selected for transfer.\n\n Returns\n -------\n Xs_trans : np.array of shape (n_samples, n_features)\n The (transformed) source instances after transfer.\n Ixs_trans : np.array of shape (<= n_samples,)\n The indices of the source instances selected for transfer.\n \"\"\"\n \n # check all inputs\n Xs, ys = self._check_all_inputs(Xs=Xs, ys=ys)\n\n ns, _ = Xs.shape\n\n # scaling\n Xs = self.source_scaler_.transform(Xs)\n\n # transferred instances\n Ixs_trans = self._predict_transfer_classifier(Xs)\n Xs_trans = Xs[Ixs_trans, :]\n\n if return_indices:\n return Xs_trans, Ixs_trans\n return Xs_trans\n\n def _fit_transfer_classifier(self, Xt):\n \"\"\" Fit the transfer classifier.\n\n Parameters\n ----------\n Xt : np.array of shape (n_samples, n_features)\n The target instances.\n \"\"\"\n\n n, _ = Xt.shape\n\n # nearest neighbor search structures\n self.target_tree_ = BallTree(Xt, leaf_size=32, metric='euclidean')\n _, Ixs = self.target_tree_.query(Xt, k=n)\n\n # construct training instances\n X_train = np.zeros((2 * n, 2), dtype=float)\n y_train = np.zeros(2 * n, dtype=float)\n random_ixs = np.arange(0, n, 1)\n np.random.shuffle(random_ixs)\n\n for i in range(n):\n # POS training instances\n # local mean and covaraiance matrix of the current point\n NN_x = Xt[Ixs[i, 1:self.psi+1], :]\n mu_x = np.mean(NN_x, axis=0)\n C_x = np.cov(NN_x.T)\n\n # local mean and covariance matrix of the nearest neighbor\n nn_ix = Ixs[i, 1]\n NN_nn = Xt[Ixs[nn_ix, 1:self.psi+1], :]\n mu_nn = np.mean(NN_nn, axis=0)\n C_nn = np.cov(NN_nn.T)\n\n # NEG training instances\n # local mean and covariance matrix\n if self.train_selection == 'farthest':\n r_ix = Ixs[i, -1]\n elif self.train_selection == 'edge':\n r_ix = Ixs[i, self.psi+2]\n elif self.train_selection == 'random':\n r_ix = random_ixs[i]\n else:\n raise ValueError(self.train_selection,\n 'not in [farthest, edge, random]')\n NN_r = Xt[Ixs[r_ix, 1:self.psi], :]\n mu_r = np.mean(NN_r, axis=0)\n C_r = np.cov(NN_r.T)\n \n # training instances\n f_pos = np.array([float(np.linalg.norm(mu_x - mu_nn)), float(\n np.linalg.norm(C_x - C_nn)) / float(np.linalg.norm(C_x) + self.tol)])\n f_neg = np.array([float(np.linalg.norm(mu_x - mu_r)), float(\n np.linalg.norm(C_x - C_r)) / float(np.linalg.norm(C_x) + self.tol)])\n\n # labels\n X_train[2*i, :] = f_pos\n y_train[2*i] = 1.0\n X_train[2*i+1, :] = f_neg\n y_train[2*i+1] = 0.0\n \n # replace NaN and inf by\n X_train = np.nan_to_num(X_train)\n\n # scale training instances\n self.scaler_ = StandardScaler()\n X_train = self.scaler_.fit_transform(X_train)\n\n # train the classifier\n self.clf = self._optimal_transfer_classifier(X_train, y_train)\n\n def _predict_transfer_classifier(self, X):\n \"\"\" Predict transfer with the classifier.\n\n Parameters\n ----------\n X : np.array of shape (n_samples, n_features)\n The input instances.\n\n Returns\n -------\n Ix_trans : np.array of shape (<= n_samples,)\n The indices of the source instances selected for transfer.\n\n Comments\n --------\n Needs the target data.\n \"\"\"\n\n n, _ = X.shape\n\n # nearest neighbor search structures\n 
self.source_tree_ = BallTree(X, leaf_size=32, metric='euclidean')\n _, Ixs = self.source_tree_.query(X, k=self.psi+1)\n _, Ixt = self.target_tree_.query(X, k=self.psi)\n\n # construct feature vectors\n X_feat = np.zeros((n, 2), dtype=float)\n \n for i in range(n):\n # local mean and covariance matrix in the source domain\n NN_s = X[Ixs[i, 1:self.psi+1], :]\n mu_s = np.mean(NN_s, axis=0)\n C_s = np.cov(NN_s.T)\n\n # local mean and covariance matrix in the target domain\n NN_t = self.Xt_trans_[Ixt[i, :self.psi], :]\n mu_t = np.mean(NN_t, axis=0)\n C_t = np.cov(NN_t.T)\n\n # feature vector\n f = np.array([float(np.linalg.norm(mu_s - mu_t)), float(\n np.linalg.norm(C_s - C_t)) / float(np.linalg.norm(C_s) + self.tol)])\n X_feat[i, :] = f\n\n # nan to num\n X_feat = np.nan_to_num(X_feat)\n\n # scaling + predict\n X_feat = self.scaler_.transform(X_feat)\n labels = self.clf.predict(X_feat)\n Ix_trans = np.where(labels == 1.0)[0]\n\n return Ix_trans\n\n def _optimal_transfer_classifier(self, X, y):\n \"\"\" Optimal transfer classifier based on SVC.\n \"\"\"\n \n # parameters to tune\n tuned_parameters = [{'C': [0.01, 0.1, 0.5, 1, 10, 100],\n 'gamma': [0.01, 0.1, 0.5, 1, 10, 100],\n 'kernel': ['rbf']},\n {'kernel': ['linear'],\n 'C': [0.01, 0.1, 0.5, 1, 10, 100]}]\n \n # grid search\n svc = SVC(probability=True)\n clf = GridSearchCV(svc, tuned_parameters, cv=3, refit=True)\n clf.fit(X, y)\n \n return clf"
] | [
[
"sklearn.model_selection.GridSearchCV",
"sklearn.neighbors.BallTree",
"numpy.arange",
"numpy.nan_to_num",
"numpy.random.shuffle",
"numpy.linalg.norm",
"numpy.cov",
"numpy.mean",
"sklearn.svm.SVC",
"sklearn.preprocessing.StandardScaler",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
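A minimal usage sketch for the LocIT class above on synthetic data; the public import path is assumed from the file location, and the input checking and scaling come from base classes (BaseDetector, TransferScaler) that are not part of this record.

    import numpy as np
    from transfertools.models import LocIT  # assumed public import path

    rng = np.random.RandomState(0)
    Xs = rng.randn(200, 5)            # source-domain instances
    Xt = rng.randn(100, 5) + 0.5      # target-domain instances (shifted)

    locit = LocIT(psi=10, transfer_threshold=0.5, train_selection='random')
    locit.fit(Xs=Xs, Xt=Xt)
    Xs_trans, ixs = locit.transfer(Xs, return_indices=True)
    print(Xs_trans.shape, ixs.shape)  # only the source instances predicted as transferable remain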
FabioRosado/arviz | [
"6b958cc5abeb0cc3a1186f4a3dbeeaba73b899ee",
"6b958cc5abeb0cc3a1186f4a3dbeeaba73b899ee",
"6b958cc5abeb0cc3a1186f4a3dbeeaba73b899ee"
] | [
"arviz/plots/separationplot.py",
"arviz/data/io_numpyro.py",
"arviz/plots/backends/matplotlib/traceplot.py"
] | [
"\"\"\"Separation plot for discrete outcome models.\"\"\"\nimport warnings\n\nimport numpy as np\nimport xarray as xr\n\nfrom ..data import InferenceData\nfrom ..rcparams import rcParams\nfrom .plot_utils import get_plotting_function\n\n\ndef plot_separation(\n idata=None,\n y=None,\n y_hat=None,\n y_hat_line=False,\n expected_events=False,\n figsize=None,\n textsize=None,\n color=\"C0\",\n legend=True,\n ax=None,\n plot_kwargs=None,\n y_hat_line_kwargs=None,\n exp_events_kwargs=None,\n backend=None,\n backend_kwargs=None,\n show=None,\n):\n \"\"\"Separation plot for binary outcome models.\n\n Model predictions are sorted and plotted using a color code according to\n the observed data.\n\n Parameters\n ----------\n idata : InferenceData\n InferenceData object.\n y : array, DataArray or str\n Observed data. If str, idata must be present and contain the observed data group\n y_hat : array, DataArray or str\n Posterior predictive samples for ``y``. It must have the same shape as y. If str or\n None, idata must contain the posterior predictive group.\n y_hat_line : bool, optional\n Plot the sorted `y_hat` predictions.\n expected_events : bool, optional\n Plot the total number of expected events.\n figsize : figure size tuple, optional\n If None, size is (8 + numvars, 8 + numvars)\n textsize: int, optional\n Text size for labels. If None it will be autoscaled based on figsize.\n color : str, optional\n Color to assign to the postive class. The negative class will be plotted using the\n same color and an `alpha=0.3` transparency.\n legend : bool, optional\n Show the legend of the figure.\n ax: axes, optional\n Matplotlib axes or bokeh figures.\n plot_kwargs : dict, optional\n Additional keywords passed to :meth:`mpl:matplotlib.axes.Axes.bar` or\n :meth:`bokeh:bokeh.plotting.Figure.vbar` for separation plot.\n y_hat_line_kwargs : dict, optional\n Additional keywords passed to ax.plot for `y_hat` line.\n exp_events_kwargs : dict, optional\n Additional keywords passed to ax.scatter for expected_events marker.\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used. For additional documentation\n check the plotting method of the backend.\n show : bool, optional\n Call backend show function.\n\n Returns\n -------\n axes : matplotlib axes or bokeh figures\n\n References\n ----------\n .. [1] Greenhill, B. *et al.*, The Separation Plot: A New Visual Method\n for Evaluating the Fit of Binary Models, *American Journal of\n Political Science*, (2011) see https://doi.org/10.1111/j.1540-5907.2011.00525.x\n\n Examples\n --------\n Separation plot for a logistic regression model.\n\n .. 
plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> idata = az.load_arviz_data('classification10d')\n >>> az.plot_separation(idata=idata, y='outcome', y_hat='outcome', figsize=(8, 1))\n\n \"\"\"\n label_y_hat = \"y_hat\"\n if idata is not None and not isinstance(idata, InferenceData):\n raise ValueError(\"idata must be of type InferenceData or None\")\n\n if idata is None:\n if not all(isinstance(arg, (np.ndarray, xr.DataArray)) for arg in (y, y_hat)):\n raise ValueError(\n \"y and y_hat must be array or DataArray when idata is None \"\n \"but they are of types {}\".format([type(arg) for arg in (y, y_hat)])\n )\n else:\n\n if y_hat is None and isinstance(y, str):\n label_y_hat = y\n y_hat = y\n elif y_hat is None:\n raise ValueError(\"y_hat cannot be None if y is not a str\")\n\n if isinstance(y, str):\n y = idata.observed_data[y].values\n elif not isinstance(y, (np.ndarray, xr.DataArray)):\n raise ValueError(\"y must be of types array, DataArray or str, not {}\".format(type(y)))\n\n if isinstance(y_hat, str):\n label_y_hat = y_hat\n y_hat = idata.posterior_predictive[y_hat].mean(dim=(\"chain\", \"draw\")).values\n elif not isinstance(y_hat, (np.ndarray, xr.DataArray)):\n raise ValueError(\n \"y_hat must be of types array, DataArray or str, not {}\".format(type(y_hat))\n )\n\n if len(y) != len(y_hat):\n warnings.warn(\n \"y and y_hat must be the same lenght\",\n UserWarning,\n )\n\n locs = np.linspace(0, 1, len(y_hat))\n width = np.diff(locs).mean()\n\n separation_kwargs = dict(\n y=y,\n y_hat=y_hat,\n y_hat_line=y_hat_line,\n label_y_hat=label_y_hat,\n expected_events=expected_events,\n figsize=figsize,\n textsize=textsize,\n color=color,\n legend=legend,\n locs=locs,\n width=width,\n ax=ax,\n plot_kwargs=plot_kwargs,\n y_hat_line_kwargs=y_hat_line_kwargs,\n exp_events_kwargs=exp_events_kwargs,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n plot = get_plotting_function(\"plot_separation\", \"separationplot\", backend)\n axes = plot(**separation_kwargs)\n\n return axes\n",
"\"\"\"NumPyro-specific conversion code.\"\"\"\nimport logging\nfrom typing import Callable, Optional\n\nimport numpy as np\n\nfrom .. import utils\nfrom ..rcparams import rcParams\nfrom .base import dict_to_dataset, requires\nfrom .inference_data import InferenceData\n\n_log = logging.getLogger(__name__)\n\n\nclass NumPyroConverter:\n \"\"\"Encapsulate NumPyro specific logic.\"\"\"\n\n # pylint: disable=too-many-instance-attributes\n\n model = None # type: Optional[Callable]\n nchains = None # type: int\n ndraws = None # type: int\n\n def __init__(\n self,\n *,\n posterior=None,\n prior=None,\n posterior_predictive=None,\n predictions=None,\n constant_data=None,\n predictions_constant_data=None,\n log_likelihood=None,\n index_origin=None,\n coords=None,\n dims=None,\n pred_dims=None,\n num_chains=1\n ):\n \"\"\"Convert NumPyro data into an InferenceData object.\n\n Parameters\n ----------\n posterior : numpyro.mcmc.MCMC\n Fitted MCMC object from NumPyro\n prior: dict\n Prior samples from a NumPyro model\n posterior_predictive : dict\n Posterior predictive samples for the posterior\n predictions: dict\n Out of sample predictions\n constant_data: dict\n Dictionary containing constant data variables mapped to their values.\n predictions_constant_data: dict\n Constant data used for out-of-sample predictions.\n index_origin : int, optinal\n coords : dict[str] -> list[str]\n Map of dimensions to coordinates\n dims : dict[str] -> list[str]\n Map variable names to their coordinates\n pred_dims: dict\n Dims for predictions data. Map variable names to their coordinates.\n num_chains: int\n Number of chains used for sampling. Ignored if posterior is present.\n \"\"\"\n import jax\n import numpyro\n\n self.posterior = posterior\n self.prior = jax.device_get(prior)\n self.posterior_predictive = jax.device_get(posterior_predictive)\n self.predictions = predictions\n self.constant_data = constant_data\n self.predictions_constant_data = predictions_constant_data\n self.log_likelihood = (\n rcParams[\"data.log_likelihood\"] if log_likelihood is None else log_likelihood\n )\n self.index_origin = rcParams[\"data.index_origin\"] if index_origin is None else index_origin\n self.coords = coords\n self.dims = dims\n self.pred_dims = pred_dims\n self.numpyro = numpyro\n\n def arbitrary_element(dct):\n return next(iter(dct.values()))\n\n if posterior is not None:\n samples = jax.device_get(self.posterior.get_samples(group_by_chain=True))\n if not isinstance(samples, dict):\n # handle the case we run MCMC with a general potential_fn\n # (instead of a NumPyro model) whose args is not a dictionary\n # (e.g. 
f(x) = x ** 2)\n tree_flatten_samples = jax.tree_util.tree_flatten(samples)[0]\n samples = {\n \"Param:{}\".format(i): jax.device_get(v)\n for i, v in enumerate(tree_flatten_samples)\n }\n self._samples = samples\n self.nchains, self.ndraws = (\n posterior.num_chains,\n posterior.num_samples // posterior.thinning,\n )\n self.model = self.posterior.sampler.model\n # model arguments and keyword arguments\n self._args = self.posterior._args # pylint: disable=protected-access\n self._kwargs = self.posterior._kwargs # pylint: disable=protected-access\n else:\n self.nchains = num_chains\n get_from = None\n if predictions is not None:\n get_from = predictions\n elif posterior_predictive is not None:\n get_from = posterior_predictive\n elif prior is not None:\n get_from = prior\n if get_from is None and constant_data is None and predictions_constant_data is None:\n raise ValueError(\n \"When constructing InferenceData must have at least\"\n \" one of posterior, prior, posterior_predictive or predictions.\"\n )\n if get_from is not None:\n aelem = arbitrary_element(get_from)\n self.ndraws = aelem.shape[0] // self.nchains\n\n observations = {}\n if self.model is not None:\n seeded_model = numpyro.handlers.seed(self.model, jax.random.PRNGKey(0))\n trace = numpyro.handlers.trace(seeded_model).get_trace(*self._args, **self._kwargs)\n observations = {\n name: site[\"value\"]\n for name, site in trace.items()\n if site[\"type\"] == \"sample\" and site[\"is_observed\"]\n }\n self.observations = observations if observations else None\n\n @requires(\"posterior\")\n def posterior_to_xarray(self):\n \"\"\"Convert the posterior to an xarray dataset.\"\"\"\n data = self._samples\n return dict_to_dataset(\n data,\n library=self.numpyro,\n coords=self.coords,\n dims=self.dims,\n index_origin=self.index_origin,\n )\n\n @requires(\"posterior\")\n def sample_stats_to_xarray(self):\n \"\"\"Extract sample_stats from NumPyro posterior.\"\"\"\n rename_key = {\n \"potential_energy\": \"lp\",\n \"adapt_state.step_size\": \"step_size\",\n \"num_steps\": \"n_steps\",\n \"accept_prob\": \"acceptance_rate\",\n }\n data = {}\n for stat, value in self.posterior.get_extra_fields(group_by_chain=True).items():\n if isinstance(value, (dict, tuple)):\n continue\n name = rename_key.get(stat, stat)\n value = value.copy()\n data[name] = value\n if stat == \"num_steps\":\n data[\"tree_depth\"] = np.log2(value).astype(int) + 1\n return dict_to_dataset(\n data,\n library=self.numpyro,\n dims=None,\n coords=self.coords,\n index_origin=self.index_origin,\n )\n\n @requires(\"posterior\")\n @requires(\"model\")\n def log_likelihood_to_xarray(self):\n \"\"\"Extract log likelihood from NumPyro posterior.\"\"\"\n if not self.log_likelihood:\n return None\n data = {}\n if self.observations is not None:\n samples = self.posterior.get_samples(group_by_chain=False)\n log_likelihood_dict = self.numpyro.infer.log_likelihood(\n self.model, samples, *self._args, **self._kwargs\n )\n for obs_name, log_like in log_likelihood_dict.items():\n shape = (self.nchains, self.ndraws) + log_like.shape[1:]\n data[obs_name] = np.reshape(log_like.copy(), shape)\n return dict_to_dataset(\n data,\n library=self.numpyro,\n dims=self.dims,\n coords=self.coords,\n index_origin=self.index_origin,\n skip_event_dims=True,\n )\n\n def translate_posterior_predictive_dict_to_xarray(self, dct, dims):\n \"\"\"Convert posterior_predictive or prediction samples to xarray.\"\"\"\n data = {}\n for k, ary in dct.items():\n shape = ary.shape\n if shape[0] == self.nchains and shape[1] 
== self.ndraws:\n data[k] = ary\n elif shape[0] == self.nchains * self.ndraws:\n data[k] = ary.reshape((self.nchains, self.ndraws, *shape[1:]))\n else:\n data[k] = utils.expand_dims(ary)\n _log.warning(\n \"posterior predictive shape not compatible with number of chains and draws. \"\n \"This can mean that some draws or even whole chains are not represented.\"\n )\n return dict_to_dataset(\n data,\n library=self.numpyro,\n coords=self.coords,\n dims=dims,\n index_origin=self.index_origin,\n )\n\n @requires(\"posterior_predictive\")\n def posterior_predictive_to_xarray(self):\n \"\"\"Convert posterior_predictive samples to xarray.\"\"\"\n return self.translate_posterior_predictive_dict_to_xarray(\n self.posterior_predictive, self.dims\n )\n\n @requires(\"predictions\")\n def predictions_to_xarray(self):\n \"\"\"Convert predictions to xarray.\"\"\"\n return self.translate_posterior_predictive_dict_to_xarray(self.predictions, self.pred_dims)\n\n def priors_to_xarray(self):\n \"\"\"Convert prior samples (and if possible prior predictive too) to xarray.\"\"\"\n if self.prior is None:\n return {\"prior\": None, \"prior_predictive\": None}\n if self.posterior is not None:\n prior_vars = list(self._samples.keys())\n prior_predictive_vars = [key for key in self.prior.keys() if key not in prior_vars]\n else:\n prior_vars = self.prior.keys()\n prior_predictive_vars = None\n priors_dict = {}\n for group, var_names in zip(\n (\"prior\", \"prior_predictive\"), (prior_vars, prior_predictive_vars)\n ):\n priors_dict[group] = (\n None\n if var_names is None\n else dict_to_dataset(\n {k: utils.expand_dims(self.prior[k]) for k in var_names},\n library=self.numpyro,\n coords=self.coords,\n dims=self.dims,\n index_origin=self.index_origin,\n )\n )\n return priors_dict\n\n @requires(\"observations\")\n @requires(\"model\")\n def observed_data_to_xarray(self):\n \"\"\"Convert observed data to xarray.\"\"\"\n return dict_to_dataset(\n self.observations,\n library=self.numpyro,\n dims=self.dims,\n coords=self.coords,\n default_dims=[],\n index_origin=self.index_origin,\n )\n\n @requires(\"constant_data\")\n def constant_data_to_xarray(self):\n \"\"\"Convert constant_data to xarray.\"\"\"\n return dict_to_dataset(\n self.constant_data,\n library=self.numpyro,\n dims=self.dims,\n coords=self.coords,\n default_dims=[],\n index_origin=self.index_origin,\n )\n\n @requires(\"predictions_constant_data\")\n def predictions_constant_data_to_xarray(self):\n \"\"\"Convert predictions_constant_data to xarray.\"\"\"\n return dict_to_dataset(\n self.predictions_constant_data,\n library=self.numpyro,\n dims=self.pred_dims,\n coords=self.coords,\n default_dims=[],\n index_origin=self.index_origin,\n )\n\n def to_inference_data(self):\n \"\"\"Convert all available data to an InferenceData object.\n\n Note that if groups can not be created (i.e., there is no `trace`, so\n the `posterior` and `sample_stats` can not be extracted), then the InferenceData\n will not have those groups.\n \"\"\"\n return InferenceData(\n **{\n \"posterior\": self.posterior_to_xarray(),\n \"sample_stats\": self.sample_stats_to_xarray(),\n \"log_likelihood\": self.log_likelihood_to_xarray(),\n \"posterior_predictive\": self.posterior_predictive_to_xarray(),\n \"predictions\": self.predictions_to_xarray(),\n **self.priors_to_xarray(),\n \"observed_data\": self.observed_data_to_xarray(),\n \"constant_data\": self.constant_data_to_xarray(),\n \"predictions_constant_data\": self.predictions_constant_data_to_xarray(),\n }\n )\n\n\ndef from_numpyro(\n 
posterior=None,\n *,\n prior=None,\n posterior_predictive=None,\n predictions=None,\n constant_data=None,\n predictions_constant_data=None,\n log_likelihood=None,\n index_origin=None,\n coords=None,\n dims=None,\n pred_dims=None,\n num_chains=1\n):\n \"\"\"Convert NumPyro data into an InferenceData object.\n\n For a usage example read the\n :ref:`Creating InferenceData section on from_numpyro <creating_InferenceData>`\n\n Parameters\n ----------\n posterior : numpyro.mcmc.MCMC\n Fitted MCMC object from NumPyro\n prior: dict\n Prior samples from a NumPyro model\n posterior_predictive : dict\n Posterior predictive samples for the posterior\n predictions: dict\n Out of sample predictions\n constant_data: dict\n Dictionary containing constant data variables mapped to their values.\n predictions_constant_data: dict\n Constant data used for out-of-sample predictions.\n index_origin : int, optional\n coords : dict[str] -> list[str]\n Map of dimensions to coordinates\n dims : dict[str] -> list[str]\n Map variable names to their coordinates\n pred_dims: dict\n Dims for predictions data. Map variable names to their coordinates.\n num_chains: int\n Number of chains used for sampling. Ignored if posterior is present.\n \"\"\"\n return NumPyroConverter(\n posterior=posterior,\n prior=prior,\n posterior_predictive=posterior_predictive,\n predictions=predictions,\n constant_data=constant_data,\n predictions_constant_data=predictions_constant_data,\n log_likelihood=log_likelihood,\n index_origin=index_origin,\n coords=coords,\n dims=dims,\n pred_dims=pred_dims,\n num_chains=num_chains,\n ).to_inference_data()\n",
"\"\"\"Matplotlib traceplot.\"\"\"\nimport warnings\nfrom itertools import cycle\n\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.lines import Line2D\nimport matplotlib.ticker as mticker\n\nfrom ....stats.density_utils import get_bins\nfrom ...distplot import plot_dist\nfrom ...plot_utils import _scale_fig_size, format_coords_as_labels\nfrom ...rankplot import plot_rank\nfrom . import backend_kwarg_defaults, backend_show, dealiase_sel_kwargs, matplotlib_kwarg_dealiaser\n\n\ndef plot_trace(\n data,\n var_names, # pylint: disable=unused-argument\n divergences,\n kind,\n figsize,\n rug,\n lines,\n circ_var_names,\n circ_var_units,\n compact,\n compact_prop,\n combined,\n chain_prop,\n legend,\n labeller,\n plot_kwargs,\n fill_kwargs,\n rug_kwargs,\n hist_kwargs,\n trace_kwargs,\n rank_kwargs,\n plotters,\n divergence_data,\n axes,\n backend_kwargs,\n backend_config, # pylint: disable=unused-argument\n show,\n):\n \"\"\"Plot distribution (histogram or kernel density estimates) and sampled values.\n\n If `divergences` data is available in `sample_stats`, will plot the location of divergences as\n dashed vertical lines.\n\n Parameters\n ----------\n data : obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n var_names : string, or list of strings\n One or more variables to be plotted.\n divergences : {\"bottom\", \"top\", None, False}\n Plot location of divergences on the traceplots. Options are \"bottom\", \"top\", or False-y.\n kind : {\"trace\", \"rank_bar\", \"rank_vlines\"}, optional\n Choose between plotting sampled values per iteration and rank plots.\n figsize : figure size tuple\n If None, size is (12, variables * 2)\n rug : bool\n If True adds a rugplot. Defaults to False. Ignored for 2D KDE. Only affects continuous\n variables.\n lines : tuple or list\n List of tuple of (var_name, {'coord': selection}, [line_positions]) to be overplotted as\n vertical lines on the density and horizontal lines on the trace.\n circ_var_names : string, or list of strings\n List of circular variables to account for when plotting KDE.\n circ_var_units : str\n Whether the variables in `circ_var_names` are in \"degrees\" or \"radians\".\n combined : bool\n Flag for combining multiple chains into a single line. If False (default), chains will be\n plotted separately.\n legend : bool\n Add a legend to the figure with the chain color code.\n plot_kwargs : dict\n Extra keyword arguments passed to `arviz.plot_dist`. Only affects continuous variables.\n fill_kwargs : dict\n Extra keyword arguments passed to `arviz.plot_dist`. Only affects continuous variables.\n rug_kwargs : dict\n Extra keyword arguments passed to `arviz.plot_dist`. Only affects continuous variables.\n hist_kwargs : dict\n Extra keyword arguments passed to `arviz.plot_dist`. Only affects discrete variables.\n trace_kwargs : dict\n Extra keyword arguments passed to `plt.plot`\n rank_kwargs : dict\n Extra keyword arguments passed to `arviz.plot_rank`\n Returns\n -------\n axes : matplotlib axes\n\n\n Examples\n --------\n Plot a subset variables\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> data = az.load_arviz_data('non_centered_eight')\n >>> coords = {'school': ['Choate', 'Lawrenceville']}\n >>> az.plot_trace(data, var_names=('theta_t', 'theta'), coords=coords)\n\n Show all dimensions of multidimensional variables in the same plot\n\n .. 
plot::\n :context: close-figs\n\n >>> az.plot_trace(data, compact=True)\n\n Combine all chains into one distribution\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_trace(data, var_names=('theta_t', 'theta'), coords=coords, combined=True)\n\n\n Plot reference lines against distribution and trace\n\n .. plot::\n :context: close-figs\n\n >>> lines = (('theta_t',{'school': \"Choate\"}, [-1]),)\n >>> az.plot_trace(data, var_names=('theta_t', 'theta'), coords=coords, lines=lines)\n\n \"\"\"\n # Set plot default backend kwargs\n if backend_kwargs is None:\n backend_kwargs = {}\n\n if circ_var_names is None:\n circ_var_names = []\n\n backend_kwargs = {**backend_kwarg_defaults(), **backend_kwargs}\n\n if lines is None:\n lines = ()\n\n num_chain_props = len(data.chain) + 1 if combined else len(data.chain)\n if not compact:\n chain_prop = \"color\" if chain_prop is None else chain_prop\n else:\n chain_prop = (\n {\n \"linestyle\": (\"solid\", \"dotted\", \"dashed\", \"dashdot\"),\n }\n if chain_prop is None\n else chain_prop\n )\n compact_prop = \"color\" if compact_prop is None else compact_prop\n\n if isinstance(chain_prop, str):\n chain_prop = {chain_prop: plt.rcParams[\"axes.prop_cycle\"].by_key()[chain_prop]}\n if isinstance(chain_prop, tuple):\n warnings.warn(\n \"chain_prop as a tuple will be deprecated in a future warning, use a dict instead\",\n FutureWarning,\n )\n chain_prop = {chain_prop[0]: chain_prop[1]}\n chain_prop = {\n prop_name: [prop for _, prop in zip(range(num_chain_props), cycle(props))]\n for prop_name, props in chain_prop.items()\n }\n\n if isinstance(compact_prop, str):\n compact_prop = {compact_prop: plt.rcParams[\"axes.prop_cycle\"].by_key()[compact_prop]}\n if isinstance(compact_prop, tuple):\n warnings.warn(\n \"compact_prop as a tuple will be deprecated in a future warning, use a dict instead\",\n FutureWarning,\n )\n compact_prop = {compact_prop[0]: compact_prop[1]}\n\n if figsize is None:\n figsize = (12, len(plotters) * 2)\n\n backend_kwargs.setdefault(\"figsize\", figsize)\n\n trace_kwargs = matplotlib_kwarg_dealiaser(trace_kwargs, \"plot\")\n trace_kwargs.setdefault(\"alpha\", 0.35)\n\n hist_kwargs = matplotlib_kwarg_dealiaser(hist_kwargs, \"hist\")\n hist_kwargs.setdefault(\"alpha\", 0.35)\n\n plot_kwargs = matplotlib_kwarg_dealiaser(plot_kwargs, \"plot\")\n fill_kwargs = matplotlib_kwarg_dealiaser(fill_kwargs, \"fill_between\")\n rug_kwargs = matplotlib_kwarg_dealiaser(rug_kwargs, \"scatter\")\n rank_kwargs = matplotlib_kwarg_dealiaser(rank_kwargs, \"bar\")\n\n textsize = plot_kwargs.pop(\"textsize\", 10)\n\n figsize, _, titlesize, xt_labelsize, linewidth, _ = _scale_fig_size(\n figsize, textsize, rows=len(plotters), cols=2\n )\n\n trace_kwargs.setdefault(\"linewidth\", linewidth)\n plot_kwargs.setdefault(\"linewidth\", linewidth)\n\n # Check the input for lines\n if lines is not None:\n all_var_names = set(plotter[0] for plotter in plotters)\n\n invalid_var_names = set()\n for line in lines:\n if line[0] not in all_var_names:\n invalid_var_names.add(line[0])\n if invalid_var_names:\n warnings.warn(\n \"A valid var_name should be provided, found {} expected from {}\".format(\n invalid_var_names, all_var_names\n )\n )\n\n if axes is None:\n fig = plt.figure(**backend_kwargs)\n spec = gridspec.GridSpec(ncols=2, nrows=len(plotters), figure=fig)\n\n # pylint: disable=too-many-nested-blocks\n for idx, (var_name, selection, isel, value) in enumerate(plotters):\n for idy in range(2):\n value = np.atleast_2d(value)\n\n circular = var_name in circ_var_names and not 
idy\n if var_name in circ_var_names and idy:\n circ_units_trace = circ_var_units\n else:\n circ_units_trace = False\n\n if axes is None:\n ax = fig.add_subplot(spec[idx, idy], polar=circular)\n else:\n ax = axes[idx, idy]\n\n if len(value.shape) == 2:\n if compact_prop:\n aux_plot_kwargs = dealiase_sel_kwargs(plot_kwargs, compact_prop, 0)\n aux_trace_kwargs = dealiase_sel_kwargs(trace_kwargs, compact_prop, 0)\n else:\n aux_plot_kwargs = plot_kwargs\n aux_trace_kwargs = trace_kwargs\n\n ax = _plot_chains_mpl(\n ax,\n idy,\n value,\n data,\n chain_prop,\n combined,\n xt_labelsize,\n rug,\n kind,\n aux_trace_kwargs,\n hist_kwargs,\n aux_plot_kwargs,\n fill_kwargs,\n rug_kwargs,\n rank_kwargs,\n circular,\n circ_var_units,\n circ_units_trace,\n )\n\n else:\n sub_data = data[var_name].sel(**selection)\n legend_labels = format_coords_as_labels(sub_data, skip_dims=(\"chain\", \"draw\"))\n legend_title = \", \".join(\n [\n \"{}\".format(coord_name)\n for coord_name in sub_data.coords\n if coord_name not in {\"chain\", \"draw\"}\n ]\n )\n value = value.reshape((value.shape[0], value.shape[1], -1))\n compact_prop_iter = {\n prop_name: [prop for _, prop in zip(range(value.shape[2]), cycle(props))]\n for prop_name, props in compact_prop.items()\n }\n handles = []\n for sub_idx, label in zip(range(value.shape[2]), legend_labels):\n aux_plot_kwargs = dealiase_sel_kwargs(plot_kwargs, compact_prop_iter, sub_idx)\n aux_trace_kwargs = dealiase_sel_kwargs(trace_kwargs, compact_prop_iter, sub_idx)\n ax = _plot_chains_mpl(\n ax,\n idy,\n value[..., sub_idx],\n data,\n chain_prop,\n combined,\n xt_labelsize,\n rug,\n kind,\n aux_trace_kwargs,\n hist_kwargs,\n aux_plot_kwargs,\n fill_kwargs,\n rug_kwargs,\n rank_kwargs,\n circular,\n circ_var_units,\n circ_units_trace,\n )\n if legend:\n handles.append(\n Line2D(\n [],\n [],\n label=label,\n **dealiase_sel_kwargs(aux_plot_kwargs, chain_prop, 0),\n )\n )\n if legend and idy == 0:\n ax.legend(handles=handles, title=legend_title)\n\n if value[0].dtype.kind == \"i\" and idy == 0:\n xticks = get_bins(value)\n ax.set_xticks(xticks[:-1])\n y = 1 / textsize\n if not idy:\n ax.set_yticks([])\n if circular:\n y = 0.13 if selection else 0.12\n ax.set_title(\n labeller.make_label_vert(var_name, selection, isel),\n fontsize=titlesize,\n wrap=True,\n y=textsize * y,\n )\n ax.tick_params(labelsize=xt_labelsize)\n\n xlims = ax.get_xlim()\n ylims = ax.get_ylim()\n\n if divergences:\n div_selection = {k: v for k, v in selection.items() if k in divergence_data.dims}\n divs = divergence_data.sel(**div_selection).values\n # if combined:\n # divs = divs.flatten()\n divs = np.atleast_2d(divs)\n\n for chain, chain_divs in enumerate(divs):\n div_draws = data.draw.values[chain_divs]\n div_idxs = np.arange(len(chain_divs))[chain_divs]\n if div_idxs.size > 0:\n if divergences == \"top\":\n ylocs = ylims[1]\n else:\n ylocs = ylims[0]\n values = value[chain, div_idxs]\n\n if circular:\n tick = [ax.get_rmin() + ax.get_rmax() * 0.60, ax.get_rmax()]\n for val in values:\n ax.plot(\n [val, val],\n tick,\n color=\"black\",\n markeredgewidth=1.5,\n markersize=30,\n alpha=trace_kwargs[\"alpha\"],\n zorder=0.6,\n )\n else:\n if kind == \"trace\" and idy:\n ax.plot(\n div_draws,\n np.zeros_like(div_idxs) + ylocs,\n marker=\"|\",\n color=\"black\",\n markeredgewidth=1.5,\n markersize=30,\n linestyle=\"None\",\n alpha=hist_kwargs[\"alpha\"],\n zorder=0.6,\n )\n elif not idy:\n ax.plot(\n values,\n np.zeros_like(values) + ylocs,\n marker=\"|\",\n color=\"black\",\n markeredgewidth=1.5,\n 
markersize=30,\n linestyle=\"None\",\n alpha=trace_kwargs[\"alpha\"],\n zorder=0.6,\n )\n\n for _, _, vlines in (j for j in lines if j[0] == var_name and j[1] == selection):\n if isinstance(vlines, (float, int)):\n line_values = [vlines]\n else:\n line_values = np.atleast_1d(vlines).ravel()\n if not np.issubdtype(line_values.dtype, np.number):\n raise ValueError(\n \"line-positions should be numeric, found {}\".format(line_values)\n )\n if idy:\n ax.hlines(\n line_values,\n xlims[0],\n xlims[1],\n colors=\"black\",\n linewidth=1.5,\n alpha=trace_kwargs[\"alpha\"],\n )\n\n else:\n ax.vlines(\n line_values,\n ylims[0],\n ylims[1],\n colors=\"black\",\n linewidth=1.5,\n alpha=trace_kwargs[\"alpha\"],\n )\n\n if kind == \"trace\" and idy:\n ax.set_xlim(left=data.draw.min(), right=data.draw.max())\n\n if legend:\n legend_kwargs = trace_kwargs if combined else plot_kwargs\n handles = [\n Line2D(\n [], [], label=chain_id, **dealiase_sel_kwargs(legend_kwargs, chain_prop, chain_id)\n )\n for chain_id in range(data.dims[\"chain\"])\n ]\n if combined:\n handles.insert(\n 0,\n Line2D(\n [], [], label=\"combined\", **dealiase_sel_kwargs(plot_kwargs, chain_prop, -1)\n ),\n )\n ax.figure.axes[0].legend(handles=handles, title=\"chain\", loc=\"upper right\")\n\n if axes is None:\n axes = np.array(ax.figure.axes).reshape(-1, 2)\n\n if backend_show(show):\n plt.show()\n\n return axes\n\n\ndef _plot_chains_mpl(\n axes,\n idy,\n value,\n data,\n chain_prop,\n combined,\n xt_labelsize,\n rug,\n kind,\n trace_kwargs,\n hist_kwargs,\n plot_kwargs,\n fill_kwargs,\n rug_kwargs,\n rank_kwargs,\n circular,\n circ_var_units,\n circ_units_trace,\n):\n\n if not circular:\n circ_var_units = False\n\n for chain_idx, row in enumerate(value):\n if kind == \"trace\":\n aux_kwargs = dealiase_sel_kwargs(trace_kwargs, chain_prop, chain_idx)\n if idy:\n axes.plot(data.draw.values, row, **aux_kwargs)\n if circ_units_trace == \"degrees\":\n y_tick_locs = axes.get_yticks()\n y_tick_labels = [i + 2 * 180 if i < 0 else i for i in np.rad2deg(y_tick_locs)]\n axes.yaxis.set_major_locator(mticker.FixedLocator(y_tick_locs))\n axes.set_yticklabels([f\"{i:.0f}°\" for i in y_tick_labels])\n\n if not combined:\n aux_kwargs = dealiase_sel_kwargs(plot_kwargs, chain_prop, chain_idx)\n if not idy:\n axes = plot_dist(\n values=row,\n textsize=xt_labelsize,\n rug=rug,\n ax=axes,\n hist_kwargs=hist_kwargs,\n plot_kwargs=aux_kwargs,\n fill_kwargs=fill_kwargs,\n rug_kwargs=rug_kwargs,\n backend=\"matplotlib\",\n show=False,\n is_circular=circ_var_units,\n circular=circular,\n )\n\n if kind == \"rank_bars\" and idy:\n axes = plot_rank(data=value, kind=\"bars\", ax=axes, **rank_kwargs)\n elif kind == \"rank_vlines\" and idy:\n axes = plot_rank(data=value, kind=\"vlines\", ax=axes, **rank_kwargs)\n\n if combined:\n aux_kwargs = dealiase_sel_kwargs(plot_kwargs, chain_prop, -1)\n if not idy:\n axes = plot_dist(\n values=value.flatten(),\n textsize=xt_labelsize,\n rug=rug,\n ax=axes,\n hist_kwargs=hist_kwargs,\n plot_kwargs=aux_kwargs,\n fill_kwargs=fill_kwargs,\n rug_kwargs=rug_kwargs,\n backend=\"matplotlib\",\n show=False,\n is_circular=circ_var_units,\n circular=circular,\n )\n return axes\n"
] | [
[
"numpy.diff"
],
[
"numpy.log2"
],
[
"numpy.issubdtype",
"numpy.rad2deg",
"numpy.atleast_1d",
"numpy.atleast_2d",
"numpy.zeros_like",
"matplotlib.ticker.FixedLocator",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
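plot_separation in the record above also accepts raw arrays when no InferenceData is given; a small synthetic example (made-up probabilities, default matplotlib backend assumed):

    import numpy as np
    import arviz as az

    rng = np.random.default_rng(0)
    y_hat = rng.uniform(0.0, 1.0, size=50)                    # mean predicted probabilities
    y = (rng.uniform(0.0, 1.0, size=50) < y_hat).astype(int)  # binary outcomes consistent with them

    ax = az.plot_separation(y=y, y_hat=y_hat, y_hat_line=True,
                            expected_events=True, figsize=(8, 1))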
dan-1d/ev_solar_models | [
"59f9da0122f1d83a663152744c9b3f8c13fad181"
] | [
"vehicles.py"
] | [
"import numpy as np\nimport pandas as pd\n\n\nclass vehicle_ice:\n def __init__(self, name, mpg_hwy, mpg_city, average_override=None):\n self.mpg_hwy = mpg_hwy\n self.mpg_city = mpg_city\n self.name = name\n if average_override != None:\n self.mpg_hwy = average_override\n self.mpg_city = average_override\n \n def get_ave_mpg(self, fract_hwy=.50):\n return fract_hwy*self.mpg_hwy + (1-fract_hwy)*self.mpg_city\n \n \nclass vehicle_ev:\n def __init__(self, name, kwh_per_mile, charge_rate_kw=3.6):\n self.kwh_per_mile = kwh_per_mile\n self.charge_rate_kw = charge_rate_kw\n \n def get_charge_needs_hourly(self, total_charge_kwh):\n daily_charge_needs_kwh = total_charge_kwh\n daily_charge_hourly_df = pd.DataFrame()\n daily_charge_hourly_df[\"hour\"] = np.arange(0,24,1)\n daily_charge_hourly_df[\"energy_kw\"] = np.zeros(24)\n charge_rate_kw = self.charge_rate_kw\n\n charge_remaining = daily_charge_needs_kwh\n current_hour = 0 #midnight\n while charge_remaining > charge_rate_kw:\n daily_charge_hourly_df[\"energy_kw\"]\n daily_charge_hourly_df.loc[daily_charge_hourly_df['hour'] == current_hour, \"energy_kw\"] = charge_rate_kw\n current_hour += 1\n charge_remaining -= charge_rate_kw\n # handle remaining charge for the fraction of an hour\n daily_charge_hourly_df.loc[daily_charge_hourly_df['hour'] == current_hour, \"energy_kw\"] = charge_remaining\n\n # print charge_remaining\n\n return daily_charge_hourly_df\n\n\nclass vehicle_usage:\n def __init__(self, miles_per_month, fract_hwy):\n self.miles_per_month = miles_per_month\n self.fract_hwy = fract_hwy\n\n \ndef gas_costs_per_month( miles_per_month, gas_dollars_per_gal, miles_per_gallon ):\n ## TODO: generalize so any input parameter can be a vector, and all combos are explored\n ## pass in: \n ## miles_per_month, miles_per_gallon: real number\n ## numpy arrays, \n ## Return: generate an ndarray\n ##\n dollars_per_month = miles_per_month * gas_dollars_per_gal * (1.0/miles_per_gallon)\n return dollars_per_month\n\n\ndef electr_cost_per_month( miles_per_month, ave_dollars_per_kwh, kwh_per_mile ):\n dollars_per_month = miles_per_month * ave_dollars_per_kwh * kwh_per_mile\n return dollars_per_month"
] | [
[
"numpy.arange",
"numpy.zeros",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
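A hypothetical monthly cost comparison built from the helpers above; the fuel and electricity prices are made-up inputs, and the import assumes vehicles.py is on the path.

    from vehicles import vehicle_ice, vehicle_ev, gas_costs_per_month, electr_cost_per_month

    ice = vehicle_ice("compact sedan", mpg_hwy=38, mpg_city=28)
    ev = vehicle_ev("compact EV", kwh_per_mile=0.28)

    miles = 1000  # miles driven per month
    gas_cost = gas_costs_per_month(miles, gas_dollars_per_gal=3.75,
                                   miles_per_gallon=ice.get_ave_mpg(fract_hwy=0.6))
    elec_cost = electr_cost_per_month(miles, ave_dollars_per_kwh=0.15,
                                      kwh_per_mile=ev.kwh_per_mile)
    print(f"gasoline: ${gas_cost:.0f}/month  electric: ${elec_cost:.0f}/month")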
joseluisvelasco/STELLOPT | [
"e064ebb96414d5afc4e205f43b44766558dca2af"
] | [
"BENCHMARKS/DIAGNO_TEST/compare_bigtok.py"
] | [
"#!/usr/bin/env python3\nimport numpy as np #For Arrays\nfrom math import sqrt\n\nlfail = 0\nfailtol = 1.0\nfileext='bigtok'\nfilename='diagno_bench.'+fileext\ndata = np.loadtxt(filename)\n\nprint('===== B-Field ======')\nfor i in range(0, 75):\n cal = sqrt(data[i][6]*data[i][6] + data[i][7]*data[i][7] + data[i][8]*data[i][8])\n act = sqrt(data[i][12]*data[i][12] + data[i][13]*data[i][13] + data[i][14]*data[i][14])\n perct = 100*(abs(act-cal)/abs(act))\n print(' '+str(cal)+' '+str(act)+' '+str(int(perct))+'%')\n if perct > failtol:\n lfail = 1\nprint('==== Flux Loops ====')\nfilename='diagno_flux.'+fileext+'_j'\nfilename2='diagno_flux.'+fileext+'_b'\nfile_handle = open(filename,'r')\nfile_handle2 = open(filename2,'r')\nline1 = file_handle.readline()\nline2 = file_handle2.readline()\nnlines = int(line1)\nfor i in range(0,nlines):\n\tline1 = file_handle.readline()\n\tline2 = file_handle2.readline()\n\tcal = float(line1)\n\tact = float(line2)\n\tif abs(act) < 1E-3:\n\t\tcontinue\n\tif abs(cal) < 1E-3:\n\t\tcontinue\n\tperct = 100*(abs(act-cal)/abs(act))\n\tprint(' '+str(cal)+' '+str(act)+' '+str(int(perct))+'%')\n\tif perct > failtol:\n\t\tlfail = 1\nfile_handle.close()\nfile_handle2.close()\n#with open(filename) as myfile:\n# head = myfile.next()\n#print(head)\nprint('=================')\n\nif (lfail):\n print(' STATUS: FAIL!!!!!')\nelse:\n print(' STATUS: PASS')\n\nquit()\n\n\n\n\n"
] | [
[
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
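The benchmark script above flags a failure whenever the relative difference between calculated and reference values exceeds failtol percent; a one-off illustration of that criterion with made-up numbers:

    failtol = 1.0                    # percent, as in compare_bigtok.py
    cal, act = 0.9987, 1.0000        # calculated vs. reference value (made up)
    perct = 100 * abs(act - cal) / abs(act)
    print(f"{perct:.2f}% -> {'FAIL' if perct > failtol else 'PASS'}")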
Andrewnar/ReChorus | [
"55ceb37beb7b9967a4d18d9899075a8d88d11ddb"
] | [
"src/models/developing/TiMiRecLight.py"
] | [
"# -*- coding: UTF-8 -*-\n# @Author : Chenyang Wang\n# @Email : [email protected]\n\nimport os\nimport logging\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.functional as F\nfrom datetime import datetime\n\nfrom models.BaseModel import SequentialModel\n\n\nclass TiMiRecLight(SequentialModel):\n runner = 'TiMiRunner'\n extra_log_args = ['emb_size', 'attn_size', 'K', 'temp', 'add_pos', 'n_layers']\n\n @staticmethod\n def parse_model_args(parser):\n parser.add_argument('--emb_size', type=int, default=64,\n help='Size of embedding vectors.')\n parser.add_argument('--attn_size', type=int, default=8,\n help='Size of attention vectors.')\n parser.add_argument('--K', type=int, default=2,\n help='Number of hidden intent.')\n parser.add_argument('--add_pos', type=int, default=1,\n help='Whether add position embedding.')\n parser.add_argument('--temp', type=float, default=1,\n help='Temperature in knowledge distillation loss.')\n parser.add_argument('--n_layers', type=int, default=1,\n help='Number of the projection layer.')\n parser.add_argument('--stage', type=int, default=3,\n help='Stage of training: 1-pretrain_extractor, 2-pretrain_predictor, 3-joint_finetune.')\n return SequentialModel.parse_model_args(parser)\n\n def __init__(self, args, corpus):\n self.emb_size = args.emb_size\n self.attn_size = args.attn_size\n self.K = args.K\n self.add_pos = args.add_pos\n self.temp = args.temp\n self.n_layers = args.n_layers\n self.stage = args.stage\n self.max_his = args.history_max\n super().__init__(args, corpus)\n\n self.extractor_path = '../model/TiMiRecLight/Extractor__{}__{}__emb_size={}__K={}__add_pos={}.pt'\\\n .format(corpus.dataset, args.random_seed, self.emb_size, self.K, self.add_pos)\n self.predictor_path = '../model/TiMiRecLight/Predictor__{}__{}__emb_size={}.pt' \\\n .format(corpus.dataset, args.random_seed, self.emb_size)\n if self.stage == 1:\n self.model_path = self.extractor_path\n elif self.stage == 2:\n self.model_path = self.predictor_path\n\n def _define_params(self):\n if self.stage in [1, 3]:\n self.interest_extractor = MultiInterestExtractor(\n self.K, self.item_num, self.emb_size, self.attn_size, self.max_his, self.add_pos)\n if self.stage in [2, 3]:\n self.intent_predictor = IntentPredictor(self.item_num, self.emb_size)\n if self.stage == 3:\n self.proj = nn.Sequential()\n for i, _ in enumerate(range(self.n_layers - 1)):\n self.proj.add_module('proj_' + str(i), nn.Linear(self.emb_size, self.emb_size))\n self.proj.add_module('dropout_' + str(i), nn.Dropout(p=0.5))\n self.proj.add_module('relu_' + str(i), nn.ReLU(inplace=True))\n self.proj.add_module('proj_final', nn.Linear(self.emb_size, self.K))\n\n def load_model(self, model_path=None):\n if model_path is None:\n model_path = self.model_path\n model_dict = self.state_dict()\n state_dict = torch.load(model_path)\n exist_state_dict = {k: v for k, v in state_dict.items() if k in model_dict}\n model_dict.update(exist_state_dict)\n self.load_state_dict(model_dict)\n logging.info('Load model from ' + model_path)\n\n def actions_before_train(self):\n if self.stage == 3 and os.path.exists(self.extractor_path):\n self.load_model(self.extractor_path)\n # self.load_model(self.predictor_path)\n return\n logging.info('Train from scratch!')\n\n @staticmethod\n def similarity(a, b):\n a = F.normalize(a, dim=-1)\n b = F.normalize(b, dim=-1)\n return (a * b).sum(dim=-1)\n\n @staticmethod\n def js_div(p, q):\n kl = nn.KLDivLoss(reduction='none')\n p, q = p.softmax(-1), q.softmax(-1)\n log_mean = ((p + q) / 
2).log()\n js = (kl(log_mean, p) + kl(log_mean, q)) / 2\n return js\n\n def forward(self, feed_dict):\n self.check_list = []\n i_ids = feed_dict['item_id'] # bsz, -1\n history = feed_dict['history_items'] # bsz, max_his + 1\n lengths = feed_dict['lengths'] # bsz\n batch_size, seq_len = history.shape\n\n out_dict = dict()\n if self.stage == 1: # pretrain extractor\n interest_vectors = self.interest_extractor(history, lengths) # bsz, K, emb\n i_vectors = self.interest_extractor.i_embeddings(i_ids)\n if feed_dict['phase'] == 'train':\n target_vector = i_vectors[:, 0] # bsz, emb\n target_intent = (interest_vectors * target_vector[:, None, :]).sum(-1) # bsz, K\n idx_select = target_intent.max(-1)[1] # bsz\n user_vector = interest_vectors[torch.arange(batch_size), idx_select, :] # bsz, emb\n prediction = (user_vector[:, None, :] * i_vectors).sum(-1)\n else:\n prediction = (interest_vectors[:, None, :, :] * i_vectors[:, :, None, :]).sum(-1) # bsz, -1, K\n prediction = prediction.max(-1)[0] # bsz, -1\n elif self.stage == 2: # pretrain predictor\n his_vector = self.intent_predictor(history, lengths)\n i_vectors = self.intent_predictor.i_embeddings(i_ids)\n prediction = (his_vector[:, None, :] * i_vectors).sum(-1)\n else: # finetune\n interest_vectors = self.interest_extractor(history, lengths) # bsz, K, emb\n i_vectors = self.interest_extractor.i_embeddings(i_ids)\n his_vector = self.intent_predictor(history, lengths) # bsz, K\n # pred_intent = self.similarity(interest_vectors.detach(), his_vector.unsqueeze(1)) # bsz, K\n pred_intent = self.proj(his_vector) # bsz, K\n user_vector = (interest_vectors * pred_intent.softmax(-1)[:, :, None]).sum(-2) # bsz, emb\n if feed_dict['phase'] == 'train':\n target_vector = i_vectors[:, 0] # bsz, emb\n target_intent = self.similarity(interest_vectors, target_vector.unsqueeze(1)) # bsz, K\n # idx_select = pred_intent.max(-1)[1] # bsz\n # user_vector = interest_vectors[torch.arange(batch_size), idx_select, :] # bsz, emb\n out_dict['pred_intent'] = pred_intent\n out_dict['target_intent'] = target_intent\n self.check_list.append(('intent', pred_intent.softmax(-1)))\n self.check_list.append(('target', target_intent.softmax(-1)))\n prediction = (user_vector[:, None, :] * i_vectors).sum(-1)\n out_dict['prediction'] = prediction.view(batch_size, -1)\n\n # For JS divergence analysis\n if self.stage != 2 and feed_dict['phase'] == 'test':\n target_vector = i_vectors[:, 0] # bsz, emb\n target_intent = self.similarity(interest_vectors, target_vector.unsqueeze(1)) # bsz, K\n idx = torch.from_numpy(np.arange(batch_size)).to(self.device)\n rec_vector = i_vectors[idx, prediction.max(-1)[1]]\n rec_intent = self.similarity(interest_vectors, rec_vector.unsqueeze(1)) # bsz, K\n out_dict['js'] = self.js_div(target_intent, rec_intent).sum(-1)\n out_dict['dis'] = (interest_vectors[:, 0, :] - interest_vectors[:, 0, :]).pow(2).sum(-1)\n for i in range(self.K - 1):\n for j in range(i + 1, self.K):\n out_dict['dis'] += (interest_vectors[:, i, :] - interest_vectors[:, j, :]).pow(2).sum(-1)\n out_dict['dis'] /= (self.K * (self.K - 1) / 2)\n\n return out_dict\n\n def loss(self, out_dict: dict):\n if self.stage in [1, 2]: # pretrain\n loss = super().loss(out_dict)\n else: # finetune\n pred_intent = out_dict['pred_intent'] / self.temp\n target_intent = out_dict['target_intent'].detach() / self.temp\n # target_intent = out_dict['target_intent'] / self.temp\n kl_criterion = nn.KLDivLoss(reduction='batchmean')\n loss = kl_criterion(F.log_softmax(pred_intent, dim=1), F.softmax(target_intent, 
dim=1))\n loss = super().loss(out_dict) + self.temp * self.temp * loss\n # loss = super().loss(out_dict)\n return loss\n\n\nclass MultiInterestExtractor(nn.Module):\n def __init__(self, k, item_num, emb_size, attn_size, max_his, add_pos):\n super(MultiInterestExtractor, self).__init__()\n self.max_his = max_his\n self.add_pos = add_pos\n\n self.i_embeddings = nn.Embedding(item_num, emb_size)\n if self.add_pos:\n self.p_embeddings = nn.Embedding(max_his + 1, emb_size)\n self.W1 = nn.Linear(emb_size, attn_size)\n self.W2 = nn.Linear(attn_size, k)\n\n def forward(self, history, lengths):\n batch_size, seq_len = history.shape\n valid_his = (history > 0).long()\n\n his_vectors = self.i_embeddings(history)\n if self.add_pos:\n len_range = torch.from_numpy(np.arange(self.max_his)).to(history.device)\n position = (lengths[:, None] - len_range[None, :seq_len]) * valid_his\n pos_vectors = self.p_embeddings(position)\n his_pos_vectors = his_vectors + pos_vectors\n else:\n his_pos_vectors = his_vectors\n\n # Multi-Interest Extraction\n attn_score = self.W2(self.W1(his_pos_vectors).tanh()) # bsz, his_max, K\n attn_score = attn_score.masked_fill(valid_his.unsqueeze(-1) == 0, -np.inf)\n attn_score = attn_score.transpose(-1, -2) # bsz, K, his_max\n attn_score = (attn_score - attn_score.max()).softmax(dim=-1)\n attn_score = attn_score.masked_fill(torch.isnan(attn_score), 0)\n interest_vectors = (his_vectors[:, None, :, :] * attn_score[:, :, :, None]).sum(-2) # bsz, K, emb\n return interest_vectors\n\n\nclass IntentPredictor(nn.Module):\n def __init__(self, item_num, emb_size):\n super(IntentPredictor, self).__init__()\n self.i_embeddings = nn.Embedding(item_num + 1, emb_size)\n self.rnn = nn.GRU(input_size=emb_size, hidden_size=emb_size, batch_first=True)\n\n def forward(self, history, lengths):\n his_vectors = self.i_embeddings(history)\n sort_lengths, sort_idx = torch.topk(lengths, k=len(lengths))\n sort_seq = his_vectors.index_select(dim=0, index=sort_idx)\n seq_packed = torch.nn.utils.rnn.pack_padded_sequence(sort_seq, sort_lengths.cpu(), batch_first=True)\n output, hidden = self.rnn(seq_packed, None)\n unsort_idx = torch.topk(sort_idx, k=len(lengths), largest=False)[1]\n his_vector = hidden[-1].index_select(dim=0, index=unsort_idx)\n return his_vector\n"
] | [
[
"torch.nn.functional.normalize",
"torch.nn.Sequential",
"torch.nn.functional.softmax",
"torch.nn.KLDivLoss",
"torch.nn.Dropout",
"torch.isnan",
"torch.load",
"torch.nn.functional.log_softmax",
"numpy.arange",
"torch.nn.GRU",
"torch.nn.Embedding",
"torch.nn.Linear",
"torch.arange",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
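The stage-3 loss in TiMiRecLight above is a temperature-scaled knowledge-distillation term between the predictor's intent logits and the extractor's target logits; a standalone sketch of just that term, with random logits standing in for pred_intent and target_intent:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    temp = 2.0
    pred_intent = torch.randn(4, 3)             # bsz x K logits from the intent predictor
    target_intent = torch.randn(4, 3).detach()  # bsz x K logits from the interest extractor (teacher)

    kl = nn.KLDivLoss(reduction='batchmean')
    distill = kl(F.log_softmax(pred_intent / temp, dim=1),
                 F.softmax(target_intent / temp, dim=1))
    loss = temp * temp * distill                # T^2 factor, matching the loss() method above
    print(loss.item())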
igorFNegrizoli/3D-modeller-and-viewer | [
"35bcc0f2d291244785f77ad1ac6798f3849edd90"
] | [
"GUI.py"
] | [
"from tkinter import *\nfrom tkinter import messagebox\n\nfrom numpy.core.arrayprint import BoolFormat\nfrom transformations import escalate, rotX, rotY, rotZ, translate\nfrom pipeline import convertMesh2SRT, perspProj, sru2src\nfrom pipeline import isMeshVisible\nfrom savePoly import salvaPoligono\nimport numpy as np\nfrom utils import rgba2hex\nfrom doisDCrop import cutBorder\n\nclass CanvasMenu(Frame):\n def __init__(self):\n\n super().__init__()\n\n self.initToolbar()\n self.initSideBar()\n self.initScreen()\n self.initPC()\n\n # Toolbar para a escolha da projeção e do sombreamento\n def initToolbar(self):\n global listProj\n\n toolBar = Frame(self.master, bg='#E0E0E0')\n\n Proj = BooleanVar()\n Proj.set(\"True\")\n listProj.append(Proj)\n\n labelProjection = Label(toolBar, text=\"Projeção:\", font=('Helvetica', 10, 'bold'), bg='#E0E0E0')\n labelProjection.grid(row=0, column= 1, padx=10)\n \n perspective = Radiobutton(toolBar, text=\"Perspectiva\", variable=Proj, value=True, command=lambda: clicked(Proj.get()), font=('Helvetica', 9), bg='#E0E0E0')\n perspective.grid(row=1, column=2, padx=5, pady=5)\n\n parallel = Radiobutton(toolBar, text=\"Paralela\", variable=Proj, value=False, command=lambda: clicked(Proj.get()), font=('Helvetica', 9), bg='#E0E0E0')\n parallel.grid(row=1, column=3, padx=5, pady=5)\n\n toolBar.pack(side=TOP, fill=X)\n\n # SideBar para definição do mundo e do objeto\n def initSideBar(self):\n sideBar = Frame(self.master)\n\n global worldList, objectDataList\n worldList = []\n objectDataList = []\n\n canvasBar = Canvas(sideBar, bg='#E0E0E0')\n scrollBar = Scrollbar(sideBar, command=canvasBar.yview)\n scrollableFrame = Frame(canvasBar, bg='#E0E0E0')\n\n scrollableFrame.bind(\n \"<Configure>\",\n lambda e: canvasBar.configure(\n scrollregion=canvasBar.bbox(\"all\")\n )\n )\n\n canvasBar.create_window((0,0), window=scrollableFrame, anchor='nw')\n canvasBar.config(yscrollcommand= scrollBar.set)\n\n # Adição dos widgets no frame \n labelObject = Label(scrollableFrame, text=\"Dados do mundo:\", justify=LEFT, anchor=\"w\", font=('Helvetica', 10, 'bold'), bg='#E0E0E0', fg='#990303')\n labelObject.grid(row=0, column=0, padx=20, pady=10, columnspan=4, sticky=W)\n\n labelWorldLimit = Label(scrollableFrame, text=\"View-port:\", justify=LEFT, anchor=\"w\", font=('Helvetica', 10, 'bold'), bg='#E0E0E0')\n labelWorldLimit.grid(row=1, column=0, padx=20, pady=10, columnspan=3, sticky=W)\n\n labelWarning = Label(scrollableFrame, text=\"(Limite máximo: 860x640)\", justify=LEFT, anchor=\"w\", font=('Helvetica', 8), bg='#E0E0E0')\n labelWarning.grid(row=1, column=1, padx=20, pady=10, columnspan=4, sticky=W)\n\n labelWorldLimitUMIN = Label(scrollableFrame, text=\"uMin\", font=('Helvetica', 9), bg='#E0E0E0')\n labelWorldLimitUMIN.grid(row=2, column=0, padx=20, pady=10)\n\n coorWorldLimitUMIN = Entry(scrollableFrame, width= 8)\n coorWorldLimitUMIN.grid(row=2, column=1, padx=20, pady=10)\n coorWorldLimitUMIN.insert(0, 0)\n worldList.append(coorWorldLimitUMIN)\n\n labelWorldLimitUMAX = Label(scrollableFrame, text=\"uMax\", font=('Helvetica', 9), bg='#E0E0E0')\n labelWorldLimitUMAX.grid(row=2, column=2, padx=15, pady=10)\n\n coorWorldLimitUMAX = Entry(scrollableFrame, width= 8)\n coorWorldLimitUMAX.grid(row=2, column=3, padx=20, pady=10)\n coorWorldLimitUMAX.insert(0, 860)\n worldList.append(coorWorldLimitUMAX)\n\n labelWorldLimitVMIN = Label(scrollableFrame, text=\"vMin\", font=('Helvetica', 9), bg='#E0E0E0')\n labelWorldLimitVMIN.grid(row=3, column=0, padx=20, pady=10)\n\n coorWorldLimitY1 = 
Entry(scrollableFrame, width= 8)\n coorWorldLimitY1.grid(row=3, column=1, padx=20, pady=10)\n coorWorldLimitY1.insert(0, 0)\n worldList.append(coorWorldLimitY1)\n\n labelWorldLimitVMAX = Label(scrollableFrame, text=\"vMax\", font=('Helvetica', 9), bg='#E0E0E0')\n labelWorldLimitVMAX.grid(row=3, column=2, padx=15, pady=10)\n\n coorWorldLimitVMAX = Entry(scrollableFrame, width= 8)\n coorWorldLimitVMAX.grid(row=3, column=3, padx=20, pady=10)\n coorWorldLimitVMAX.insert(0, 640)\n worldList.append(coorWorldLimitVMAX)\n \n labelViewUp = Label(scrollableFrame, text=\"View-up:\", justify=LEFT, anchor=\"w\", font=('Helvetica', 10, 'bold'), bg='#E0E0E0')\n labelViewUp.grid(row=4, column=0, padx=20, pady=10, columnspan=3, sticky=W)\n\n labelViewUpX = Label(scrollableFrame, text=\"X\", font=('Helvetica', 9), bg='#E0E0E0')\n labelViewUpX.grid(row=5, column=0, pady=10)\n\n coorViewUpX = Entry(scrollableFrame, width= 8)\n coorViewUpX.grid(row=5, column=1, pady=10)\n coorViewUpX.insert(0, 0)\n worldList.append(coorViewUpX)\n \n labelViewUpY = Label(scrollableFrame, text=\"Y\", font=('Helvetica', 9), bg='#E0E0E0')\n labelViewUpY.grid(row=6, column=0, pady=10)\n\n coorViewUpY = Entry(scrollableFrame, width= 8)\n coorViewUpY.grid(row=6, column=1, pady=10)\n coorViewUpY.insert(0, 1)\n worldList.append(coorViewUpY)\n\n labelViewUpZ = Label(scrollableFrame, text=\"Z\", font=('Helvetica', 9), bg='#E0E0E0')\n labelViewUpZ.grid(row=7, column=0, pady=10)\n\n coorViewUpZ = Entry(scrollableFrame, width= 8)\n coorViewUpZ.grid(row=7, column=1, pady=10)\n coorViewUpZ.insert(0, 0)\n worldList.append(coorViewUpZ)\n\n labelVPR = Label(scrollableFrame, text=\"VRP:\", font=('Helvetica', 10, 'bold'), bg='#E0E0E0')\n labelVPR.grid(row=4, column=2, pady=10)\n\n labelVRPX = Label(scrollableFrame, text=\"X\", font=('Helvetica', 9), bg='#E0E0E0')\n labelVRPX.grid(row=5, column=2, pady=10)\n\n coorVRPX = Entry(scrollableFrame, width=8)\n coorVRPX.grid(row=5, column=3, pady=10)\n coorVRPX.insert(0, 0)\n worldList.append(coorVRPX)\n\n labelVRPY = Label(scrollableFrame, text=\"Y\", font=('Helvetica', 9), bg='#E0E0E0')\n labelVRPY.grid(row=6, column=2, pady=10)\n \n coorVRPY = Entry(scrollableFrame, width=8)\n coorVRPY.grid(row=6, column=3, pady=10)\n coorVRPY.insert(0, 10)\n worldList.append(coorVRPY)\n\n labelVRPZ = Label(scrollableFrame, text=\"Z\", font=('Helvetica', 9), bg='#E0E0E0')\n labelVRPZ.grid(row=7, column=2, pady=10)\n \n coorVRPZ = Entry(scrollableFrame, width=8)\n coorVRPZ.grid(row=7, column=3, pady=10)\n coorVRPZ.insert(0, 10)\n worldList.append(coorVRPZ)\n \n labelFocalPoint = Label(scrollableFrame, text=\"Ponto focal:\", justify=LEFT, anchor=\"w\", font=('Helvetica', 10, 'bold'), bg='#E0E0E0')\n labelFocalPoint.grid(row=8, column=0, padx=20, pady=10, columnspan=3, sticky=W)\n\n labelFocalPointX = Label(scrollableFrame, text=\"X\", font=('Helvetica', 9), bg='#E0E0E0')\n labelFocalPointX.grid(row=9, column=0, pady=10)\n\n coorFocalPointX = Entry(scrollableFrame, width=8)\n coorFocalPointX.grid(row=9, column=1, pady=10)\n coorFocalPointX.insert(0, 0)\n worldList.append(coorFocalPointX)\n \n labelFocalPointY = Label(scrollableFrame, text=\"Y\", font=('Helvetica', 9), bg='#E0E0E0')\n labelFocalPointY.grid(row=10, column=0, pady=10)\n\n coorFocalPointY = Entry(scrollableFrame, width=8)\n coorFocalPointY.grid(row=10, column=1, pady=10)\n coorFocalPointY.insert(0, 0)\n worldList.append(coorFocalPointY)\n\n labelFocalPointZ = Label(scrollableFrame, text=\"Z\", font=('Helvetica', 9), bg='#E0E0E0')\n 
labelFocalPointZ.grid(row=11, column=0, pady=10)\n\n coorFocalPointZ = Entry(scrollableFrame, width=8)\n coorFocalPointZ.grid(row=11, column=1, pady=10)\n coorFocalPointZ.insert(0, 0)\n worldList.append(coorFocalPointZ)\n\n labelDistance = Label(scrollableFrame, text=\"Distância ao plano:\", justify=LEFT, anchor=\"w\", font=('Helvetica', 10, 'bold'), bg='#E0E0E0')\n labelDistance.grid(row=8, column=2, pady=8, columnspan=3, sticky=W)\n\n labelProjectionPlane = Label(scrollableFrame, text=\"Projeção\", font=('Helvetica', 9), bg='#E0E0E0')\n labelProjectionPlane.grid(row=9, column=2, pady=8)\n\n distProjectionPlane = Entry(scrollableFrame, width=8)\n distProjectionPlane.grid(row=9, column=3, pady=8)\n distProjectionPlane.insert(0, 10)\n worldList.append(distProjectionPlane)\n \n labelNearPlane = Label(scrollableFrame, text=\"Near\", font=('Helvetica', 9), bg='#E0E0E0')\n labelNearPlane.grid(row=10, column=2, pady=8)\n\n distNearPlane = Entry(scrollableFrame, width=8)\n distNearPlane.grid(row=10, column=3, pady=8)\n distNearPlane.insert(0, 5)\n worldList.append(distNearPlane)\n \n labelFarPlane = Label(scrollableFrame, text=\"Far\", font=('Helvetica', 9), bg='#E0E0E0')\n labelFarPlane.grid(row=11, column=2, pady=8)\n\n distFarPlane = Entry(scrollableFrame, width=8)\n distFarPlane.grid(row=11, column=3, pady=8)\n distFarPlane.insert(0, 15)\n worldList.append(distFarPlane)\n \n labelProjectionPlane = Label(scrollableFrame, text=\"World window:\", justify=LEFT, anchor=\"w\", font=('Helvetica', 10, 'bold'), bg='#E0E0E0')\n labelProjectionPlane.grid(row=12, column=0, padx=20, pady=10, columnspan=3, sticky=W)\n\n labelProjectionPlaneXMIN = Label(scrollableFrame, text=\"XMin\", font=('Helvetica', 9), bg='#E0E0E0')\n labelProjectionPlaneXMIN.grid(row=13, column=0, pady=10)\n\n coorProjectionPlaneXMIN = Entry(scrollableFrame, width= 8)\n coorProjectionPlaneXMIN.grid(row=13, column=1, pady=10)\n coorProjectionPlaneXMIN.insert(0, -10)\n worldList.append(coorProjectionPlaneXMIN)\n \n labelProjectionPlaneXMAX = Label(scrollableFrame, text=\"XMax\", font=('Helvetica', 9), bg='#E0E0E0')\n labelProjectionPlaneXMAX.grid(row=13, column=2, pady=10)\n\n coorProjectionPlaneXMAX = Entry(scrollableFrame, width= 8)\n coorProjectionPlaneXMAX.grid(row=13, column=3, pady=10)\n coorProjectionPlaneXMAX.insert(0,10)\n worldList.append(coorProjectionPlaneXMAX)\n\n labelProjectionPlaneYMIN = Label(scrollableFrame, text=\"YMin\", font=('Helvetica', 9), bg='#E0E0E0')\n labelProjectionPlaneYMIN.grid(row=14, column=0, pady=10)\n\n coorProjectionPlaneYMIN = Entry(scrollableFrame, width= 8)\n coorProjectionPlaneYMIN.grid(row=14, column=1, pady=10)\n coorProjectionPlaneYMIN.insert(0,-10)\n worldList.append(coorProjectionPlaneYMIN)\n \n labelProjectionPlaneYMAX = Label(scrollableFrame, text=\"YMax\", font=('Helvetica', 9), bg='#E0E0E0')\n labelProjectionPlaneYMAX.grid(row=14, column=2, pady=10)\n\n coorProjectionPlaneYMAX = Entry(scrollableFrame, width= 8)\n coorProjectionPlaneYMAX.grid(row=14, column=3, pady=10)\n coorProjectionPlaneYMAX.insert(0,10)\n worldList.append(coorProjectionPlaneYMAX)\n \n labelObject = Label(scrollableFrame, text=\"Dados do objeto:\", justify=LEFT, anchor=\"w\", font=('Helvetica', 10, 'bold'), bg='#E0E0E0', fg='#990303')\n labelObject.grid(row=15, column=0, padx=20, pady=10, columnspan=4, sticky=W)\n\n labelBaseRadius = Label(scrollableFrame, text=\"Raio da base\", font=('Helvetica', 9), bg='#E0E0E0')\n labelBaseRadius.grid(row=16, column=0, padx=20, pady=10)\n\n BaseRadius = Entry(scrollableFrame, width= 
8)\n BaseRadius.grid(row=16, column=1, pady=10)\n BaseRadius.insert(0,5)\n objectDataList.append(BaseRadius)\n \n labelTopRadius = Label(scrollableFrame, text=\"Raio do topo\", font=('Helvetica', 9), bg='#E0E0E0')\n labelTopRadius.grid(row=16, column=2, pady=10)\n\n TopRadius = Entry(scrollableFrame, width= 8)\n TopRadius.grid(row=16, column=3, pady=10)\n TopRadius.insert(0,6)\n objectDataList.append(TopRadius)\n \n labelNumSides = Label(scrollableFrame, text=\"Nº de lados\", font=('Helvetica', 9), bg='#E0E0E0')\n labelNumSides.grid(row=17, column=0, padx=20, pady=10)\n\n NumSides = Entry(scrollableFrame, width= 8)\n NumSides.grid(row=17, column=1, pady=10)\n NumSides.insert(0,10)\n objectDataList.append(NumSides)\n\n labelObjHeight = Label(scrollableFrame, text=\"Altura\", font=('Helvetica', 9), bg='#E0E0E0')\n labelObjHeight.grid(row=17, column=2, pady=10)\n\n ObjHeight = Entry(scrollableFrame, width= 8)\n ObjHeight.grid(row=17, column=3, pady=10)\n ObjHeight.insert(0,5)\n objectDataList.append(ObjHeight)\n \n labelObjectCenter = Label(scrollableFrame, text=\"Centro geométrico:\", justify=LEFT, anchor=\"w\", font=('Helvetica', 10, 'bold'), bg='#E0E0E0')\n labelObjectCenter.grid(row=18, column=0, padx=20, pady=10, columnspan=4, sticky=W)\n\n objectCenterX = Label(scrollableFrame, text=\"X\", font=('Helvetica', 9), bg='#E0E0E0')\n objectCenterX.grid(row=19, column=0, pady=10)\n\n coorObjectCenterX = Entry(scrollableFrame, width=8)\n coorObjectCenterX.grid(row=19, column=1, pady=10)\n coorObjectCenterX.insert(0,10)\n objectDataList.append(coorObjectCenterX)\n\n objectCenterY = Label(scrollableFrame, text=\"Y\", font=('Helvetica', 9), bg='#E0E0E0')\n objectCenterY.grid(row=20, column=0, pady=10)\n\n coorObjectCenterY = Entry(scrollableFrame, width=8)\n coorObjectCenterY.grid(row=20, column=1, pady=10)\n coorObjectCenterY.insert(0,0)\n objectDataList.append(coorObjectCenterY)\n\n objectCenterZ = Label(scrollableFrame, text=\"Z\", font=('Helvetica', 9), bg='#E0E0E0')\n objectCenterZ.grid(row=21, column=0, pady=10)\n\n coorObjectCenterZ = Entry(scrollableFrame, width=8)\n coorObjectCenterZ.grid(row=21, column=1, pady=10)\n coorObjectCenterZ.insert(0,0)\n objectDataList.append(coorObjectCenterZ)\n\n \n labelSomb = Label(scrollableFrame, text=\"Sombreamento:\", justify=LEFT, anchor=\"w\", font=('Helvetica', 10, 'bold'), bg='#E0E0E0', fg='#990303')\n labelSomb.grid(row=22, column=0, padx=20, pady=10, columnspan=4, sticky=W)\n\n labelRGBKa = Label(scrollableFrame, text=\"Ka\", justify=LEFT, anchor=\"w\", font=('Helvetica', 9, 'bold'), bg='#E0E0E0')\n labelRGBKa.grid(row=23, column=0,padx=20, pady=10, sticky=W)\n\n RKa = Label(scrollableFrame, text=\"R\", font=('Helvetica', 9), bg='#E0E0E0')\n RKa.grid(row=24, column=0, pady=10)\n\n entryRKa = Entry(scrollableFrame, width=8)\n entryRKa.grid(row=24, column=1, pady=10)\n entryRKa.insert(0,0.4)\n objectDataList.append(entryRKa)\n\n GKa = Label(scrollableFrame, text=\"G\", font=('Helvetica', 9), bg='#E0E0E0')\n GKa.grid(row=25, column=0, pady=10)\n\n entryGKa = Entry(scrollableFrame, width=8)\n entryGKa.grid(row=25, column=1, pady=10)\n entryGKa.insert(0, 0.5)\n objectDataList.append(entryGKa)\n\n BKa = Label(scrollableFrame, text=\"B\", font=('Helvetica', 9), bg='#E0E0E0')\n BKa.grid(row=26, column=0, pady=10)\n\n entryBKa = Entry(scrollableFrame, width=8)\n entryBKa.grid(row=26, column=1, pady=10)\n entryBKa.insert(0, 0.6)\n objectDataList.append(entryBKa)\n\n labelRGBKd = Label(scrollableFrame, text=\"Kd\", justify=LEFT, anchor=\"w\", 
font=('Helvetica', 9, 'bold'), bg='#E0E0E0')\n labelRGBKd.grid(row=23, column=2, pady=10, sticky=W)\n \n RKd = Label(scrollableFrame, text=\"R\", font=('Helvetica', 9), bg='#E0E0E0')\n RKd.grid(row=24, column=2, pady=10)\n\n EntryRKd = Entry(scrollableFrame, width=8)\n EntryRKd.grid(row=24, column=3, pady=10)\n EntryRKd.insert(0, 0.7)\n objectDataList.append(EntryRKd)\n\n GKd = Label(scrollableFrame, text=\"G\", font=('Helvetica', 9), bg='#E0E0E0')\n GKd.grid(row=25, column=2, pady=10)\n\n EntryGKd = Entry(scrollableFrame, width=8)\n EntryGKd.grid(row=25, column=3, pady=10)\n EntryGKd.insert(0,0.8)\n objectDataList.append(EntryGKd)\n\n BKd = Label(scrollableFrame, text=\"B\", font=('Helvetica', 9), bg='#E0E0E0')\n BKd.grid(row=26, column=2, pady=10)\n\n EntryBKd = Entry(scrollableFrame, width=8)\n EntryBKd.grid(row=26, column=3, pady=10)\n EntryBKd.insert(0,0.9)\n objectDataList.append(EntryBKd)\n \n labelRGBKs = Label(scrollableFrame, text=\"Ks\", justify=LEFT, anchor=\"w\", font=('Helvetica', 9, 'bold'), bg='#E0E0E0')\n labelRGBKs.grid(row=27, column=0, padx=20, pady=10, sticky=W)\n\n RKs = Label(scrollableFrame, text=\"R\", font=('Helvetica', 9), bg='#E0E0E0')\n RKs.grid(row=28, column=0, pady=10)\n\n EntryRKs = Entry(scrollableFrame, width=8)\n EntryRKs.grid(row=28, column=1, pady=10)\n EntryRKs.insert(0, 0.5)\n objectDataList.append(EntryRKs)\n\n GKs = Label(scrollableFrame, text=\"G\", font=('Helvetica', 9), bg='#E0E0E0')\n GKs.grid(row=29, column=0, pady=10)\n \n EntryGKs = Entry(scrollableFrame, width=8)\n EntryGKs.grid(row=29, column=1, pady=10)\n EntryGKs.insert(0, 0.3)\n objectDataList.append(EntryGKs)\n\n BKs = Label(scrollableFrame, text=\"B\", font=('Helvetica', 9), bg='#E0E0E0')\n BKs.grid(row=30, column=0, pady=10)\n \n EntryBKs = Entry(scrollableFrame, width=8)\n EntryBKs.grid(row=30, column=1, pady=10)\n EntryBKs.insert(0, 0.2)\n objectDataList.append(EntryBKs)\n \n labelN = Label(scrollableFrame, text=\"n\", font=('Helvetica', 9), bg='#E0E0E0')\n labelN.grid(row=28, column=2, pady=10)\n\n EntryN = Entry(scrollableFrame, width=8)\n EntryN.grid(row=28, column=3, pady=10)\n EntryN.insert(0,2.15)\n objectDataList.append(EntryN)\n\n labelRGBIla = Label(scrollableFrame, text=\"Luz ambiente\", justify=LEFT, anchor=\"w\", font=('Helvetica', 9, 'bold'), bg='#E0E0E0')\n labelRGBIla.grid(row=31, column=0, padx=20, pady=10, columnspan=4, sticky=W)\n\n IlaR = Label(scrollableFrame, text=\"R\", font=('Helvetica', 9), bg='#E0E0E0')\n IlaR.grid(row=32, column=0, pady=10)\n\n EntryIlaR = Entry(scrollableFrame, width=8)\n EntryIlaR.grid(row=32, column=1, pady=10)\n EntryIlaR.insert(0,120)\n objectDataList.append(EntryIlaR)\n\n IlaG = Label(scrollableFrame, text=\"G\", font=('Helvetica', 9), bg='#E0E0E0')\n IlaG.grid(row=33, column=0, pady=10)\n \n EntryIlaG = Entry(scrollableFrame, width=8)\n EntryIlaG.grid(row=33, column=1, pady=10)\n EntryIlaG.insert(0,120)\n objectDataList.append(EntryIlaG)\n\n IlaB = Label(scrollableFrame, text=\"B\", font=('Helvetica', 9), bg='#E0E0E0')\n IlaB.grid(row=34, column=0, pady=10)\n \n EntryIlaB = Entry(scrollableFrame, width=8)\n EntryIlaB.grid(row=34, column=1, pady=10)\n EntryIlaB.insert(0,120)\n objectDataList.append(EntryIlaB)\n \n labelRGBfla = Label(scrollableFrame, text=\"Fonte luminosa\", justify=LEFT, anchor=\"w\", font=('Helvetica', 9, 'bold'), bg='#E0E0E0')\n labelRGBfla.grid(row=31, column=2, padx=20, pady=10, columnspan=4, sticky=W)\n\n IflR = Label(scrollableFrame, text=\"R\", font=('Helvetica', 9), bg='#E0E0E0')\n IflR.grid(row=32, 
column=2, pady=10)\n\n EntryIflR = Entry(scrollableFrame, width=8)\n EntryIflR.grid(row=32, column=3, pady=10)\n EntryIflR.insert(0,150)\n objectDataList.append(EntryIflR)\n\n IflG = Label(scrollableFrame, text=\"G\", font=('Helvetica', 9), bg='#E0E0E0')\n IflG.grid(row=33, column=2, pady=10)\n \n EntryIflG = Entry(scrollableFrame, width=8)\n EntryIflG.grid(row=33, column=3, pady=10)\n EntryIflG.insert(0,150)\n objectDataList.append(EntryIflG)\n\n IflB = Label(scrollableFrame, text=\"B\", font=('Helvetica', 9), bg='#E0E0E0')\n IflB.grid(row=34, column=2, pady=10)\n \n EntryIflB = Entry(scrollableFrame, width=8)\n EntryIflB.grid(row=34, column=3, pady=10)\n EntryIflB.insert(0,150)\n objectDataList.append(EntryIflB)\n\n labelCoorFontLum = Label(scrollableFrame, text=\"Coord. fonte luminosa:\", justify=LEFT, anchor=\"w\", font=('Helvetica', 10, 'bold'), bg='#E0E0E0')\n labelCoorFontLum.grid(row=35, column=0, padx=20, pady=10, columnspan=4, sticky=W)\n\n labelCoorFontLumX = Label(scrollableFrame, text=\"X\", font=('Helvetica', 9), bg='#E0E0E0')\n labelCoorFontLumX.grid(row=36, column=0, pady=10)\n\n coorFontLumX = Entry(scrollableFrame, width=8)\n coorFontLumX.grid(row=36, column=1, pady=10)\n coorFontLumX.insert(0, 70)\n objectDataList.append(coorFontLumX)\n\n labelCoorFontLumY = Label(scrollableFrame, text=\"Y\", font=('Helvetica', 9), bg='#E0E0E0')\n labelCoorFontLumY.grid(row=37, column=0, pady=10)\n\n coorFontLumY = Entry(scrollableFrame, width=8)\n coorFontLumY.grid(row=37, column=1, pady=10)\n coorFontLumY.insert(0, 20)\n objectDataList.append(coorFontLumY)\n\n labelCoorFontLumZ = Label(scrollableFrame, text=\"Z\", font=('Helvetica', 9), bg='#E0E0E0')\n labelCoorFontLumZ.grid(row=38, column=0, pady=10)\n\n coorFontLumZ = Entry(scrollableFrame, width=8)\n coorFontLumZ.grid(row=38, column=1, pady=10)\n coorFontLumZ.insert(0, 35)\n objectDataList.append(coorFontLumZ)\n\n novoMundo = Button(scrollableFrame, text=\"Novo mundo\", font=('Helvetica', 10), bg='#edb1ba', width=9, command = newWorld)\n novoMundo.grid(row=39, column=0, pady=10)\n\n novoObjeto = Button(scrollableFrame, text=\"Novo objeto\", font=('Helvetica', 10), bg='#edb1ba', width=9, command = newObject)\n novoObjeto.grid(row=39, column=1, pady=10)\n\n atualizarObjeto = Button(scrollableFrame, text=\"Att. 
Luz\", font=('Helvetica', 8), bg='#edb1ba', width=9, command = updateObject)\n atualizarObjeto.grid(row=39, column=2, pady=10)\n\n limparCena = Button(scrollableFrame, text=\"Limpar cena\", font=('Helvetica', 10), bg='#edb1ba', width=9, command= clearScreen)\n limparCena.grid(row=39, column=3, pady=10) \n\n sideBar.pack(side=RIGHT, fill=Y)\n canvasBar.pack(side=LEFT, fill=BOTH, expand=True)\n scrollBar.pack(side=RIGHT, fill=Y)\n\n def initPC(self):\n global canvasPC\n planoCartesiano = Frame(self.master)\n canvasPC = Canvas(planoCartesiano)\n planoCartesiano.place(x=20, y= 530, width=170, height=170)\n planoCartesiano.rowconfigure(0, weight = 1)\n planoCartesiano.columnconfigure(0, weight = 1)\n canvasPC.grid(sticky=\"nsew\")\n\n def initScreen(self):\n global screen, canvas \n screen = Frame(self.master, highlightbackground='gray', highlightthickness=1)\n\n screen.rowconfigure(0, weight = 5)\n screen.columnconfigure(0, weight = 5)\n\n canvas = Canvas(screen)\n\n screen.place(x=10, y= 70, width=860, height=640)\n\n canvas.grid(sticky=\"nsew\")\n\n\n\ndef popupShowErrorEmptyInput():\n messagebox.showerror(\"Erro!\", \"Campos vazios!\")\n\ndef popupShowErrorInput():\n messagebox.showerror(\"Erro!\", \"Entrada inválida!\")\n\ndef popupShowLimitErrorX():\n messagebox.showerror(\"Erro!\", \"Insira valores de 0 a 860 para X1 e X2!\")\n\ndef popupShowLimitErrorY():\n messagebox.showerror(\"Erro!\", \"Insira valores de 0 a 640 para Y1 e Y2!\")\n\ndef popupShowLimitError():\n messagebox.showerror(\"Erro!\", \"Limite máximo da tela atingido!\")\n\ndef popupShowNumSidesError():\n messagebox.showerror(\"Erro!\", \"Número de lados deve ser entre 3 e 20!\")\n\ndef newWorld():\n if len(worldList[0].get()) != 0 and len(worldList[1].get()) != 0 and len(worldList[2].get()) != 0 and len(worldList[3].get()) != 0 \\\n and len(worldList[4].get()) != 0 and len(worldList[5].get()) != 0 and len(worldList[6].get()) != 0 \\\n and len(worldList[7].get()) != 0 and len(worldList[8].get()) != 0 and len(worldList[9].get()) != 0 \\\n and len(worldList[10].get()) != 0 and len(worldList[11].get()) != 0 and len(worldList[12].get()) != 0 \\\n and len(worldList[13].get()) != 0 and len(worldList[14].get()) != 0 and len(worldList[15].get()) != 0 \\\n and len(worldList[16].get()) != 0 and len(worldList[17].get()) != 0 and len(worldList[18].get()) != 0 \\\n and len(objectDataList[17].get()) != 0 and len(objectDataList[18].get()) != 0 and len(objectDataList[19].get()) != 0 \\\n and len(objectDataList[20].get()) != 0 and len(objectDataList[21].get()) != 0 and len(objectDataList[22].get()) != 0 \\\n and len(objectDataList[23].get()) != 0 and len(objectDataList[24].get()) != 0 and len(objectDataList[25].get()) != 0:\n \n try:\n #View-port\n global listViewPort\n listViewPort = []\n listViewPort.append(int(worldList[0].get()))\n listViewPort.append(int(worldList[1].get()))\n listViewPort.append(int(worldList[2].get()))\n listViewPort.append(int(worldList[3].get()))\n \n #View-up\n global listViewUp\n listViewUp = []\n listViewUp.append(float(worldList[4].get()))\n listViewUp.append(float(worldList[5].get()))\n listViewUp.append(float(worldList[6].get()))\n \n #VRP\n global listVRP\n listVRP = []\n listVRP.append(float(worldList[7].get()))\n listVRP.append(float(worldList[8].get()))\n listVRP.append(float(worldList[9].get()))\n\n #Ponto Focal\n global listP\n listP = []\n listP.append(float(worldList[10].get()))\n listP.append(float(worldList[11].get()))\n listP.append(float(worldList[12].get()))\n\n # Distância ao plano de projeção, 
plano near e ao plano far\n global listDist\n listDist = []\n listDist.append(float(worldList[13].get()))\n listDist.append(float(worldList[14].get()))\n listDist.append(float(worldList[15].get()))\n\n # Janela do mundo\n global listWW\n listWW = []\n listWW.append(int(worldList[16].get()))\n listWW.append(int(worldList[17].get()))\n listWW.append(int(worldList[18].get()))\n listWW.append(int(worldList[19].get()))\n\n #luz ambiente\n global listLuz\n listLuz = []\n listLuz.append(int(objectDataList[17].get()))\n listLuz.append(int(objectDataList[18].get()))\n listLuz.append(int(objectDataList[19].get()))\n\n #fonte luminosa\n listLuz.append(int(objectDataList[20].get()))\n listLuz.append(int(objectDataList[21].get()))\n listLuz.append(int(objectDataList[22].get()))\n\n #coord. fonte luminosa\n listLuz.append(int(objectDataList[23].get()))\n listLuz.append(int(objectDataList[24].get()))\n listLuz.append(int(objectDataList[25].get()))\n \n except ValueError:\n popupShowErrorInput() \n \n else:\n if ((listViewPort[0] < 0) or (listViewPort[0] > 860)) or ((listViewPort[1] < 0) or (listViewPort[1] > 860)):\n popupShowLimitErrorX() \n elif ((listViewPort[2] < 0) or (listViewPort[2] > 640)) or ((listViewPort[3] < 0) or (listViewPort[3] > 640)):\n popupShowLimitErrorY() \n elif ((listViewPort[0] + listViewPort[1]) > 860) or ((listViewPort[2] + listViewPort[3]) > 640):\n popupShowLimitError() \n else:\n canvas.focus_set()\n placeScreen()\n redefineObject()\n deleteLabel()\n EixosSinalizadores()\n \n else:\n popupShowErrorEmptyInput()\n\ndef newObject():\n if len(objectDataList[0].get()) != 0 and len(objectDataList[1].get()) != 0 and len(objectDataList[2].get()) != 0 and len(objectDataList[3].get()) != 0 \\\n and len(objectDataList[4].get()) != 0 and len(objectDataList[5].get()) != 0 and len(objectDataList[6].get()) != 0 and len(objectDataList[7].get()) != 0 \\\n and len(objectDataList[8].get()) != 0 and len(objectDataList[9].get()) != 0 and len(objectDataList[10].get()) != 0 and len(objectDataList[11].get()) != 0 \\\n and len(objectDataList[12].get()) != 0 and len(objectDataList[13].get()) != 0 and len(objectDataList[14].get()) != 0 and len(objectDataList[15].get()) != 0 \\\n and len(objectDataList[16].get()) != 0:\n try:\n\n #Número de lados\n NL = int(objectDataList[2].get())\n\n if(NL >= 3):\n\n #Raio da base\n BR = float(objectDataList[0].get())\n\n #Raio do topo\n TR = float(objectDataList[1].get())\n\n #Altura do objeto\n OH = float(objectDataList[3].get())\n\n #Centro Geométrico\n coorOCX = float(objectDataList[4].get())\n coorOCY = float(objectDataList[5].get())\n coorOCZ = float(objectDataList[5].get())\n\n #Ka\n global listK\n listK = []\n listK.append(float(objectDataList[7].get()))\n listK.append(float(objectDataList[8].get()))\n listK.append(float(objectDataList[9].get()))\n\n #Kd\n listK.append(float(objectDataList[10].get()))\n listK.append(float(objectDataList[11].get()))\n listK.append(float(objectDataList[12].get()))\n\n #Ks\n listK.append(float(objectDataList[13].get()))\n listK.append(float(objectDataList[14].get()))\n listK.append(float(objectDataList[15].get()))\n \n #n\n listK.append(float(objectDataList[16].get()))\n\n canvas.focus_set()\n createObject(BR, TR, NL, OH, [coorOCX, coorOCY, coorOCZ])\n\n else:\n popupShowNumSidesError()\n\n except ValueError:\n popupShowErrorInput() \n else:\n popupShowErrorEmptyInput()\n\ndef deleteLabel():\n global labelXAxis, labelYAxis, labelZAxis\n if(labelXAxis != None and labelYAxis != None and labelZAxis != None):\n 
labelXAxis.place_forget()\n labelYAxis.place_forget()\n labelZAxis.place_forget()\n\ndef EixosSinalizadores():\n global listViewUp, listDist, listVRP, listP\n global labelXAxis, labelYAxis, labelZAxis\n\n if ((len(listVRP) != 0) and (len(listP) != 0) and (len(listDist) != 0) and (len(listViewUp) != 0)):\n\n canvasPC.delete(\"all\")\n tamanhoLinha = 60\n matrizPCsrt = None\n\n matrizPontos = np.array([[0, tamanhoLinha, 0, 0],[0, 0, tamanhoLinha, 0], [0, 0, 0, tamanhoLinha], [1, 1, 1, 1]])\n\n #print(listDist)\n #print(listP)\n #print(listViewUp)\n #print(listVRP)\n\n PCsrc = sru2src(np.array(listVRP),np.array(listP) ,np.array(listViewUp))\n \n matrizPCsrt = np.dot(PCsrc, matrizPontos)\n\n if listProj[0]:\n matrizPerpec = perspProj(listDist[0])\n matrizPCsrt = np.dot(matrizPerpec, matrizPCsrt)\n\n matrizPCsrt[1:]*=-1\n\n x = (matrizPCsrt[0][0] - 80)\n y = (matrizPCsrt[1][0] - 77)\n\n EndLineX = [matrizPCsrt[0][1] - x, matrizPCsrt[1][1] - y]\n EndLineY = [matrizPCsrt[0][2] - x, matrizPCsrt[1][2] - y]\n EndLineZ = [matrizPCsrt[0][3] - x, matrizPCsrt[1][3] - y]\n\n \n labelXAxis = Label(canvasPC, text=\"X\", font=('Helvetica', 9), fg=\"red\")\n labelXAxis.place(x= EndLineX[0], y= EndLineX[1]-10, anchor=E)\n canvasPC.create_line((matrizPCsrt[0][0] - x, matrizPCsrt[1][0] - y, EndLineX[0], EndLineX[1]), fill=\"red\", width=2)\n \n labelYAxis = Label(canvasPC, text=\"Y\", font=('Helvetica', 9), fg=\"green\")\n labelYAxis.place(x=EndLineY[0], y= EndLineY[1]-10, anchor=E)\n canvasPC.create_line((matrizPCsrt[0][0] - x, matrizPCsrt[1][0] - y, EndLineY[0], EndLineY[1]), fill=\"green\", width=2)\n\n labelZAxis = Label(canvasPC, text=\"Z\", font=('Helvetica', 9), fg=\"blue\")\n labelZAxis.place(x=EndLineZ[0], y= EndLineZ[1]+10, anchor=E)\n canvasPC.create_line((matrizPCsrt[0][0] - x, matrizPCsrt[1][0] - y, EndLineZ[0], EndLineZ[1]), fill=\"blue\", width=2)\n\n \ndef placeScreen ():\n screen.place(x = (listViewPort[0] + 10), y = (listViewPort[2] + 70), width= listViewPort[1], height= listViewPort[3])\n\ndef clearScreen():\n global meshAtual, kAtual\n canvas.delete(\"all\")\n meshAtual = None\n kAtual = None\n listMesh.clear()\n listObject.clear()\n listIlum.clear()\n\ndef updateObject():\n global meshAtual, kAtual\n if((canvas.find_all) != 0):\n \n if(len(objectDataList[7].get()) != 0 \\\n and len(objectDataList[8].get()) != 0 and len(objectDataList[9].get()) != 0 and len(objectDataList[10].get()) != 0 and len(objectDataList[11].get()) != 0 \\\n and len(objectDataList[12].get()) != 0 and len(objectDataList[13].get()) != 0 and len(objectDataList[14].get()) != 0 and len(objectDataList[15].get()) != 0 \\\n and len(objectDataList[16].get()) != 0 and len(objectDataList[17].get()) != 0 and len(objectDataList[18].get()) != 0 and len(objectDataList[19].get()) != 0 \\\n and len(objectDataList[20].get()) != 0 and len(objectDataList[21].get()) != 0 and len(objectDataList[22].get()) != 0 \\\n and len(objectDataList[23].get()) != 0 and len(objectDataList[24].get()) != 0 and len(objectDataList[25].get()) != 0):\n\n if(meshAtual is None):\n \n try:\n #luz ambiente\n global listLuz\n listLuz = []\n listLuz.append(int(objectDataList[17].get()))\n listLuz.append(int(objectDataList[18].get()))\n listLuz.append(int(objectDataList[19].get()))\n\n #fonte luminosa\n listLuz.append(int(objectDataList[20].get()))\n listLuz.append(int(objectDataList[21].get()))\n listLuz.append(int(objectDataList[22].get()))\n\n #coord. 
fonte luminosa\n listLuz.append(int(objectDataList[23].get()))\n listLuz.append(int(objectDataList[24].get()))\n listLuz.append(int(objectDataList[25].get()))\n canvas.focus_set()\n \n updateScreen()\n\n except ValueError:\n popupShowErrorInput() \n else:\n\n try:\n #Ka\n listK = []\n listK.append(float(objectDataList[7].get()))\n listK.append(float(objectDataList[8].get()))\n listK.append(float(objectDataList[9].get()))\n\n #Kd\n listK.append(float(objectDataList[10].get()))\n listK.append(float(objectDataList[11].get()))\n listK.append(float(objectDataList[12].get()))\n\n #Ks\n listK.append(float(objectDataList[13].get()))\n listK.append(float(objectDataList[14].get()))\n listK.append(float(objectDataList[15].get()))\n\n #n\n listK.append(float(objectDataList[16].get()))\n\n kAtual = listK\n for i in listMesh:\n if i[1] == idAtual:\n opCreate(i[0])\n\n\n except ValueError:\n popupShowErrorInput() \n else:\n popupShowErrorEmptyInput()\n\n \n \n\ndef redefineObject():\n\n if((canvas.find_all) != 0):\n\n canvas.delete(\"all\")\n updateScreen()\n\n\ndef createObject(raioBase, raioTopo, nLados, altura, GC):\n global listMesh, listObject, listIlum, contadorObj\n\n contadorObj += 1\n\n mesh = salvaPoligono(raioBase, raioTopo, nLados, altura, GC)\n listMesh.append([mesh, contadorObj])\n listIlum.append([listK, contadorObj])\n\n updateScreen()\n\ndef updateScreen():\n global listMesh, listObject\n canvas.delete(\"all\")\n\n faces = []\n\n for i in listMesh:\n idObj = i[1]\n\n listKAtual = []\n for j in listIlum:\n if j[1] == idObj:\n listKAtual = j[0]\n break\n\n meshSRT = convertMesh2SRT(i[0], np.array(listVRP), listDist[0], listWW[0], listWW[1], listWW[2], listWW[3], listViewPort[0], listViewPort[1], listViewPort[2], listViewPort[3], np.array(listP), np.array(listViewUp), listProj[0], np.array([listLuz[0], listLuz[1], listLuz[2]]), np.array([listLuz[3], listLuz[4], listLuz[5]]), np.array([listLuz[6], listLuz[7], listLuz[8]]), [listKAtual[0], listKAtual[1], listKAtual[2]], [listKAtual[3], listKAtual[4], listKAtual[5]], [listKAtual[6], listKAtual[7], listKAtual[8]], listKAtual[9])\n if(isMeshVisible(meshSRT, listDist[1], listDist[2])):\n for fh in meshSRT.faces():\n face = []\n faceProfundidade = 0\n for vh in meshSRT.fv(fh):\n point = meshSRT.point(vh)\n faceProfundidade += point[2]\n face.append([point[0], point[1]])\n newFace = cutBorder(face, listViewPort[0], listViewPort[1], listViewPort[2], listViewPort[3])\n if newFace != []:\n faces.append([newFace,faceProfundidade/3,rgba2hex(meshSRT.color(fh)), idObj])\n\n faces = sorted(faces , key=lambda k: k[1])\n listObject = []\n for currFace in faces:\n listObject.append([canvas.create_polygon(currFace[0], fill=currFace[2], tags=\"clickable\", outline=\"black\"), currFace[3]])\n\n\ndef identifyObject(event):\n canvas.focus_set()\n global meshAtual, listMesh, kAtual, idAtual\n meshAtual = None\n kAtual = None\n\n idWidget = event.widget.find_withtag(\"current\")\n\n if not idWidget:\n #print(\"O alvo do clique era um espaço vazio\")\n for i in listObject:\n canvas.itemconfig(i[0], outline=\"black\")\n else:\n idObj = -1\n for i in listObject:\n if i[0] == idWidget[0]:\n idObj = i[1]\n\n for i in listObject:\n if i[1] == idObj:\n canvas.itemconfig(i[0], outline=\"red\")\n else:\n canvas.itemconfig(i[0], outline=\"black\")\n\n for i in listMesh:\n if i[1] == idObj:\n meshAtual = i[0]\n break\n idAtual = idObj\n for i in listIlum:\n if i[1] == idObj:\n kAtual = i[0]\n break\n\ndef interfaceTeclas(event):\n if meshAtual is not None:\n 
translacao(event)\n escala(event)\n rotacao(event)\n\ndef opCreate(object):\n #deleta object\n global kAtual, idAtual, listMesh, listIlum\n \n for i in listMesh:\n if i[1] == idAtual:\n i[0] = object\n\n for i in listIlum:\n if i[1] == idAtual:\n i[0] = kAtual\n\n updateScreen()\n\n for i in listObject:\n if i[1] == idAtual:\n canvas.itemconfig(i[0], outline=\"red\")\n\ndef translacao(event):\n x, y, z = 0, 0, 0\n if event.char == \"q\": \n x, y, z = -0.1, 0, 0\n objectTrans = translate(meshAtual, x, y, z)\n opCreate(objectTrans)\n elif event.char == \"a\": \n x, y, z = 0.1, 0, 0\n objectTrans = translate(meshAtual, x, y, z)\n opCreate(objectTrans)\n elif event.char == \"w\": \n x, y, z = 0, 0, 0.1\n objectTrans = translate(meshAtual, x, y, z)\n opCreate(objectTrans)\n elif event.char == \"s\":\n x, y, z = 0, 0, -0.1\n objectTrans = translate(meshAtual, x, y, z)\n opCreate(objectTrans)\n elif event.char == \"e\":\n x, y, z = 0, 0.1, 0\n objectTrans = translate(meshAtual, x, y, z)\n opCreate(objectTrans)\n elif event.char == \"d\": \n x, y, z = 0, -0.1, 0\n objectTrans = translate(meshAtual, x, y, z)\n opCreate(objectTrans)\n\ndef escala(event):\n x, y, z = 0, 0, 0\n if event.char == \"r\": # diminui o objeto no eixo x\n x, y, z = 0.95, 1, 1\n objectEsc = escalate(meshAtual, x, y, z)\n opCreate(objectEsc)\n elif event.char == \"f\": # aumenta o objeto no eixo x\n x, y, z = 1.05, 1, 1\n objectEsc = escalate(meshAtual, x, y, z)\n opCreate(objectEsc)\n elif event.char == \"t\": # diminui o objeto no eixo z\n x, y, z = 1, 1, 0.95\n objectEsc = escalate(meshAtual, x, y, z)\n opCreate(objectEsc)\n elif event.char == \"g\": # aumenta o objeto no eixo z\n x, y, z = 1, 1, 1.05\n objectEsc = escalate(meshAtual, x, y, z)\n opCreate(objectEsc)\n elif event.char == \"y\": # diminui o objeto no eixo y\n x, y, z = 1, 0.95, 1\n objectEsc = escalate(meshAtual, x, y, z)\n opCreate(objectEsc)\n elif event.char == \"h\": # aumenta o objeto no eixo y\n x, y, z = 1, 1.05, 1\n objectEsc = escalate(meshAtual, x, y, z)\n opCreate(objectEsc)\n\ndef rotacao(event):\n angulo = 0\n if event.char == \"u\": # rotaciona para a esquerda ao redor do eixo xp\n angulo = 0.1\n objectRot = rotX(meshAtual, angulo)\n opCreate(objectRot)\n elif event.char == \"j\": # rotaciona para a direita ao redor do eixo x\n angulo = -0.1\n objectRot = rotX(meshAtual, angulo)\n opCreate(objectRot)\n elif event.char == \"i\": # rotaciona para a esquerda ao redor do eixo z\n angulo = 0.1\n objectRot = rotZ(meshAtual, angulo)\n opCreate(objectRot)\n elif event.char == \"k\": # rotaciona para a direita ao redor do eixo z\n angulo = -0.1\n objectRot = rotZ(meshAtual, angulo)\n opCreate(objectRot)\n elif event.char == \"o\": # rotaciona para a esquerda ao redor do eixo y\n angulo = 0.1\n objectRot = rotY(meshAtual, angulo)\n opCreate(objectRot)\n elif event.char == \"l\": # rotaciona para a direita ao redor do eixo y\n angulo = -0.1\n objectRot = rotY(meshAtual, angulo)\n opCreate(objectRot)\n\ndef clicked(value):\n listProj[0] = value\n if(canvas.find_all != 0):\n updateScreen()\n\ndef run_program():\n root = Tk()\n root.resizable(width=False, height=False)\n root.title('3D-modeller-and-viewer')\n\n width = 1280\n height = 720\n\n widthScreen = root.winfo_screenwidth()\n heightScreen = root.winfo_screenheight()\n\n posx = widthScreen/2 - width/2\n posy = (heightScreen/2 - height/2) - 30\n\n root.geometry(\"%dx%d+%d+%d\" % (width, height, posx, posy))\n \n global meshAtual, listObject, listMesh, listProj, listIlum, listVRP, listP, listViewUp, 
listDist, labelXAxis, labelYAxis, labelZAxis, contadorObj, idAtual\n \n meshAtual = None\n labelXAxis = None\n labelYAxis = None\n labelZAxis = None\n listMesh = []\n listObject = []\n listProj = []\n listIlum = []\n listViewUp = []\n listP = []\n listViewUp = []\n listDist = []\n contadorObj = 0\n idAtual = -1\n\n CanvasMenu()\n newWorld()\n canvas.bind(\"<Button-1>\", identifyObject)\n canvas.bind_all(\"<Key>\", interfaceTeclas)\n \n root.mainloop()\n\nif __name__ == '__main__':\n run_program()"
] | [
[
"numpy.dot",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mark-koren/flow | [
"f3f6d7e9c64f6b641a464a716c7f38ca00388805"
] | [
"flow/envs/loop_accel.py"
] | [
"from flow.envs.base_env import SumoEnvironment\nfrom flow.core import rewards\nfrom flow.core import multi_agent_rewards\n\nfrom gym.spaces.box import Box\nfrom gym.spaces.tuple_space import Tuple\n\nimport numpy as np\n\nclass SimpleAccelerationEnvironment(SumoEnvironment):\n \"\"\"\n Fully functional environment for single lane closed loop settings. Takes in\n an *acceleration* as an action. Reward function is negative norm of the\n difference between the velocities of each vehicle, and the target velocity.\n State function is a vector of the velocities and absolute positions for each\n vehicle.\n \"\"\"\n\n @property\n def action_space(self):\n \"\"\"\n See parent class\n\n Actions are a set of accelerations from max-deacc to max-acc for each\n rl vehicle.\n \"\"\"\n return Box(low=-np.abs(self.env_params.max_deacc),\n high=self.env_params.max_acc,\n shape=(self.vehicles.num_rl_vehicles, ))\n\n @property\n def observation_space(self):\n \"\"\"\n See parent class\n\n An observation is an array the velocities and absolute positions for\n each vehicle\n \"\"\"\n self.obs_var_labels = [\"Velocity\", \"Absolute_pos\"]\n speed = Box(low=0, high=np.inf, shape=(self.vehicles.num_vehicles,))\n absolute_pos = Box(low=0., high=np.inf, shape=(self.vehicles.num_vehicles,))\n return Tuple((speed, absolute_pos))\n\n def apply_rl_actions(self, rl_actions):\n \"\"\"\n See parent class\n\n Accelerations are applied to rl vehicles in accordance with the commands\n provided by rllab. These actions may be altered by flow's failsafes or\n sumo-defined speed modes.\n \"\"\"\n sorted_rl_ids = [veh_id for veh_id in self.sorted_ids if veh_id in self.rl_ids]\n self.apply_acceleration(sorted_rl_ids, rl_actions)\n\n def compute_reward(self, state, rl_actions, **kwargs):\n \"\"\"\n See parent class\n \"\"\"\n # reward desired velocity\n reward = rewards.desired_velocity(self, fail=kwargs[\"fail\"])\n\n return reward\n\n def get_state(self, **kwargs):\n \"\"\"\n See parent class\n\n The state is an array of velocities and absolute positions for each\n vehicle\n \"\"\"\n scaled_pos = [self.vehicles.get_absolute_position(veh_id) /\n self.scenario.length for veh_id in self.sorted_ids]\n scaled_vel = [self.vehicles.get_speed(veh_id) /\n self.env_params.get_additional_param(\"target_velocity\")\n for veh_id in self.sorted_ids]\n\n return np.array([[scaled_vel[i], scaled_pos[i]]\n for i in range(len(self.sorted_ids))])\n\n\nclass SimpleMultiAgentAccelerationEnvironment(SimpleAccelerationEnvironment):\n \"\"\"\n An extension of SimpleAccelerationEnvironment which treats each autonomous\n vehicles as a separate rl agent, thereby allowing autonomous vehicles to be\n trained in multi-agent settings.\n \"\"\"\n\n @property\n def action_space(self):\n \"\"\"\n See parent class\n\n Actions are a set of accelerations from max-deacc to max-acc for each\n rl vehicle.\n \"\"\"\n action_space = []\n for veh_id in self.rl_ids:\n action_space.append(Box(low=self.env_params.max_deacc,\n high=self.env_params.max_acc, shape=(1, )))\n return action_space\n\n @property\n def observation_space(self):\n \"\"\"\n See parent class\n \"\"\"\n num_vehicles = self.scenario.num_vehicles\n observation_space = []\n speed = Box(low=0, high=np.inf, shape=(num_vehicles,))\n absolute_pos = Box(low=0., high=np.inf, shape=(num_vehicles,))\n obs_tuple = Tuple((speed, absolute_pos))\n for veh_id in self.rl_ids:\n observation_space.append(obs_tuple)\n return observation_space\n\n def compute_reward(self, state, rl_actions, **kwargs):\n \"\"\"\n See parent 
class\n \"\"\"\n return multi_agent_rewards.desired_velocity(\n state, rl_actions,\n fail=kwargs[\"fail\"],\n target_velocity=self.env_params.get_additional_param(\"target_velocity\"))\n\n def get_state(self, **kwargs):\n \"\"\"\n See parent class\n The state is an array the velocities and absolute positions for\n each vehicle.\n \"\"\"\n obs_arr = []\n for i in range(self.scenario.num_rl_vehicles):\n speed = [self.vehicles.get_speed(veh_id)\n for veh_id in self.sorted_ids]\n abs_pos = [self.vehicles.get_absolute_position(veh_id)\n for veh_id in self.sorted_ids]\n tup = (speed, abs_pos)\n obs_arr.append(tup)\n\n return obs_arr\n\n\nclass SimplePartiallyObservableEnvironment(SimpleAccelerationEnvironment):\n \"\"\"\n This environment is an extension of the SimpleAccelerationEnvironment, with\n the exception that only local information is provided to the agent about the\n network; i.e. headway, velocity, and velocity difference. The reward\n function, however, continues to reward global network performance.\n\n NOTE: The environment also assumes that there is only one autonomous vehicle\n is in the network.\n \"\"\"\n\n @property\n def observation_space(self):\n \"\"\"\n See parent class\n \"\"\"\n return Box(low=-np.inf, high=np.inf, shape=(3,))\n\n def get_state(self, **kwargs):\n \"\"\"\n See parent class\n\n The state is an array consisting of the speed of the rl vehicle, the\n relative speed of the vehicle ahead of it, and the headway between the\n rl vehicle and the vehicle ahead of it.\n \"\"\"\n rl_id = self.rl_ids[0]\n lead_id = self.vehicles[rl_id][\"leader\"]\n max_speed = self.max_speed\n\n # if a vehicle crashes into the car ahead of it, it no longer processes\n # a lead vehicle\n if lead_id is None:\n lead_id = rl_id\n self.vehicles[rl_id][\"headway\"] = 0\n\n observation = np.array([\n [self.vehicles[rl_id][\"speed\"] / max_speed],\n [(self.vehicles[lead_id][\"speed\"] - self.vehicles[rl_id][\"speed\"])\n / max_speed],\n [self.vehicles[rl_id][\"headway\"] / self.scenario.length]])\n\n return observation\n"
] | [
[
"numpy.array",
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
samuelyu2002/ImVisible | [
"d0027ff803bd8ad4e6121d0aeeed2642a586b68d"
] | [
"Model/dataset.py"
] | [
"import torch\nfrom torch.utils.data import Dataset\nimport pandas as pd\nimport os\nimport numpy as np\nfrom PIL import ImageFile, Image, ImageDraw\nfrom torchvision import transforms\nfrom torchvision.transforms import functional as F\nimport random\n\nclass TrafficLightDataset(Dataset):\n \n def __init__(self, csv_file, img_dir, transformation = True):\n self.labels = pd.read_csv(csv_file)\n self.img_dir = img_dir\n self.transformation = transformation\n\n def __len__(self):\n return len(self.labels)\n\n def __getitem__(self, index):\n \n ImageFile.LOAD_TRUNCATED_IMAGES = True\n img_name = os.path.join(self.img_dir, self.labels.iloc[index, 0]) #gets image name in csv file\n image = Image.open(img_name)\n\n light_mode = self.labels.iloc[index, 1] #mode of the traffic light\n block = self.labels.iloc[index,6] #label of blocked or unblocked\n points = self.labels.iloc[index, 2:6] #midline coordinates\n points = [points[0]/4032, points[1]/3024, points[2]/4032, points[3]/3024] #normalize coordinate values to be between [0,1]\n\n if self.transformation: \n #random horizontal flip with 50% probability\n num = random.random()\n if num >= 0.5:\n image = F.hflip(image)\n #flip x coordinates when entire image is flipped\n points[0] = 1 - points[0] \n points[2] = 1 - points[2]\n \n #random crop\n cp = [points[0]*876, (1-points[1])*657, 876*points[2], (1-points[3])*657] #convert points to cartesian coordinates\n #shifts to determine what region to crop\n shiftx = random.randint(0, 108) \n shifty = random.randint(0, 81)\n\n with np.errstate(all=\"raise\"):\n try: m = (cp[1]-cp[3])/(cp[0]-cp[2]) #slope\n except: m = 10000000000000000 #prevent divide by zero error\n\n b = cp[1] - m*cp[0] #y-intercept\n \n #changing the coordinates based on the new cropped area\n if(shiftx > cp[0]): \n cp[0] = shiftx\n cp[1] = (cp[0]*m + b)\n elif((768+shiftx) < cp[0]):\n cp[0] = (768+shiftx)\n cp[1] = (cp[0]*m + b)\n if(shiftx > cp[2]): \n cp[2] = shiftx\n cp[3] = (cp[2]*m + b)\n elif((768+shiftx) < cp[2]):\n cp[2] = (768+shiftx)\n cp[3] = (cp[2]*m + b)\n if(657-shifty < cp[1]): \n cp[1] = 657-shifty\n cp[0] = (cp[1]-b)/m if (cp[1]-b)/m>0 else 0\n# elif((657-576-shifty) > cp[1]):\n# cp[0] = (657-576-shifty-b)/m\n# cp[1] = 0\n# cp[2] = (657-576-shifty-b)/m\n# cp[3] = 0\n if(657-576-shifty > cp[3]): \n cp[3] = 657-576-shifty\n cp[2] = (cp[3]-b)/m\n# elif((657-shifty) < cp[3]):\n# cp[3] = 657-shifty\n# cp[2] = (657-shifty-b)/m\n# cp[1] = 657-shifty\n# cp[0] = (657-shifty-b)/m\n\n #converting the coordinates from a 876x657 image to a 768x576 image\n cp[0] -= shiftx\n cp[1] -= (657-576-shifty)\n cp[2] -= shiftx\n cp[3] -= (657-576-shifty)\n\n #converting the cartesian coordinates back to image coordinates\n points = [cp[0]/768, 1-cp[1]/576, cp[2]/768, 1-cp[3]/576]\n \n image = F.crop(image, shifty, shiftx, 576, 768)\n transform = transforms.Compose([transforms.ColorJitter(0.05,0.05,0.05,0.01)])\n image = transform(image)\n \n #normalize image\n #image = transforms.functional.to_tensor(image)\n #image = transforms.functional.normalize(image, mean = [120.56737612047593, 119.16664454573734, 113.84554638827127], std=[66.32028460114392, 65.09469952002551, 65.67726614496246])\n \n image = np.transpose(image, (2, 0, 1))\n points = torch.tensor(points)\n \n #combine all the info into a dictionary\n final_label = {'image': image, 'mode':light_mode, 'points': points, 'block': block}\n return final_label\n"
] | [
[
"numpy.errstate",
"torch.tensor",
"pandas.read_csv",
"numpy.transpose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
jereliu/GPflow-Slim | [
"3b6a9eaa4967b7285cbd188b44f670bfda6f12c6"
] | [
"gpflowSlim/models/gpmc.py"
] | [
"# Copyright 2018 Shengyang Sun\n# Copyright 2016 James Hensman, alexggmatthews\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom .. import settings\nfrom ..params import Parameter\nfrom ..priors import Gaussian\nfrom ..conditionals import conditional\n\nfrom .model import GPModel\n\n\nclass GPMC(GPModel):\n def __init__(self, X, Y, kern, likelihood,\n mean_function=None,\n num_latent=None,\n **kwargs):\n \"\"\"\n X is a data matrix, size N x D\n Y is a data matrix, size N x R\n kern, likelihood, mean_function are appropriate GPflow objects\n\n This is a vanilla implementation of a GP with a non-Gaussian\n likelihood. The latent function values are represented by centered\n (whitened) variables, so\n\n v ~ N(0, I)\n f = Lv + m(x)\n\n with\n\n L L^T = K\n\n \"\"\"\n GPModel.__init__(self, X, Y, kern, likelihood, mean_function, **kwargs)\n self.num_data = X.shape[0]\n self.num_latent = num_latent or Y.shape[1]\n\n with tf.variable_scope(self.name):\n self._V = Parameter(np.zeros((self.num_data, self.num_latent)), name='V')\n self._V.prior = Gaussian(0., 1.)\n\n self._parameters = self._parameters + [self._V]\n\n @property\n def V(self):\n return self._V.value\n\n def _build_likelihood(self):\n \"\"\"\n Construct a tf function to compute the likelihood of a general GP\n model.\n\n \\log p(Y, V | theta).\n\n \"\"\"\n K = self.kern.K(self.X)\n L = tf.cholesky(\n K + tf.eye(tf.shape(self.X)[0], dtype=settings.float_type) * settings.numerics.jitter_level)\n F = tf.matmul(L, self.V) + self.mean_function(self.X)\n\n return tf.reduce_sum(self.likelihood.logp(F, self.Y))\n\n def _build_predict(self, Xnew, full_cov=False):\n \"\"\"\n Xnew is a data matrix, point at which we want to predict\n\n This method computes\n\n p(F* | (F=LV) )\n\n where F* are points on the GP at Xnew, F=LV are points on the GP at X.\n\n \"\"\"\n mu, var = conditional(Xnew, self.X, self.kern, self.V,\n full_cov=full_cov,\n q_sqrt=None, white=True)\n return mu + self.mean_function(Xnew), var\n"
] | [
[
"tensorflow.variable_scope",
"tensorflow.matmul",
"numpy.zeros",
"tensorflow.shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
m0n0l0c0/PokerRL | [
"fa07ddf66fd0da9688bd871ae55fc1ceb863476e"
] | [
"PokerRL/rl/buffers/CircularBufferRNN.py"
] | [
"# Copyright (c) 2019 Eric Steinberger\n\n\nimport numpy as np\nimport torch\n\nfrom PokerRL.rl.buffers._circular_base import CircularBufferBase\n\n\nclass CircularBufferRNN(CircularBufferBase):\n \"\"\"\n self.games stores references to Game subclass objects. One Game instance might be referenced multiple times,\n depending on the number of steps that it contains. This is to keep equally likely sampling.\n \"\"\"\n\n def __init__(self, env_bldr, max_size):\n super().__init__(env_bldr=env_bldr, max_size=max_size)\n\n self._games = None\n self.reset()\n\n @property\n def max_size(self):\n return self._max_size\n\n @property\n def size(self):\n return self._size\n\n def add_game(self, game):\n for _ in range(game.n_steps_in_game_memory):\n self._games[self._top] = game\n\n if self._size < self._max_size:\n self._size += 1\n\n self._top = (self._top + 1) % self._max_size\n\n def sample(self, device, batch_size):\n \"\"\"\n Args:\n batch_size (int)\n device (torch.device)\n\n Returns:\n tuple\n \"\"\"\n indices = np.random.randint(low=0, high=self._size, size=batch_size)\n\n samples = [self._games[i].sample() for i in indices]\n\n batch_legal_action_mask_tp1 = [sample[\"mask_tp1\"] for sample in samples]\n batch_legal_action_mask_tp1 = torch.from_numpy(np.array(batch_legal_action_mask_tp1)).to(device=device)\n\n batch_legal_action_mask_t = [sample[\"mask_t\"] for sample in samples]\n batch_legal_action_mask_t = torch.from_numpy(np.array(batch_legal_action_mask_t)).to(device=device)\n\n batch_action_t = [sample[\"a\"] for sample in samples]\n batch_action_t = torch.tensor(batch_action_t, dtype=torch.long, device=device)\n\n batch_range_idx = [sample[\"range_idx\"] for sample in samples]\n batch_range_idx = torch.from_numpy(np.array(batch_range_idx)).to(dtype=torch.long, device=device)\n\n batch_reward = [sample[\"rew\"] for sample in samples]\n batch_reward = torch.from_numpy(np.array(batch_reward)).to(dtype=torch.float32, device=device)\n\n batch_done = [sample[\"done\"] for sample in samples]\n batch_done = torch.tensor(batch_done, dtype=torch.float32, device=device)\n\n # obs will be further processed into a PackedSequence in the net.\n batch_pub_obs_t = [sample[\"o_t\"] for sample in samples]\n batch_pub_obs_tp1 = [sample[\"o_tp1\"] for sample in samples]\n\n return batch_pub_obs_t, \\\n batch_action_t, \\\n batch_range_idx, \\\n batch_legal_action_mask_t, \\\n batch_reward, \\\n batch_pub_obs_tp1, \\\n batch_legal_action_mask_tp1, \\\n batch_done,\n\n def state_dict(self):\n return {\n \"games\": self._games,\n \"size\": self._size,\n \"top\": self._top\n }\n\n def load_state_dict(self, state):\n self._games = state[\"games\"]\n self._size = state[\"size\"]\n self._top = state[\"top\"]\n\n def reset(self):\n super().reset()\n self._games = np.array([None for _ in range(self._max_size)], dtype=object)\n"
] | [
[
"torch.tensor",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
abhilshit/aerial-lidar-classification | [
"d182300308bc31f6127bec1488ddcb7e25f0f4e3"
] | [
"app/experimenntal/scikit-dbscan-new.py"
] | [
"from app.PointCloudReader import PointCloudReader\nimport numpy as np\nfrom sklearn.cluster import DBSCAN, SpectralClustering\nfrom multiprocessing.dummy import Pool\nimport scipy\nfrom itertools import repeat\nfrom math import sqrt\nfrom sklearn.cluster import AgglomerativeClustering\n\n\ndef calculate_svd(cluster_points):\n \"\"\"\n Performs Singular Value decomposition of the data points\n :param cluster_points: data points\n :return: Principal Directions (V) * Sigma (S) indicating principal directions with magnitude\n \"\"\"\n u, s, vt = scipy.linalg.svd(cluster_points)\n if s.shape[0] < vt.shape[0]:\n difference = vt.shape[0] - s.shape[0]\n for i in range(len(s),len(s)+difference):\n s = np.append(s,0.0)\n principal_directions_with_magnitude = s*vt.transpose()\n return principal_directions_with_magnitude\n\n\ndef norm(vector):\n return sqrt(sum(x * x for x in vector))\n\n\ndef cosine_similarity(vec_a, vec_b):\n norm_a = norm(vec_a)\n norm_b = norm(vec_b)\n dot = sum(a * b for a, b in zip(vec_a, vec_b))\n return dot / (norm_a * norm_b)\n\ndef calc_euclidean_distance(vec1, vec2):\n dist = (vec1 - vec2) ** 2\n dist = np.sqrt(np.sum(dist))\n return dist\n\n\ndef calc_similarity(args):\n item, label, cluster_sim_scores = args\n other_label, other_principal_directions = item\n if label == other_label:\n cluster_sim_scores[label][other_label] = 0\n else:\n first_sim_score = calc_euclidean_distance(principal_directions[0], other_principal_directions[0])\n second_sim_score = calc_euclidean_distance(principal_directions[1], other_principal_directions[1])\n third_sim_score = calc_euclidean_distance(principal_directions[2], other_principal_directions[2])\n weighted_sim_score = (0.7*first_sim_score + 0.3*second_sim_score)/2\n cluster_sim_scores[label][other_label] = weighted_sim_score\n\n\nif __name__ == '__main__':\n pcr = PointCloudReader(\"../resources/outliers_2_0_fr.las\")\n pc = pcr.point_cloud\n pc\n points = np.vstack((pc.x, pc.y, pc.z)).transpose()\n dbscan = DBSCAN(eps=2, min_samples=10, metric='euclidean').fit(points)\n labels = dbscan.labels_\n print(np.unique(labels))\n label_index_map = {i: (labels == i).nonzero()[0] for i in np.unique(labels)}\n # pcr.point_cloud.user_data = labels\n # pcr.point_cloud.write('outlier2_2_0.las')\n\n class_principal_direction_map = {}\n # points_copy = pcr.point_cloud.points.copy()\n for class_, label_indices in label_index_map.items():\n if class_ == -1:\n continue\n if len(label_indices) > 10000:\n sampled_label_indices = np.random.choice(label_indices, size=10000)\n else:\n sampled_label_indices = label_indices\n cluster_points = np.vstack((np.array(pcr.point_cloud.x)[sampled_label_indices],\n np.array(pcr.point_cloud.y)[sampled_label_indices],\n np.array(pcr.point_cloud.z)[sampled_label_indices])).transpose()\n cluster_principal_directions = calculate_svd(cluster_points)\n class_principal_direction_map[class_] = cluster_principal_directions\n\n similar_labels = {}\n cluster_sim_scores = np.full((len(label_index_map.keys()) - 1, len(label_index_map.keys()) - 1), 0, dtype=\"float64\")\n pool = Pool(processes=4)\n for label, principal_directions in class_principal_direction_map.items():\n pool.map(calc_similarity,\n zip(class_principal_direction_map.items(), repeat(label), repeat(cluster_sim_scores)))\n print(\"Calculating Similarity Matrix for label : \" + str(label) + \"/\" + str(len(np.unique(labels))))\n\n ag = AgglomerativeClustering(n_clusters=3)\n new_clusters = ag.fit(cluster_sim_scores).labels_\n new_label_old_label_map = {i: (new_clusters 
== i).nonzero()[0] for i in np.unique(new_clusters)}\n for new_label, label_index in new_label_old_label_map.items():\n for old_label in label_index:\n old_label_point_indices = label_index_map[old_label]\n pcr.point_cloud.user_data[old_label_point_indices] = new_label+5\n\n unclassified_points = np.where(np.logical_not(np.logical_and(\n np.logical_and(np.logical_and(pcr.point_cloud.user_data == 5, pcr.point_cloud.user_data == 6),\n pcr.point_cloud.user_data == 7), pcr.point_cloud.user_data == 8)))[0]\n\n pcr.point_cloud.user_data[unclassified_points] = 0\n pcr.point_cloud.write('svd_outlier2_2_0_fr_new.las')"
] | [
[
"numpy.array",
"scipy.linalg.svd",
"numpy.unique",
"numpy.random.choice",
"sklearn.cluster.DBSCAN",
"numpy.append",
"sklearn.cluster.AgglomerativeClustering",
"numpy.logical_and",
"numpy.sum",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
}
] |
jpmaterial/trimesh | [
"4f493ff0a96a14e62eb7c748964fd8f4e44064c5",
"4f493ff0a96a14e62eb7c748964fd8f4e44064c5",
"4f493ff0a96a14e62eb7c748964fd8f4e44064c5"
] | [
"trimesh/exchange/binvox.py",
"trimesh/graph.py",
"trimesh/grouping.py"
] | [
"\"\"\"Parsing functions for Binvox files.\n\nhttps://www.patrickmin.com/binvox/binvox.html\n\nExporting meshes as binvox files requires binvox CL tool to be on your path.\n\"\"\"\nimport os\nimport subprocess\nimport numpy as np\nimport collections\n\nfrom distutils.spawn import find_executable\n\nfrom .. import util\nfrom ..base import Trimesh\n\n# find the executable for binvox in PATH\nbinvox_encoder = find_executable('binvox')\n\nBinvox = collections.namedtuple(\n 'Binvox', ['rle_data', 'shape', 'translate', 'scale'])\n\n\ndef parse_binvox_header(fp):\n \"\"\"\n Read the header from a binvox file.\n Spec available:\n https://www.patrickmin.com/binvox/binvox.html\n\n Parameters\n ------------\n fp: file-object\n File like object with binvox file\n\n Returns\n ----------\n shape : tuple\n Shape of binvox according to binvox spec\n translate : tuple\n Translation\n scale : float\n Scale of voxels\n\n Raises\n ------------\n IOError\n If invalid binvox file.\n \"\"\"\n\n line = fp.readline().strip()\n if hasattr(line, 'decode'):\n binvox = b'#binvox'\n space = b' '\n else:\n binvox = '#binvox'\n space = ' '\n if not line.startswith(binvox):\n raise IOError('Not a binvox file')\n shape = tuple(\n int(s) for s in fp.readline().strip().split(space)[1:])\n translate = tuple(\n float(s) for s in fp.readline().strip().split(space)[1:])\n scale = float(fp.readline().strip().split(space)[1])\n fp.readline()\n return shape, translate, scale\n\n\ndef parse_binvox(fp, writeable=False):\n \"\"\"\n Read a binvox file, spec at\n https://www.patrickmin.com/binvox/binvox.html\n\n Parameters\n ------------\n fp: file-object\n File like object with binvox file\n\n Returns\n ----------\n binvox : namedtuple\n Containing data\n rle : numpy array\n Run length encoded data\n\n Raises\n ------------\n IOError\n If invalid binvox file\n \"\"\"\n # get the header info\n shape, translate, scale = parse_binvox_header(fp)\n # get the rest of the file\n data = fp.read()\n # convert to numpy array\n rle_data = np.frombuffer(data, dtype=np.uint8)\n if writeable:\n rle_data = rle_data.copy()\n return Binvox(rle_data, shape, translate, scale)\n\n\n_binvox_header = '''#binvox 1\ndim {sx} {sy} {sz}\ntranslate {tx} {ty} {tz}\nscale {scale}\ndata\n'''\n\n\ndef binvox_header(shape, translate, scale):\n \"\"\"\n Get a binvox header string.\n\n Parameters\n --------\n shape: length 3 iterable of ints denoting shape of voxel grid.\n translate: length 3 iterable of floats denoting translation.\n scale: num length of entire voxel grid.\n\n Returns\n --------\n string including \"data\\n\" line.\n \"\"\"\n sx, sy, sz = (int(s) for s in shape)\n tx, ty, tz = translate\n return _binvox_header.format(\n sx=sx, sy=sy, sz=sz, tx=tx, ty=ty, tz=tz, scale=scale)\n\n\ndef binvox_bytes(rle_data, shape, translate=(0, 0, 0), scale=1):\n \"\"\"Get a binary representation of binvox data.\n\n Parameters\n --------\n rle_data : numpy array\n Run-length encoded numpy array.\n shape : (3,) int\n Shape of voxel grid.\n translate : (3,) float\n Translation of voxels\n scale : float\n Length of entire voxel grid.\n\n Returns\n --------\n data : bytes\n Suitable for writing to binary file\n \"\"\"\n if rle_data.dtype != np.uint8:\n raise ValueError(\n \"rle_data.dtype must be np.uint8, got %s\" % rle_data.dtype)\n\n header = binvox_header(shape, translate, scale).encode()\n return header + rle_data.tobytes()\n\n\ndef voxel_from_binvox(\n rle_data, shape, translate=None, scale=1.0, axis_order='xzy'):\n \"\"\"\n Factory for building from data associated 
with binvox files.\n\n Parameters\n ---------\n rle_data : numpy\n Run-length-encoded of flat voxel\n values, or a `trimesh.rle.RunLengthEncoding` object.\n See `trimesh.rle` documentation for description of encoding\n shape : (3,) int\n Shape of voxel grid.\n translate : (3,) float\n Translation of voxels\n scale : float\n Length of entire voxel grid.\n encoded_axes : iterable\n With values in ('x', 'y', 'z', 0, 1, 2),\n where x => 0, y => 1, z => 2\n denoting the order of axes in the encoded data. binvox by\n default saves in xzy order, but using `xyz` (or (0, 1, 2)) will\n be faster in some circumstances.\n\n Returns\n ---------\n result : VoxelGrid\n Loaded voxels\n \"\"\"\n # shape must be uniform else scale is ambiguous\n from ..voxel import encoding as enc\n from ..voxel.base import VoxelGrid\n\n from .. import transformations\n\n if isinstance(rle_data, enc.RunLengthEncoding):\n encoding = rle_data\n else:\n encoding = enc.RunLengthEncoding(rle_data, dtype=bool)\n\n # translate = np.asanyarray(translate) * scale)\n # translate = [0, 0, 0]\n transform = transformations.scale_and_translate(\n scale=scale / (np.array(shape) - 1),\n translate=translate)\n\n if axis_order == 'xzy':\n perm = (0, 2, 1)\n shape = tuple(shape[p] for p in perm)\n encoding = encoding.reshape(shape).transpose(perm)\n elif axis_order is None or axis_order == 'xyz':\n encoding = encoding.reshape(shape)\n else:\n raise ValueError(\n \"Invalid axis_order '%s': must be None, 'xyz' or 'xzy'\")\n\n assert(encoding.shape == shape)\n return VoxelGrid(encoding, transform)\n\n\ndef load_binvox(file_obj,\n resolver=None,\n axis_order='xzy',\n file_type=None):\n \"\"\"\n Load trimesh `VoxelGrid` instance from file.\n\n Parameters\n -----------\n file_obj : file-like object\n Contains binvox data\n resolver : unused\n axis_order : str\n Order of axes in encoded data.\n Binvox default is 'xzy', but 'xyz' may be faster\n where this is not relevant.\n\n Returns\n ---------\n result : trimesh.voxel.VoxelGrid\n Loaded voxel data\n \"\"\"\n if file_type is not None and file_type != 'binvox':\n raise ValueError(\n 'file_type must be None or binvox, got %s' % file_type)\n data = parse_binvox(file_obj, writeable=True)\n return voxel_from_binvox(\n rle_data=data.rle_data,\n shape=data.shape,\n translate=data.translate,\n scale=data.scale,\n axis_order=axis_order)\n\n\ndef export_binvox(voxel, axis_order='xzy'):\n \"\"\"\n Export `trimesh.voxel.VoxelGrid` instance to bytes\n\n Parameters\n ------------\n voxel : `trimesh.voxel.VoxelGrid`\n Assumes axis ordering of `xyz` and encodes\n in binvox default `xzy` ordering.\n axis_order : str\n Eements in ('x', 'y', 'z', 0, 1, 2), the order\n of axes to encode data (standard is 'xzy' for binvox). 
`voxel`\n data is assumed to be in order 'xyz'.\n\n Returns\n -----------\n result : bytes\n Representation according to binvox spec\n \"\"\"\n translate = voxel.translation\n scale = voxel.scale * ((np.array(voxel.shape) - 1))\n neg_scale, = np.where(scale < 0)\n encoding = voxel.encoding.flip(neg_scale)\n scale = np.abs(scale)\n if not util.allclose(scale[0], scale[1:], 1e-6 * scale[0] + 1e-8):\n raise ValueError('Can only export binvox with uniform scale')\n scale = scale[0]\n if axis_order == 'xzy':\n encoding = encoding.transpose((0, 2, 1))\n elif axis_order != 'xyz':\n raise ValueError('Invalid axis_order: must be one of (\"xyz\", \"xzy\")')\n rle_data = encoding.flat.run_length_data(dtype=np.uint8)\n return binvox_bytes(\n rle_data, shape=voxel.shape, translate=translate, scale=scale)\n\n\nclass Binvoxer(object):\n \"\"\"\n Interface for binvox CL tool.\n\n This class is responsible purely for making calls to the CL tool. It\n makes no attempt to integrate with the rest of trimesh at all.\n\n Constructor args configure command line options.\n\n `Binvoxer.__call__` operates on the path to a mode file.\n\n If using this interface in published works, please cite the references\n below.\n\n See CL tool website for further details.\n\n https://www.patrickmin.com/binvox/\n\n @article{nooruddin03,\n author = {Fakir S. Nooruddin and Greg Turk},\n title = {Simplification and Repair of Polygonal Models Using Volumetric\n Techniques},\n journal = {IEEE Transactions on Visualization and Computer Graphics},\n volume = {9},\n number = {2},\n pages = {191--205},\n year = {2003}\n }\n\n @Misc{binvox,\n author = {Patrick Min},\n title = {binvox},\n howpublished = {{\\tt http://www.patrickmin.com/binvox} or\n {\\tt https://www.google.com/search?q=binvox}},\n year = {2004 - 2019},\n note = {Accessed: yyyy-mm-dd}\n }\n \"\"\"\n\n SUPPORTED_INPUT_TYPES = (\n 'ug',\n 'obj',\n 'off',\n 'dfx',\n 'xgl',\n 'pov',\n 'brep',\n 'ply',\n 'jot',\n )\n\n SUPPORTED_OUTPUT_TYPES = (\n 'binvox',\n 'hips',\n 'mira',\n 'vtk',\n 'raw',\n 'schematic',\n 'msh',\n )\n\n def __init__(\n self,\n dimension=32,\n file_type='binvox',\n z_buffer_carving=True,\n z_buffer_voting=True,\n dilated_carving=False,\n exact=False,\n bounding_box=None,\n remove_internal=False,\n center=False,\n rotate_x=0,\n rotate_z=0,\n wireframe=False,\n fit=False,\n block_id=None,\n use_material_block_id=False,\n use_offscreen_pbuffer=True,\n downsample_factor=None,\n downsample_threshold=None,\n verbose=False,\n binvox_path=None):\n \"\"\"\n Configure the voxelizer.\n\n Parameters\n ------------\n dimension: voxel grid size (max 1024 when not using exact)\n file_type: str\n Output file type, supported types are:\n 'binvox'\n 'hips'\n 'mira'\n 'vtk'\n 'raw'\n 'schematic'\n 'msh'\n z_buffer_carving : use z buffer based carving. At least one of\n `z_buffer_carving` and `z_buffer_voting` must be True.\n z_buffer_voting: use z-buffer based parity voting method.\n dilated_carving: stop carving 1 voxel before intersection.\n exact: any voxel with part of a triangle gets set. Does not use\n graphics card.\n bounding_box: 6-element float list/tuple of min, max values,\n (minx, miny, minz, maxx, maxy, maxz)\n remove_internal: remove internal voxels if True. 
Note there is some odd\n behaviour if boundary voxels are occupied.\n center: center model inside unit cube.\n rotate_x: number of 90 degree ccw rotations around x-axis before\n voxelizing.\n rotate_z: number of 90 degree cw rotations around z-axis before\n voxelizing.\n wireframe: also render the model in wireframe (helps with thin parts).\n fit: only write voxels in the voxel bounding box.\n block_id: when converting to schematic, use this as the block ID.\n use_matrial_block_id: when converting from obj to schematic, parse\n block ID from material spec \"usemtl blockid_<id>\" (ids 1-255 only).\n use_offscreen_pbuffer: use offscreen pbuffer instead of onscreen\n window.\n downsample_factor: downsample voxels by this factor in each dimension.\n Must be a power of 2 or None. If not None/1 and `core dumped`\n errors occur, try slightly adjusting dimensions.\n downsample_threshold: when downsampling, destination voxel is on if\n more than this number of voxels are on.\n verbose : bool\n If False, silences stdout/stderr from subprocess call.\n binvox_path : str\n Path to binvox executable. The default looks for an\n executable called `binvox` on your `PATH`.\n \"\"\"\n if binvox_path is None:\n encoder = binvox_encoder\n else:\n encoder = binvox_path\n\n if encoder is None:\n raise IOError(\n 'No `binvox_path` provided, and no binvox executable found '\n 'on PATH. \\nPlease go to https://www.patrickmin.com/binvox/ and '\n 'download the appropriate version.')\n\n if dimension > 1024 and not exact:\n raise ValueError(\n 'Maximum dimension using exact is 1024, got %d' % dimension)\n if file_type not in Binvoxer.SUPPORTED_OUTPUT_TYPES:\n raise ValueError(\n 'file_type %s not in set of supported output types %s' %\n (file_type, str(Binvoxer.SUPPORTED_OUTPUT_TYPES)))\n args = [encoder, '-d', str(dimension), '-t', file_type]\n if exact:\n args.append('-e')\n if z_buffer_carving:\n if z_buffer_voting:\n pass\n else:\n args.append('-c')\n elif z_buffer_voting:\n args.append('-v')\n else:\n raise ValueError(\n 'At least one of `z_buffer_carving` or `z_buffer_voting` must '\n 'be True')\n if dilated_carving:\n args.append('-dc')\n\n # Additional parameters\n if bounding_box is not None:\n if len(bounding_box) != 6:\n raise ValueError('bounding_box must have 6 elements')\n args.append('-bb')\n args.extend(str(b) for b in bounding_box)\n if remove_internal:\n args.append('-ri')\n if center:\n args.append('-cb')\n args.extend(('-rotx',) * rotate_x)\n args.extend(('-rotz',) * rotate_z)\n if wireframe:\n args.append('-aw')\n if fit:\n args.append('-fit')\n if block_id is not None:\n args.extend(('-bi', block_id))\n if use_material_block_id:\n args.append('-mb')\n if use_offscreen_pbuffer:\n args.append('-pb')\n if downsample_factor is not None:\n times = np.log2(downsample_factor)\n if int(times) != times:\n raise ValueError(\n 'downsample_factor must be a power of 2, got %d'\n % downsample_factor)\n args.extend(('-down',) * int(times))\n if downsample_threshold is not None:\n args.extend(('-dmin', str(downsample_threshold)))\n args.append('PATH')\n self._args = args\n self._file_type = file_type\n\n self.verbose = verbose\n\n @property\n def file_type(self):\n return self._file_type\n\n def __call__(self, path, overwrite=False):\n \"\"\"\n Create an voxel file in the same directory as model at `path`.\n\n Parameters\n ------------\n path: string path to model file. 
Supported types:\n 'ug'\n 'obj'\n 'off'\n 'dfx'\n 'xgl'\n 'pov'\n 'brep'\n 'ply'\n 'jot' (polygongs only)\n overwrite: if False, checks the output path (head.file_type) is empty\n before running. If True and a file exists, raises an IOError.\n\n Returns\n ------------\n string path to voxel file. File type give by file_type in constructor.\n \"\"\"\n head, ext = os.path.splitext(path)\n ext = ext[1:].lower()\n if ext not in Binvoxer.SUPPORTED_INPUT_TYPES:\n raise ValueError(\n 'file_type %s not in set of supported input types %s' %\n (ext, str(Binvoxer.SUPPORTED_INPUT_TYPES)))\n out_path = '%s.%s' % (head, self._file_type)\n if os.path.isfile(out_path) and not overwrite:\n raise IOError(\n 'Attempted to voxelize object a %s, but there is already a '\n 'file at output path %s' % (path, out_path))\n self._args[-1] = path\n\n # generalizes to python2 and python3\n # will capture terminal output into variable rather than printing\n verbosity = subprocess.check_output(self._args)\n # if requested print ourselves\n if self.verbose:\n print(verbosity)\n\n return out_path\n\n\ndef voxelize_mesh(mesh,\n binvoxer=None,\n export_type='off',\n **binvoxer_kwargs):\n \"\"\"\n Interface for voxelizing Trimesh object via the binvox tool.\n\n Implementation simply saved the mesh in the specified export_type then\n runs the `Binvoxer.__call__` (using either the supplied `binvoxer` or\n creating one via `binvoxer_kwargs`)\n\n Parameters\n ------------\n mesh: Trimesh object to voxelize.\n binvoxer: optional Binvoxer instance.\n export_type: file type to export mesh as temporarily for Binvoxer to\n operate on.\n **binvoxer_kwargs: kwargs for creating a new Binvoxer instance. If binvoxer\n if provided, this must be empty.\n\n Returns\n ------------\n `VoxelGrid` object resulting.\n \"\"\"\n if not isinstance(mesh, Trimesh):\n raise ValueError('mesh must be Trimesh instance, got %s' % str(mesh))\n if binvoxer is None:\n binvoxer = Binvoxer(**binvoxer_kwargs)\n elif len(binvoxer_kwargs) > 0:\n raise ValueError('Cannot provide binvoxer and binvoxer_kwargs')\n if binvoxer.file_type != 'binvox':\n raise ValueError(\n 'Only \"binvox\" binvoxer `file_type` currently supported')\n with util.TemporaryDirectory() as folder:\n model_path = os.path.join(folder, 'model.%s' % export_type)\n with open(model_path, 'wb') as fp:\n mesh.export(fp, file_type=export_type)\n out_path = binvoxer(model_path)\n with open(out_path, 'rb') as fp:\n out_model = load_binvox(fp)\n return out_model\n\n\n_binvox_loaders = {'binvox': load_binvox}\n",
"\"\"\"\ngraph.py\n-------------\n\nDeal with graph operations. Primarily deal with graphs in (n, 2)\nedge list form, and abstract the backend graph library being used.\n\nCurrently uses networkx or scipy.sparse.csgraph backend.\n\"\"\"\n\nimport numpy as np\nimport collections\n\nfrom . import util\nfrom . import grouping\nfrom . import exceptions\n\nfrom .constants import log, tol\nfrom .geometry import faces_to_edges\n\ntry:\n from scipy.sparse import csgraph, coo_matrix\nexcept BaseException as E:\n # re-raise exception when used\n csgraph = exceptions.ExceptionModule(E)\n coo_matrix = exceptions.closure(E)\n\ntry:\n import networkx as nx\nexcept BaseException as E:\n # create a dummy module which will raise the ImportError\n # or other exception only when someone tries to use networkx\n nx = exceptions.ExceptionModule(E)\n\n\ndef face_adjacency(faces=None,\n mesh=None,\n return_edges=False):\n \"\"\"\n Returns an (n, 2) list of face indices.\n Each pair of faces in the list shares an edge, making them adjacent.\n\n\n Parameters\n -----------\n faces : (n, 3) int, or None\n Vertex indices representing triangles\n mesh : Trimesh object\n If passed will used cached edges\n instead of generating from faces\n return_edges : bool\n Return the edges shared by adjacent faces\n\n Returns\n ----------\n adjacency : (m, 2) int\n Indexes of faces that are adjacent\n edges: (m, 2) int\n Only returned if return_edges is True\n Indexes of vertices which make up the\n edges shared by the adjacent faces\n\n Examples\n ----------\n This is useful for lots of things such as finding\n face- connected components:\n >>> graph = nx.Graph()\n >>> graph.add_edges_from(mesh.face_adjacency)\n >>> groups = nx.connected_components(graph_connected)\n \"\"\"\n\n if mesh is None:\n # first generate the list of edges for the current faces\n # also return the index for which face the edge is from\n edges, edges_face = faces_to_edges(faces, return_index=True)\n # make sure edge rows are sorted\n edges.sort(axis=1)\n else:\n # if passed a mesh, used the cached values\n edges = mesh.edges_sorted\n edges_face = mesh.edges_face\n\n # this will return the indices for duplicate edges\n # every edge appears twice in a well constructed mesh\n # so for every row in edge_idx:\n # edges[edge_idx[*][0]] == edges[edge_idx[*][1]]\n # in this call to group rows we discard edges which\n # don't occur twice\n edge_groups = grouping.group_rows(edges, require_count=2)\n\n if len(edge_groups) == 0:\n log.debug('No adjacent faces detected! 
Did you merge vertices?')\n\n # the pairs of all adjacent faces\n # so for every row in face_idx, self.faces[face_idx[*][0]] and\n # self.faces[face_idx[*][1]] will share an edge\n adjacency = edges_face[edge_groups]\n\n # degenerate faces may appear in adjacency as the same value\n nondegenerate = adjacency[:, 0] != adjacency[:, 1]\n adjacency = adjacency[nondegenerate]\n\n # sort pairs in-place so we can search for indexes with ordered pairs\n adjacency.sort(axis=1)\n\n if return_edges:\n adjacency_edges = edges[edge_groups[:, 0][nondegenerate]]\n assert len(adjacency_edges) == len(adjacency)\n return adjacency, adjacency_edges\n return adjacency\n\n\ndef face_adjacency_unshared(mesh):\n \"\"\"\n Return the vertex index of the two vertices not in the shared\n edge between two adjacent faces\n\n Parameters\n ----------\n mesh : Trimesh object\n Input mesh\n\n Returns\n -----------\n vid_unshared : (len(mesh.face_adjacency), 2) int\n Indexes of mesh.vertices\n for degenerate faces without exactly\n one unshared vertex per face it will be -1\n \"\"\"\n\n # the non- shared vertex index is the same shape\n # as face_adjacency holding vertex indices vs face indices\n vid_unshared = np.zeros_like(mesh.face_adjacency,\n dtype=np.int64) - 1\n # get the shared edges between adjacent faces\n edges = mesh.face_adjacency_edges\n\n # loop through the two columns of face adjacency\n for i, fid in enumerate(mesh.face_adjacency.T):\n # faces from the current column of face adjacency\n faces = mesh.faces[fid]\n # should have one True per row of (3,)\n # index of vertex not included in shared edge\n unshared = np.logical_not(np.logical_or(\n faces == edges[:, 0].reshape((-1, 1)),\n faces == edges[:, 1].reshape((-1, 1))))\n # each row should have exactly one uncontained verted\n row_ok = unshared.sum(axis=1) == 1\n # any degenerate row should be ignored\n unshared[~row_ok, :] = False\n # set the\n vid_unshared[row_ok, i] = faces[unshared]\n\n return vid_unshared\n\n\ndef face_adjacency_radius(mesh):\n \"\"\"\n Compute an approximate radius between adjacent faces.\n\n Parameters\n --------------\n mesh : trimesh.Trimesh\n\n Returns\n -------------\n radii : (len(self.face_adjacency),) float\n Approximate radius between faces\n Parallel faces will have a value of np.inf\n span : (len(self.face_adjacency),) float\n Perpendicular projection distance of two\n unshared vertices onto the shared edge\n \"\"\"\n\n # solve for the radius of the adjacent faces\n # distance\n # R = ---------------\n # 2 * sin(theta)\n nonzero = mesh.face_adjacency_angles > np.radians(.01)\n denominator = np.abs(\n 2.0 * np.sin(mesh.face_adjacency_angles[nonzero]))\n\n # consider the distance between the non- shared vertices of the\n # face adjacency pair as the key distance\n point_pairs = mesh.vertices[mesh.face_adjacency_unshared]\n vectors = np.diff(point_pairs,\n axis=1).reshape((-1, 3))\n\n # the vertex indices of the shared edge for the adjacency pairx\n edges = mesh.face_adjacency_edges\n # unit vector along shared the edge\n edges_vec = util.unitize(np.diff(mesh.vertices[edges],\n axis=1).reshape((-1, 3)))\n\n # the vector of the perpendicular projection to the shared edge\n perp = np.subtract(\n vectors, (util.diagonal_dot(\n vectors, edges_vec).reshape(\n (-1, 1)) * edges_vec))\n # the length of the perpendicular projection\n span = util.row_norm(perp)\n\n # complete the values for non- infinite radii\n radii = np.ones(len(mesh.face_adjacency)) * np.inf\n radii[nonzero] = span[nonzero] / denominator\n\n return radii, 
span\n\n\ndef vertex_adjacency_graph(mesh):\n \"\"\"\n Returns a networkx graph representing the vertices and\n their connections in the mesh.\n\n Parameters\n ----------\n mesh : Trimesh object\n\n Returns\n ---------\n graph : networkx.Graph\n Graph representing vertices and edges between\n them where vertices are nodes and edges are edges\n\n Examples\n ----------\n This is useful for getting nearby vertices for a given vertex,\n potentially for some simple smoothing techniques.\n >>> graph = mesh.vertex_adjacency_graph\n >>> graph.neighbors(0)\n > [1, 3, 4]\n \"\"\"\n g = nx.Graph()\n g.add_edges_from(mesh.edges_unique)\n return g\n\n\ndef shared_edges(faces_a, faces_b):\n \"\"\"\n Given two sets of faces, find the edges which are in both sets.\n\n Parameters\n ---------\n faces_a : (n, 3) int\n Array of faces\n faces_b : (m, 3) int\n Array of faces\n\n Returns\n ---------\n shared : (p, 2) int\n Edges shared between faces\n \"\"\"\n e_a = np.sort(faces_to_edges(faces_a), axis=1)\n e_b = np.sort(faces_to_edges(faces_b), axis=1)\n shared = grouping.boolean_rows(\n e_a, e_b, operation=np.intersect1d)\n return shared\n\n\ndef facets(mesh, engine=None):\n \"\"\"\n Find the list of parallel adjacent faces.\n\n Parameters\n -----------\n mesh : trimesh.Trimesh\n engine : str\n Which graph engine to use:\n ('scipy', 'networkx')\n\n Returns\n ---------\n facets : sequence of (n,) int\n Groups of face indexes of\n parallel adjacent faces.\n \"\"\"\n # what is the radius of a circle that passes through the perpendicular\n # projection of the vector between the two non- shared vertices\n # onto the shared edge, with the face normal from the two adjacent faces\n radii = mesh.face_adjacency_radius\n # what is the span perpendicular to the shared edge\n span = mesh.face_adjacency_span\n # a very arbitrary formula for declaring two adjacent faces\n # parallel in a way that is hopefully (and anecdotally) robust\n # to numeric error\n # a common failure mode is two faces that are very narrow with a slight\n # angle between them, so here we divide by the perpendicular span\n # to penalize very narrow faces, and then square it just for fun\n parallel = np.ones(len(radii), dtype=bool)\n # if span is zero we know faces are small/parallel\n nonzero = np.abs(span) > tol.zero\n # faces with a radii/span ratio larger than a threshold pass\n parallel[nonzero] = (radii[nonzero] /\n span[nonzero]) ** 2 > tol.facet_threshold\n\n # run connected components on the parallel faces to group them\n components = connected_components(\n mesh.face_adjacency[parallel],\n nodes=np.arange(len(mesh.faces)),\n min_len=2,\n engine=engine)\n\n return components\n\n\ndef split(mesh, only_watertight=True, adjacency=None, engine=None, **kwargs):\n \"\"\"\n Split a mesh into multiple meshes from face\n connectivity.\n\n If only_watertight is true it will only return\n watertight meshes and will attempt to repair\n single triangle or quad holes.\n\n Parameters\n ----------\n mesh : trimesh.Trimesh\n only_watertight: bool\n Only return watertight components\n adjacency : (n, 2) int\n Face adjacency to override full mesh\n engine : str or None\n Which graph engine to use\n\n Returns\n ----------\n meshes : (m,) trimesh.Trimesh\n Results of splitting\n \"\"\"\n if adjacency is None:\n adjacency = mesh.face_adjacency\n\n # if only watertight the shortest thing we can split has 3 triangles\n if only_watertight:\n min_len = 4\n else:\n min_len = 1\n\n components = connected_components(\n edges=adjacency,\n 
nodes=np.arange(len(mesh.faces)),\n min_len=min_len,\n engine=engine)\n meshes = mesh.submesh(\n components, only_watertight=only_watertight, **kwargs)\n return meshes\n\n\ndef connected_components(edges,\n min_len=1,\n nodes=None,\n engine=None):\n \"\"\"\n Find groups of connected nodes from an edge list.\n\n Parameters\n -----------\n edges : (n, 2) int\n Edges between nodes\n nodes : (m, ) int or None\n List of nodes that exist\n min_len : int\n Minimum length of a component group to return\n engine : str or None\n Which graph engine to use (None for automatic):\n (None, 'networkx', 'scipy')\n\n\n Returns\n -----------\n components : (n,) sequence of (*,) int\n Nodes which are connected\n \"\"\"\n def components_networkx():\n \"\"\"\n Find connected components using networkx\n \"\"\"\n graph = nx.from_edgelist(edges)\n # make sure every face has a node, so single triangles\n # aren't discarded (as they aren't adjacent to anything)\n if min_len <= 1:\n graph.add_nodes_from(nodes)\n return [list(i) for i in nx.connected_components(graph)]\n\n def components_csgraph():\n \"\"\"\n Find connected components using scipy.sparse.csgraph\n \"\"\"\n # label each node\n labels = connected_component_labels(edges,\n node_count=node_count)\n\n # we have to remove results that contain nodes outside\n # of the specified node set and reindex\n contained = np.zeros(node_count, dtype=bool)\n contained[nodes] = True\n index = np.arange(node_count, dtype=np.int64)[contained]\n components = grouping.group(labels[contained], min_len=min_len)\n return [index[c] for c in components]\n\n return components\n\n # check input edges\n edges = np.asanyarray(edges, dtype=np.int64)\n # if no nodes were specified just use unique\n if nodes is None:\n nodes = np.unique(edges)\n\n # exit early if we have no nodes\n if len(nodes) == 0:\n return []\n elif len(edges) == 0:\n if min_len <= 1:\n return np.reshape(nodes, (-1, 1)).tolist()\n else:\n return []\n\n if not util.is_shape(edges, (-1, 2)):\n raise ValueError('edges must be (n, 2)!')\n\n # find the maximum index referenced in either nodes or edges\n counts = [0]\n if len(edges) > 0:\n counts.append(edges.max())\n if len(nodes) > 0:\n counts.append(nodes.max())\n node_count = np.max(counts) + 1\n\n # remove edges that don't have both nodes in the node set\n mask = np.zeros(node_count, dtype=bool)\n mask[nodes] = True\n edges_ok = mask[edges].all(axis=1)\n edges = edges[edges_ok]\n\n # networkx is pure python and is usually 5-10x slower than scipy\n engines = collections.OrderedDict((\n ('scipy', components_csgraph),\n ('networkx', components_networkx)))\n\n # if a graph engine has explicitly been requested use it\n if engine in engines:\n return engines[engine]()\n\n # otherwise, go through our ordered list of graph engines\n # until we get to one that has actually been installed\n for function in engines.values():\n try:\n return function()\n # will be raised if the library didn't import correctly above\n except BaseException:\n continue\n raise ImportError('no graph engines available!')\n\n\ndef connected_component_labels(edges, node_count=None):\n \"\"\"\n Label graph nodes from an edge list, using scipy.sparse.csgraph\n\n Parameters\n -----------\n edges : (n, 2) int\n Edges of a graph\n node_count : int, or None\n The largest node in the graph.\n\n Returns\n ----------\n labels : (node_count,) int\n Component labels for each node\n \"\"\"\n matrix = edges_to_coo(edges, node_count)\n body_count, labels = csgraph.connected_components(\n matrix, directed=False)\n\n 
if node_count is not None:\n assert len(labels) == node_count\n\n return labels\n\n\ndef split_traversal(traversal,\n edges,\n edges_hash=None):\n \"\"\"\n Given a traversal as a list of nodes, split the traversal\n if a sequential index pair is not in the given edges.\n\n Parameters\n --------------\n edges : (n, 2) int\n Graph edge indexes\n traversal : (m,) int\n Traversal through edges\n edge_hash : (n,)\n Edges sorted on axis=1 and\n passed to grouping.hashable_rows\n\n Returns\n ---------------\n split : sequence of (p,) int\n \"\"\"\n traversal = np.asanyarray(traversal,\n dtype=np.int64)\n\n # hash edge rows for contains checks\n if edges_hash is None:\n edges_hash = grouping.hashable_rows(\n np.sort(edges, axis=1))\n\n # turn the (n,) traversal into (n-1, 2) edges\n trav_edge = np.column_stack((traversal[:-1],\n traversal[1:]))\n # hash each edge so we can compare to edge set\n trav_hash = grouping.hashable_rows(\n np.sort(trav_edge, axis=1))\n # check if each edge is contained in edge set\n contained = np.in1d(trav_hash, edges_hash)\n\n # exit early if every edge of traversal exists\n if contained.all():\n # just reshape one traversal\n split = [traversal]\n else:\n # find contiguous groups of contained edges\n blocks = grouping.blocks(contained,\n min_len=1,\n only_nonzero=True)\n\n # turn edges back in to sequence of traversals\n split = [np.append(trav_edge[b][:, 0],\n trav_edge[b[-1]][1])\n for b in blocks]\n\n # close traversals if necessary\n for i, t in enumerate(split):\n # make sure elements of sequence are numpy arrays\n split[i] = np.asanyarray(split[i], dtype=np.int64)\n # don't close if its a single edge\n if len(t) <= 2:\n continue\n # make sure it's not already closed\n edge = np.sort([t[0], t[-1]])\n if edge.ptp() == 0:\n continue\n close = grouping.hashable_rows(edge.reshape((1, 2)))[0]\n # if we need the edge add it\n if close in edges_hash:\n split[i] = np.append(t, t[0]).astype(np.int64)\n\n return split\n\n\ndef fill_traversals(traversals, edges, edges_hash=None):\n \"\"\"\n Convert a traversal of a list of edges into a sequence of\n traversals where every pair of consecutive node indexes\n is an edge in a passed edge list\n\n Parameters\n -------------\n traversals : sequence of (m,) int\n Node indexes of traversals of a graph\n edges : (n, 2) int\n Pairs of connected node indexes\n edges_hash : None, or (n,) int\n Edges sorted along axis 1 then hashed\n using grouping.hashable_rows\n\n Returns\n --------------\n splits : sequence of (p,) int\n Node indexes of connected traversals\n \"\"\"\n # make sure edges are correct type\n edges = np.asanyarray(edges, dtype=np.int64)\n # make sure edges are sorted\n edges.sort(axis=1)\n\n # if there are no traversals just return edges\n if len(traversals) == 0:\n return edges.copy()\n\n # hash edges for contains checks\n if edges_hash is None:\n edges_hash = grouping.hashable_rows(edges)\n\n splits = []\n for nodes in traversals:\n # split traversals to remove edges\n # that don't actually exist\n splits.extend(split_traversal(\n traversal=nodes,\n edges=edges,\n edges_hash=edges_hash))\n # turn the split traversals back into (n, 2) edges\n included = util.vstack_empty([np.column_stack((i[:-1], i[1:]))\n for i in splits])\n if len(included) > 0:\n # sort included edges in place\n included.sort(axis=1)\n # make sure any edges not included in split traversals\n # are just added as a length 2 traversal\n splits.extend(grouping.boolean_rows(\n edges,\n included,\n operation=np.setdiff1d))\n else:\n # no edges were included, 
so our filled traversal\n # is just the original edges copied over\n splits = edges.copy()\n\n return splits\n\n\ndef traversals(edges, mode='bfs'):\n \"\"\"\n Given an edge list generate a sequence of ordered depth\n first search traversals using scipy.csgraph routines.\n\n Parameters\n ------------\n edges : (n, 2) int\n Undirected edges of a graph\n mode : str\n Traversal type, 'bfs' or 'dfs'\n\n Returns\n -----------\n traversals : (m,) sequence of (p,) int\n Ordered DFS or BFS traversals of the graph.\n \"\"\"\n edges = np.array(edges, dtype=np.int64)\n if len(edges) == 0:\n return []\n elif not util.is_shape(edges, (-1, 2)):\n raise ValueError('edges are not (n, 2)!')\n\n # pick the traversal method\n mode = str(mode).lower().strip()\n if mode == 'bfs':\n func = csgraph.breadth_first_order\n elif mode == 'dfs':\n func = csgraph.depth_first_order\n else:\n raise ValueError('traversal mode must be either dfs or bfs')\n\n # make sure edges are sorted so we can query\n # an ordered pair later\n edges.sort(axis=1)\n # set of nodes to make sure we get every node\n nodes = set(edges.reshape(-1))\n # coo_matrix for csgraph routines\n graph = edges_to_coo(edges)\n\n # we're going to make a sequence of traversals\n traversals = []\n\n while len(nodes) > 0:\n # starting at any node\n start = nodes.pop()\n # get an (n,) ordered traversal\n ordered = func(graph,\n i_start=start,\n return_predecessors=False,\n directed=False).astype(np.int64)\n\n traversals.append(ordered)\n # remove the nodes we've consumed\n nodes.difference_update(ordered)\n\n return traversals\n\n\ndef edges_to_coo(edges, count=None, data=None):\n \"\"\"\n Given an edge list, return a boolean scipy.sparse.coo_matrix\n representing the edges in matrix form.\n\n Parameters\n ------------\n edges : (n, 2) int\n Edges of a graph\n count : int\n The total number of nodes in the graph\n if None: count = edges.max() + 1\n data : (n,) any\n Assign data to each edge, if None will\n be bool True for each specified edge\n\n Returns\n ------------\n matrix: (count, count) scipy.sparse.coo_matrix\n Sparse COO\n \"\"\"\n edges = np.asanyarray(edges, dtype=np.int64)\n if not (len(edges) == 0 or\n util.is_shape(edges, (-1, 2))):\n raise ValueError('edges must be (n, 2)!')\n\n # if count isn't specified just set it to largest\n # value referenced in edges\n if count is None:\n count = edges.max() + 1\n count = int(count)\n\n # if no data is specified set every specified edge\n # to True\n if data is None:\n data = np.ones(len(edges), dtype=bool)\n\n matrix = coo_matrix((data, edges.T),\n dtype=data.dtype,\n shape=(count, count))\n return matrix\n\n\ndef neighbors(edges, max_index=None, directed=False):\n \"\"\"\n Find the neighbors for each node in an edgelist graph.\n\n TODO : re-write this with sparse matrix operations\n\n Parameters\n ------------\n edges : (n, 2) int\n Connected nodes\n directed : bool\n If True, only connect edges in one direction\n\n Returns\n ---------\n neighbors : sequence\n Vertex index corresponds to set of other vertex indices\n \"\"\"\n neighbors = collections.defaultdict(set)\n if directed:\n [neighbors[edge[0]].add(edge[1])\n for edge in edges]\n else:\n [(neighbors[edge[0]].add(edge[1]),\n neighbors[edge[1]].add(edge[0]))\n for edge in edges]\n\n if max_index is None:\n max_index = edges.max() + 1\n array = [list(neighbors[i]) for i in range(max_index)]\n\n return array\n\n\ndef smoothed(mesh, angle=None, facet_minarea=10):\n \"\"\"\n Return a non- watertight version of the mesh which\n will render nicely with 
smooth shading by\n disconnecting faces at sharp angles to each other.\n\n Parameters\n -----------\n mesh : trimesh.Trimesh\n Source geometry\n angle : float or None\n Angle in radians face pairs with angles\n smaller than this will appear smoothed\n facet_minarea : float or None\n Minimum area fraction to consider\n IE for `facets_minarea=25` only facets larger\n than `mesh.area / 25` will be considered.\n\n Returns\n ---------\n smooth : trimesh.Trimesh\n Geometry with disconnected face patches\n \"\"\"\n if angle is None:\n angle = np.radians(20)\n\n # if the mesh has no adjacent faces return a copy\n if len(mesh.face_adjacency) == 0:\n return mesh.copy()\n\n # face pairs below angle threshold\n angle_ok = mesh.face_adjacency_angles < angle\n # subset of face adjacency\n adjacency = mesh.face_adjacency[angle_ok]\n\n # coplanar groups of faces\n facets = []\n nodes = None\n # collect coplanar regions for smoothing\n if facet_minarea is not None:\n areas = mesh.area_faces\n min_area = mesh.area / facet_minarea\n try:\n # we can survive not knowing facets\n # exclude facets with few faces\n facets = [f for f in mesh.facets\n if areas[f].sum() > min_area]\n if len(facets) > 0:\n # mask for removing adjacency pairs where\n # one of the faces is contained in a facet\n mask = np.ones(len(mesh.faces),\n dtype=bool)\n mask[np.hstack(facets)] = False\n # apply the mask to adjacency\n adjacency = adjacency[mask[adjacency].all(axis=1)]\n # nodes are no longer every faces\n nodes = np.unique(adjacency)\n except BaseException:\n log.warning('failed to calculate facets',\n exc_info=True)\n # run connected components on facet adjacency\n components = connected_components(\n adjacency,\n min_len=2,\n nodes=nodes)\n\n # add back coplanar groups if any exist\n if len(facets) > 0:\n components.extend(facets)\n\n if len(components) == 0:\n # if no components for some reason\n # just return a copy of the original mesh\n return mesh.copy()\n\n # add back any faces that were missed\n unique = np.unique(np.hstack(components))\n if len(unique) != len(mesh.faces):\n # things like single loose faces\n # or groups below facet_minlen\n broke = np.setdiff1d(\n np.arange(len(mesh.faces)), unique)\n components.extend(broke.reshape((-1, 1)))\n\n # get a submesh as a single appended Trimesh\n smooth = mesh.submesh(components,\n only_watertight=False,\n append=True)\n # store face indices from original mesh\n smooth.metadata['original_components'] = components\n # smoothed should have exactly the same number of faces\n if len(smooth.faces) != len(mesh.faces):\n log.warning('face count in smooth wrong!')\n return smooth\n\n\ndef is_watertight(edges, edges_sorted=None):\n \"\"\"\n Parameters\n -----------\n edges : (n, 2) int\n List of vertex indices\n edges_sorted : (n, 2) int\n Pass vertex indices sorted on axis 1 as a speedup\n\n Returns\n ---------\n watertight : boolean\n Whether every edge is shared by an even\n number of faces\n winding : boolean\n Whether every shared edge is reversed\n \"\"\"\n # passing edges_sorted is a speedup only\n if edges_sorted is None:\n edges_sorted = np.sort(edges, axis=1)\n\n # group sorted edges\n groups = grouping.group_rows(\n edges_sorted, require_count=2)\n watertight = bool((len(groups) * 2) == len(edges))\n\n # are opposing edges reversed\n opposing = edges[groups].reshape((-1, 4))[:, 1:3].T\n # wrap the weird numpy bool\n winding = bool(np.equal(*opposing).all())\n\n return watertight, winding\n\n\ndef graph_to_svg(graph):\n \"\"\"\n Turn a networkx graph into an SVG string\n 
using graphviz `dot`.\n\n Parameters\n ----------\n graph: networkx graph\n\n Returns\n ---------\n svg: string, pictoral layout in SVG format\n \"\"\"\n\n import tempfile\n import subprocess\n with tempfile.NamedTemporaryFile() as dot_file:\n nx.drawing.nx_agraph.write_dot(graph, dot_file.name)\n svg = subprocess.check_output(['dot', dot_file.name, '-Tsvg'])\n return svg\n\n\ndef multigraph_paths(G, source, cutoff=None):\n \"\"\"\n For a networkx MultiDiGraph, find all paths from a source node\n to leaf nodes. This function returns edge instance numbers\n in addition to nodes, unlike networkx.all_simple_paths.\n\n Parameters\n ---------------\n G : networkx.MultiDiGraph\n Graph to evaluate\n source : hashable\n Node to start traversal at\n cutoff : int\n Number of nodes to visit\n If None will visit all nodes\n\n Returns\n ----------\n traversals : (n,) list of [(node, edge instance index), ] paths\n Traversals of the multigraph\n \"\"\"\n if cutoff is None:\n cutoff = (len(G.edges()) * len(G.nodes())) + 1\n\n # the path starts at the node specified\n current = [(source, 0)]\n # traversals we need to go back and do\n queue = []\n # completed paths\n traversals = []\n\n for i in range(cutoff):\n # paths are stored as (node, instance) so\n # get the node of the last place visited\n current_node = current[-1][0]\n # get all the children of the current node\n child = G[current_node]\n\n if len(child) == 0:\n # we have no children, so we are at the end of this path\n # save the path as a completed traversal\n traversals.append(current)\n # if there is nothing on the queue, we are done\n if len(queue) == 0:\n break\n # otherwise continue traversing with the next path\n # on the queue\n current = queue.pop()\n else:\n # oh no, we have multiple edges from current -> child\n start = True\n # iterate through child nodes and edge instances\n for node in child.keys():\n for instance in child[node].keys():\n if start:\n # if this is the first edge, keep it on the\n # current traversal and save the others for later\n current.append((node, instance))\n start = False\n else:\n # this child has multiple instances\n # so we will need to traverse them multiple times\n # we appended a node to current, so only take the\n # first n-1 visits\n queue.append(current[:-1] + [(node, instance)])\n return traversals\n\n\ndef multigraph_collect(G, traversal, attrib=None):\n \"\"\"\n Given a MultiDiGraph traversal, collect attributes along it.\n\n Parameters\n -------------\n G: networkx.MultiDiGraph\n traversal: (n) list of (node, instance) tuples\n attrib: dict key, name to collect. If None, will return all\n\n Returns\n -------------\n collected: (len(traversal) - 1) list of attributes\n \"\"\"\n\n collected = []\n for u, v in util.pairwise(traversal):\n attribs = G[u[0]][v[0]][v[1]]\n if attrib is None:\n collected.append(attribs)\n else:\n collected.append(attribs[attrib])\n return collected\n",
"\"\"\"\ngrouping.py\n-------------\n\nFunctions for grouping values and rows.\n\"\"\"\n\nimport numpy as np\n\nfrom . import util\n\nfrom .constants import log, tol\n\ntry:\n from scipy.spatial import cKDTree\nexcept BaseException as E:\n # wrapping just ImportError fails in some cases\n # will raise the error when someone tries to use KDtree\n from . import exceptions\n cKDTree = exceptions.closure(E)\n\n\ndef merge_vertices(mesh,\n merge_tex=False,\n merge_norm=False,\n digits_vertex=None,\n digits_norm=2,\n digits_uv=4,\n **kwargs):\n \"\"\"\n Removes duplicate vertices, grouped by position and\n optionally texture coordinate and normal.\n\n Parameters\n -------------\n mesh : Trimesh object\n Mesh to merge vertices on\n merge_tex : bool\n If True textured meshes with UV coordinates will\n have vertices merged regardless of UV coordinates\n merge_norm : bool\n If True, meshes with vertex normals will have\n vertices merged ignoring different normals\n digits_vertex : None or int\n Number of digits to consider for vertex position\n digits_norm : int\n Number of digits to consider for unit normals\n digits_uv : int\n Number of digits to consider for UV coordinates\n \"\"\"\n # use tol.merge if digit precision not passed\n if not isinstance(digits_vertex, int):\n digits_vertex = util.decimal_to_digits(tol.merge)\n\n # if we have a ton of unreferenced vertices it will\n # make the unique_rows call super slow so cull first\n if hasattr(mesh, 'faces') and len(mesh.faces) > 0:\n referenced = np.zeros(len(mesh.vertices), dtype=bool)\n referenced[mesh.faces] = True\n else:\n # this is used for geometry without faces\n referenced = np.ones(len(mesh.vertices), dtype=bool)\n\n # collect vertex attributes into sequence we can stack\n stacked = [mesh.vertices * (10 ** digits_vertex)]\n\n # UV texture visuals require us to update the\n # vertices and normals differently\n if (not merge_tex and\n mesh.visual.defined and\n mesh.visual.kind == 'texture' and\n mesh.visual.uv is not None and\n len(mesh.visual.uv) == len(mesh.vertices)):\n # get an array with vertices and UV coordinates\n # converted to integers at requested precision\n stacked.append(mesh.visual.uv * (10 ** digits_uv))\n\n # check to see if we have vertex normals\n normals = mesh._cache['vertex_normals']\n if not merge_norm and np.shape(normals) == mesh.vertices.shape:\n stacked.append(normals * (10 ** digits_norm))\n\n # stack collected vertex properties and round to integer\n stacked = np.column_stack(stacked).round().astype(np.int64)\n\n # check unique rows of referenced vertices\n u, i = unique_rows(stacked[referenced])\n\n # construct an inverse using the subset\n inverse = np.zeros(len(mesh.vertices), dtype=np.int64)\n inverse[referenced] = i\n # get the vertex mask\n mask = np.nonzero(referenced)[0][u]\n # run the update including normals and UV coordinates\n mesh.update_vertices(mask=mask, inverse=inverse)\n\n\ndef group(values, min_len=0, max_len=np.inf):\n \"\"\"\n Return the indices of values that are identical\n\n Parameters\n ----------\n values : (n,) int\n Values to group\n min_len : int\n The shortest group allowed\n All groups will have len >= min_length\n max_len : int\n The longest group allowed\n All groups will have len <= max_length\n\n Returns\n ----------\n groups : sequence\n Contains indices to form groups\n IE [0,1,0,1] returns [[0,2], [1,3]]\n \"\"\"\n original = np.asanyarray(values)\n\n # save the sorted order and then apply it\n order = original.argsort()\n values = original[order]\n\n # find the indexes 
which are duplicates\n if values.dtype.kind == 'f':\n # for floats in a sorted array, neighbors are not duplicates\n # if the difference between them is greater than approximate zero\n nondupe = np.greater(np.abs(np.diff(values)), tol.zero)\n else:\n # for ints and strings we can check exact non- equality\n # for all other types this will only work if they defined\n # an __eq__\n nondupe = values[1:] != values[:-1]\n\n dupe_idx = np.append(0, np.nonzero(nondupe)[0] + 1)\n dupe_len = np.diff(np.concatenate((dupe_idx, [len(values)])))\n dupe_ok = np.logical_and(np.greater_equal(dupe_len, min_len),\n np.less_equal(dupe_len, max_len))\n groups = [order[i:(i + j)]\n for i, j in zip(dupe_idx[dupe_ok],\n dupe_len[dupe_ok])]\n return groups\n\n\ndef hashable_rows(data, digits=None):\n \"\"\"\n We turn our array into integers based on the precision\n given by digits and then put them in a hashable format.\n\n Parameters\n ---------\n data : (n, m) array\n Input data\n digits : int or None\n How many digits to add to hash if data is floating point\n If None, tol.merge will be used\n\n Returns\n ---------\n hashable : (n,) array\n Custom data type which can be sorted\n or used as hash keys\n \"\"\"\n # if there is no data return immediately\n if len(data) == 0:\n return np.array([])\n\n # get array as integer to precision we care about\n as_int = float_to_int(data, digits=digits)\n\n # if it is flat integers already, return\n if len(as_int.shape) == 1:\n return as_int\n\n # if array is 2D and smallish, we can try bitbanging\n # this is significantly faster than the custom dtype\n if len(as_int.shape) == 2 and as_int.shape[1] <= 4:\n # time for some righteous bitbanging\n # can we pack the whole row into a single 64 bit integer\n precision = int(np.floor(64 / as_int.shape[1]))\n # if the max value is less than precision we can do this\n if np.abs(as_int).max() < 2**(precision - 1):\n # the resulting package\n hashable = np.zeros(len(as_int), dtype=np.int64)\n # loop through each column and bitwise xor to combine\n # make sure as_int is int64 otherwise bit offset won't work\n for offset, column in enumerate(as_int.astype(np.int64).T):\n # will modify hashable in place\n np.bitwise_xor(hashable,\n column << (offset * precision),\n out=hashable)\n return hashable\n\n # reshape array into magical data type that is weird but hashable\n dtype = np.dtype((np.void, as_int.dtype.itemsize * as_int.shape[1]))\n # make sure result is contiguous and flat\n hashable = np.ascontiguousarray(as_int).view(dtype).reshape(-1)\n return hashable\n\n\ndef float_to_int(data, digits=None, dtype=np.int32):\n \"\"\"\n Given a numpy array of float/bool/int, return as integers.\n\n Parameters\n -------------\n data : (n, d) float, int, or bool\n Input data\n digits : float or int\n Precision for float conversion\n dtype : numpy.dtype\n What datatype should result be returned as\n\n Returns\n -------------\n as_int : (n, d) int\n Data as integers\n \"\"\"\n # convert to any numpy array\n data = np.asanyarray(data)\n\n # if data is already an integer or boolean we're done\n # if the data is empty we are also done\n if data.dtype.kind in 'ib' or data.size == 0:\n return data.astype(dtype)\n elif data.dtype.kind != 'f':\n data = data.astype(np.float64)\n\n # populate digits from kwargs\n if digits is None:\n digits = util.decimal_to_digits(tol.merge)\n elif isinstance(digits, float) or isinstance(digits, np.float64):\n digits = util.decimal_to_digits(digits)\n elif not (isinstance(digits, int) or isinstance(digits, np.integer)):\n 
log.warning('Digits were passed as %s!', digits.__class__.__name__)\n raise ValueError('Digits must be None, int, or float!')\n\n # data is float so convert to large integers\n data_max = np.abs(data).max() * 10**digits\n # ignore passed dtype if we have something large\n dtype = [np.int32, np.int64][int(data_max > 2**31)]\n # multiply by requested power of ten\n # then subtract small epsilon to avoid \"go either way\" rounding\n # then do the rounding and convert to integer\n as_int = np.round((data * 10 ** digits) - 1e-6).astype(dtype)\n\n return as_int\n\n\ndef unique_ordered(data):\n \"\"\"\n Returns the same as np.unique, but ordered as per the\n first occurrence of the unique value in data.\n\n Examples\n ---------\n In [1]: a = [0, 3, 3, 4, 1, 3, 0, 3, 2, 1]\n\n In [2]: np.unique(a)\n Out[2]: array([0, 1, 2, 3, 4])\n\n In [3]: trimesh.grouping.unique_ordered(a)\n Out[3]: array([0, 3, 4, 1, 2])\n \"\"\"\n data = np.asanyarray(data)\n order = np.sort(np.unique(data, return_index=True)[1])\n result = data[order]\n return result\n\n\ndef unique_bincount(values,\n minlength=0,\n return_inverse=False,\n return_counts=False):\n \"\"\"\n For arrays of integers find unique values using bin counting.\n Roughly 10x faster for correct input than np.unique\n\n Parameters\n --------------\n values : (n,) int\n Values to find unique members of\n minlength : int\n Maximum value that will occur in values (values.max())\n return_inverse : bool\n If True, return an inverse such that unique[inverse] == values\n return_counts : bool\n If True, also return the number of times each\n unique item appears in values\n\n Returns\n ------------\n unique : (m,) int\n Unique values in original array\n inverse : (n,) int, optional\n An array such that unique[inverse] == values\n Only returned if return_inverse is True\n counts : (m,) int, optional\n An array holding the counts of each unique item in values\n Only returned if return_counts is True\n \"\"\"\n values = np.asanyarray(values)\n if len(values.shape) != 1 or values.dtype.kind != 'i':\n raise ValueError('input must be 1D integers!')\n\n try:\n # count the number of occurrences of each value\n counts = np.bincount(values, minlength=minlength)\n except TypeError:\n # casting failed on 32 bit windows\n log.warning('casting failed, falling back!')\n # fall back to numpy unique\n return np.unique(values,\n return_inverse=return_inverse,\n return_counts=return_counts)\n\n # which bins are occupied at all\n # counts are integers so this works\n unique_bin = counts.astype(bool)\n\n # which values are unique\n # indexes correspond to original values\n unique = np.where(unique_bin)[0]\n ret = (unique,)\n\n if return_inverse:\n # find the inverse to reconstruct original\n inverse = (np.cumsum(unique_bin) - 1)[values]\n ret += (inverse,)\n\n if return_counts:\n unique_counts = counts[unique]\n ret += (unique_counts,)\n\n if len(ret) == 1:\n return ret[0]\n return ret\n\n\ndef merge_runs(data, digits=None):\n \"\"\"\n Merge duplicate sequential values. 
This differs from unique_ordered\n in that values can occur in multiple places in the sequence, but\n only consecutive repeats are removed\n\n Parameters\n -----------\n data: (n,) float or int\n\n Returns\n --------\n merged: (m,) float or int\n\n Examples\n ---------\n In [1]: a\n Out[1]:\n array([-1, -1, -1, 0, 0, 1, 1, 2, 0,\n 3, 3, 4, 4, 5, 5, 6, 6, 7,\n 7, 8, 8, 9, 9, 9])\n\n In [2]: trimesh.grouping.merge_runs(a)\n Out[2]: array([-1, 0, 1, 2, 0, 3, 4, 5, 6, 7, 8, 9])\n \"\"\"\n data = np.asanyarray(data)\n mask = np.abs(np.diff(data)) > tol.merge\n mask = np.concatenate((np.array([True]), mask))\n\n return data[mask]\n\n\ndef unique_float(data,\n return_index=False,\n return_inverse=False,\n digits=None):\n \"\"\"\n Identical to the numpy.unique command, except evaluates floating point\n numbers, using a specified number of digits.\n\n If digits isn't specified, the library default TOL_MERGE will be used.\n \"\"\"\n data = np.asanyarray(data)\n as_int = float_to_int(data, digits)\n _junk, unique, inverse = np.unique(as_int,\n return_index=True,\n return_inverse=True)\n\n if (not return_index) and (not return_inverse):\n return data[unique]\n\n result = [data[unique]]\n\n if return_index:\n result.append(unique)\n if return_inverse:\n result.append(inverse)\n return tuple(result)\n\n\ndef unique_rows(data, digits=None):\n \"\"\"\n Returns indices of unique rows. It will return the\n first occurrence of a row that is duplicated:\n [[1,2], [3,4], [1,2]] will return [0,1]\n\n Parameters\n ---------\n data : (n, m) array\n Floating point data\n digits : int or None\n How many digits to consider\n\n Returns\n --------\n unique : (j,) int\n Index in data which is a unique row\n inverse : (n,) int\n Array to reconstruct original\n Example: data[unique][inverse] == data\n \"\"\"\n rows = hashable_rows(data, digits=digits)\n _, unique, inverse = np.unique(\n rows,\n return_index=True,\n return_inverse=True)\n\n return unique, inverse\n\n\ndef unique_value_in_row(data, unique=None):\n \"\"\"\n For a 2D array of integers find the position of a\n value in each row which only occurs once.\n\n If there are more than one value per row which\n occur once, the last one is returned.\n\n Parameters\n ----------\n data : (n, d) int\n Data to check values\n unique : (m,) int\n List of unique values contained in data.\n Generated from np.unique if not passed\n\n Returns\n ---------\n result : (n, d) bool\n With one or zero True values per row.\n\n\n Examples\n -------------------------------------\n In [0]: r = np.array([[-1, 1, 1],\n [-1, 1, -1],\n [-1, 1, 1],\n [-1, 1, -1],\n [-1, 1, -1]], dtype=np.int8)\n\n In [1]: unique_value_in_row(r)\n Out[1]:\n array([[ True, False, False],\n [False, True, False],\n [ True, False, False],\n [False, True, False],\n [False, True, False]], dtype=bool)\n\n In [2]: unique_value_in_row(r).sum(axis=1)\n Out[2]: array([1, 1, 1, 1, 1])\n\n In [3]: r[unique_value_in_row(r)]\n Out[3]: array([-1, 1, -1, 1, 1], dtype=int8)\n \"\"\"\n if unique is None:\n unique = np.unique(data)\n data = np.asanyarray(data)\n result = np.zeros_like(data, dtype=bool, subok=False)\n for value in unique:\n test = np.equal(data, value)\n test_ok = test.sum(axis=1) == 1\n result[test_ok] = test[test_ok]\n return result\n\n\ndef group_rows(data, require_count=None, digits=None):\n \"\"\"\n Returns index groups of duplicate rows, for example:\n [[1,2], [3,4], [1,2]] will return [[0,2], [1]]\n\n\n Note that using require_count allows numpy advanced\n indexing to be used in place of looping and\n 
checking hashes and is ~10x faster.\n\n\n Parameters\n ----------\n data : (n, m) array\n Data to group\n require_count : None or int\n Only return groups of a specified length, eg:\n require_count = 2\n [[1,2], [3,4], [1,2]] will return [[0,2]]\n digits : None or int\n If data is floating point how many decimals\n to consider, or calculated from tol.merge\n\n Returns\n ----------\n groups : sequence (*,) int\n Indices from in indicating identical rows.\n \"\"\"\n\n def group_dict():\n \"\"\"\n Simple hash table based grouping.\n The loop and appends make this rather slow on\n large arrays but it works on irregular groups.\n \"\"\"\n observed = dict()\n hashable = hashable_rows(data, digits=digits)\n for index, key in enumerate(hashable):\n key_string = key.tobytes()\n if key_string in observed:\n observed[key_string].append(index)\n else:\n observed[key_string] = [index]\n return list(observed.values())\n\n def group_slice():\n # create a representation of the rows that can be sorted\n hashable = hashable_rows(data, digits=digits)\n # record the order of the rows so we can get the original indices back\n # later\n order = np.argsort(hashable)\n # but for now, we want our hashes sorted\n hashable = hashable[order]\n # this is checking each neighbour for equality, example:\n # example: hashable = [1, 1, 1]; dupe = [0, 0]\n dupe = hashable[1:] != hashable[:-1]\n # we want the first index of a group, so we can slice from that location\n # example: hashable = [0 1 1]; dupe = [1,0]; dupe_idx = [0,1]\n dupe_idx = np.append(0, np.nonzero(dupe)[0] + 1)\n # if you wanted to use this one function to deal with non- regular groups\n # you could use: np.array_split(dupe_idx)\n # this is roughly 3x slower than using the group_dict method above.\n start_ok = np.diff(\n np.concatenate((dupe_idx, [len(hashable)]))) == require_count\n groups = np.tile(dupe_idx[start_ok].reshape((-1, 1)),\n require_count) + np.arange(require_count)\n groups_idx = order[groups]\n if require_count == 1:\n return groups_idx.reshape(-1)\n return groups_idx\n\n if require_count is None:\n return group_dict()\n else:\n return group_slice()\n\n\ndef boolean_rows(a, b, operation=np.intersect1d):\n \"\"\"\n Find the rows in two arrays which occur in both rows.\n\n Parameters\n ---------\n a: (n, d) int\n Array with row vectors\n b: (m, d) int\n Array with row vectors\n operation : function\n Numpy boolean set operation function:\n -np.intersect1d\n -np.setdiff1d\n\n Returns\n --------\n shared: (p, d) array containing rows in both a and b\n \"\"\"\n a = np.asanyarray(a, dtype=np.int64)\n b = np.asanyarray(b, dtype=np.int64)\n\n av = a.view([('', a.dtype)] * a.shape[1]).ravel()\n bv = b.view([('', b.dtype)] * b.shape[1]).ravel()\n shared = operation(av, bv).view(a.dtype).reshape(-1, a.shape[1])\n\n return shared\n\n\ndef group_vectors(vectors,\n angle=1e-4,\n include_negative=False):\n \"\"\"\n Group vectors based on an angle tolerance, with the option to\n include negative vectors.\n\n Parameters\n -----------\n vectors : (n,3) float\n Direction vector\n angle : float\n Group vectors closer than this angle in radians\n include_negative : bool\n If True consider the same:\n [0,0,1] and [0,0,-1]\n\n Returns\n ------------\n new_vectors : (m,3) float\n Direction vector\n groups : (m,) sequence of int\n Indices of source vectors\n \"\"\"\n\n vectors = np.asanyarray(vectors, dtype=np.float64)\n angle = float(angle)\n\n if include_negative:\n vectors = util.vector_hemisphere(vectors)\n\n spherical = util.vector_to_spherical(vectors)\n 
angles, groups = group_distance(spherical, angle)\n new_vectors = util.spherical_to_vector(angles)\n return new_vectors, groups\n\n\ndef group_distance(values, distance):\n \"\"\"\n Find groups of points which have neighbours closer than radius,\n where no two points in a group are farther than distance apart.\n\n Parameters\n ---------\n points : (n, d) float\n Points of dimension d\n distance : float\n Max distance between points in a cluster\n\n Returns\n ----------\n unique : (m, d) float\n Median value of each group\n groups : (m) sequence of int\n Indexes of points that make up a group\n\n \"\"\"\n values = np.asanyarray(values,\n dtype=np.float64)\n\n consumed = np.zeros(len(values),\n dtype=bool)\n tree = cKDTree(values)\n\n # (n, d) set of values that are unique\n unique = []\n # (n) sequence of indices in values\n groups = []\n\n for index, value in enumerate(values):\n if consumed[index]:\n continue\n group = np.array(tree.query_ball_point(value, distance),\n dtype=np.int64)\n consumed[group] = True\n unique.append(np.median(values[group], axis=0))\n groups.append(group)\n return np.array(unique), groups\n\n\ndef clusters(points, radius):\n \"\"\"\n Find clusters of points which have neighbours closer than radius\n\n Parameters\n ---------\n points : (n, d) float\n Points of dimension d\n radius : float\n Max distance between points in a cluster\n\n Returns\n ----------\n groups : (m,) sequence of int\n Indices of points in a cluster\n\n \"\"\"\n from . import graph\n tree = cKDTree(points)\n\n # some versions return pairs as a set of tuples\n pairs = tree.query_pairs(r=radius, output_type='ndarray')\n # group connected components\n groups = graph.connected_components(pairs)\n\n return groups\n\n\ndef blocks(data,\n min_len=2,\n max_len=np.inf,\n wrap=False,\n digits=None,\n only_nonzero=False):\n \"\"\"\n Find the indices in an array of contiguous blocks\n of equal values.\n\n Parameters\n ------------\n data : (n,) array\n Data to find blocks on\n min_len : int\n The minimum length group to be returned\n max_len : int\n The maximum length group to be retuurned\n wrap : bool\n Combine blocks on both ends of 1D array\n digits : None or int\n If dealing with floats how many digits to consider\n only_nonzero : bool\n Only return blocks of non- zero values\n\n Returns\n ---------\n blocks : (m) sequence of (*,) int\n Indices referencing data\n \"\"\"\n data = float_to_int(data, digits=digits)\n\n # find the inflection points\n # AKA locations where the array goes from True to False.\n infl = np.concatenate((\n [0], np.nonzero(np.diff(data))[0] + 1, [len(data)]))\n infl_len = np.diff(infl)\n # check the length of each group\n infl_ok = np.logical_and(infl_len >= min_len,\n infl_len <= max_len)\n\n if only_nonzero:\n # check to make sure the values of each contiguous block\n # are True by checking the first value of each block\n infl_ok = np.logical_and(\n infl_ok, data[infl[:-1]])\n\n # inflate start/end indexes into full ranges of values\n blocks = [np.arange(infl[i], infl[i + 1])\n for i, ok in enumerate(infl_ok) if ok]\n\n if wrap:\n # wrap only matters if first and last points are the same\n if data[0] != data[-1]:\n return blocks\n # if we are only grouping nonzero things and\n # the first and last point are zero we can exit\n if only_nonzero and not bool(data[0]):\n return blocks\n\n # so now first point equals last point, so the cases are:\n # - first and last point are in a block: combine two blocks\n # - first OR last point are in block: add other point to block\n # - 
neither are in a block: check if combined is eligible block\n\n # first point is in a block\n first = len(blocks) > 0 and blocks[0][0] == 0\n # last point is in a block\n last = len(blocks) > 0 and blocks[-1][-1] == (len(data) - 1)\n\n # CASE: first and last point are BOTH in block: combine blocks\n if first and last:\n blocks[0] = np.append(blocks[-1], blocks[0])\n blocks.pop()\n else:\n # combined length\n combined = infl_len[0] + infl_len[-1]\n # exit if lengths aren't OK\n if combined < min_len or combined > max_len:\n return blocks\n # new block combines both ends\n new_block = np.append(np.arange(infl[-2], infl[-1]),\n np.arange(infl[0], infl[1]))\n # we are in a first OR last situation now\n if first:\n # first was already in a block so replace it with combined\n blocks[0] = new_block\n elif last:\n # last was already in a block so replace with superset\n blocks[-1] = new_block\n else:\n # both are false\n # combined length generated new block\n blocks.append(new_block)\n\n return blocks\n\n\ndef group_min(groups, data):\n \"\"\"\n Given a list of groups find the minimum element of data\n within each group\n\n Parameters\n -----------\n groups : (n,) sequence of (q,) int\n Indexes of each group corresponding to each element in data\n data : (m,)\n The data that groups indexes reference\n\n Returns\n -----------\n minimums : (n,)\n Minimum value of data per group\n\n \"\"\"\n # sort with major key groups, minor key data\n order = np.lexsort((data, groups))\n groups = groups[order] # this is only needed if groups is unsorted\n data = data[order]\n # construct an index which marks borders between groups\n index = np.empty(len(groups), 'bool')\n index[0] = True\n index[1:] = groups[1:] != groups[:-1]\n return data[index]\n"
] | [
[
"numpy.log2",
"numpy.abs",
"numpy.frombuffer",
"numpy.array",
"numpy.where"
],
[
"numpy.hstack",
"numpy.radians",
"numpy.abs",
"numpy.unique",
"numpy.reshape",
"numpy.arange",
"numpy.in1d",
"numpy.sort",
"numpy.sin",
"numpy.max",
"numpy.append",
"numpy.asanyarray",
"numpy.zeros_like",
"numpy.diff",
"numpy.equal",
"numpy.column_stack",
"numpy.array",
"numpy.zeros"
],
[
"numpy.cumsum",
"numpy.dtype",
"numpy.bitwise_xor",
"numpy.round",
"numpy.zeros_like",
"numpy.where",
"numpy.unique",
"numpy.arange",
"numpy.lexsort",
"numpy.greater_equal",
"numpy.asanyarray",
"numpy.diff",
"numpy.less_equal",
"numpy.column_stack",
"numpy.nonzero",
"numpy.ascontiguousarray",
"numpy.median",
"numpy.append",
"numpy.equal",
"numpy.floor",
"numpy.argsort",
"numpy.array",
"numpy.logical_and",
"numpy.abs",
"numpy.shape",
"numpy.bincount"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
abcxs/maskrcnn-benchmark | [
"05b10d12aa68eba525a31d0fe1159eded7aef457"
] | [
"maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport torch\nfrom torch.nn import functional as F\n\nfrom maskrcnn_benchmark.layers import smooth_l1_loss\nfrom maskrcnn_benchmark.modeling.box_coder import BoxCoder\nfrom maskrcnn_benchmark.modeling.matcher import Matcher\nfrom maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou\nfrom maskrcnn_benchmark.modeling.balanced_positive_negative_sampler import (\n BalancedPositiveNegativeSampler\n)\nfrom maskrcnn_benchmark.modeling.utils import cat\n\n\nclass FastRCNNLossComputation(object):\n \"\"\"\n Computes the loss for Faster R-CNN.\n Also supports FPN\n \"\"\"\n\n def __init__(\n self,\n proposal_matcher,\n fg_bg_sampler,\n box_coder,\n cls_agnostic_bbox_reg=False\n ):\n \"\"\"\n Arguments:\n proposal_matcher (Matcher)\n fg_bg_sampler (BalancedPositiveNegativeSampler)\n box_coder (BoxCoder)\n \"\"\"\n self.proposal_matcher = proposal_matcher\n self.fg_bg_sampler = fg_bg_sampler\n self.box_coder = box_coder\n self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg\n\n def match_targets_to_proposals(self, proposal, target):\n match_quality_matrix = boxlist_iou(target, proposal)\n matched_idxs = self.proposal_matcher(match_quality_matrix)\n # Fast RCNN only need \"labels\" field for selecting the targets\n target = target.copy_with_fields(\"labels\")\n # get the targets corresponding GT for each proposal\n # NB: need to clamp the indices because we can have a single\n # GT in the image, and matched_idxs can be -2, which goes\n # out of bounds\n if len(target):\n matched_targets = target[matched_idxs.clamp(min=0)]\n else:\n device = target.get_field('labels').device\n dtype = target.get_field('labels').dtype\n labels = torch.zeros_like(matched_idxs, dtype=dtype, device=device)\n matched_targets = target\n matched_targets.add_field('labels', labels)\n\n matched_targets.add_field(\"matched_idxs\", matched_idxs)\n return matched_targets\n\n def prepare_targets(self, proposals, targets):\n labels = []\n regression_targets = []\n for proposals_per_image, targets_per_image in zip(proposals, targets):\n matched_targets = self.match_targets_to_proposals(\n proposals_per_image, targets_per_image\n )\n matched_idxs = matched_targets.get_field(\"matched_idxs\")\n\n labels_per_image = matched_targets.get_field(\"labels\")\n labels_per_image = labels_per_image.to(dtype=torch.int64)\n\n # Label background (below the low threshold)\n bg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD\n labels_per_image[bg_inds] = 0\n\n # Label ignore proposals (between low and high thresholds)\n ignore_inds = matched_idxs == Matcher.BETWEEN_THRESHOLDS\n labels_per_image[ignore_inds] = -1 # -1 is ignored by sampler\n\n # compute regression targets\n if not matched_targets.bbox.shape[0]:\n zeros = torch.zeros_like(labels_per_image, dtype=torch.float32)\n regression_targets_per_image = torch.stack((zeros, zeros, zeros, zeros), dim=1)\n else:\n regression_targets_per_image = self.box_coder.encode(\n matched_targets.bbox, proposals_per_image.bbox\n )\n\n labels.append(labels_per_image)\n regression_targets.append(regression_targets_per_image)\n\n return labels, regression_targets\n\n def subsample(self, proposals, targets):\n \"\"\"\n This method performs the positive/negative sampling, and return\n the sampled proposals.\n Note: this function keeps a state.\n\n Arguments:\n proposals (list[BoxList])\n targets (list[BoxList])\n \"\"\"\n\n labels, regression_targets = self.prepare_targets(proposals, targets)\n sampled_pos_inds, sampled_neg_inds = 
self.fg_bg_sampler(labels)\n\n proposals = list(proposals)\n # add corresponding label and regression_targets information to the bounding boxes\n for labels_per_image, regression_targets_per_image, proposals_per_image in zip(\n labels, regression_targets, proposals\n ):\n proposals_per_image.add_field(\"labels\", labels_per_image)\n proposals_per_image.add_field(\n \"regression_targets\", regression_targets_per_image\n )\n\n # distributed sampled proposals, that were obtained on all feature maps\n # concatenated via the fg_bg_sampler, into individual feature map levels\n for img_idx, (pos_inds_img, neg_inds_img) in enumerate(\n zip(sampled_pos_inds, sampled_neg_inds)\n ):\n img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)\n proposals_per_image = proposals[img_idx][img_sampled_inds]\n proposals[img_idx] = proposals_per_image\n\n self._proposals = proposals\n return proposals\n\n def __call__(self, class_logits, box_regression):\n \"\"\"\n Computes the loss for Faster R-CNN.\n This requires that the subsample method has been called beforehand.\n\n Arguments:\n class_logits (list[Tensor])\n box_regression (list[Tensor])\n\n Returns:\n classification_loss (Tensor)\n box_loss (Tensor)\n \"\"\"\n\n class_logits = cat(class_logits, dim=0)\n box_regression = cat(box_regression, dim=0)\n device = class_logits.device\n\n if not hasattr(self, \"_proposals\"):\n raise RuntimeError(\"subsample needs to be called before\")\n\n proposals = self._proposals\n\n labels = cat([proposal.get_field(\"labels\") for proposal in proposals], dim=0)\n regression_targets = cat(\n [proposal.get_field(\"regression_targets\") for proposal in proposals], dim=0\n )\n\n classification_loss = F.cross_entropy(class_logits, labels)\n\n # get indices that correspond to the regression targets for\n # the corresponding ground truth labels, to be used with\n # advanced indexing\n sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)\n labels_pos = labels[sampled_pos_inds_subset]\n if self.cls_agnostic_bbox_reg:\n map_inds = torch.tensor([4, 5, 6, 7], device=device)\n else:\n map_inds = 4 * labels_pos[:, None] + torch.tensor(\n [0, 1, 2, 3], device=device)\n\n box_loss = smooth_l1_loss(\n box_regression[sampled_pos_inds_subset[:, None], map_inds],\n regression_targets[sampled_pos_inds_subset],\n size_average=False,\n beta=1,\n )\n box_loss = box_loss / labels.numel()\n\n return classification_loss, box_loss\n\n\ndef make_roi_box_loss_evaluator(cfg):\n matcher = Matcher(\n cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,\n cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,\n allow_low_quality_matches=False,\n )\n\n bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS\n box_coder = BoxCoder(weights=bbox_reg_weights)\n\n fg_bg_sampler = BalancedPositiveNegativeSampler(\n cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION\n )\n\n cls_agnostic_bbox_reg = cfg.MODEL.CLS_AGNOSTIC_BBOX_REG\n\n loss_evaluator = FastRCNNLossComputation(\n matcher,\n fg_bg_sampler,\n box_coder,\n cls_agnostic_bbox_reg\n )\n\n return loss_evaluator\n"
] | [
[
"torch.nn.functional.cross_entropy",
"torch.zeros_like",
"torch.tensor",
"torch.nonzero",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
konstantgr/genetic-acoustic | [
"d5ebdeba03f390a247bfbd249b2997cdc6793942"
] | [
"examples/Comsol2dAcoustics/Comsol2dAcoustics.py"
] | [
"from hpctool import ComsolModel, Task, Solver\nimport numpy as np\nfrom utils import grid, pretty_print_individual, get_multipoles_from_res\nimport os\nimport sys\nfrom loguru import logger\nfrom typing import List, Dict\nfrom scipy.optimize import differential_evolution\nimport logging\n\n\nclass SquaresModel(ComsolModel):\n def __init__(self):\n super().__init__()\n self.geometry = self / 'geometries' / 'Geometry 1'\n self.config = {\n \"n\": n,\n \"x_limits\": (-0.03, 0.03),\n \"y_limits\": (-0.03, 0.03),\n }\n\n def configure(self):\n self.geometry.java.autoRebuild('off')\n self.parameter('max_freq', '1000[Hz]')\n self.parameter('min_freq', '100[Hz]')\n self.parameter('step', '100[Hz]')\n\n def pre_build(self, x, *args, **kwargs):\n indices = np.nonzero(x)\n node_selections = []\n\n xgrid, ygrid = grid(**self.config)\n tau = abs(xgrid[1] - xgrid[0])\n width = tau\n\n idx = 0\n for x_i in xgrid:\n for y_j in ygrid:\n name = f\"circle_xi_{x_i}, yj_{y_j}\"\n\n if idx in list(indices[0]):\n node, node_sel = self.add_square(name, x_i, y_j, self.geometry, width)\n node_selections.append(node_sel)\n else:\n node_selections.append(None)\n idx += 1\n\n (self/'selections'/'plastic').property(\n 'input', list(np.array(node_selections)[indices])\n )\n\n def results(self, x, *args, **kwargs):\n evaluation = self / 'evaluations' / 'Global Evaluation 1'\n dataset = (self / 'datasets').children()[0]\n return self.global_evaluation(dataset, evaluation)\n\n def pre_clear(self, x, save=False, *args, **kwargs):\n if save:\n self.save(save_path)\n self.plot2d('acpr.p_s', image_path)\n self.clean_geometry(self.geometry, 'circle')\n\n\nn = 3\ndirname = os.path.dirname(__file__)\nfile_path = os.path.join(dirname, 'empty_project.mph')\nsave_path = os.path.join(dirname, 'empty_project1.mph')\nimage_path = os.path.join(dirname, 'image.png')\n\n\ndef transform_to_binary_list(x):\n return [int(x_i > 0.5) for x_i in x]\n\n\ndef fitness(x: List, info: Dict, solver: Solver):\n x = transform_to_binary_list(x)\n\n data = solver.solve([Task(x=x, tag=str(x))])\n data = data[0]\n\n Q_multipoles = get_multipoles_from_res(data, c=343, R=0.18)\n res = -np.real(np.max(Q_multipoles[2]))\n\n individual_string = \"\".join(np.array(x).astype(str))\n\n if res < info['best']:\n info['best'] = res\n message = f\"iteration {info['iteration']} | individual {individual_string} | result {round(res, 4)}\"\n logger.log(\"best\", message)\n\n print('=' * 30)\n print('({}). 
{:.4f} [BEST: {:.4f}]'.format(\n info['iteration'], res,\n info['best']))\n print(pretty_print_individual(x))\n print('=' * 30)\n\n logger.info(\n f\"[BEST {round(info['best'], 4)}]\\titeration {info['iteration']}\\tindividual {individual_string}\\tresult {round(res, 4)}\")\n message = f\"iteration {info['iteration']} | individual {individual_string} | result {round(res, 4)}\"\n logger.log(\"individuals\", message)\n\n info['iteration'] += 1\n\n return res\n\n\ndef main(solver: Solver):\n fmt = \"{time} | {level} |\\t{message}\"\n individuals_level = logger.level(\"individuals\", no=38)\n bests_level = logger.level(\"best\", no=38, color=\"<green>\")\n logger.remove()\n logger.add(sys.stdout, level='INFO', format=fmt, enqueue=True)\n logger.add('logs/logs_{time}.log', level='INFO', format=fmt)\n logger.add('logs/individuals_{time}.log', format=fmt, level='individuals')\n\n # Solver logging\n _l = logging.getLogger('gendev')\n _l.setLevel(logging.DEBUG)\n _l.addHandler(logging.StreamHandler(sys.stdout))\n\n bounds = [(0, 1) for _ in range(n ** 2)]\n print('SciPy Differential Evolution started...')\n result = differential_evolution(\n fitness, bounds,\n args=({'iteration': 0, 'best': np.Inf}, solver, ),\n maxiter=0, popsize=1, seed=2\n )\n x = transform_to_binary_list(result.x)\n\n # Best individual\n solver.solve([Task(x=x, save=True, tag=str(x))])\n print(f'Project saved successfully, best result: {result.fun}')\n"
] | [
[
"numpy.max",
"numpy.array",
"scipy.optimize.differential_evolution",
"numpy.nonzero"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
keshav11/s-index | [
"9e8b123c32bbb1fc3fa92d9bca46f364d8057d54"
] | [
"code/flatten_authors_j_indx.py"
] | [
"import numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nimport math\n\nranked_papers = pd.read_pickle('dblp_final_paper_rankings.pickle')\n\nkeep_cols = ['authors', 'year', 'normalized_citation_score_scaled','SJR_Normalized','reach_normalized','id', 'j-index', 'normalized_citation_score']\ncols = ['year', 'normalized_citation_score_scaled','SJR_Normalized','reach_normalized','id', 'j-index', 'normalized_citation_score']\npapers = ranked_papers[keep_cols].set_index(cols)\n\nchunk_sz = 300000\npapers_chunks = [papers[i : i + chunk_sz] for i in range(0, papers.shape[0], chunk_sz)]\n\nfor chunk in enumerate(papers_chunks):\n chunk_flattened_auths_df = chunk[1].authors.apply(pd.Series).stack().reset_index(level=2, drop=True).to_frame('authors').reset_index()\n chunk_flattened_auths_df.to_pickle('flattened_chunks_more_fields/ranked_authors'+ str(chunk[0]) + '.pickle')\n del chunk_flattened_auths_df\n\n"
] | [
[
"matplotlib.use",
"pandas.read_pickle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
veneres/gef | [
"07912d01040ca0169977ddd49839050c81ec2349"
] | [
"examples/real-world-data/census/feat_selection.py"
] | [
"import numpy as np\nfrom tqdm import tqdm\nfrom math import comb\nfrom gamexplainer import GamExplainer\nimport lightgbm as lgbm\nimport json\n\n\ndef main():\n with open(\"config.json\") as f:\n config_dict = json.load(f)\n model_path = config_dict[\"model_path\"]\n feat_sel_out = config_dict[\"feat_sel_out\"]\n\n forest = lgbm.Booster(model_file=model_path)\n range_n_splines = range(1, 11)\n range_n_inter = range(0, 9)\n explanation_params = {\"verbose\": False,\n \"sample_method\": \"all\",\n \"classification\": True,\n \"inter_max_distance\": 256}\n\n acc = np.zeros((len(range_n_splines), len(range_n_inter)))\n for i, n_splines in enumerate(range_n_splines):\n explanation_params[\"n_spline_terms\"] = n_splines\n for j, n_inter in enumerate(range_n_inter):\n if n_inter > comb(n_splines, 2):\n continue\n explanation_params[\"n_inter_terms\"] = n_inter\n explainer = GamExplainer(**explanation_params)\n _ = explainer.explain(forest, lam_search_space=[0.1, 1])\n print(f\"Fit {n_splines=}, {n_inter=} completed\")\n acc[i, j] = explainer.loss_res\n\n np.save(feat_sel_out, acc)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gancho-ivanov/pandas | [
"3cbbb96f0514f872bbf154f1ed418c7aa7300e9e",
"3cbbb96f0514f872bbf154f1ed418c7aa7300e9e"
] | [
"pandas/core/indexes/interval.py",
"pandas/tests/arrays/masked/test_arithmetic.py"
] | [
"\"\"\" define the IntervalIndex \"\"\"\nfrom __future__ import annotations\n\nfrom functools import wraps\nfrom operator import (\n le,\n lt,\n)\nimport textwrap\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Hashable,\n cast,\n)\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import lib\nfrom pandas._libs.interval import (\n Interval,\n IntervalMixin,\n IntervalTree,\n)\nfrom pandas._libs.tslibs import (\n BaseOffset,\n Timedelta,\n Timestamp,\n to_offset,\n)\nfrom pandas._typing import (\n Dtype,\n DtypeObj,\n)\nfrom pandas.errors import InvalidIndexError\nfrom pandas.util._decorators import (\n Appender,\n cache_readonly,\n)\nfrom pandas.util._exceptions import rewrite_exception\n\nfrom pandas.core.dtypes.cast import (\n find_common_type,\n infer_dtype_from_scalar,\n maybe_box_datetimelike,\n maybe_downcast_numeric,\n)\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_categorical_dtype,\n is_datetime64tz_dtype,\n is_datetime_or_timedelta_dtype,\n is_dtype_equal,\n is_float,\n is_float_dtype,\n is_integer,\n is_integer_dtype,\n is_interval_dtype,\n is_list_like,\n is_number,\n is_object_dtype,\n is_scalar,\n)\nfrom pandas.core.dtypes.dtypes import IntervalDtype\n\nfrom pandas.core.algorithms import (\n take_nd,\n unique,\n)\nfrom pandas.core.array_algos.putmask import validate_putmask\nfrom pandas.core.arrays.interval import (\n IntervalArray,\n _interval_shared_docs,\n)\nimport pandas.core.common as com\nfrom pandas.core.indexers import is_valid_positional_slice\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexes.base import (\n Index,\n _index_shared_docs,\n default_pprint,\n ensure_index,\n maybe_extract_name,\n)\nfrom pandas.core.indexes.datetimes import (\n DatetimeIndex,\n date_range,\n)\nfrom pandas.core.indexes.extension import (\n ExtensionIndex,\n inherit_names,\n)\nfrom pandas.core.indexes.multi import MultiIndex\nfrom pandas.core.indexes.timedeltas import (\n TimedeltaIndex,\n timedelta_range,\n)\nfrom pandas.core.ops import get_op_result_name\n\nif TYPE_CHECKING:\n from pandas import CategoricalIndex\n\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n\n_index_doc_kwargs.update(\n {\n \"klass\": \"IntervalIndex\",\n \"qualname\": \"IntervalIndex\",\n \"target_klass\": \"IntervalIndex or list of Intervals\",\n \"name\": textwrap.dedent(\n \"\"\"\\\n name : object, optional\n Name to be stored in the index.\n \"\"\"\n ),\n }\n)\n\n\ndef _get_next_label(label):\n dtype = getattr(label, \"dtype\", type(label))\n if isinstance(label, (Timestamp, Timedelta)):\n dtype = \"datetime64\"\n if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):\n return label + np.timedelta64(1, \"ns\")\n elif is_integer_dtype(dtype):\n return label + 1\n elif is_float_dtype(dtype):\n return np.nextafter(label, np.infty)\n else:\n raise TypeError(f\"cannot determine next label for type {repr(type(label))}\")\n\n\ndef _get_prev_label(label):\n dtype = getattr(label, \"dtype\", type(label))\n if isinstance(label, (Timestamp, Timedelta)):\n dtype = \"datetime64\"\n if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):\n return label - np.timedelta64(1, \"ns\")\n elif is_integer_dtype(dtype):\n return label - 1\n elif is_float_dtype(dtype):\n return np.nextafter(label, -np.infty)\n else:\n raise TypeError(f\"cannot determine next label for type {repr(type(label))}\")\n\n\ndef _new_IntervalIndex(cls, d):\n \"\"\"\n This is called upon unpickling, rather than the default which doesn't have\n 
arguments and breaks __new__.\n \"\"\"\n return cls.from_arrays(**d)\n\n\ndef setop_check(method):\n \"\"\"\n This is called to decorate the set operations of IntervalIndex\n to perform the type check in advance.\n \"\"\"\n op_name = method.__name__\n\n @wraps(method)\n def wrapped(self, other, sort=False):\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other, result_name = self._convert_can_do_setop(other)\n\n if op_name == \"difference\":\n if not isinstance(other, IntervalIndex):\n result = getattr(self.astype(object), op_name)(other, sort=sort)\n return result.astype(self.dtype)\n\n elif not self._should_compare(other):\n # GH#19016: ensure set op will not return a prohibited dtype\n result = getattr(self.astype(object), op_name)(other, sort=sort)\n return result.astype(self.dtype)\n\n return method(self, other, sort)\n\n return wrapped\n\n\ndef _setop(op_name: str):\n \"\"\"\n Implement set operation.\n \"\"\"\n\n def func(self, other, sort=None):\n # At this point we are assured\n # isinstance(other, IntervalIndex)\n # other.closed == self.closed\n\n result = getattr(self._multiindex, op_name)(other._multiindex, sort=sort)\n result_name = get_op_result_name(self, other)\n\n # GH 19101: ensure empty results have correct dtype\n if result.empty:\n result = result._values.astype(self.dtype.subtype)\n else:\n result = result._values\n\n return type(self).from_tuples(result, closed=self.closed, name=result_name)\n\n func.__name__ = op_name\n return setop_check(func)\n\n\n@Appender(\n _interval_shared_docs[\"class\"]\n % {\n \"klass\": \"IntervalIndex\",\n \"summary\": \"Immutable index of intervals that are closed on the same side.\",\n \"name\": _index_doc_kwargs[\"name\"],\n \"versionadded\": \"0.20.0\",\n \"extra_attributes\": \"is_overlapping\\nvalues\\n\",\n \"extra_methods\": \"\",\n \"examples\": textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n A new ``IntervalIndex`` is typically constructed using\n :func:`interval_range`:\n\n >>> pd.interval_range(start=0, end=5)\n IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],\n dtype='interval[int64, right]')\n\n It may also be constructed using one of the constructor\n methods: :meth:`IntervalIndex.from_arrays`,\n :meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.\n\n See further examples in the doc strings of ``interval_range`` and the\n mentioned constructor methods.\n \"\"\"\n ),\n }\n)\n@inherit_names([\"set_closed\", \"to_tuples\"], IntervalArray, wrap=True)\n@inherit_names(\n [\n \"__array__\",\n \"overlaps\",\n \"contains\",\n \"closed_left\",\n \"closed_right\",\n \"open_left\",\n \"open_right\",\n \"is_empty\",\n ],\n IntervalArray,\n)\n@inherit_names([\"is_non_overlapping_monotonic\", \"closed\"], IntervalArray, cache=True)\nclass IntervalIndex(ExtensionIndex):\n _typ = \"intervalindex\"\n _comparables = [\"name\"]\n _attributes = [\"name\", \"closed\"]\n\n # annotate properties pinned via inherit_names\n closed: str\n is_non_overlapping_monotonic: bool\n closed_left: bool\n closed_right: bool\n\n # we would like our indexing holder to defer to us\n _defer_to_indexing = True\n\n _data: IntervalArray\n _values: IntervalArray\n _can_hold_strings = False\n _data_cls = IntervalArray\n\n # --------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls,\n data,\n closed=None,\n dtype: Dtype | None = None,\n copy: bool = False,\n name: Hashable = None,\n verify_integrity: bool = True,\n ) -> IntervalIndex:\n\n name = 
maybe_extract_name(name, data, cls)\n\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray(\n data,\n closed=closed,\n copy=copy,\n dtype=dtype,\n verify_integrity=verify_integrity,\n )\n\n return cls._simple_new(array, name)\n\n @classmethod\n @Appender(\n _interval_shared_docs[\"from_breaks\"]\n % {\n \"klass\": \"IntervalIndex\",\n \"examples\": textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3])\n IntervalIndex([(0, 1], (1, 2], (2, 3]],\n dtype='interval[int64, right]')\n \"\"\"\n ),\n }\n )\n def from_breaks(\n cls,\n breaks,\n closed: str = \"right\",\n name: Hashable = None,\n copy: bool = False,\n dtype: Dtype | None = None,\n ) -> IntervalIndex:\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray.from_breaks(\n breaks, closed=closed, copy=copy, dtype=dtype\n )\n return cls._simple_new(array, name=name)\n\n @classmethod\n @Appender(\n _interval_shared_docs[\"from_arrays\"]\n % {\n \"klass\": \"IntervalIndex\",\n \"examples\": textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])\n IntervalIndex([(0, 1], (1, 2], (2, 3]],\n dtype='interval[int64, right]')\n \"\"\"\n ),\n }\n )\n def from_arrays(\n cls,\n left,\n right,\n closed: str = \"right\",\n name: Hashable = None,\n copy: bool = False,\n dtype: Dtype | None = None,\n ) -> IntervalIndex:\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray.from_arrays(\n left, right, closed, copy=copy, dtype=dtype\n )\n return cls._simple_new(array, name=name)\n\n @classmethod\n @Appender(\n _interval_shared_docs[\"from_tuples\"]\n % {\n \"klass\": \"IntervalIndex\",\n \"examples\": textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])\n IntervalIndex([(0, 1], (1, 2]],\n dtype='interval[int64, right]')\n \"\"\"\n ),\n }\n )\n def from_tuples(\n cls,\n data,\n closed: str = \"right\",\n name: Hashable = None,\n copy: bool = False,\n dtype: Dtype | None = None,\n ) -> IntervalIndex:\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)\n return cls._simple_new(arr, name=name)\n\n # --------------------------------------------------------------------\n\n @cache_readonly\n def _engine(self):\n left = self._maybe_convert_i8(self.left)\n right = self._maybe_convert_i8(self.right)\n return IntervalTree(left, right, closed=self.closed)\n\n def __contains__(self, key: Any) -> bool:\n \"\"\"\n return a boolean if this key is IN the index\n We *only* accept an Interval\n\n Parameters\n ----------\n key : Interval\n\n Returns\n -------\n bool\n \"\"\"\n hash(key)\n if not isinstance(key, Interval):\n return False\n\n try:\n self.get_loc(key)\n return True\n except KeyError:\n return False\n\n @cache_readonly\n def _multiindex(self) -> MultiIndex:\n return MultiIndex.from_arrays([self.left, self.right], names=[\"left\", \"right\"])\n\n def __array_wrap__(self, result, context=None):\n # we don't want the superclass implementation\n return result\n\n def __reduce__(self):\n d = {\"left\": self.left, \"right\": self.right}\n d.update(self._get_attributes_dict())\n return _new_IntervalIndex, (type(self), d), None\n\n @Appender(Index.astype.__doc__)\n def astype(self, dtype, copy: bool = True):\n with rewrite_exception(\"IntervalArray\", type(self).__name__):\n new_values = self._values.astype(dtype, copy=copy)\n return Index(new_values, 
dtype=new_values.dtype, name=self.name)\n\n @property\n def inferred_type(self) -> str:\n \"\"\"Return a string of the type inferred from the values\"\"\"\n return \"interval\"\n\n @Appender(Index.memory_usage.__doc__)\n def memory_usage(self, deep: bool = False) -> int:\n # we don't use an explicit engine\n # so return the bytes here\n return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep)\n\n # IntervalTree doesn't have a is_monotonic_decreasing, so have to override\n # the Index implementation\n @cache_readonly\n def is_monotonic_decreasing(self) -> bool:\n \"\"\"\n Return True if the IntervalIndex is monotonic decreasing (only equal or\n decreasing values), else False\n \"\"\"\n return self[::-1].is_monotonic_increasing\n\n @cache_readonly\n def is_unique(self) -> bool:\n \"\"\"\n Return True if the IntervalIndex contains unique elements, else False.\n \"\"\"\n left = self.left\n right = self.right\n\n if self.isna().sum() > 1:\n return False\n\n if left.is_unique or right.is_unique:\n return True\n\n seen_pairs = set()\n check_idx = np.where(left.duplicated(keep=False))[0]\n for idx in check_idx:\n pair = (left[idx], right[idx])\n if pair in seen_pairs:\n return False\n seen_pairs.add(pair)\n\n return True\n\n @property\n def is_overlapping(self) -> bool:\n \"\"\"\n Return True if the IntervalIndex has overlapping intervals, else False.\n\n Two intervals overlap if they share a common point, including closed\n endpoints. Intervals that only have an open endpoint in common do not\n overlap.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n bool\n Boolean indicating if the IntervalIndex has overlapping intervals.\n\n See Also\n --------\n Interval.overlaps : Check whether two Interval objects overlap.\n IntervalIndex.overlaps : Check an IntervalIndex elementwise for\n overlaps.\n\n Examples\n --------\n >>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)])\n >>> index\n IntervalIndex([(0, 2], (1, 3], (4, 5]],\n dtype='interval[int64, right]')\n >>> index.is_overlapping\n True\n\n Intervals that share closed endpoints overlap:\n\n >>> index = pd.interval_range(0, 3, closed='both')\n >>> index\n IntervalIndex([[0, 1], [1, 2], [2, 3]],\n dtype='interval[int64, both]')\n >>> index.is_overlapping\n True\n\n Intervals that only have an open endpoint in common do not overlap:\n\n >>> index = pd.interval_range(0, 3, closed='left')\n >>> index\n IntervalIndex([[0, 1), [1, 2), [2, 3)],\n dtype='interval[int64, left]')\n >>> index.is_overlapping\n False\n \"\"\"\n # GH 23309\n return self._engine.is_overlapping\n\n def _needs_i8_conversion(self, key) -> bool:\n \"\"\"\n Check if a given key needs i8 conversion. Conversion is necessary for\n Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An\n Interval-like requires conversion if its endpoints are one of the\n aforementioned types.\n\n Assumes that any list-like data has already been cast to an Index.\n\n Parameters\n ----------\n key : scalar or Index-like\n The key that should be checked for i8 conversion\n\n Returns\n -------\n bool\n \"\"\"\n if is_interval_dtype(key) or isinstance(key, Interval):\n return self._needs_i8_conversion(key.left)\n\n i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)\n return isinstance(key, i8_types)\n\n def _maybe_convert_i8(self, key):\n \"\"\"\n Maybe convert a given key to its equivalent i8 value(s). 
Used as a\n preprocessing step prior to IntervalTree queries (self._engine), which\n expects numeric data.\n\n Parameters\n ----------\n key : scalar or list-like\n The key that should maybe be converted to i8.\n\n Returns\n -------\n scalar or list-like\n The original key if no conversion occurred, int if converted scalar,\n Int64Index if converted list-like.\n \"\"\"\n original = key\n if is_list_like(key):\n key = ensure_index(key)\n\n if not self._needs_i8_conversion(key):\n return original\n\n scalar = is_scalar(key)\n if is_interval_dtype(key) or isinstance(key, Interval):\n # convert left/right and reconstruct\n left = self._maybe_convert_i8(key.left)\n right = self._maybe_convert_i8(key.right)\n constructor = Interval if scalar else IntervalIndex.from_arrays\n return constructor(left, right, closed=self.closed)\n\n if scalar:\n # Timestamp/Timedelta\n key_dtype, key_i8 = infer_dtype_from_scalar(key, pandas_dtype=True)\n if lib.is_period(key):\n key_i8 = key.ordinal\n elif isinstance(key_i8, Timestamp):\n key_i8 = key_i8.value\n elif isinstance(key_i8, (np.datetime64, np.timedelta64)):\n key_i8 = key_i8.view(\"i8\")\n else:\n # DatetimeIndex/TimedeltaIndex\n key_dtype, key_i8 = key.dtype, Index(key.asi8)\n if key.hasnans:\n # convert NaT from its i8 value to np.nan so it's not viewed\n # as a valid value, maybe causing errors (e.g. is_overlapping)\n key_i8 = key_i8.where(~key._isnan)\n\n # ensure consistency with IntervalIndex subtype\n subtype = self.dtype.subtype\n\n if not is_dtype_equal(subtype, key_dtype):\n raise ValueError(\n f\"Cannot index an IntervalIndex of subtype {subtype} with \"\n f\"values of dtype {key_dtype}\"\n )\n\n return key_i8\n\n def _searchsorted_monotonic(self, label, side: str = \"left\"):\n if not self.is_non_overlapping_monotonic:\n raise KeyError(\n \"can only get slices from an IntervalIndex if bounds are \"\n \"non-overlapping and all monotonic increasing or decreasing\"\n )\n\n if isinstance(label, (IntervalMixin, IntervalIndex)):\n raise NotImplementedError(\"Interval objects are not currently supported\")\n\n # GH 20921: \"not is_monotonic_increasing\" for the second condition\n # instead of \"is_monotonic_decreasing\" to account for single element\n # indexes being both increasing and decreasing\n if (side == \"left\" and self.left.is_monotonic_increasing) or (\n side == \"right\" and not self.left.is_monotonic_increasing\n ):\n sub_idx = self.right\n if self.open_right:\n label = _get_next_label(label)\n else:\n sub_idx = self.left\n if self.open_left:\n label = _get_prev_label(label)\n\n return sub_idx._searchsorted_monotonic(label, side)\n\n # --------------------------------------------------------------------\n # Indexing Methods\n\n def get_loc(\n self, key, method: str | None = None, tolerance=None\n ) -> int | slice | np.ndarray:\n \"\"\"\n Get integer location, slice or boolean mask for requested label.\n\n Parameters\n ----------\n key : label\n method : {None}, optional\n * default: matches where the label is within an interval only.\n\n Returns\n -------\n int if unique index, slice if monotonic index, else mask\n\n Examples\n --------\n >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)\n >>> index = pd.IntervalIndex([i1, i2])\n >>> index.get_loc(1)\n 0\n\n You can also supply a point inside an interval.\n\n >>> index.get_loc(1.5)\n 1\n\n If a label is in several intervals, you get the locations of all the\n relevant intervals.\n\n >>> i3 = pd.Interval(0, 2)\n >>> overlapping_index = pd.IntervalIndex([i1, i2, i3])\n >>> 
overlapping_index.get_loc(0.5)\n array([ True, False, True])\n\n Only exact matches will be returned if an interval is provided.\n\n >>> index.get_loc(pd.Interval(0, 1))\n 0\n \"\"\"\n self._check_indexing_method(method)\n\n if not is_scalar(key):\n raise InvalidIndexError(key)\n\n if isinstance(key, Interval):\n if self.closed != key.closed:\n raise KeyError(key)\n mask = (self.left == key.left) & (self.right == key.right)\n else:\n # assume scalar\n op_left = le if self.closed_left else lt\n op_right = le if self.closed_right else lt\n try:\n mask = op_left(self.left, key) & op_right(key, self.right)\n except TypeError as err:\n # scalar is not comparable to II subtype --> invalid label\n raise KeyError(key) from err\n\n matches = mask.sum()\n if matches == 0:\n raise KeyError(key)\n elif matches == 1:\n return mask.argmax()\n return lib.maybe_booleans_to_slice(mask.view(\"u1\"))\n\n def _get_indexer(\n self,\n target: Index,\n method: str | None = None,\n limit: int | None = None,\n tolerance: Any | None = None,\n ) -> np.ndarray:\n\n if isinstance(target, IntervalIndex):\n # equal indexes -> 1:1 positional match\n if self.equals(target):\n return np.arange(len(self), dtype=\"intp\")\n\n if not self._should_compare(target):\n return self._get_indexer_non_comparable(target, method, unique=True)\n\n # non-overlapping -> at most one match per interval in target\n # want exact matches -> need both left/right to match, so defer to\n # left/right get_indexer, compare elementwise, equality -> match\n left_indexer = self.left.get_indexer(target.left)\n right_indexer = self.right.get_indexer(target.right)\n indexer = np.where(left_indexer == right_indexer, left_indexer, -1)\n elif is_categorical_dtype(target.dtype):\n target = cast(\"CategoricalIndex\", target)\n # get an indexer for unique categories then propagate to codes via take_nd\n categories_indexer = self.get_indexer(target.categories)\n indexer = take_nd(categories_indexer, target.codes, fill_value=-1)\n elif not is_object_dtype(target):\n # homogeneous scalar index: use IntervalTree\n target = self._maybe_convert_i8(target)\n indexer = self._engine.get_indexer(target.values)\n else:\n # heterogeneous scalar index: defer elementwise to get_loc\n return self._get_indexer_pointwise(target)[0]\n\n return ensure_platform_int(indexer)\n\n @Appender(_index_shared_docs[\"get_indexer_non_unique\"] % _index_doc_kwargs)\n def get_indexer_non_unique(self, target: Index) -> tuple[np.ndarray, np.ndarray]:\n target = ensure_index(target)\n\n if isinstance(target, IntervalIndex) and not self._should_compare(target):\n # different closed or incompatible subtype -> no matches\n return self._get_indexer_non_comparable(target, None, unique=False)\n\n elif is_object_dtype(target.dtype) or isinstance(target, IntervalIndex):\n # target might contain intervals: defer elementwise to get_loc\n return self._get_indexer_pointwise(target)\n\n else:\n # Note: this case behaves differently from other Index subclasses\n # because IntervalIndex does partial-int indexing\n target = self._maybe_convert_i8(target)\n indexer, missing = self._engine.get_indexer_non_unique(target.values)\n\n return ensure_platform_int(indexer), ensure_platform_int(missing)\n\n def _get_indexer_pointwise(self, target: Index) -> tuple[np.ndarray, np.ndarray]:\n \"\"\"\n pointwise implementation for get_indexer and get_indexer_non_unique.\n \"\"\"\n indexer, missing = [], []\n for i, key in enumerate(target):\n try:\n locs = self.get_loc(key)\n if isinstance(locs, slice):\n # Only needed for 
get_indexer_non_unique\n locs = np.arange(locs.start, locs.stop, locs.step, dtype=\"intp\")\n locs = np.array(locs, ndmin=1)\n except KeyError:\n missing.append(i)\n locs = np.array([-1])\n except InvalidIndexError as err:\n # i.e. non-scalar key\n raise TypeError(key) from err\n\n indexer.append(locs)\n\n indexer = np.concatenate(indexer)\n return ensure_platform_int(indexer), ensure_platform_int(missing)\n\n @property\n def _index_as_unique(self) -> bool:\n return not self.is_overlapping\n\n _requires_unique_msg = (\n \"cannot handle overlapping indices; use IntervalIndex.get_indexer_non_unique\"\n )\n\n def _convert_slice_indexer(self, key: slice, kind: str):\n if not (key.step is None or key.step == 1):\n # GH#31658 if label-based, we require step == 1,\n # if positional, we disallow float start/stop\n msg = \"label-based slicing with step!=1 is not supported for IntervalIndex\"\n if kind == \"loc\":\n raise ValueError(msg)\n elif kind == \"getitem\":\n if not is_valid_positional_slice(key):\n # i.e. this cannot be interpreted as a positional slice\n raise ValueError(msg)\n\n return super()._convert_slice_indexer(key, kind)\n\n def _should_fallback_to_positional(self) -> bool:\n # integer lookups in Series.__getitem__ are unambiguously\n # positional in this case\n return self.dtype.subtype.kind in [\"m\", \"M\"]\n\n def _maybe_cast_slice_bound(self, label, side: str, kind):\n return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)\n\n @Appender(Index._convert_list_indexer.__doc__)\n def _convert_list_indexer(self, keyarr):\n \"\"\"\n we are passed a list-like indexer. Return the\n indexer for matching intervals.\n \"\"\"\n locs = self.get_indexer_for(keyarr)\n\n # we have missing values\n if (locs == -1).any():\n raise KeyError(keyarr[locs == -1].tolist())\n\n return locs\n\n def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n if not isinstance(dtype, IntervalDtype):\n return False\n common_subtype = find_common_type([self.dtype, dtype])\n return not is_object_dtype(common_subtype)\n\n # --------------------------------------------------------------------\n\n @cache_readonly\n def left(self) -> Index:\n return Index(self._data.left, copy=False)\n\n @cache_readonly\n def right(self) -> Index:\n return Index(self._data.right, copy=False)\n\n @cache_readonly\n def mid(self) -> Index:\n return Index(self._data.mid, copy=False)\n\n @property\n def length(self) -> Index:\n return Index(self._data.length, copy=False)\n\n def putmask(self, mask, value) -> Index:\n mask, noop = validate_putmask(self._data, mask)\n if noop:\n return self.copy()\n\n try:\n self._validate_fill_value(value)\n except (ValueError, TypeError):\n dtype = self._find_common_type_compat(value)\n return self.astype(dtype).putmask(mask, value)\n\n arr = self._data.copy()\n arr.putmask(mask, value)\n return type(self)._simple_new(arr, name=self.name)\n\n def insert(self, loc: int, item):\n \"\"\"\n Return a new IntervalIndex inserting new item at location. Follows\n Python list.append semantics for negative values. 
Only Interval\n objects and NA can be inserted into an IntervalIndex\n\n Parameters\n ----------\n loc : int\n item : object\n\n Returns\n -------\n IntervalIndex\n \"\"\"\n try:\n result = self._data.insert(loc, item)\n except (ValueError, TypeError):\n # e.g trying to insert a string\n dtype, _ = infer_dtype_from_scalar(item, pandas_dtype=True)\n dtype = find_common_type([self.dtype, dtype])\n return self.astype(dtype).insert(loc, item)\n\n return type(self)._simple_new(result, name=self.name)\n\n # --------------------------------------------------------------------\n # Rendering Methods\n # __repr__ associated methods are based on MultiIndex\n\n def _format_with_header(self, header: list[str], na_rep: str = \"NaN\") -> list[str]:\n return header + list(self._format_native_types(na_rep=na_rep))\n\n def _format_native_types(self, na_rep=\"NaN\", quoting=None, **kwargs):\n # GH 28210: use base method but with different default na_rep\n return super()._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs)\n\n def _format_data(self, name=None) -> str:\n\n # TODO: integrate with categorical and make generic\n # name argument is unused here; just for compat with base / categorical\n n = len(self)\n max_seq_items = min((get_option(\"display.max_seq_items\") or n) // 10, 10)\n\n formatter = str\n\n if n == 0:\n summary = \"[]\"\n elif n == 1:\n first = formatter(self[0])\n summary = f\"[{first}]\"\n elif n == 2:\n first = formatter(self[0])\n last = formatter(self[-1])\n summary = f\"[{first}, {last}]\"\n else:\n\n if n > max_seq_items:\n n = min(max_seq_items // 2, 10)\n head = [formatter(x) for x in self[:n]]\n tail = [formatter(x) for x in self[-n:]]\n head_joined = \", \".join(head)\n tail_joined = \", \".join(tail)\n summary = f\"[{head_joined} ... 
{tail_joined}]\"\n else:\n tail = [formatter(x) for x in self]\n joined = \", \".join(tail)\n summary = f\"[{joined}]\"\n\n return summary + \",\" + self._format_space()\n\n def _format_attrs(self):\n attrs = []\n if self.name is not None:\n attrs.append((\"name\", default_pprint(self.name)))\n attrs.append((\"dtype\", f\"'{self.dtype}'\"))\n return attrs\n\n def _format_space(self) -> str:\n space = \" \" * (len(type(self).__name__) + 1)\n return f\"\\n{space}\"\n\n # --------------------------------------------------------------------\n # Set Operations\n\n def _intersection(self, other, sort):\n \"\"\"\n intersection specialized to the case with matching dtypes.\n \"\"\"\n # For IntervalIndex we also know other.closed == self.closed\n if self.left.is_unique and self.right.is_unique:\n taken = self._intersection_unique(other)\n elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1:\n # Swap other/self if other is unique and self does not have\n # multiple NaNs\n taken = other._intersection_unique(self)\n else:\n # duplicates\n taken = self._intersection_non_unique(other)\n\n if sort is None:\n taken = taken.sort_values()\n\n return taken\n\n def _intersection_unique(self, other: IntervalIndex) -> IntervalIndex:\n \"\"\"\n Used when the IntervalIndex does not have any common endpoint,\n no matter left or right.\n Return the intersection with another IntervalIndex.\n\n Parameters\n ----------\n other : IntervalIndex\n\n Returns\n -------\n IntervalIndex\n \"\"\"\n lindexer = self.left.get_indexer(other.left)\n rindexer = self.right.get_indexer(other.right)\n\n match = (lindexer == rindexer) & (lindexer != -1)\n indexer = lindexer.take(match.nonzero()[0])\n indexer = unique(indexer)\n\n return self.take(indexer)\n\n def _intersection_non_unique(self, other: IntervalIndex) -> IntervalIndex:\n \"\"\"\n Used when the IntervalIndex does have some common endpoints,\n on either sides.\n Return the intersection with another IntervalIndex.\n\n Parameters\n ----------\n other : IntervalIndex\n\n Returns\n -------\n IntervalIndex\n \"\"\"\n mask = np.zeros(len(self), dtype=bool)\n\n if self.hasnans and other.hasnans:\n first_nan_loc = np.arange(len(self))[self.isna()][0]\n mask[first_nan_loc] = True\n\n other_tups = set(zip(other.left, other.right))\n for i, tup in enumerate(zip(self.left, self.right)):\n if tup in other_tups:\n mask[i] = True\n\n return self[mask]\n\n _union = _setop(\"union\")\n _difference = _setop(\"difference\")\n\n # --------------------------------------------------------------------\n\n @property\n def _is_all_dates(self) -> bool:\n \"\"\"\n This is False even when left/right contain datetime-like objects,\n as the check is done on the Interval itself\n \"\"\"\n return False\n\n # TODO: arithmetic operations\n\n\ndef _is_valid_endpoint(endpoint) -> bool:\n \"\"\"\n Helper for interval_range to check if start/end are valid types.\n \"\"\"\n return any(\n [\n is_number(endpoint),\n isinstance(endpoint, Timestamp),\n isinstance(endpoint, Timedelta),\n endpoint is None,\n ]\n )\n\n\ndef _is_type_compatible(a, b) -> bool:\n \"\"\"\n Helper for interval_range to check type compat of start/end/freq.\n \"\"\"\n is_ts_compat = lambda x: isinstance(x, (Timestamp, BaseOffset))\n is_td_compat = lambda x: isinstance(x, (Timedelta, BaseOffset))\n return (\n (is_number(a) and is_number(b))\n or (is_ts_compat(a) and is_ts_compat(b))\n or (is_td_compat(a) and is_td_compat(b))\n or com.any_none(a, b)\n )\n\n\ndef interval_range(\n start=None, end=None, periods=None, 
freq=None, name: Hashable = None, closed=\"right\"\n) -> IntervalIndex:\n \"\"\"\n Return a fixed frequency IntervalIndex.\n\n Parameters\n ----------\n start : numeric or datetime-like, default None\n Left bound for generating intervals.\n end : numeric or datetime-like, default None\n Right bound for generating intervals.\n periods : int, default None\n Number of periods to generate.\n freq : numeric, str, or DateOffset, default None\n The length of each interval. Must be consistent with the type of start\n and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1\n for numeric and 'D' for datetime-like.\n name : str, default None\n Name of the resulting IntervalIndex.\n closed : {'left', 'right', 'both', 'neither'}, default 'right'\n Whether the intervals are closed on the left-side, right-side, both\n or neither.\n\n Returns\n -------\n IntervalIndex\n\n See Also\n --------\n IntervalIndex : An Index of intervals that are all closed on the same side.\n\n Notes\n -----\n Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,\n exactly three must be specified. If ``freq`` is omitted, the resulting\n ``IntervalIndex`` will have ``periods`` linearly spaced elements between\n ``start`` and ``end``, inclusively.\n\n To learn more about datetime-like frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n Numeric ``start`` and ``end`` is supported.\n\n >>> pd.interval_range(start=0, end=5)\n IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],\n dtype='interval[int64, right]')\n\n Additionally, datetime-like input is also supported.\n\n >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),\n ... end=pd.Timestamp('2017-01-04'))\n IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],\n (2017-01-03, 2017-01-04]],\n dtype='interval[datetime64[ns], right]')\n\n The ``freq`` parameter specifies the frequency between the left and right.\n endpoints of the individual intervals within the ``IntervalIndex``. For\n numeric ``start`` and ``end``, the frequency must also be numeric.\n\n >>> pd.interval_range(start=0, periods=4, freq=1.5)\n IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],\n dtype='interval[float64, right]')\n\n Similarly, for datetime-like ``start`` and ``end``, the frequency must be\n convertible to a DateOffset.\n\n >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),\n ... 
periods=3, freq='MS')\n IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],\n (2017-03-01, 2017-04-01]],\n dtype='interval[datetime64[ns], right]')\n\n Specify ``start``, ``end``, and ``periods``; the frequency is generated\n automatically (linearly spaced).\n\n >>> pd.interval_range(start=0, end=6, periods=4)\n IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],\n dtype='interval[float64, right]')\n\n The ``closed`` parameter specifies which endpoints of the individual\n intervals within the ``IntervalIndex`` are closed.\n\n >>> pd.interval_range(end=5, periods=4, closed='both')\n IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],\n dtype='interval[int64, both]')\n \"\"\"\n start = maybe_box_datetimelike(start)\n end = maybe_box_datetimelike(end)\n endpoint = start if start is not None else end\n\n if freq is None and com.any_none(periods, start, end):\n freq = 1 if is_number(endpoint) else \"D\"\n\n if com.count_not_none(start, end, periods, freq) != 3:\n raise ValueError(\n \"Of the four parameters: start, end, periods, and \"\n \"freq, exactly three must be specified\"\n )\n\n if not _is_valid_endpoint(start):\n raise ValueError(f\"start must be numeric or datetime-like, got {start}\")\n elif not _is_valid_endpoint(end):\n raise ValueError(f\"end must be numeric or datetime-like, got {end}\")\n\n if is_float(periods):\n periods = int(periods)\n elif not is_integer(periods) and periods is not None:\n raise TypeError(f\"periods must be a number, got {periods}\")\n\n if freq is not None and not is_number(freq):\n try:\n freq = to_offset(freq)\n except ValueError as err:\n raise ValueError(\n f\"freq must be numeric or convertible to DateOffset, got {freq}\"\n ) from err\n\n # verify type compatibility\n if not all(\n [\n _is_type_compatible(start, end),\n _is_type_compatible(start, freq),\n _is_type_compatible(end, freq),\n ]\n ):\n raise TypeError(\"start, end, freq need to be type compatible\")\n\n # +1 to convert interval count to breaks count (n breaks = n-1 intervals)\n if periods is not None:\n periods += 1\n\n if is_number(endpoint):\n # force consistency between start/end/freq (lower end if freq skips it)\n if com.all_not_none(start, end, freq):\n end -= (end - start) % freq\n\n # compute the period/start/end if unspecified (at most one)\n if periods is None:\n periods = int((end - start) // freq) + 1\n elif start is None:\n start = end - (periods - 1) * freq\n elif end is None:\n end = start + (periods - 1) * freq\n\n breaks = np.linspace(start, end, periods)\n if all(is_integer(x) for x in com.not_none(start, end, freq)):\n # np.linspace always produces float output\n\n # error: Incompatible types in assignment (expression has type\n # \"Union[ExtensionArray, ndarray]\", variable has type \"ndarray\")\n breaks = maybe_downcast_numeric( # type: ignore[assignment]\n breaks, np.dtype(\"int64\")\n )\n else:\n # delegate to the appropriate range function\n if isinstance(endpoint, Timestamp):\n # error: Incompatible types in assignment (expression has type\n # \"DatetimeIndex\", variable has type \"ndarray\")\n breaks = date_range( # type: ignore[assignment]\n start=start, end=end, periods=periods, freq=freq\n )\n else:\n # error: Incompatible types in assignment (expression has type\n # \"TimedeltaIndex\", variable has type \"ndarray\")\n breaks = timedelta_range( # type: ignore[assignment]\n start=start, end=end, periods=periods, freq=freq\n )\n\n return IntervalIndex.from_breaks(breaks, name=name, closed=closed)\n",
"from typing import (\n Any,\n List,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import is_numpy_dev\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.core.arrays import ExtensionArray\n\n# integer dtypes\narrays = [pd.array([1, 2, 3, None], dtype=dtype) for dtype in tm.ALL_EA_INT_DTYPES]\nscalars: List[Any] = [2] * len(arrays)\n# floating dtypes\narrays += [pd.array([0.1, 0.2, 0.3, None], dtype=dtype) for dtype in tm.FLOAT_EA_DTYPES]\nscalars += [0.2, 0.2]\n# boolean\narrays += [pd.array([True, False, True, None], dtype=\"boolean\")]\nscalars += [False]\n\n\[email protected](params=zip(arrays, scalars), ids=[a.dtype.name for a in arrays])\ndef data(request):\n return request.param\n\n\ndef check_skip(data, op_name):\n if isinstance(data.dtype, pd.BooleanDtype) and \"sub\" in op_name:\n pytest.skip(\"subtract not implemented for boolean\")\n\n\n# Test equivalence of scalars, numpy arrays with array ops\n# -----------------------------------------------------------------------------\n\n\ndef test_array_scalar_like_equivalence(data, all_arithmetic_operators):\n data, scalar = data\n op = tm.get_op_from_name(all_arithmetic_operators)\n check_skip(data, all_arithmetic_operators)\n\n scalar_array = pd.array([scalar] * len(data), dtype=data.dtype)\n\n # TODO also add len-1 array (np.array([scalar], dtype=data.dtype.numpy_dtype))\n for scalar in [scalar, data.dtype.type(scalar)]:\n result = op(data, scalar)\n expected = op(data, scalar_array)\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_array_NA(data, all_arithmetic_operators):\n if \"truediv\" in all_arithmetic_operators:\n pytest.skip(\"division with pd.NA raises\")\n if \"floordiv\" in all_arithmetic_operators and is_numpy_dev:\n pytest.skip(\"NumpyDev behavior GH#40874\")\n data, _ = data\n op = tm.get_op_from_name(all_arithmetic_operators)\n check_skip(data, all_arithmetic_operators)\n\n scalar = pd.NA\n scalar_array = pd.array([pd.NA] * len(data), dtype=data.dtype)\n\n result = op(data, scalar)\n expected = op(data, scalar_array)\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_numpy_array_equivalence(data, all_arithmetic_operators):\n data, scalar = data\n op = tm.get_op_from_name(all_arithmetic_operators)\n check_skip(data, all_arithmetic_operators)\n\n numpy_array = np.array([scalar] * len(data), dtype=data.dtype.numpy_dtype)\n pd_array = pd.array(numpy_array, dtype=data.dtype)\n\n result = op(data, numpy_array)\n expected = op(data, pd_array)\n if isinstance(expected, ExtensionArray):\n tm.assert_extension_array_equal(result, expected)\n else:\n # TODO div still gives float ndarray -> remove this once we have Float EA\n tm.assert_numpy_array_equal(result, expected)\n\n\n# Test equivalence with Series and DataFrame ops\n# -----------------------------------------------------------------------------\n\n\ndef test_frame(data, all_arithmetic_operators):\n data, scalar = data\n op = tm.get_op_from_name(all_arithmetic_operators)\n check_skip(data, all_arithmetic_operators)\n\n # DataFrame with scalar\n df = pd.DataFrame({\"A\": data})\n\n result = op(df, scalar)\n expected = pd.DataFrame({\"A\": op(data, scalar)})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_series(data, all_arithmetic_operators):\n data, scalar = data\n op = tm.get_op_from_name(all_arithmetic_operators)\n check_skip(data, all_arithmetic_operators)\n\n s = pd.Series(data)\n\n # Series with scalar\n result = op(s, scalar)\n expected = pd.Series(op(data, scalar))\n tm.assert_series_equal(result, 
expected)\n\n # Series with np.ndarray\n other = np.array([scalar] * len(data), dtype=data.dtype.numpy_dtype)\n result = op(s, other)\n expected = pd.Series(op(data, other))\n tm.assert_series_equal(result, expected)\n\n # Series with pd.array\n other = pd.array([scalar] * len(data), dtype=data.dtype)\n result = op(s, other)\n expected = pd.Series(op(data, other))\n tm.assert_series_equal(result, expected)\n\n # Series with Series\n other = pd.Series([scalar] * len(data), dtype=data.dtype)\n result = op(s, other)\n expected = pd.Series(op(data, other.array))\n tm.assert_series_equal(result, expected)\n\n\n# Test generic characteristics / errors\n# -----------------------------------------------------------------------------\n\n\ndef test_error_invalid_object(data, all_arithmetic_operators):\n data, _ = data\n\n op = all_arithmetic_operators\n opa = getattr(data, op)\n\n # 2d -> return NotImplemented\n result = opa(pd.DataFrame({\"A\": data}))\n assert result is NotImplemented\n\n msg = r\"can only perform ops with 1-d structures\"\n with pytest.raises(NotImplementedError, match=msg):\n opa(np.arange(len(data)).reshape(-1, len(data)))\n\n\ndef test_error_len_mismatch(data, all_arithmetic_operators):\n # operating with a list-like with non-matching length raises\n data, scalar = data\n op = tm.get_op_from_name(all_arithmetic_operators)\n\n other = [scalar] * (len(data) - 1)\n\n for other in [other, np.array(other)]:\n with pytest.raises(ValueError, match=\"Lengths must match\"):\n op(data, other)\n\n s = pd.Series(data)\n with pytest.raises(ValueError, match=\"Lengths must match\"):\n op(s, other)\n\n\[email protected](\"op\", [\"__neg__\", \"__abs__\", \"__invert__\"])\ndef test_unary_op_does_not_propagate_mask(data, op, request):\n # https://github.com/pandas-dev/pandas/issues/39943\n data, _ = data\n if data.dtype in [\"Float32\", \"Float64\"] and op == \"__invert__\":\n request.node.add_marker(\n pytest.mark.xfail(reason=\"invert is not implemented for float ea dtypes\")\n )\n s = pd.Series(data)\n result = getattr(s, op)()\n expected = result.copy(deep=True)\n s[0] = None\n tm.assert_series_equal(result, expected)\n"
] | [
[
"numpy.linspace",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas.core.dtypes.cast.maybe_box_datetimelike",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas._libs.interval.IntervalTree",
"pandas.core.indexes.base.Index",
"numpy.concatenate",
"numpy.dtype",
"pandas.core.indexes.datetimes.date_range",
"pandas._config.get_option",
"pandas.core.indexes.timedeltas.timedelta_range",
"pandas.core.array_algos.putmask.validate_putmask",
"numpy.where",
"numpy.nextafter",
"pandas.core.indexes.extension.inherit_names",
"pandas._libs.tslibs.to_offset",
"pandas.core.dtypes.common.is_interval_dtype",
"pandas.core.algorithms.unique",
"pandas.core.common.all_not_none",
"numpy.arange",
"pandas.core.common.not_none",
"pandas.core.common.any_none",
"pandas.core.indexes.base.maybe_extract_name",
"pandas.core.dtypes.common.is_number",
"pandas.core.ops.get_op_result_name",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_float",
"pandas.core.arrays.interval.IntervalArray",
"pandas.core.algorithms.take_nd",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.indexes.base.default_pprint",
"pandas.core.indexers.is_valid_positional_slice",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.dtypes.common.is_list_like",
"pandas.util._decorators.Appender",
"pandas.errors.InvalidIndexError",
"pandas.core.dtypes.cast.infer_dtype_from_scalar",
"pandas.core.indexes.base.ensure_index",
"numpy.timedelta64",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.core.arrays.interval.IntervalArray.from_tuples",
"numpy.array",
"pandas.core.arrays.interval.IntervalArray.from_arrays",
"pandas.core.common.count_not_none",
"pandas._libs.lib.is_period",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.common.is_integer",
"pandas.core.dtypes.cast.find_common_type",
"pandas.core.indexes.multi.MultiIndex.from_arrays",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.dtypes.common.is_datetime_or_timedelta_dtype",
"pandas.util._exceptions.rewrite_exception",
"pandas.core.arrays.interval.IntervalArray.from_breaks"
],
[
"numpy.array",
"pandas._testing.assert_numpy_array_equal",
"pandas.Series",
"pandas.array",
"pandas.DataFrame",
"pandas._testing.assert_extension_array_equal",
"pandas._testing.get_op_from_name",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_frame_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
data4help/shiny-bassoon | [
"a240f4b5ec3ad8642e206b582266dc79125eba58",
"a240f4b5ec3ad8642e206b582266dc79125eba58"
] | [
"src/utils/callbacks.py",
"src/tasks/ml/train_clf_multilabel.py"
] | [
"# %% Packages\n\nimport os\nimport tensorflow as tf\nfrom typing import Tuple\nfrom tensorflow.keras.callbacks import TensorBoard, EarlyStopping\n\n# %% Callbacks\n\n\n# Loading callbacks\ndef load_callbacks(\n name: str, monitor: str, patience: int\n) -> Tuple[TensorBoard, EarlyStopping]:\n tensorboard = load_tensorboard(name=name)\n earlystopping = load_earlystopping(monitor=monitor, patience=patience)\n return (tensorboard, earlystopping)\n\n\n# Tensorboard\ndef load_tensorboard(name: str) -> TensorBoard:\n log_dir = os.path.join(\"logs\", \"tensorboard\", name)\n return tf.keras.callbacks.TensorBoard(log_dir=log_dir)\n\n\n# Early stopping\ndef load_earlystopping(monitor: str, patience: int) -> EarlyStopping:\n return tf.keras.callbacks.EarlyStopping(monitor=monitor, patience=patience)\n",
"# %% Packages\n\nimport numpy as np\nimport pandas as pd\nfrom typing import Tuple\nfrom pyhocon import ConfigTree\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom tensorflow.python.keras.preprocessing.image import DataFrameIterator\n\nfrom src.base_classes.task import MLTask\nfrom src.utils.logging import get_logger\n\n# %% Logger\n\nlogger = get_logger()\n\n\n# %% Code\n\n\nclass TaskClassificationMultiLabel(MLTask):\n\n name = \"task_train_classification_multilabel\"\n dependencies = [\"task_preprocess_images\"]\n\n def __init__(self, config: ConfigTree) -> None:\n super().__init__(config, self.name)\n\n def run(self):\n\n logger.info(\"Create image loader\")\n train_gen, val_gen, test_gen = self.create_image_loader()\n self.plot_augmentation_examples(train_gen=train_gen, val_gen=val_gen)\n\n logger.info(\"Load the model\")\n pipeline = self.load_model()\n\n logger.info(\"Train the model\")\n pipeline.train(train_gen, val_gen)\n\n logger.info(\"Create predictions for test data\")\n y_pred, y_true = pipeline.predict(test_gen)\n adj_y_pred, adj_y_true = self.adjust_predictions(y_pred, y_true)\n\n logger.info(\"Evaluate the model's performance\")\n pipeline.evaluate(\n figure_path=self.figure_path, y_pred=adj_y_pred, y_true=adj_y_true\n )\n\n logger.info(\"Extract metrics and parameters\")\n metrics_dict, params_dict = pipeline.extract_params_and_metrics()\n\n logger.info(\"Save model and log results in MlFlow\")\n pipeline.log_and_save_model(\n figure_path=self.figure_path,\n metrics_dict=metrics_dict,\n params_dict=params_dict,\n model_path=self.model_path,\n )\n\n def adjust_predictions(\n self, y_pred: np.array, y_true: np.array\n ) -> Tuple[np.array, np.array]:\n \"\"\"This method decodes the predictions and their true counterpart and also\n brings them into a format which resembles also the initial process at the\n beginning.\n\n :param y_pred: Predictions of the model\n :type y_pred: np.array\n :param y_true: Truths of the testing data\n :type y_true: np.array\n :return: Two numpy arrays, containing the decoded truth and label\n :rtype: Tuple[np.array, np.array]\n \"\"\"\n\n # Decoding the true labels\n encoder = self.load_pickle(self.encoder_path)\n decoded_y_true = encoder.inverse_transform(y_true)\n org_y_true = np.array([\"_\".join(x) for x in decoded_y_true])\n\n # Decoding the predictions\n top_two_threshold = len(y_pred[0]) - 2\n two_max_predictions = (y_pred.argsort().argsort() >= top_two_threshold).astype(\n int\n )\n assert np.all(\n two_max_predictions.sum(axis=1) == 2\n ), \"There are more or less than two labels equal to one\"\n\n decoded_y_pred = encoder.inverse_transform(two_max_predictions)\n org_y_pred = np.array([\"_\".join(x) for x in decoded_y_pred])\n\n return (org_y_pred, org_y_true)\n\n def encode_label(self, data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"This method adjusts the dataframe to fit a multi-label case. For doing\n that we first encode the target into a list of tuples which then are fed\n into a multi-label binarizer. 
Afterwards\n\n :param data: Dataframe containing targets and image paths\n :type data: pd.DataFrame\n :return: The very same as the input\n :rtype: pd.DataFrame\n \"\"\"\n\n # Bring the target into the right format\n multi_label_target = np.array(\n [tuple(x.split(\"_\")) for x in data.loc[:, \"target\"]]\n )\n\n # Fit, transform and save binarizer\n mlb = MultiLabelBinarizer()\n mlb.fit(multi_label_target)\n self.save_pickle(saving_path=self.encoder_path, file=mlb)\n\n # Add the new label to the dataframe\n columns = list(mlb.classes_)\n ml_labels_list = [list(x) for x in mlb.transform(multi_label_target)]\n ml_label_df = pd.DataFrame(data=ml_labels_list, columns=columns)\n return pd.concat((ml_label_df, data), axis=1)\n\n def add_output_classes_to_parameters(\n self, data_generator: DataFrameIterator\n ) -> None:\n \"\"\"This method adds the number of output labels to the parameters dictionary.\n This is essential, since we need that information later on in the building of\n the transfer learning model.\n\n :param data_generator: The data generator which creates augmented versions of\n our images.\n :type data_generator: DataFrameIterator\n \"\"\"\n\n _, labels, _ = next(iter(data_generator))\n dict_update = {\"number_of_output_classes\": len(labels[0])}\n self.parameters.update(dict_update)\n"
] | [
[
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.keras.callbacks.EarlyStopping"
],
[
"pandas.concat",
"sklearn.preprocessing.MultiLabelBinarizer",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
adbull/pandas | [
"a6e43a43f2cb1b4b7d46b262be2efb825d033eb8"
] | [
"pandas/tests/io/parser/test_common.py"
] | [
"\"\"\"\nTests that work on both the Python and C engines but do not have a\nspecific classification into the other test modules.\n\"\"\"\n\nimport codecs\nfrom collections import OrderedDict\nimport csv\nfrom datetime import datetime\nfrom io import BytesIO, StringIO\nimport os\nimport platform\nfrom tempfile import TemporaryFile\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslib import Timestamp\nfrom pandas.compat import lrange\nfrom pandas.errors import DtypeWarning, EmptyDataError, ParserError\n\nfrom pandas import DataFrame, Index, MultiIndex, Series, compat, concat\nimport pandas.util.testing as tm\n\nfrom pandas.io.common import URLError\nfrom pandas.io.parsers import CParserWrapper, TextFileReader, TextParser\n\n\ndef test_override_set_noconvert_columns():\n # see gh-17351\n #\n # Usecols needs to be sorted in _set_noconvert_columns based\n # on the test_usecols_with_parse_dates test from test_usecols.py\n class MyTextFileReader(TextFileReader):\n def __init__(self):\n self._currow = 0\n self.squeeze = False\n\n class MyCParserWrapper(CParserWrapper):\n def _set_noconvert_columns(self):\n if self.usecols_dtype == \"integer\":\n # self.usecols is a set, which is documented as unordered\n # but in practice, a CPython set of integers is sorted.\n # In other implementations this assumption does not hold.\n # The following code simulates a different order, which\n # before GH 17351 would cause the wrong columns to be\n # converted via the parse_dates parameter\n self.usecols = list(self.usecols)\n self.usecols.reverse()\n return CParserWrapper._set_noconvert_columns(self)\n\n data = \"\"\"a,b,c,d,e\n0,1,20140101,0900,4\n0,1,20140102,1000,4\"\"\"\n\n parse_dates = [[1, 2]]\n cols = {\n \"a\": [0, 0],\n \"c_d\": [\n Timestamp(\"2014-01-01 09:00:00\"),\n Timestamp(\"2014-01-02 10:00:00\")\n ]\n }\n expected = DataFrame(cols, columns=[\"c_d\", \"a\"])\n\n parser = MyTextFileReader()\n parser.options = {\"usecols\": [0, 2, 3],\n \"parse_dates\": parse_dates,\n \"delimiter\": \",\"}\n parser._engine = MyCParserWrapper(StringIO(data), **parser.options)\n\n result = parser.read()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_bytes_io_input(all_parsers):\n encoding = \"cp1255\"\n parser = all_parsers\n\n data = BytesIO(\"שלום:1234\\n562:123\".encode(encoding))\n result = parser.read_csv(data, sep=\":\", encoding=encoding)\n\n expected = DataFrame([[562, 123]], columns=[\"שלום\", \"1234\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_empty_decimal_marker(all_parsers):\n data = \"\"\"A|B|C\n1|2,334|5\n10|13|10.\n\"\"\"\n # Parsers support only length-1 decimals\n msg = \"Only length-1 decimal markers supported\"\n parser = all_parsers\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), decimal=\"\")\n\n\ndef test_bad_stream_exception(all_parsers, csv_dir_path):\n # see gh-13652\n #\n # This test validates that both the Python engine and C engine will\n # raise UnicodeDecodeError instead of C engine raising ParserError\n # and swallowing the exception that caused read to fail.\n path = os.path.join(csv_dir_path, \"sauron.SHIFT_JIS.csv\")\n codec = codecs.lookup(\"utf-8\")\n utf8 = codecs.lookup('utf-8')\n parser = all_parsers\n msg = \"'utf-8' codec can't decode byte\"\n\n # Stream must be binary UTF8.\n with open(path, \"rb\") as handle, codecs.StreamRecoder(\n handle, utf8.encode, utf8.decode, codec.streamreader,\n codec.streamwriter) as stream:\n\n with pytest.raises(UnicodeDecodeError, match=msg):\n 
parser.read_csv(stream)\n\n\ndef test_read_csv_local(all_parsers, csv1):\n prefix = \"file:///\" if compat.is_platform_windows() else \"file://\"\n parser = all_parsers\n\n fname = prefix + str(os.path.abspath(csv1))\n result = parser.read_csv(fname, index_col=0, parse_dates=True)\n\n expected = DataFrame([[0.980269, 3.685731, -0.364216805298, -1.159738],\n [1.047916, -0.041232, -0.16181208307, 0.212549],\n [0.498581, 0.731168, -0.537677223318, 1.346270],\n [1.120202, 1.567621, 0.00364077397681, 0.675253],\n [-0.487094, 0.571455, -1.6116394093, 0.103469],\n [0.836649, 0.246462, 0.588542635376, 1.062782],\n [-0.157161, 1.340307, 1.1957779562, -1.097007]],\n columns=[\"A\", \"B\", \"C\", \"D\"],\n index=Index([datetime(2000, 1, 3),\n datetime(2000, 1, 4),\n datetime(2000, 1, 5),\n datetime(2000, 1, 6),\n datetime(2000, 1, 7),\n datetime(2000, 1, 10),\n datetime(2000, 1, 11)], name=\"index\"))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_1000_sep(all_parsers):\n parser = all_parsers\n data = \"\"\"A|B|C\n1|2,334|5\n10|13|10.\n\"\"\"\n expected = DataFrame({\n \"A\": [1, 10],\n \"B\": [2334, 13],\n \"C\": [5, 10.]\n })\n\n result = parser.read_csv(StringIO(data), sep=\"|\", thousands=\",\")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_squeeze(all_parsers):\n data = \"\"\"\\\na,1\nb,2\nc,3\n\"\"\"\n parser = all_parsers\n index = Index([\"a\", \"b\", \"c\"], name=0)\n expected = Series([1, 2, 3], name=1, index=index)\n\n result = parser.read_csv(StringIO(data), index_col=0,\n header=None, squeeze=True)\n tm.assert_series_equal(result, expected)\n\n # see gh-8217\n #\n # Series should not be a view.\n assert not result._is_view\n\n\ndef test_malformed(all_parsers):\n # see gh-6607\n parser = all_parsers\n data = \"\"\"ignore\nA,B,C\n1,2,3 # comment\n1,2,3,4,5\n2,3,4\n\"\"\"\n msg = \"Expected 3 fields in line 4, saw 5\"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), header=1, comment=\"#\")\n\n\[email protected](\"nrows\", [5, 3, None])\ndef test_malformed_chunks(all_parsers, nrows):\n data = \"\"\"ignore\nA,B,C\nskip\n1,2,3\n3,5,10 # comment\n1,2,3,4,5\n2,3,4\n\"\"\"\n parser = all_parsers\n msg = 'Expected 3 fields in line 6, saw 5'\n reader = parser.read_csv(StringIO(data), header=1, comment=\"#\",\n iterator=True, chunksize=1, skiprows=[2])\n\n with pytest.raises(ParserError, match=msg):\n reader.read(nrows)\n\n\ndef test_unnamed_columns(all_parsers):\n data = \"\"\"A,B,C,,\n1,2,3,4,5\n6,7,8,9,10\n11,12,13,14,15\n\"\"\"\n parser = all_parsers\n expected = DataFrame([[1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15]],\n dtype=np.int64, columns=[\"A\", \"B\", \"C\",\n \"Unnamed: 3\",\n \"Unnamed: 4\"])\n result = parser.read_csv(StringIO(data))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_csv_mixed_type(all_parsers):\n data = \"\"\"A,B,C\na,1,2\nb,3,4\nc,4,5\n\"\"\"\n parser = all_parsers\n expected = DataFrame({\"A\": [\"a\", \"b\", \"c\"],\n \"B\": [1, 3, 4],\n \"C\": [2, 4, 5]})\n result = parser.read_csv(StringIO(data))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_low_memory_no_rows_with_index(all_parsers):\n # see gh-21141\n parser = all_parsers\n\n if not parser.low_memory:\n pytest.skip(\"This is a low-memory specific test\")\n\n data = \"\"\"A,B,C\n1,1,1,2\n2,2,3,4\n3,3,4,5\n\"\"\"\n result = parser.read_csv(StringIO(data), low_memory=True,\n index_col=0, nrows=0)\n expected = DataFrame(columns=[\"A\", \"B\", \"C\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef 
test_read_csv_dataframe(all_parsers, csv1):\n parser = all_parsers\n result = parser.read_csv(csv1, index_col=0, parse_dates=True)\n\n expected = DataFrame([[0.980269, 3.685731, -0.364216805298, -1.159738],\n [1.047916, -0.041232, -0.16181208307, 0.212549],\n [0.498581, 0.731168, -0.537677223318, 1.346270],\n [1.120202, 1.567621, 0.00364077397681, 0.675253],\n [-0.487094, 0.571455, -1.6116394093, 0.103469],\n [0.836649, 0.246462, 0.588542635376, 1.062782],\n [-0.157161, 1.340307, 1.1957779562, -1.097007]],\n columns=[\"A\", \"B\", \"C\", \"D\"],\n index=Index([datetime(2000, 1, 3),\n datetime(2000, 1, 4),\n datetime(2000, 1, 5),\n datetime(2000, 1, 6),\n datetime(2000, 1, 7),\n datetime(2000, 1, 10),\n datetime(2000, 1, 11)], name=\"index\"))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_no_index_name(all_parsers, csv_dir_path):\n parser = all_parsers\n csv2 = os.path.join(csv_dir_path, \"test2.csv\")\n result = parser.read_csv(csv2, index_col=0, parse_dates=True)\n\n expected = DataFrame([[0.980269, 3.685731, -0.364216805298,\n -1.159738, \"foo\"],\n [1.047916, -0.041232, -0.16181208307,\n 0.212549, \"bar\"],\n [0.498581, 0.731168, -0.537677223318,\n 1.346270, \"baz\"],\n [1.120202, 1.567621, 0.00364077397681,\n 0.675253, \"qux\"],\n [-0.487094, 0.571455, -1.6116394093,\n 0.103469, \"foo2\"]],\n columns=[\"A\", \"B\", \"C\", \"D\", \"E\"],\n index=Index([datetime(2000, 1, 3),\n datetime(2000, 1, 4),\n datetime(2000, 1, 5),\n datetime(2000, 1, 6),\n datetime(2000, 1, 7)]))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_unicode(all_parsers):\n parser = all_parsers\n data = BytesIO(\"\\u0141aski, Jan;1\".encode(\"utf-8\"))\n\n result = parser.read_csv(data, sep=\";\", encoding=\"utf-8\", header=None)\n expected = DataFrame([[\"\\u0141aski, Jan\", 1]])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_wrong_num_columns(all_parsers):\n # Too few columns.\n data = \"\"\"A,B,C,D,E,F\n1,2,3,4,5,6\n6,7,8,9,10,11,12\n11,12,13,14,15,16\n\"\"\"\n parser = all_parsers\n msg = \"Expected 6 fields in line 3, saw 7\"\n\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data))\n\n\ndef test_read_duplicate_index_explicit(all_parsers):\n data = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo,12,13,14,15\nbar,12,13,14,15\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=0)\n\n expected = DataFrame([[2, 3, 4, 5], [7, 8, 9, 10],\n [12, 13, 14, 15], [12, 13, 14, 15],\n [12, 13, 14, 15], [12, 13, 14, 15]],\n columns=[\"A\", \"B\", \"C\", \"D\"],\n index=Index([\"foo\", \"bar\", \"baz\",\n \"qux\", \"foo\", \"bar\"], name=\"index\"))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_duplicate_index_implicit(all_parsers):\n data = \"\"\"A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo,12,13,14,15\nbar,12,13,14,15\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data))\n\n expected = DataFrame([[2, 3, 4, 5], [7, 8, 9, 10],\n [12, 13, 14, 15], [12, 13, 14, 15],\n [12, 13, 14, 15], [12, 13, 14, 15]],\n columns=[\"A\", \"B\", \"C\", \"D\"],\n index=Index([\"foo\", \"bar\", \"baz\",\n \"qux\", \"foo\", \"bar\"]))\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"data,kwargs,expected\", [\n (\"A,B\\nTrue,1\\nFalse,2\\nTrue,3\", dict(),\n DataFrame([[True, 1], [False, 2], [True, 3]], columns=[\"A\", \"B\"])),\n (\"A,B\\nYES,1\\nno,2\\nyes,3\\nNo,3\\nYes,3\",\n dict(true_values=[\"yes\", \"Yes\", \"YES\"],\n 
false_values=[\"no\", \"NO\", \"No\"]),\n DataFrame([[True, 1], [False, 2], [True, 3],\n [False, 3], [True, 3]], columns=[\"A\", \"B\"])),\n (\"A,B\\nTRUE,1\\nFALSE,2\\nTRUE,3\", dict(),\n DataFrame([[True, 1], [False, 2], [True, 3]], columns=[\"A\", \"B\"])),\n (\"A,B\\nfoo,bar\\nbar,foo\", dict(true_values=[\"foo\"],\n false_values=[\"bar\"]),\n DataFrame([[True, False], [False, True]], columns=[\"A\", \"B\"]))\n])\ndef test_parse_bool(all_parsers, data, kwargs, expected):\n parser = all_parsers\n result = parser.read_csv(StringIO(data), **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_int_conversion(all_parsers):\n data = \"\"\"A,B\n1.0,1\n2.0,2\n3.0,3\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data))\n\n expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=[\"A\", \"B\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"nrows\", [3, 3.0])\ndef test_read_nrows(all_parsers, nrows):\n # see gh-10476\n data = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n\"\"\"\n expected = DataFrame([[\"foo\", 2, 3, 4, 5],\n [\"bar\", 7, 8, 9, 10],\n [\"baz\", 12, 13, 14, 15]],\n columns=[\"index\", \"A\", \"B\", \"C\", \"D\"])\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data), nrows=nrows)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"nrows\", [1.2, \"foo\", -1])\ndef test_read_nrows_bad(all_parsers, nrows):\n data = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n\"\"\"\n msg = r\"'nrows' must be an integer >=0\"\n parser = all_parsers\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), nrows=nrows)\n\n\[email protected](\"index_col\", [0, \"index\"])\ndef test_read_chunksize_with_index(all_parsers, index_col):\n parser = all_parsers\n data = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n\"\"\"\n\n reader = parser.read_csv(StringIO(data), index_col=0, chunksize=2)\n expected = DataFrame([[\"foo\", 2, 3, 4, 5],\n [\"bar\", 7, 8, 9, 10],\n [\"baz\", 12, 13, 14, 15],\n [\"qux\", 12, 13, 14, 15],\n [\"foo2\", 12, 13, 14, 15],\n [\"bar2\", 12, 13, 14, 15]],\n columns=[\"index\", \"A\", \"B\", \"C\", \"D\"])\n expected = expected.set_index(\"index\")\n\n chunks = list(reader)\n tm.assert_frame_equal(chunks[0], expected[:2])\n tm.assert_frame_equal(chunks[1], expected[2:4])\n tm.assert_frame_equal(chunks[2], expected[4:])\n\n\[email protected](\"chunksize\", [1.3, \"foo\", 0])\ndef test_read_chunksize_bad(all_parsers, chunksize):\n data = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n\"\"\"\n parser = all_parsers\n msg = r\"'chunksize' must be an integer >=1\"\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), chunksize=chunksize)\n\n\[email protected](\"chunksize\", [2, 8])\ndef test_read_chunksize_and_nrows(all_parsers, chunksize):\n # see gh-15755\n data = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n\"\"\"\n parser = all_parsers\n kwargs = dict(index_col=0, nrows=5)\n\n reader = parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs)\n expected = parser.read_csv(StringIO(data), **kwargs)\n tm.assert_frame_equal(concat(reader), expected)\n\n\ndef 
test_read_chunksize_and_nrows_changing_size(all_parsers):\n data = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n\"\"\"\n parser = all_parsers\n kwargs = dict(index_col=0, nrows=5)\n\n reader = parser.read_csv(StringIO(data), chunksize=8, **kwargs)\n expected = parser.read_csv(StringIO(data), **kwargs)\n\n tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])\n tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])\n\n with pytest.raises(StopIteration, match=\"\"):\n reader.get_chunk(size=3)\n\n\ndef test_get_chunk_passed_chunksize(all_parsers):\n parser = all_parsers\n data = \"\"\"A,B,C\n1,2,3\n4,5,6\n7,8,9\n1,2,3\"\"\"\n\n reader = parser.read_csv(StringIO(data), chunksize=2)\n result = reader.get_chunk()\n\n expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[\"A\", \"B\", \"C\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"kwargs\", [dict(), dict(index_col=0)])\ndef test_read_chunksize_compat(all_parsers, kwargs):\n # see gh-12185\n data = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n\"\"\"\n parser = all_parsers\n reader = parser.read_csv(StringIO(data), chunksize=2, **kwargs)\n\n result = parser.read_csv(StringIO(data), **kwargs)\n tm.assert_frame_equal(concat(reader), result)\n\n\ndef test_read_chunksize_jagged_names(all_parsers):\n # see gh-23509\n parser = all_parsers\n data = \"\\n\".join([\"0\"] * 7 + [\",\".join([\"0\"] * 10)])\n\n expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])\n reader = parser.read_csv(StringIO(data), names=range(10), chunksize=4)\n\n result = concat(reader)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_data_list(all_parsers):\n parser = all_parsers\n kwargs = dict(index_col=0)\n data = \"A,B,C\\nfoo,1,2,3\\nbar,4,5,6\"\n\n data_list = [[\"A\", \"B\", \"C\"], [\"foo\", \"1\", \"2\", \"3\"],\n [\"bar\", \"4\", \"5\", \"6\"]]\n expected = parser.read_csv(StringIO(data), **kwargs)\n\n parser = TextParser(data_list, chunksize=2, **kwargs)\n result = parser.read()\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_iterator(all_parsers):\n # see gh-6607\n data = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n\"\"\"\n parser = all_parsers\n kwargs = dict(index_col=0)\n\n expected = parser.read_csv(StringIO(data), **kwargs)\n reader = parser.read_csv(StringIO(data), iterator=True, **kwargs)\n\n first_chunk = reader.read(3)\n tm.assert_frame_equal(first_chunk, expected[:3])\n\n last_chunk = reader.read(5)\n tm.assert_frame_equal(last_chunk, expected[3:])\n\n\ndef test_iterator2(all_parsers):\n parser = all_parsers\n data = \"\"\"A,B,C\nfoo,1,2,3\nbar,4,5,6\nbaz,7,8,9\n\"\"\"\n\n reader = parser.read_csv(StringIO(data), iterator=True)\n result = list(reader)\n\n expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n index=[\"foo\", \"bar\", \"baz\"],\n columns=[\"A\", \"B\", \"C\"])\n tm.assert_frame_equal(result[0], expected)\n\n\ndef test_reader_list(all_parsers):\n data = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n\"\"\"\n parser = all_parsers\n kwargs = dict(index_col=0)\n\n lines = list(csv.reader(StringIO(data)))\n reader = TextParser(lines, chunksize=2, **kwargs)\n\n expected = parser.read_csv(StringIO(data), **kwargs)\n chunks = list(reader)\n\n tm.assert_frame_equal(chunks[0], 
expected[:2])\n tm.assert_frame_equal(chunks[1], expected[2:4])\n tm.assert_frame_equal(chunks[2], expected[4:])\n\n\ndef test_reader_list_skiprows(all_parsers):\n data = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n\"\"\"\n parser = all_parsers\n kwargs = dict(index_col=0)\n\n lines = list(csv.reader(StringIO(data)))\n reader = TextParser(lines, chunksize=2, skiprows=[1], **kwargs)\n\n expected = parser.read_csv(StringIO(data), **kwargs)\n chunks = list(reader)\n\n tm.assert_frame_equal(chunks[0], expected[1:3])\n\n\ndef test_iterator_stop_on_chunksize(all_parsers):\n # gh-3967: stopping iteration when chunksize is specified\n parser = all_parsers\n data = \"\"\"A,B,C\nfoo,1,2,3\nbar,4,5,6\nbaz,7,8,9\n\"\"\"\n\n reader = parser.read_csv(StringIO(data), chunksize=1)\n result = list(reader)\n\n assert len(result) == 3\n expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n index=[\"foo\", \"bar\", \"baz\"],\n columns=[\"A\", \"B\", \"C\"])\n tm.assert_frame_equal(concat(result), expected)\n\n\[email protected](\"kwargs\", [\n dict(iterator=True,\n chunksize=1),\n dict(iterator=True),\n dict(chunksize=1)\n])\ndef test_iterator_skipfooter_errors(all_parsers, kwargs):\n msg = \"'skipfooter' not supported for 'iteration'\"\n parser = all_parsers\n data = \"a\\n1\\n2\"\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), skipfooter=1, **kwargs)\n\n\ndef test_nrows_skipfooter_errors(all_parsers):\n msg = \"'skipfooter' not supported with 'nrows'\"\n data = \"a\\n1\\n2\\n3\\n4\\n5\\n6\"\n parser = all_parsers\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), skipfooter=1, nrows=5)\n\n\[email protected](\"data,kwargs,expected\", [\n (\"\"\"foo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n\"\"\", dict(index_col=0, names=[\"index\", \"A\", \"B\", \"C\", \"D\"]),\n DataFrame([[2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15],\n [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15]],\n index=Index([\"foo\", \"bar\", \"baz\", \"qux\",\n \"foo2\", \"bar2\"], name=\"index\"),\n columns=[\"A\", \"B\", \"C\", \"D\"])),\n (\"\"\"foo,one,2,3,4,5\nfoo,two,7,8,9,10\nfoo,three,12,13,14,15\nbar,one,12,13,14,15\nbar,two,12,13,14,15\n\"\"\", dict(index_col=[0, 1], names=[\"index1\", \"index2\", \"A\", \"B\", \"C\", \"D\"]),\n DataFrame([[2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15],\n [12, 13, 14, 15], [12, 13, 14, 15]],\n index=MultiIndex.from_tuples([\n (\"foo\", \"one\"), (\"foo\", \"two\"), (\"foo\", \"three\"),\n (\"bar\", \"one\"), (\"bar\", \"two\")],\n names=[\"index1\", \"index2\"]),\n columns=[\"A\", \"B\", \"C\", \"D\"])),\n])\ndef test_pass_names_with_index(all_parsers, data, kwargs, expected):\n parser = all_parsers\n result = parser.read_csv(StringIO(data), **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"index_col\", [[0, 1], [1, 0]])\ndef test_multi_index_no_level_names(all_parsers, index_col):\n data = \"\"\"index1,index2,A,B,C,D\nfoo,one,2,3,4,5\nfoo,two,7,8,9,10\nfoo,three,12,13,14,15\nbar,one,12,13,14,15\nbar,two,12,13,14,15\n\"\"\"\n headless_data = '\\n'.join(data.split(\"\\n\")[1:])\n\n names = [\"A\", \"B\", \"C\", \"D\"]\n parser = all_parsers\n\n result = parser.read_csv(StringIO(headless_data),\n index_col=index_col,\n header=None, names=names)\n expected = parser.read_csv(StringIO(data), index_col=index_col)\n\n # No index names in headless data.\n expected.index.names = [None] * 2\n 
tm.assert_frame_equal(result, expected)\n\n\ndef test_multi_index_no_level_names_implicit(all_parsers):\n parser = all_parsers\n data = \"\"\"A,B,C,D\nfoo,one,2,3,4,5\nfoo,two,7,8,9,10\nfoo,three,12,13,14,15\nbar,one,12,13,14,15\nbar,two,12,13,14,15\n\"\"\"\n\n result = parser.read_csv(StringIO(data))\n expected = DataFrame([[2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15],\n [12, 13, 14, 15], [12, 13, 14, 15]],\n columns=[\"A\", \"B\", \"C\", \"D\"],\n index=MultiIndex.from_tuples([\n (\"foo\", \"one\"), (\"foo\", \"two\"), (\"foo\", \"three\"),\n (\"bar\", \"one\"), (\"bar\", \"two\")]))\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"data,expected,header\", [\n (\"a,b\", DataFrame(columns=[\"a\", \"b\"]), [0]),\n (\"a,b\\nc,d\", DataFrame(columns=MultiIndex.from_tuples(\n [(\"a\", \"c\"), (\"b\", \"d\")])), [0, 1]),\n])\[email protected](\"round_trip\", [True, False])\ndef test_multi_index_blank_df(all_parsers, data, expected, header, round_trip):\n # see gh-14545\n parser = all_parsers\n data = expected.to_csv(index=False) if round_trip else data\n\n result = parser.read_csv(StringIO(data), header=header)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_no_unnamed_index(all_parsers):\n parser = all_parsers\n data = \"\"\" id c0 c1 c2\n0 1 0 a b\n1 2 0 c d\n2 2 2 e f\n\"\"\"\n result = parser.read_csv(StringIO(data), sep=\" \")\n expected = DataFrame([[0, 1, 0, \"a\", \"b\"], [1, 2, 0, \"c\", \"d\"],\n [2, 2, 2, \"e\", \"f\"]], columns=[\"Unnamed: 0\", \"id\",\n \"c0\", \"c1\", \"c2\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_parse_simple_list(all_parsers):\n parser = all_parsers\n data = \"\"\"foo\nbar baz\nqux foo\nfoo\nbar\"\"\"\n\n result = parser.read_csv(StringIO(data), header=None)\n expected = DataFrame([\"foo\", \"bar baz\", \"qux foo\", \"foo\", \"bar\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_url(all_parsers, csv_dir_path):\n # TODO: FTP testing\n parser = all_parsers\n kwargs = dict(sep=\"\\t\")\n\n url = (\"https://raw.github.com/pandas-dev/pandas/master/\"\n \"pandas/tests/io/parser/data/salaries.csv\")\n url_result = parser.read_csv(url, **kwargs)\n\n local_path = os.path.join(csv_dir_path, \"salaries.csv\")\n local_result = parser.read_csv(local_path, **kwargs)\n tm.assert_frame_equal(url_result, local_result)\n\n\[email protected]\ndef test_local_file(all_parsers, csv_dir_path):\n parser = all_parsers\n kwargs = dict(sep=\"\\t\")\n\n local_path = os.path.join(csv_dir_path, \"salaries.csv\")\n local_result = parser.read_csv(local_path, **kwargs)\n url = \"file://localhost/\" + local_path\n\n try:\n url_result = parser.read_csv(url, **kwargs)\n tm.assert_frame_equal(url_result, local_result)\n except URLError:\n # Fails on some systems.\n pytest.skip(\"Failing on: \" + \" \".join(platform.uname()))\n\n\ndef test_path_path_lib(all_parsers):\n parser = all_parsers\n df = tm.makeDataFrame()\n result = tm.round_trip_pathlib(\n df.to_csv, lambda p: parser.read_csv(p, index_col=0))\n tm.assert_frame_equal(df, result)\n\n\ndef test_path_local_path(all_parsers):\n parser = all_parsers\n df = tm.makeDataFrame()\n result = tm.round_trip_localpath(\n df.to_csv, lambda p: parser.read_csv(p, index_col=0))\n tm.assert_frame_equal(df, result)\n\n\ndef test_nonexistent_path(all_parsers):\n # gh-2428: pls no segfault\n # gh-14086: raise more helpful FileNotFoundError\n parser = all_parsers\n path = \"%s.csv\" % tm.rands(10)\n\n msg = (\"does not exist\" if parser.engine == \"c\"\n else r\"\\[Errno 2\\]\")\n 
with pytest.raises(FileNotFoundError, match=msg) as e:\n parser.read_csv(path)\n\n filename = e.value.filename\n filename = filename.decode() if isinstance(\n filename, bytes) else filename\n\n assert path == filename\n\n\ndef test_missing_trailing_delimiters(all_parsers):\n parser = all_parsers\n data = \"\"\"A,B,C,D\n1,2,3,4\n1,3,3,\n1,4,5\"\"\"\n\n result = parser.read_csv(StringIO(data))\n expected = DataFrame([[1, 2, 3, 4], [1, 3, 3, np.nan],\n [1, 4, 5, np.nan]], columns=[\"A\", \"B\", \"C\", \"D\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_skip_initial_space(all_parsers):\n data = ('\"09-Apr-2012\", \"01:10:18.300\", 2456026.548822908, 12849, '\n '1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '\n '314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '\n '70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '\n '0.212036, 14.7674, 41.605, -9999.0, -9999.0, '\n '-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data), names=lrange(33), header=None,\n na_values=[\"-9999.0\"], skipinitialspace=True)\n expected = DataFrame([[\"09-Apr-2012\", \"01:10:18.300\", 2456026.548822908,\n 12849, 1.00361, 1.12551, 330.65659,\n 355626618.16711, 73.48821, 314.11625, 1917.09447,\n 179.71425, 80.0, 240.0, -350, 70.06056, 344.9837,\n 1, 1, -0.689265, -0.692787, 0.212036, 14.7674,\n 41.605, np.nan, np.nan, np.nan, np.nan, np.nan,\n np.nan, 0, 12, 128]])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"sep\", [\",\", \"\\t\"])\[email protected](\"encoding\", [\"utf-16\", \"utf-16le\", \"utf-16be\"])\ndef test_utf16_bom_skiprows(all_parsers, sep, encoding):\n # see gh-2298\n parser = all_parsers\n data = \"\"\"skip this\nskip this too\nA,B,C\n1,2,3\n4,5,6\"\"\".replace(\",\", sep)\n path = \"__%s__.csv\" % tm.rands(10)\n kwargs = dict(sep=sep, skiprows=2)\n utf8 = \"utf-8\"\n\n with tm.ensure_clean(path) as path:\n from io import TextIOWrapper\n bytes_data = data.encode(encoding)\n\n with open(path, \"wb\") as f:\n f.write(bytes_data)\n\n bytes_buffer = BytesIO(data.encode(utf8))\n bytes_buffer = TextIOWrapper(bytes_buffer, encoding=utf8)\n\n result = parser.read_csv(path, encoding=encoding, **kwargs)\n expected = parser.read_csv(bytes_buffer, encoding=utf8, **kwargs)\n\n bytes_buffer.close()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_utf16_example(all_parsers, csv_dir_path):\n path = os.path.join(csv_dir_path, \"utf16_ex.txt\")\n parser = all_parsers\n result = parser.read_csv(path, encoding=\"utf-16\", sep=\"\\t\")\n assert len(result) == 50\n\n\ndef test_unicode_encoding(all_parsers, csv_dir_path):\n path = os.path.join(csv_dir_path, \"unicode_series.csv\")\n parser = all_parsers\n\n result = parser.read_csv(path, header=None, encoding=\"latin-1\")\n result = result.set_index(0)\n got = result[1][1632]\n\n expected = '\\xc1 k\\xf6ldum klaka (Cold Fever) (1994)'\n assert got == expected\n\n\ndef test_trailing_delimiters(all_parsers):\n # see gh-2442\n data = \"\"\"A,B,C\n1,2,3,\n4,5,6,\n7,8,9,\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=False)\n\n expected = DataFrame({\"A\": [1, 4, 7], \"B\": [2, 5, 8], \"C\": [3, 6, 9]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_escapechar(all_parsers):\n # http://stackoverflow.com/questions/13824840/feature-request-for-\n # pandas-read-csv\n data = '''SEARCH_TERM,ACTUAL_URL\n\"bra tv 
bord\",\"http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord\"\n\"tv p\\xc3\\xa5 hjul\",\"http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord\"\n\"SLAGBORD, \\\\\"Bergslagen\\\\\", IKEA:s 1700-tals serie\",\"http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord\"''' # noqa\n\n parser = all_parsers\n result = parser.read_csv(StringIO(data), escapechar='\\\\',\n quotechar='\"', encoding='utf-8')\n\n assert result['SEARCH_TERM'][2] == ('SLAGBORD, \"Bergslagen\", '\n 'IKEA:s 1700-tals serie')\n tm.assert_index_equal(result.columns,\n Index(['SEARCH_TERM', 'ACTUAL_URL']))\n\n\ndef test_int64_min_issues(all_parsers):\n # see gh-2599\n parser = all_parsers\n data = \"A,B\\n0,0\\n0,\"\n result = parser.read_csv(StringIO(data))\n\n expected = DataFrame({\"A\": [0, 0], \"B\": [0, np.nan]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_integers_above_fp_precision(all_parsers):\n data = \"\"\"Numbers\n17007000002000191\n17007000002000191\n17007000002000191\n17007000002000191\n17007000002000192\n17007000002000192\n17007000002000192\n17007000002000192\n17007000002000192\n17007000002000194\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data))\n expected = DataFrame({\"Numbers\": [17007000002000191,\n 17007000002000191,\n 17007000002000191,\n 17007000002000191,\n 17007000002000192,\n 17007000002000192,\n 17007000002000192,\n 17007000002000192,\n 17007000002000192,\n 17007000002000194]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_chunks_have_consistent_numerical_type(all_parsers):\n parser = all_parsers\n integers = [str(i) for i in range(499999)]\n data = \"a\\n\" + \"\\n\".join(integers + [\"1.0\", \"2.0\"] + integers)\n\n # Coercions should work without warnings.\n with tm.assert_produces_warning(None):\n result = parser.read_csv(StringIO(data))\n\n assert type(result.a[0]) is np.float64\n assert result.a.dtype == np.float\n\n\ndef test_warn_if_chunks_have_mismatched_type(all_parsers):\n warning_type = None\n parser = all_parsers\n integers = [str(i) for i in range(499999)]\n data = \"a\\n\" + \"\\n\".join(integers + [\"a\", \"b\"] + integers)\n\n # see gh-3866: if chunks are different types and can't\n # be coerced using numerical types, then issue warning.\n if parser.engine == \"c\" and parser.low_memory:\n warning_type = DtypeWarning\n\n with tm.assert_produces_warning(warning_type):\n df = parser.read_csv(StringIO(data))\n assert df.a.dtype == np.object\n\n\[email protected](\"sep\", [\" \", r\"\\s+\"])\ndef test_integer_overflow_bug(all_parsers, sep):\n # see gh-2601\n data = \"65248E10 11\\n55555E55 22\\n\"\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data), header=None, sep=sep)\n expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_catch_too_many_names(all_parsers):\n # see gh-5156\n data = \"\"\"\\\n1,2,3\n4,,6\n7,8,9\n10,11,12\\n\"\"\"\n parser = all_parsers\n msg = (\"Too many columns specified: \"\n \"expected 4 and found 3\" if parser.engine == \"c\"\n else \"Number of passed names did not match \"\n \"number of header fields in the file\")\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), header=0, names=[\"a\", \"b\", \"c\", \"d\"])\n\n\ndef test_ignore_leading_whitespace(all_parsers):\n # see gh-3374, gh-6607\n 
parser = all_parsers\n data = \" a b c\\n 1 2 3\\n 4 5 6\\n 7 8 9\"\n result = parser.read_csv(StringIO(data), sep=r\"\\s+\")\n\n expected = DataFrame({\"a\": [1, 4, 7], \"b\": [2, 5, 8], \"c\": [3, 6, 9]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_chunk_begins_with_newline_whitespace(all_parsers):\n # see gh-10022\n parser = all_parsers\n data = \"\\n hello\\nworld\\n\"\n\n result = parser.read_csv(StringIO(data), header=None)\n expected = DataFrame([\" hello\", \"world\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_empty_with_index(all_parsers):\n # see gh-10184\n data = \"x,y\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=0)\n\n expected = DataFrame(columns=[\"y\"], index=Index([], name=\"x\"))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_empty_with_multi_index(all_parsers):\n # see gh-10467\n data = \"x,y,z\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=[\"x\", \"y\"])\n\n expected = DataFrame(columns=[\"z\"],\n index=MultiIndex.from_arrays(\n [[]] * 2, names=[\"x\", \"y\"]))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_empty_with_reversed_multi_index(all_parsers):\n data = \"x,y,z\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=[1, 0])\n\n expected = DataFrame(columns=[\"z\"],\n index=MultiIndex.from_arrays(\n [[]] * 2, names=[\"y\", \"x\"]))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_float_parser(all_parsers):\n # see gh-9565\n parser = all_parsers\n data = \"45e-1,4.5,45.,inf,-inf\"\n result = parser.read_csv(StringIO(data), header=None)\n\n expected = DataFrame([[float(s) for s in data.split(\",\")]])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_scientific_no_exponent(all_parsers):\n # see gh-12215\n df = DataFrame.from_dict(OrderedDict([(\"w\", [\"2e\"]), (\"x\", [\"3E\"]),\n (\"y\", [\"42e\"]),\n (\"z\", [\"632E\"])]))\n data = df.to_csv(index=False)\n parser = all_parsers\n\n for precision in parser.float_precision_choices:\n df_roundtrip = parser.read_csv(StringIO(data),\n float_precision=precision)\n tm.assert_frame_equal(df_roundtrip, df)\n\n\[email protected](\"conv\", [None, np.int64, np.uint64])\ndef test_int64_overflow(all_parsers, conv):\n data = \"\"\"ID\n00013007854817840016671868\n00013007854817840016749251\n00013007854817840016754630\n00013007854817840016781876\n00013007854817840017028824\n00013007854817840017963235\n00013007854817840018860166\"\"\"\n parser = all_parsers\n\n if conv is None:\n # 13007854817840016671868 > UINT64_MAX, so this\n # will overflow and return object as the dtype.\n result = parser.read_csv(StringIO(data))\n expected = DataFrame([\"00013007854817840016671868\",\n \"00013007854817840016749251\",\n \"00013007854817840016754630\",\n \"00013007854817840016781876\",\n \"00013007854817840017028824\",\n \"00013007854817840017963235\",\n \"00013007854817840018860166\"], columns=[\"ID\"])\n tm.assert_frame_equal(result, expected)\n else:\n # 13007854817840016671868 > UINT64_MAX, so attempts\n # to cast to either int64 or uint64 will result in\n # an OverflowError being raised.\n msg = (\"(Python int too large to convert to C long)|\"\n \"(long too big to convert)|\"\n \"(int too big to convert)\")\n\n with pytest.raises(OverflowError, match=msg):\n parser.read_csv(StringIO(data), converters={\"ID\": conv})\n\n\[email protected](\"val\", [\n np.iinfo(np.uint64).max,\n np.iinfo(np.int64).max,\n np.iinfo(np.int64).min\n])\ndef test_int64_uint64_range(all_parsers, val):\n # These numbers 
fall right inside the int64-uint64\n # range, so they should be parsed as string.\n parser = all_parsers\n result = parser.read_csv(StringIO(str(val)), header=None)\n\n expected = DataFrame([val])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"val\", [\n np.iinfo(np.uint64).max + 1,\n np.iinfo(np.int64).min - 1\n])\ndef test_outside_int64_uint64_range(all_parsers, val):\n # These numbers fall just outside the int64-uint64\n # range, so they should be parsed as string.\n parser = all_parsers\n result = parser.read_csv(StringIO(str(val)), header=None)\n\n expected = DataFrame([str(val)])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"exp_data\", [[str(-1), str(2**63)],\n [str(2**63), str(-1)]])\ndef test_numeric_range_too_wide(all_parsers, exp_data):\n # No numerical dtype can hold both negative and uint64\n # values, so they should be cast as string.\n parser = all_parsers\n data = \"\\n\".join(exp_data)\n expected = DataFrame(exp_data)\n\n result = parser.read_csv(StringIO(data), header=None)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"iterator\", [True, False])\ndef test_empty_with_nrows_chunksize(all_parsers, iterator):\n # see gh-9535\n parser = all_parsers\n expected = DataFrame(columns=[\"foo\", \"bar\"])\n\n nrows = 10\n data = StringIO(\"foo,bar\\n\")\n\n if iterator:\n result = next(iter(parser.read_csv(data, chunksize=nrows)))\n else:\n result = parser.read_csv(data, nrows=nrows)\n\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"data,kwargs,expected,msg\", [\n # gh-10728: WHITESPACE_LINE\n (\"a,b,c\\n4,5,6\\n \", dict(),\n DataFrame([[4, 5, 6]], columns=[\"a\", \"b\", \"c\"]), None),\n\n # gh-10548: EAT_LINE_COMMENT\n (\"a,b,c\\n4,5,6\\n#comment\", dict(comment=\"#\"),\n DataFrame([[4, 5, 6]], columns=[\"a\", \"b\", \"c\"]), None),\n\n # EAT_CRNL_NOP\n (\"a,b,c\\n4,5,6\\n\\r\", dict(),\n DataFrame([[4, 5, 6]], columns=[\"a\", \"b\", \"c\"]), None),\n\n # EAT_COMMENT\n (\"a,b,c\\n4,5,6#comment\", dict(comment=\"#\"),\n DataFrame([[4, 5, 6]], columns=[\"a\", \"b\", \"c\"]), None),\n\n # SKIP_LINE\n (\"a,b,c\\n4,5,6\\nskipme\", dict(skiprows=[2]),\n DataFrame([[4, 5, 6]], columns=[\"a\", \"b\", \"c\"]), None),\n\n # EAT_LINE_COMMENT\n (\"a,b,c\\n4,5,6\\n#comment\", dict(comment=\"#\", skip_blank_lines=False),\n DataFrame([[4, 5, 6]], columns=[\"a\", \"b\", \"c\"]), None),\n\n # IN_FIELD\n (\"a,b,c\\n4,5,6\\n \", dict(skip_blank_lines=False),\n DataFrame([[\"4\", 5, 6], [\" \", None, None]],\n columns=[\"a\", \"b\", \"c\"]), None),\n\n # EAT_CRNL\n (\"a,b,c\\n4,5,6\\n\\r\", dict(skip_blank_lines=False),\n DataFrame([[4, 5, 6], [None, None, None]],\n columns=[\"a\", \"b\", \"c\"]), None),\n\n # ESCAPED_CHAR\n (\"a,b,c\\n4,5,6\\n\\\\\", dict(escapechar=\"\\\\\"),\n None, \"(EOF following escape character)|(unexpected end of data)\"),\n\n # ESCAPE_IN_QUOTED_FIELD\n ('a,b,c\\n4,5,6\\n\"\\\\', dict(escapechar=\"\\\\\"),\n None, \"(EOF inside string starting at row 2)|(unexpected end of data)\"),\n\n # IN_QUOTED_FIELD\n ('a,b,c\\n4,5,6\\n\"', dict(escapechar=\"\\\\\"),\n None, \"(EOF inside string starting at row 2)|(unexpected end of data)\"),\n], ids=[\"whitespace-line\", \"eat-line-comment\", \"eat-crnl-nop\", \"eat-comment\",\n \"skip-line\", \"eat-line-comment\", \"in-field\", \"eat-crnl\",\n \"escaped-char\", \"escape-in-quoted-field\", \"in-quoted-field\"])\ndef test_eof_states(all_parsers, data, kwargs, expected, msg):\n # see gh-10728, gh-10548\n parser = all_parsers\n\n if expected is None:\n with 
pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), **kwargs)\n else:\n result = parser.read_csv(StringIO(data), **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"usecols\", [None, [0, 1], [\"a\", \"b\"]])\ndef test_uneven_lines_with_usecols(all_parsers, usecols):\n # see gh-12203\n parser = all_parsers\n data = r\"\"\"a,b,c\n0,1,2\n3,4,5,6,7\n8,9,10\"\"\"\n\n if usecols is None:\n # Make sure that an error is still raised\n # when the \"usecols\" parameter is not provided.\n msg = r\"Expected \\d+ fields in line \\d+, saw \\d+\"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data))\n else:\n expected = DataFrame({\n \"a\": [0, 3, 8],\n \"b\": [1, 4, 9]\n })\n\n result = parser.read_csv(StringIO(data), usecols=usecols)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"data,kwargs,expected\", [\n # First, check to see that the response of parser when faced with no\n # provided columns raises the correct error, with or without usecols.\n (\"\", dict(), None),\n (\"\", dict(usecols=[\"X\"]), None),\n (\",,\", dict(names=[\"Dummy\", \"X\", \"Dummy_2\"], usecols=[\"X\"]),\n DataFrame(columns=[\"X\"], index=[0], dtype=np.float64)),\n (\"\", dict(names=[\"Dummy\", \"X\", \"Dummy_2\"], usecols=[\"X\"]),\n DataFrame(columns=[\"X\"])),\n])\ndef test_read_empty_with_usecols(all_parsers, data, kwargs, expected):\n # see gh-12493\n parser = all_parsers\n\n if expected is None:\n msg = \"No columns to parse from file\"\n with pytest.raises(EmptyDataError, match=msg):\n parser.read_csv(StringIO(data), **kwargs)\n else:\n result = parser.read_csv(StringIO(data), **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"kwargs,expected\", [\n # gh-8661, gh-8679: this should ignore six lines, including\n # lines with trailing whitespace and blank lines.\n (dict(header=None, delim_whitespace=True, skiprows=[0, 1, 2, 3, 5, 6],\n skip_blank_lines=True), DataFrame([[1., 2., 4.],\n [5.1, np.nan, 10.]])),\n\n # gh-8983: test skipping set of rows after a row with trailing spaces.\n (dict(delim_whitespace=True, skiprows=[1, 2, 3, 5, 6],\n skip_blank_lines=True), DataFrame({\"A\": [1., 5.1],\n \"B\": [2., np.nan],\n \"C\": [4., 10]})),\n])\ndef test_trailing_spaces(all_parsers, kwargs, expected):\n data = \"A B C \\nrandom line with trailing spaces \\nskip\\n1,2,3\\n1,2.,4.\\nrandom line with trailing tabs\\t\\t\\t\\n \\n5.1,NaN,10.0\\n\" # noqa\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data.replace(\",\", \" \")), **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_raise_on_sep_with_delim_whitespace(all_parsers):\n # see gh-6607\n data = \"a b c\\n1 2 3\"\n parser = all_parsers\n\n with pytest.raises(ValueError, match=\"you can only specify one\"):\n parser.read_csv(StringIO(data), sep=r\"\\s\", delim_whitespace=True)\n\n\[email protected](\"delim_whitespace\", [True, False])\ndef test_single_char_leading_whitespace(all_parsers, delim_whitespace):\n # see gh-9710\n parser = all_parsers\n data = \"\"\"\\\nMyColumn\na\nb\na\nb\\n\"\"\"\n\n expected = DataFrame({\"MyColumn\": list(\"abab\")})\n result = parser.read_csv(StringIO(data), skipinitialspace=True,\n delim_whitespace=delim_whitespace)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"sep,skip_blank_lines,exp_data\", [\n (\",\", True, [[1., 2., 4.], [5., np.nan, 10.], [-70., .4, 1.]]),\n (r\"\\s+\", True, [[1., 2., 4.], [5., np.nan, 10.], [-70., .4, 1.]]),\n (\",\", False, [[1., 2., 4.], [np.nan, np.nan, 
np.nan],\n [np.nan, np.nan, np.nan], [5., np.nan, 10.],\n [np.nan, np.nan, np.nan], [-70., .4, 1.]]),\n])\ndef test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data):\n parser = all_parsers\n data = \"\"\"\\\nA,B,C\n1,2.,4.\n\n\n5.,NaN,10.0\n\n-70,.4,1\n\"\"\"\n\n if sep == r\"\\s+\":\n data = data.replace(\",\", \" \")\n\n result = parser.read_csv(StringIO(data), sep=sep,\n skip_blank_lines=skip_blank_lines)\n expected = DataFrame(exp_data, columns=[\"A\", \"B\", \"C\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_whitespace_lines(all_parsers):\n parser = all_parsers\n data = \"\"\"\n\n\\t \\t\\t\n\\t\nA,B,C\n\\t 1,2.,4.\n5.,NaN,10.0\n\"\"\"\n expected = DataFrame([[1, 2., 4.], [5., np.nan, 10.]],\n columns=[\"A\", \"B\", \"C\"])\n result = parser.read_csv(StringIO(data))\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"data,expected\", [\n (\"\"\" A B C D\na 1 2 3 4\nb 1 2 3 4\nc 1 2 3 4\n\"\"\", DataFrame([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],\n columns=[\"A\", \"B\", \"C\", \"D\"], index=[\"a\", \"b\", \"c\"])),\n (\" a b c\\n1 2 3 \\n4 5 6\\n 7 8 9\",\n DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=[\"a\", \"b\", \"c\"])),\n])\ndef test_whitespace_regex_separator(all_parsers, data, expected):\n # see gh-6607\n parser = all_parsers\n result = parser.read_csv(StringIO(data), sep=r\"\\s+\")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_verbose_read(all_parsers, capsys):\n parser = all_parsers\n data = \"\"\"a,b,c,d\none,1,2,3\none,1,2,3\n,1,2,3\none,1,2,3\n,1,2,3\n,1,2,3\none,1,2,3\ntwo,1,2,3\"\"\"\n\n # Engines are verbose in different ways.\n parser.read_csv(StringIO(data), verbose=True)\n captured = capsys.readouterr()\n\n if parser.engine == \"c\":\n assert \"Tokenization took:\" in captured.out\n assert \"Parser memory cleanup took:\" in captured.out\n else: # Python engine\n assert captured.out == \"Filled 3 NA values in column a\\n\"\n\n\ndef test_verbose_read2(all_parsers, capsys):\n parser = all_parsers\n data = \"\"\"a,b,c,d\none,1,2,3\ntwo,1,2,3\nthree,1,2,3\nfour,1,2,3\nfive,1,2,3\n,1,2,3\nseven,1,2,3\neight,1,2,3\"\"\"\n\n parser.read_csv(StringIO(data), verbose=True, index_col=0)\n captured = capsys.readouterr()\n\n # Engines are verbose in different ways.\n if parser.engine == \"c\":\n assert \"Tokenization took:\" in captured.out\n assert \"Parser memory cleanup took:\" in captured.out\n else: # Python engine\n assert captured.out == \"Filled 1 NA values in column a\\n\"\n\n\ndef test_iteration_open_handle(all_parsers):\n parser = all_parsers\n kwargs = dict(squeeze=True, header=None)\n\n with tm.ensure_clean() as path:\n with open(path, \"w\") as f:\n f.write(\"AAA\\nBBB\\nCCC\\nDDD\\nEEE\\nFFF\\nGGG\")\n\n with open(path, \"r\") as f:\n for line in f:\n if \"CCC\" in line:\n break\n\n result = parser.read_csv(f, **kwargs)\n expected = Series([\"DDD\", \"EEE\", \"FFF\", \"GGG\"], name=0)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"data,thousands,decimal\", [\n (\"\"\"A|B|C\n1|2,334.01|5\n10|13|10.\n\"\"\", \",\", \".\"),\n (\"\"\"A|B|C\n1|2.334,01|5\n10|13|10,\n\"\"\", \".\", \",\"),\n])\ndef test_1000_sep_with_decimal(all_parsers, data, thousands, decimal):\n parser = all_parsers\n expected = DataFrame({\n \"A\": [1, 10],\n \"B\": [2334.01, 13],\n \"C\": [5, 10.]\n })\n\n result = parser.read_csv(StringIO(data), sep=\"|\",\n thousands=thousands,\n decimal=decimal)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_euro_decimal_format(all_parsers):\n parser = all_parsers\n data = 
\"\"\"Id;Number1;Number2;Text1;Text2;Number3\n1;1521,1541;187101,9543;ABC;poi;4,738797819\n2;121,12;14897,76;DEF;uyt;0,377320872\n3;878,158;108013,434;GHI;rez;2,735694704\"\"\"\n\n result = parser.read_csv(StringIO(data), sep=\";\", decimal=\",\")\n expected = DataFrame([\n [1, 1521.1541, 187101.9543, \"ABC\", \"poi\", 4.738797819],\n [2, 121.12, 14897.76, \"DEF\", \"uyt\", 0.377320872],\n [3, 878.158, 108013.434, \"GHI\", \"rez\", 2.735694704]\n ], columns=[\"Id\", \"Number1\", \"Number2\", \"Text1\", \"Text2\", \"Number3\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"na_filter\", [True, False])\ndef test_inf_parsing(all_parsers, na_filter):\n parser = all_parsers\n data = \"\"\"\\\n,A\na,inf\nb,-inf\nc,+Inf\nd,-Inf\ne,INF\nf,-INF\ng,+INf\nh,-INf\ni,inF\nj,-inF\"\"\"\n expected = DataFrame({\"A\": [float(\"inf\"), float(\"-inf\")] * 5},\n index=[\"a\", \"b\", \"c\", \"d\", \"e\",\n \"f\", \"g\", \"h\", \"i\", \"j\"])\n result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"nrows\", [0, 1, 2, 3, 4, 5])\ndef test_raise_on_no_columns(all_parsers, nrows):\n parser = all_parsers\n data = \"\\n\" * nrows\n\n msg = \"No columns to parse from file\"\n with pytest.raises(EmptyDataError, match=msg):\n parser.read_csv(StringIO(data))\n\n\ndef test_memory_map(all_parsers, csv_dir_path):\n mmap_file = os.path.join(csv_dir_path, \"test_mmap.csv\")\n parser = all_parsers\n\n expected = DataFrame({\n \"a\": [1, 2, 3],\n \"b\": [\"one\", \"two\", \"three\"],\n \"c\": [\"I\", \"II\", \"III\"]\n })\n\n result = parser.read_csv(mmap_file, memory_map=True)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_null_byte_char(all_parsers):\n # see gh-2741\n data = \"\\x00,foo\"\n names = [\"a\", \"b\"]\n parser = all_parsers\n\n if parser.engine == \"c\":\n expected = DataFrame([[np.nan, \"foo\"]], columns=names)\n out = parser.read_csv(StringIO(data), names=names)\n tm.assert_frame_equal(out, expected)\n else:\n msg = \"NULL byte detected\"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), names=names)\n\n\[email protected](\"data,kwargs,expected\", [\n # Basic test\n (\"a\\n1\", dict(), DataFrame({\"a\": [1]})),\n\n # \"Regular\" quoting\n ('\"a\"\\n1', dict(quotechar='\"'), DataFrame({\"a\": [1]})),\n\n # Test in a data row instead of header\n (\"b\\n1\", dict(names=[\"a\"]), DataFrame({\"a\": [\"b\", \"1\"]})),\n\n # Test in empty data row with skipping\n (\"\\n1\", dict(names=[\"a\"], skip_blank_lines=True), DataFrame({\"a\": [1]})),\n\n # Test in empty data row without skipping\n (\"\\n1\", dict(names=[\"a\"], skip_blank_lines=False),\n DataFrame({\"a\": [np.nan, 1]})),\n])\ndef test_utf8_bom(all_parsers, data, kwargs, expected):\n # see gh-4793\n parser = all_parsers\n bom = \"\\ufeff\"\n utf8 = \"utf-8\"\n\n def _encode_data_with_bom(_data):\n bom_data = (bom + _data).encode(utf8)\n return BytesIO(bom_data)\n\n result = parser.read_csv(_encode_data_with_bom(data),\n encoding=utf8, **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_temporary_file(all_parsers):\n # see gh-13398\n parser = all_parsers\n data = \"0 0\"\n\n new_file = TemporaryFile(\"w+\")\n new_file.write(data)\n new_file.flush()\n new_file.seek(0)\n\n result = parser.read_csv(new_file, sep=r\"\\s+\", header=None)\n new_file.close()\n\n expected = DataFrame([[0, 0]])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"byte\", [8, 16])\[email protected](\"fmt\", 
[\"utf-{0}\", \"utf_{0}\",\n \"UTF-{0}\", \"UTF_{0}\"])\ndef test_read_csv_utf_aliases(all_parsers, byte, fmt):\n # see gh-13549\n expected = DataFrame({\"mb_num\": [4.8], \"multibyte\": [\"test\"]})\n parser = all_parsers\n\n encoding = fmt.format(byte)\n data = \"mb_num,multibyte\\n4.8,test\".encode(encoding)\n\n result = parser.read_csv(BytesIO(data), encoding=encoding)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_internal_eof_byte(all_parsers):\n # see gh-5500\n parser = all_parsers\n data = \"a,b\\n1\\x1a,2\"\n\n expected = DataFrame([[\"1\\x1a\", 2]], columns=[\"a\", \"b\"])\n result = parser.read_csv(StringIO(data))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_internal_eof_byte_to_file(all_parsers):\n # see gh-16559\n parser = all_parsers\n data = b'c1,c2\\r\\n\"test \\x1a test\", test\\r\\n'\n expected = DataFrame([[\"test \\x1a test\", \" test\"]],\n columns=[\"c1\", \"c2\"])\n path = \"__%s__.csv\" % tm.rands(10)\n\n with tm.ensure_clean(path) as path:\n with open(path, \"wb\") as f:\n f.write(data)\n\n result = parser.read_csv(path)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_sub_character(all_parsers, csv_dir_path):\n # see gh-16893\n filename = os.path.join(csv_dir_path, \"sub_char.csv\")\n expected = DataFrame([[1, 2, 3]], columns=[\"a\", \"\\x1ab\", \"c\"])\n\n parser = all_parsers\n result = parser.read_csv(filename)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_file_handle_string_io(all_parsers):\n # gh-14418\n #\n # Don't close user provided file handles.\n parser = all_parsers\n data = \"a,b\\n1,2\"\n\n fh = StringIO(data)\n parser.read_csv(fh)\n assert not fh.closed\n\n\ndef test_file_handles_with_open(all_parsers, csv1):\n # gh-14418\n #\n # Don't close user provided file handles.\n parser = all_parsers\n\n with open(csv1, \"r\") as f:\n parser.read_csv(f)\n assert not f.closed\n\n\ndef test_invalid_file_buffer_class(all_parsers):\n # see gh-15337\n class InvalidBuffer:\n pass\n\n parser = all_parsers\n msg = \"Invalid file path or buffer object type\"\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(InvalidBuffer())\n\n\ndef test_invalid_file_buffer_mock(all_parsers):\n # see gh-15337\n parser = all_parsers\n msg = \"Invalid file path or buffer object type\"\n\n class Foo:\n pass\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(Foo())\n\n\ndef test_valid_file_buffer_seems_invalid(all_parsers):\n # gh-16135: we want to ensure that \"tell\" and \"seek\"\n # aren't actually being used when we call `read_csv`\n #\n # Thus, while the object may look \"invalid\" (these\n # methods are attributes of the `StringIO` class),\n # it is still a valid file-object for our purposes.\n class NoSeekTellBuffer(StringIO):\n def tell(self):\n raise AttributeError(\"No tell method\")\n\n def seek(self, pos, whence=0):\n raise AttributeError(\"No seek method\")\n\n data = \"a\\n1\"\n parser = all_parsers\n expected = DataFrame({\"a\": [1]})\n\n result = parser.read_csv(NoSeekTellBuffer(data))\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"kwargs\", [\n dict(), # Default is True.\n dict(error_bad_lines=True), # Explicitly pass in.\n])\[email protected](\"warn_kwargs\", [\n dict(), dict(warn_bad_lines=True),\n dict(warn_bad_lines=False)\n])\ndef test_error_bad_lines(all_parsers, kwargs, warn_kwargs):\n # see gh-15925\n parser = all_parsers\n kwargs.update(**warn_kwargs)\n data = \"a\\n1\\n1,2,3\\n4\\n5,6,7\"\n\n msg = \"Expected 1 fields in line 3, saw 3\"\n with pytest.raises(ParserError, 
match=msg):\n parser.read_csv(StringIO(data), **kwargs)\n\n\ndef test_warn_bad_lines(all_parsers, capsys):\n # see gh-15925\n parser = all_parsers\n data = \"a\\n1\\n1,2,3\\n4\\n5,6,7\"\n expected = DataFrame({\"a\": [1, 4]})\n\n result = parser.read_csv(StringIO(data),\n error_bad_lines=False,\n warn_bad_lines=True)\n tm.assert_frame_equal(result, expected)\n\n captured = capsys.readouterr()\n assert \"Skipping line 3\" in captured.err\n assert \"Skipping line 5\" in captured.err\n\n\ndef test_suppress_error_output(all_parsers, capsys):\n # see gh-15925\n parser = all_parsers\n data = \"a\\n1\\n1,2,3\\n4\\n5,6,7\"\n expected = DataFrame({\"a\": [1, 4]})\n\n result = parser.read_csv(StringIO(data),\n error_bad_lines=False,\n warn_bad_lines=False)\n tm.assert_frame_equal(result, expected)\n\n captured = capsys.readouterr()\n assert captured.err == \"\"\n\n\[email protected](compat.is_platform_windows() and not compat.PY36,\n reason=\"On Python < 3.6 won't pass on Windows\")\[email protected](\"filename\", [\"sé-es-vé.csv\", \"ru-sй.csv\"])\ndef test_filename_with_special_chars(all_parsers, filename):\n # see gh-15086.\n parser = all_parsers\n df = DataFrame({\"a\": [1, 2, 3]})\n\n with tm.ensure_clean(filename) as path:\n df.to_csv(path, index=False)\n\n result = parser.read_csv(path)\n tm.assert_frame_equal(result, df)\n\n\ndef test_read_csv_memory_growth_chunksize(all_parsers):\n # see gh-24805\n #\n # Let's just make sure that we don't crash\n # as we iteratively process all chunks.\n parser = all_parsers\n\n with tm.ensure_clean() as path:\n with open(path, \"w\") as f:\n for i in range(1000):\n f.write(str(i) + \"\\n\")\n\n result = parser.read_csv(path, chunksize=20)\n\n for _ in result:\n pass\n\n\ndef test_read_table_deprecated(all_parsers):\n # see gh-21948\n parser = all_parsers\n data = \"a\\tb\\n1\\t2\\n3\\t4\"\n expected = parser.read_csv(StringIO(data), sep=\"\\t\")\n\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n result = parser.read_table(StringIO(data))\n tm.assert_frame_equal(result, expected)\n"
] | [
[
"pandas.concat",
"pandas.util.testing.ensure_clean",
"pandas.Series",
"pandas.compat.is_platform_windows",
"pandas.util.testing.assert_produces_warning",
"pandas.util.testing.assert_series_equal",
"pandas.Index",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"pandas.MultiIndex.from_tuples",
"pandas.MultiIndex.from_arrays",
"pandas._libs.tslib.Timestamp",
"pandas.util.testing.rands",
"pandas.io.parsers.TextParser",
"numpy.iinfo",
"pandas.io.parsers.CParserWrapper._set_noconvert_columns",
"pandas.util.testing.makeDataFrame",
"pandas.compat.lrange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [],
"tensorflow": []
}
] |
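The pandas entry above is a block of `read_csv` regression tests (blank-line skipping, whitespace separators, European thousands/decimal marks, bad-line handling, and so on). As a quick illustration of two of the options those tests exercise, here is a minimal sketch that is not part of the recorded source; the sample CSV and column names are invented for the example.

```python
from io import StringIO

import pandas as pd

# Invented sample: semicolon-separated, "." as thousands and "," as decimal,
# with a blank line that skip_blank_lines drops (mirrors test_euro_decimal_format
# and test_empty_lines in the entry above).
euro_csv = "Id;Number1\n1;1.521,15\n\n2;878,158\n"

df = pd.read_csv(
    StringIO(euro_csv),
    sep=";",
    thousands=".",          # "1.521,15" -> 1521.15
    decimal=",",
    skip_blank_lines=True,  # the empty row is ignored
)
print(df)
print(df.dtypes)  # Number1 is parsed as float64
```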
mekkianes/detectron2 | [
"0d72f4397f4e49e4f7c093906600c97810079f1d",
"0d72f4397f4e49e4f7c093906600c97810079f1d"
] | [
"detectron2/export/torchscript.py",
"projects/DensePose/densepose/data/structures.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n\nimport os\nimport torch\n\nfrom detectron2.utils.env import TORCH_VERSION\nfrom detectron2.utils.file_io import PathManager\n\nfrom .torchscript_patch import freeze_training_mode, patch_instances\n\n\ndef export_torchscript_with_instances(model, fields):\n \"\"\"\n Run :func:`torch.jit.script` on a model that uses the :class:`Instances` class. Since\n attributes of :class:`Instances` are \"dynamically\" added in eager mode,it is difficult\n for torchscript to support it out of the box. This function is made to support scripting\n a model that uses :class:`Instances`. It does the following:\n\n 1. Create a scriptable ``new_Instances`` class which behaves similarly to ``Instances``,\n but with all attributes been \"static\".\n The attributes need to be statically declared in the ``fields`` argument.\n 2. Register ``new_Instances`` to torchscript, and force torchscript to\n use it when trying to compile ``Instances``.\n\n After this function, the process will be reverted. User should be able to script another model\n using different fields.\n\n Example:\n Assume that ``Instances`` in the model consist of two attributes named\n ``proposal_boxes`` and ``objectness_logits`` with type :class:`Boxes` and\n :class:`Tensor` respectively during inference. You can call this function like:\n\n ::\n fields = {\"proposal_boxes\": Boxes, \"objectness_logits\": torch.Tensor}\n torchscipt_model = export_torchscript_with_instances(model, fields)\n\n Note:\n Currently we only support models in evaluation mode.\n\n Args:\n model (nn.Module): The input model to be exported to torchscript.\n fields (Dict[str, type]): Attribute names and corresponding type that\n ``Instances`` will use in the model. Note that all attributes used in ``Instances``\n need to be added, regardless of whether they are inputs/outputs of the model.\n Data type not defined in detectron2 is not supported for now.\n\n Returns:\n torch.jit.ScriptModule: the input model in torchscript format\n \"\"\"\n assert TORCH_VERSION >= (1, 8), \"This feature is not available in PyTorch < 1.8\"\n assert (\n not model.training\n ), \"Currently we only support exporting models in evaluation mode to torchscript\"\n\n from copy import deepcopy\n\n # TODO: __prepare_scriptable__ was reverted from pytorch: D25061862\n # We hack it here until it's added back\n model = deepcopy(model)\n for m in model.modules():\n for name, subm in m.named_children():\n if hasattr(subm, \"__tmp_prepare_scriptable__\"):\n newm = subm.__tmp_prepare_scriptable__()\n setattr(m, name, newm)\n\n with freeze_training_mode(model), patch_instances(fields):\n scripted_model = torch.jit.script(model)\n return scripted_model\n\n\ndef dump_torchscript_IR(model, dir):\n \"\"\"\n Dump IR of a TracedModule/ScriptModule at various levels.\n Useful for debugging.\n\n Args:\n model (TracedModule or ScriptModule): traced or scripted module\n dir (str): output directory to dump files.\n \"\"\"\n PathManager.mkdirs(dir)\n\n def _get_script_mod(mod):\n if isinstance(mod, torch.jit.TracedModule):\n return mod._actual_script_module\n return mod\n\n # Dump pretty-printed code: https://pytorch.org/docs/stable/jit.html#inspecting-code\n with PathManager.open(os.path.join(dir, \"model_ts_code.txt\"), \"w\") as f:\n\n def get_code(mod):\n # Try a few ways to get code using private attributes.\n try:\n # This contains more information than just `mod.code`\n return _get_script_mod(mod)._c.code\n except AttributeError:\n pass\n try:\n return mod.code\n 
except AttributeError:\n return None\n\n def dump_code(prefix, mod):\n code = get_code(mod)\n name = prefix or \"root model\"\n if code is None:\n f.write(f\"Could not found code for {name} (type={mod.original_name})\\n\")\n f.write(\"\\n\")\n else:\n f.write(f\"\\nCode for {name}, type={mod.original_name}:\\n\")\n f.write(code)\n f.write(\"\\n\")\n f.write(\"-\" * 80)\n\n for name, m in mod.named_children():\n dump_code(prefix + \".\" + name, m)\n\n dump_code(\"\", model)\n\n # Recursively dump IR of all modules\n with PathManager.open(os.path.join(dir, \"model_ts_IR.txt\"), \"w\") as f:\n try:\n f.write(_get_script_mod(model)._c.dump_to_str(True, False, False))\n except AttributeError:\n pass\n\n # Dump IR of the entire graph (all submodules inlined)\n with PathManager.open(os.path.join(dir, \"model_ts_IR_inlined.txt\"), \"w\") as f:\n f.write(str(model.inlined_graph))\n\n # Dump the model structure in pytorch style\n with PathManager.open(os.path.join(dir, \"model.txt\"), \"w\") as f:\n f.write(str(model))\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\nimport numpy as np\nfrom typing import BinaryIO, Dict, Union\nimport torch\nfrom torch.nn import functional as F\n\nfrom densepose.structures.mesh import load_mesh_symmetry\n\nfrom .meshes.catalog import MeshCatalog\n\n\nclass DensePoseTransformData(object):\n\n # Horizontal symmetry label transforms used for horizontal flip\n MASK_LABEL_SYMMETRIES = [0, 1, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 14]\n # fmt: off\n POINT_LABEL_SYMMETRIES = [ 0, 1, 2, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15, 18, 17, 20, 19, 22, 21, 24, 23] # noqa\n # fmt: on\n\n def __init__(self, uv_symmetries: Dict[str, torch.Tensor], device: torch.device):\n self.mask_label_symmetries = DensePoseTransformData.MASK_LABEL_SYMMETRIES\n self.point_label_symmetries = DensePoseTransformData.POINT_LABEL_SYMMETRIES\n self.uv_symmetries = uv_symmetries\n self.device = torch.device(\"cpu\")\n\n def to(self, device: torch.device, copy: bool = False) -> \"DensePoseTransformData\":\n \"\"\"\n Convert transform data to the specified device\n\n Args:\n device (torch.device): device to convert the data to\n copy (bool): flag that specifies whether to copy or to reference the data\n in case the device is the same\n Return:\n An instance of `DensePoseTransformData` with data stored on the specified device\n \"\"\"\n if self.device == device and not copy:\n return self\n uv_symmetry_map = {}\n for key in self.uv_symmetries:\n uv_symmetry_map[key] = self.uv_symmetries[key].to(device=device, copy=copy)\n return DensePoseTransformData(uv_symmetry_map, device)\n\n @staticmethod\n def load(io: Union[str, BinaryIO]):\n \"\"\"\n Args:\n io: (str or binary file-like object): input file to load data from\n Returns:\n An instance of `DensePoseTransformData` with transforms loaded from the file\n \"\"\"\n import scipy.io\n\n uv_symmetry_map = scipy.io.loadmat(io)\n uv_symmetry_map_torch = {}\n for key in [\"U_transforms\", \"V_transforms\"]:\n uv_symmetry_map_torch[key] = []\n map_src = uv_symmetry_map[key]\n map_dst = uv_symmetry_map_torch[key]\n for i in range(map_src.shape[1]):\n map_dst.append(torch.from_numpy(map_src[0, i]).to(dtype=torch.float))\n uv_symmetry_map_torch[key] = torch.stack(map_dst, dim=0)\n transform_data = DensePoseTransformData(uv_symmetry_map_torch, device=torch.device(\"cpu\"))\n return transform_data\n\n\nclass DensePoseDataRelative(object):\n \"\"\"\n Dense pose relative annotations that can be applied to any bounding box:\n x - normalized X coordinates [0, 255] of annotated points\n y - normalized Y coordinates [0, 255] of annotated points\n i - body part labels 0,...,24 for annotated points\n u - body part U coordinates [0, 1] for annotated points\n v - body part V coordinates [0, 1] for annotated points\n segm - 256x256 segmentation mask with values 0,...,14\n To obtain absolute x and y data wrt some bounding box one needs to first\n divide the data by 256, multiply by the respective bounding box size\n and add bounding box offset:\n x_img = x0 + x_norm * w / 256.0\n y_img = y0 + y_norm * h / 256.0\n Segmentation masks are typically sampled to get image-based masks.\n \"\"\"\n\n # Key for normalized X coordinates in annotation dict\n X_KEY = \"dp_x\"\n # Key for normalized Y coordinates in annotation dict\n Y_KEY = \"dp_y\"\n # Key for U part coordinates in annotation dict (used in chart-based annotations)\n U_KEY = \"dp_U\"\n # Key for V part coordinates in annotation dict (used in chart-based annotations)\n V_KEY = \"dp_V\"\n # Key for I point labels in 
annotation dict (used in chart-based annotations)\n I_KEY = \"dp_I\"\n # Key for segmentation mask in annotation dict\n S_KEY = \"dp_masks\"\n # Key for vertex ids (used in continuous surface embeddings annotations)\n VERTEX_IDS_KEY = \"vertex_ids\"\n # Key for mesh id (used in continuous surface embeddings annotations)\n MESH_ID_KEY = \"mesh_id\"\n # Number of body parts in segmentation masks\n N_BODY_PARTS = 14\n # Number of parts in point labels\n N_PART_LABELS = 24\n MASK_SIZE = 256\n\n def __init__(self, annotation, cleanup=False):\n self.x = torch.as_tensor(annotation[DensePoseDataRelative.X_KEY])\n self.y = torch.as_tensor(annotation[DensePoseDataRelative.Y_KEY])\n if (\n DensePoseDataRelative.I_KEY in annotation\n and DensePoseDataRelative.U_KEY in annotation\n and DensePoseDataRelative.V_KEY in annotation\n ):\n self.i = torch.as_tensor(annotation[DensePoseDataRelative.I_KEY])\n self.u = torch.as_tensor(annotation[DensePoseDataRelative.U_KEY])\n self.v = torch.as_tensor(annotation[DensePoseDataRelative.V_KEY])\n if (\n DensePoseDataRelative.VERTEX_IDS_KEY in annotation\n and DensePoseDataRelative.MESH_ID_KEY in annotation\n ):\n self.vertex_ids = torch.as_tensor(\n annotation[DensePoseDataRelative.VERTEX_IDS_KEY], dtype=torch.long\n )\n self.mesh_id = annotation[DensePoseDataRelative.MESH_ID_KEY]\n self.segm = DensePoseDataRelative.extract_segmentation_mask(annotation)\n self.device = torch.device(\"cpu\")\n if cleanup:\n DensePoseDataRelative.cleanup_annotation(annotation)\n\n def to(self, device):\n if self.device == device:\n return self\n new_data = DensePoseDataRelative.__new__(DensePoseDataRelative)\n new_data.x = self.x\n new_data.x = self.x.to(device)\n new_data.y = self.y.to(device)\n for attr in [\"i\", \"u\", \"v\", \"vertex_ids\"]:\n if hasattr(self, attr):\n setattr(new_data, attr, getattr(self, attr).to(device))\n if hasattr(self, \"mesh_id\"):\n new_data.mesh_id = self.mesh_id\n new_data.segm = self.segm.to(device)\n new_data.device = device\n return new_data\n\n @staticmethod\n def extract_segmentation_mask(annotation):\n import pycocotools.mask as mask_utils\n\n poly_specs = annotation[DensePoseDataRelative.S_KEY]\n if isinstance(poly_specs, torch.Tensor):\n # data is already given as mask tensors, no need to decode\n return poly_specs\n segm = torch.zeros((DensePoseDataRelative.MASK_SIZE,) * 2, dtype=torch.float32)\n if isinstance(poly_specs, dict):\n if poly_specs:\n mask = mask_utils.decode(poly_specs)\n segm[mask > 0] = 1\n else:\n for i in range(len(poly_specs)):\n poly_i = poly_specs[i]\n if poly_i:\n mask_i = mask_utils.decode(poly_i)\n segm[mask_i > 0] = i + 1\n return segm\n\n @staticmethod\n def validate_annotation(annotation):\n for key in [\n DensePoseDataRelative.X_KEY,\n DensePoseDataRelative.Y_KEY,\n DensePoseDataRelative.I_KEY,\n DensePoseDataRelative.U_KEY,\n DensePoseDataRelative.V_KEY,\n DensePoseDataRelative.S_KEY,\n ]:\n if key not in annotation:\n return False, \"no {key} data in the annotation\".format(key=key)\n return True, None\n\n @staticmethod\n def cleanup_annotation(annotation):\n for key in [\n DensePoseDataRelative.X_KEY,\n DensePoseDataRelative.Y_KEY,\n DensePoseDataRelative.I_KEY,\n DensePoseDataRelative.U_KEY,\n DensePoseDataRelative.V_KEY,\n DensePoseDataRelative.S_KEY,\n DensePoseDataRelative.VERTEX_IDS_KEY,\n DensePoseDataRelative.MESH_ID_KEY,\n ]:\n if key in annotation:\n del annotation[key]\n\n def apply_transform(self, transforms, densepose_transform_data):\n self._transform_pts(transforms, densepose_transform_data)\n 
self._transform_segm(transforms, densepose_transform_data)\n\n def _transform_pts(self, transforms, dp_transform_data):\n import detectron2.data.transforms as T\n\n # NOTE: This assumes that HorizFlipTransform is the only one that does flip\n do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1\n if do_hflip:\n self.x = self.segm.size(1) - self.x\n self._flip_iuv_semantics(dp_transform_data)\n self._flip_vertices()\n\n for t in transforms.transforms:\n if isinstance(t, T.RotationTransform):\n xy_scale = np.array((t.w, t.h)) / DensePoseDataRelative.MASK_SIZE\n xy = t.apply_coords(np.stack((self.x, self.y), axis=1) * xy_scale)\n self.x, self.y = torch.tensor(xy / xy_scale, dtype=self.x.dtype).T\n\n def _flip_iuv_semantics(self, dp_transform_data: DensePoseTransformData) -> None:\n i_old = self.i.clone()\n uv_symmetries = dp_transform_data.uv_symmetries\n pt_label_symmetries = dp_transform_data.point_label_symmetries\n for i in range(self.N_PART_LABELS):\n if i + 1 in i_old:\n annot_indices_i = i_old == i + 1\n if pt_label_symmetries[i + 1] != i + 1:\n self.i[annot_indices_i] = pt_label_symmetries[i + 1]\n u_loc = (self.u[annot_indices_i] * 255).long()\n v_loc = (self.v[annot_indices_i] * 255).long()\n self.u[annot_indices_i] = uv_symmetries[\"U_transforms\"][i][v_loc, u_loc].to(\n device=self.u.device\n )\n self.v[annot_indices_i] = uv_symmetries[\"V_transforms\"][i][v_loc, u_loc].to(\n device=self.v.device\n )\n\n def _flip_vertices(self):\n if hasattr(self, \"vertex_ids\"):\n mesh_info = MeshCatalog[MeshCatalog.get_mesh_name(self.mesh_id)]\n mesh_symmetry = (\n load_mesh_symmetry(mesh_info.symmetry) if mesh_info.symmetry is not None else None\n )\n self.vertex_ids = mesh_symmetry[\"vertex_transforms\"][self.vertex_ids]\n\n def _transform_segm(self, transforms, dp_transform_data):\n import detectron2.data.transforms as T\n\n # NOTE: This assumes that HorizFlipTransform is the only one that does flip\n do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1\n if do_hflip:\n self.segm = torch.flip(self.segm, [1])\n self._flip_segm_semantics(dp_transform_data)\n\n for t in transforms.transforms:\n if isinstance(t, T.RotationTransform):\n self._transform_segm_rotation(t)\n\n def _flip_segm_semantics(self, dp_transform_data):\n old_segm = self.segm.clone()\n mask_label_symmetries = dp_transform_data.mask_label_symmetries\n for i in range(self.N_BODY_PARTS):\n if mask_label_symmetries[i + 1] != i + 1:\n self.segm[old_segm == i + 1] = mask_label_symmetries[i + 1]\n\n def _transform_segm_rotation(self, rotation):\n self.segm = F.interpolate(self.segm[None, None, :], (rotation.h, rotation.w)).numpy()\n self.segm = torch.tensor(rotation.apply_segmentation(self.segm[0, 0]))[None, None, :]\n self.segm = F.interpolate(self.segm, [DensePoseDataRelative.MASK_SIZE] * 2)[0, 0]\n\n\ndef normalized_coords_transform(x0, y0, w, h):\n \"\"\"\n Coordinates transform that maps top left corner to (-1, -1) and bottom\n right corner to (1, 1). 
Used for torch.grid_sample to initialize the\n grid\n \"\"\"\n\n def f(p):\n return (2 * (p[0] - x0) / w - 1, 2 * (p[1] - y0) / h - 1)\n\n return f\n\n\nclass DensePoseList(object):\n\n _TORCH_DEVICE_CPU = torch.device(\"cpu\")\n\n def __init__(self, densepose_datas, boxes_xyxy_abs, image_size_hw, device=_TORCH_DEVICE_CPU):\n assert len(densepose_datas) == len(\n boxes_xyxy_abs\n ), \"Attempt to initialize DensePoseList with {} DensePose datas \" \"and {} boxes\".format(\n len(densepose_datas), len(boxes_xyxy_abs)\n )\n self.densepose_datas = []\n for densepose_data in densepose_datas:\n assert isinstance(densepose_data, DensePoseDataRelative) or densepose_data is None, (\n \"Attempt to initialize DensePoseList with DensePose datas \"\n \"of type {}, expected DensePoseDataRelative\".format(type(densepose_data))\n )\n densepose_data_ondevice = (\n densepose_data.to(device) if densepose_data is not None else None\n )\n self.densepose_datas.append(densepose_data_ondevice)\n self.boxes_xyxy_abs = boxes_xyxy_abs.to(device)\n self.image_size_hw = image_size_hw\n self.device = device\n\n def to(self, device):\n if self.device == device:\n return self\n return DensePoseList(self.densepose_datas, self.boxes_xyxy_abs, self.image_size_hw, device)\n\n def __iter__(self):\n return iter(self.densepose_datas)\n\n def __len__(self):\n return len(self.densepose_datas)\n\n def __repr__(self):\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={}, \".format(len(self.densepose_datas))\n s += \"image_width={}, \".format(self.image_size_hw[1])\n s += \"image_height={})\".format(self.image_size_hw[0])\n return s\n\n def __getitem__(self, item):\n if isinstance(item, int):\n densepose_data_rel = self.densepose_datas[item]\n return densepose_data_rel\n elif isinstance(item, slice):\n densepose_datas_rel = self.densepose_datas[item]\n boxes_xyxy_abs = self.boxes_xyxy_abs[item]\n return DensePoseList(\n densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device\n )\n elif isinstance(item, torch.Tensor) and (item.dtype == torch.bool):\n densepose_datas_rel = [self.densepose_datas[i] for i, x in enumerate(item) if x > 0]\n boxes_xyxy_abs = self.boxes_xyxy_abs[item]\n return DensePoseList(\n densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device\n )\n else:\n densepose_datas_rel = [self.densepose_datas[i] for i in item]\n boxes_xyxy_abs = self.boxes_xyxy_abs[item]\n return DensePoseList(\n densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device\n )\n"
] | [
[
"torch.jit.script"
],
[
"torch.zeros",
"torch.from_numpy",
"numpy.stack",
"torch.tensor",
"torch.as_tensor",
"torch.nn.functional.interpolate",
"torch.device",
"torch.flip",
"numpy.array",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
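The first detectron2 file in the entry above defines `export_torchscript_with_instances` and `dump_torchscript_IR`. The sketch below only wires up the call that the function's own docstring describes; it is not part of the recorded source. The import path follows the recorded file path `detectron2/export/torchscript.py`, the `fields` dict is the docstring's example pair, and the caller is assumed to supply an already-built detectron2 model in evaluation mode on PyTorch >= 1.8, as the function asserts.

```python
import torch
from detectron2.structures import Boxes
from detectron2.export.torchscript import (
    dump_torchscript_IR,
    export_torchscript_with_instances,
)


def export_and_dump(model: torch.nn.Module, out_dir: str) -> torch.jit.ScriptModule:
    """Script a detectron2 model whose Instances carry the two fields from the
    docstring example above, then dump its readable IR for inspection."""
    # `fields` must cover every attribute Instances uses inside the model;
    # this pair is just the docstring's illustration, not a complete list.
    fields = {"proposal_boxes": Boxes, "objectness_logits": torch.Tensor}
    ts_model = export_torchscript_with_instances(model.eval(), fields)
    # Writes model_ts_code.txt, model_ts_IR.txt, model_ts_IR_inlined.txt, model.txt.
    dump_torchscript_IR(ts_model, out_dir)
    return ts_model
```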
ForexAndBeyond/FinRL | [
"a4050c54a0f4680ba0a7cd899534077583aa1332"
] | [
"finrl/finrl_meta/env_fx_trading/env_fx.py"
] | [
"import datetime\nimport math\nimport random\nimport pandas as pd\n\nimport gym\nimport numpy as np\nfrom gym import spaces\nfrom stable_baselines3.common.vec_env import DummyVecEnv\n\nfrom .util.log_render import render_to_file\nfrom .util.plot_chart import TradingChart\n\n\n\nclass tgym(gym.Env):\n \"\"\"forex/future/option trading gym environment\n 1. Three action space (0 Buy, 1 Sell, 2 Nothing)\n 2. Multiple trading pairs (EURUSD, GBPUSD...) under same time frame\n 3. Timeframe from 1 min to daily as long as use candlestick bar (Open, High, Low, Close)\n 4. Use StopLose, ProfitTaken to realize rewards. each pair can configure it own SL and PT in configure file\n 5. Configure over night cash penalty and each pair's transaction fee and overnight position holding penalty\n 6. Split dataset into daily, weekly or monthly..., with fixed time steps, at end of len(df). The business\n logic will force to Close all positions at last Close price (game over).\n 7. Must have df column name: [(time_col),(asset_col), Open,Close,High,Low,day] (case sensitive)\n 8. Addition indicators can add during the data process. 78 available TA indicator from Finta\n 9. Customized observation list handled in json config file.\n 10. ProfitTaken = fraction_action * max_profit_taken + SL.\n 11. SL is pre-fixed\n 12. Limit order can be configure, if limit_order == True, the action will preset buy or sell at Low or High of the bar,\n with a limit_order_expiration (n bars). It will be triggered if the price go cross. otherwise, it will be drop off\n 13. render mode:\n human -- display each steps realized reward on console\n file -- create a transaction log\n graph -- create transaction in graph (under development)\n 14.\n 15. Reward, we want to incentivize profit that is sustained over long periods of time.\n At each step, we will set the reward to the account balance multiplied by\n some fraction of the number of time steps so far.The purpose of this is to delay\n rewarding the agent too fast in the early stages and allow it to explore\n sufficiently before optimizing a single strategy too deeply.\n It will also reward agents that maintain a higher balance for longer,\n rather than those who rapidly gain money using unsustainable strategies.\n 16. Observation_space contains all of the input variables we want our agent\n to consider before making, or not making a trade. We want our agent to “see”\n the forex data points (Open price, High, Low, Close, time serial, TA) in the game window,\n as well a couple other data points like its account balance, current positions,\n and current profit.The intuition here is that for each time step, we want our agent\n to consider the price action leading up to the current price, as well as their\n own portfolio’s status in order to make an informed decision for the next action.\n 17. 
reward is forex trading unit Point, it can be configure for each trading pair\n \"\"\"\n\n metadata = {\"render.modes\": [\"graph\", \"human\", \"file\", \"none\"]}\n\n def __init__(\n self, df, env_config_obj\n ) -> None:\n assert df.ndim == 2\n super(tgym, self).__init__()\n self.cf = env_config_obj\n self.observation_list = self.cf[\"env\"][\"observation_list\"]\n\n self.balance_initial = self.cf[\"env\"][\"balance\"]\n self.over_night_cash_penalty = self.cf[\"env\"][\"over_night_cash_penalty\"]\n self.asset_col = self.cf[\"env\"][\"asset_col\"]\n self.time_col = self.cf[\"env\"][\"time_col\"]\n self.random_start = self.cf[\"env\"][\"random_start\"]\n self.log_filename = self.cf[\"env\"][\"log_filename\"]+ datetime.datetime.now(\n ).strftime('%Y%m%d%H%M%S') + '.csv'\n\n self.df = df\n self.df[\"_time\"] = df[self.time_col]\n self.df[\"_day\"] = df[\"weekday\"]\n self.assets = df[self.asset_col].unique()\n self.dt_datetime = df[self.time_col].sort_values().unique()\n self.df = self.df.set_index(self.time_col)\n self.visualization = False\n\n # --- reset value ---\n self.equity_list = [0] * len(self.assets)\n self.balance = self.balance_initial\n self.total_equity = self.balance + sum(self.equity_list)\n self.ticket_id = 0\n self.transaction_live = []\n self.transaction_history = []\n self.transaction_limit_order = []\n self.current_draw_downs = [0.0] * len(self.assets)\n self.max_draw_downs = [0.0] * len(self.assets)\n self.max_draw_down_pct = sum(self.max_draw_downs) / self.balance * 100\n self.current_step = 0\n self.episode = 0\n self.current_holding = [0] * len(self.assets)\n self.transaction_open_this_step = []\n self.transaction_close_this_step = []\n self.current_day = 0\n self.done_information = \"\"\n self.log_header = True\n self.reward = 0\n self.cached_data = [\n self.get_observation_vector(_dt) for _dt in self.dt_datetime\n ]\n self.obs = (\n [self.balance, self.max_draw_down_pct]\n + self.current_holding\n + self.current_draw_downs\n + self.get_observation(self.current_step)\n )\n\n self.actions_memory = [2]\n self.date_memory = [self._get_date()]\n\n #TODO: add more fields to match StockTradingEnv2 in `this_works.ipynb`\n self.account_information = {\n \"cash\": [self.balance],\n }\n \n\n # --- end reset ---\n\n self.cached_time_serial = (\n (self.df[[\"_time\", \"_day\"]].sort_values(\"_time\")).drop_duplicates()\n ).values.tolist()\n\n self.action_space = spaces.Box(low=0, high=3, shape=(len(self.assets),))\n # first two 3 = balance,current_holding, max_draw_down_pct\n _space = 3 + len(self.assets) + len(self.assets) * len(self.observation_list)\n self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(_space,))\n print(\n f\"initial done:\\n\"\n f\"observation_list:{self.observation_list}\\n \"\n f\"assets:{self.assets}\\n \"\n f\"time serial: {min(self.dt_datetime)} -> {max(self.dt_datetime)} length: {len(self.dt_datetime)}\"\n )\n\n def _get_date(self):\n # get the date at the current_step\n return self.dt_datetime[self.current_step]\n\n def symbol(self, asset=\"GBP_USD\", item=''):\n \"\"\"get trading pair (symbol) information\n Args:\n asset (str, optional): symbol in config. Defaults to \"GBPUSD\".\n item (str, optional): name of item, if '' return dict, else return item value. 
Defaults to ''.\n Returns:\n [type]: [description]\n \"\"\"\n if item:\n return self.cf[\"symbol\"][asset][item]\n else:\n return self.cf[\"symbol\"][asset]\n\n def update_memories(self):\n \"\"\"\n TODO: helper function that updates environment memory like asset and actions\n \"\"\"\n pass\n def _take_action(self, actions, done):\n _action = 2\n _profit_taken = 0\n rewards = [0] * len(self.assets)\n self.transaction_open_this_step = []\n self.transaction_close_this_step = []\n # need use multiply assets\n for i, x in enumerate(actions):\n self._o = self.get_observation(self.current_step, i, \"Open\")\n self._h = self.get_observation(self.current_step, i, \"High\")\n self._l = self.get_observation(self.current_step, i, \"Low\")\n self._c = self.get_observation(self.current_step, i, \"Close\")\n self._t = self.get_observation(self.current_step, i, \"_time\")\n self._day = self.get_observation(self.current_step, i, \"_day\")\n _action = math.floor(x)\n rewards[i] = self._calculate_reward(i, done)\n if self.symbol(self.assets[i], \"limit_order\"):\n self._limit_order_process(i, _action, done)\n if (\n _action in (0, 1)\n and not done\n and self.current_holding[i]\n < self.symbol(self.assets[i], \"max_current_holding\")\n ):\n # generating PT based on action fraction\n _profit_taken = math.ceil(\n (x - _action) * self.symbol(self.assets[i], \"profit_taken_max\")\n ) + self.symbol(self.assets[i], \"stop_loss_max\")\n self.ticket_id += 1\n if self.symbol(self.assets[i], \"limit_order\"):\n transaction = {\n \"Ticket\": self.ticket_id,\n \"Symbol\": self.assets[i],\n \"ActionTime\": self._t,\n \"Type\": _action,\n \"Lot\": 1,\n \"ActionPrice\": self._l if _action == 0 else self._h,\n \"SL\": self.symbol(self.assets[i], \"stop_loss_max\"),\n \"PT\": _profit_taken,\n \"MaxDD\": 0,\n \"Swap\": 0.0,\n \"CloseTime\": \"\",\n \"ClosePrice\": 0.0,\n \"Point\": 0,\n \"Reward\": -self.symbol(self.assets[i], \"transaction_fee\"),\n \"DateDuration\": self._day,\n \"Status\": 0,\n \"LimitStep\": self.current_step,\n \"ActionStep\": -1,\n \"CloseStep\": -1,\n }\n self.transaction_limit_order.append(transaction)\n else:\n transaction = {\n \"Ticket\": self.ticket_id,\n \"Symbol\": self.assets[i],\n \"ActionTime\": self._t,\n \"Type\": _action,\n \"Lot\": 1,\n \"ActionPrice\": self._c,\n \"SL\": self.symbol(self.assets[i], \"stop_loss_max\"),\n \"PT\": _profit_taken,\n \"MaxDD\": 0,\n \"Swap\": 0.0,\n \"CloseTime\": \"\",\n \"ClosePrice\": 0.0,\n \"Point\": 0,\n \"Reward\": -self.symbol(self.assets[i], \"transaction_fee\"),\n \"DateDuration\": self._day,\n \"Status\": 0,\n \"LimitStep\": self.current_step,\n \"ActionStep\": self.current_step,\n \"CloseStep\": -1,\n }\n self.current_holding[i] += 1\n self.transaction_open_this_step.append(transaction)\n self.balance -= self.symbol(self.assets[i], \"transaction_fee\")\n self.account_information[\"cash\"].append(self.balance)\n self.actions_memory.append(_action)\n self.date_memory.append(self._get_date())\n self.transaction_live.append(transaction)\n\n return sum(rewards)\n\n def _calculate_reward(self, i, done):\n _total_reward = 0\n _max_draw_down = 0\n for tr in self.transaction_live:\n if tr[\"Symbol\"] == self.assets[i]:\n _point = self.symbol(self.assets[i], \"point\")\n # cash discount overnight\n if self._day > tr[\"DateDuration\"]:\n tr[\"DateDuration\"] = self._day\n tr[\"Reward\"] -= self.symbol(self.assets[i], \"over_night_penalty\")\n\n if tr[\"Type\"] == 0: # buy\n # stop loss trigger\n _sl_price = tr[\"ActionPrice\"] - tr[\"SL\"] / _point\n 
_pt_price = tr[\"ActionPrice\"] + tr[\"PT\"] / _point\n if done:\n p = (self._c - tr[\"ActionPrice\"]) * _point\n self._manage_transaction(tr, p, self._c, status=2)\n _total_reward += p\n elif self._l <= _sl_price:\n self._manage_transaction(tr, -tr[\"SL\"], _sl_price)\n _total_reward += -tr[\"SL\"]\n self.current_holding[i] -= 1\n elif self._h >= _pt_price:\n self._manage_transaction(tr, tr[\"PT\"], _pt_price)\n _total_reward += tr[\"PT\"]\n self.current_holding[i] -= 1\n else: # still open\n self.current_draw_downs[i] = int(\n (self._l - tr[\"ActionPrice\"]) * _point\n )\n _max_draw_down += self.current_draw_downs[i]\n if (\n self.current_draw_downs[i] < 0\n and tr[\"MaxDD\"] > self.current_draw_downs[i]\n ):\n tr[\"MaxDD\"] = self.current_draw_downs[i]\n\n elif tr[\"Type\"] == 1: # sell\n # stop loss trigger\n _sl_price = tr[\"ActionPrice\"] + tr[\"SL\"] / _point\n _pt_price = tr[\"ActionPrice\"] - tr[\"PT\"] / _point\n if done:\n p = (tr[\"ActionPrice\"] - self._c) * _point\n self._manage_transaction(tr, p, self._c, status=2)\n _total_reward += p\n elif self._h >= _sl_price:\n self._manage_transaction(tr, -tr[\"SL\"], _sl_price)\n _total_reward += -tr[\"SL\"]\n self.current_holding[i] -= 1\n elif self._l <= _pt_price:\n self._manage_transaction(tr, tr[\"PT\"], _pt_price)\n _total_reward += tr[\"PT\"]\n self.current_holding[i] -= 1\n else:\n self.current_draw_downs[i] = int(\n (tr[\"ActionPrice\"] - self._h) * _point\n )\n _max_draw_down += self.current_draw_downs[i]\n if (\n self.current_draw_downs[i] < 0\n and tr[\"MaxDD\"] > self.current_draw_downs[i]\n ):\n tr[\"MaxDD\"] = self.current_draw_downs[i]\n\n if _max_draw_down > self.max_draw_downs[i]:\n self.max_draw_downs[i] = _max_draw_down\n\n return _total_reward\n\n def _limit_order_process(self, i, _action, done):\n for tr in self.transaction_limit_order:\n if tr[\"Symbol\"] == self.assets[i]:\n if tr[\"Type\"] != _action or done:\n self.transaction_limit_order.remove(tr)\n tr[\"Status\"] = 3\n tr[\"CloseStep\"] = self.current_step\n self.transaction_history.append(tr)\n elif (tr[\"ActionPrice\"] >= self._l and _action == 0) or (\n tr[\"ActionPrice\"] <= self._h and _action == 1\n ):\n tr[\"ActionStep\"] = self.current_step\n self.current_holding[i] += 1\n self.balance -= self.symbol(self.assets[i], \"transaction_fee\")\n self.account_information[\"cash\"].append(self.balance)\n self.actions_memory.append(_action)\n self.date_memory.append(self._get_date())\n self.transaction_limit_order.remove(tr)\n self.transaction_live.append(tr)\n self.transaction_open_this_step.append(tr)\n elif (\n tr[\"LimitStep\"]\n + self.symbol(self.assets[i], \"limit_order_expiration\")\n > self.current_step\n ):\n tr[\"CloseStep\"] = self.current_step\n tr[\"Status\"] = 4\n self.transaction_limit_order.remove(tr)\n self.transaction_history.append(tr)\n\n def _manage_transaction(self, tr, _p, close_price, status=1):\n self.transaction_live.remove(tr)\n tr[\"ClosePrice\"] = close_price\n tr[\"Point\"] = int(_p)\n tr[\"Reward\"] = int(tr[\"Reward\"] + _p)\n tr[\"Status\"] = status\n tr[\"CloseTime\"] = self._t\n self.balance += int(tr[\"Reward\"])\n self.account_information[\"cash\"].append(self.balance)\n self.actions_memory.append(tr[\"Type\"])\n self.date_memory.append(self._get_date())\n self.total_equity -= int(abs(tr[\"Reward\"]))\n self.transaction_close_this_step.append(tr)\n self.transaction_history.append(tr)\n\n def step(self, actions):\n\n done = self.current_step >= len(self.dt_datetime)\n if done:\n self.done_information += f\"Episode: 
{self.episode} Balance: {self.balance} Step: {self.current_step}\\n\"\n self.visualization = True\n\n # TODO: print out sharpe and metric infos per episode\n\n # when done just return current environment things\n return (\n np.array(self.obs).astype(np.float32),\n self.reward,\n done,\n {\"Close\": self.transaction_close_this_step},)\n\n self.reward = self._take_action(actions, done)\n\n if self._day > self.current_day:\n self.current_day = self._day\n self.balance -= self.over_night_cash_penalty\n self.account_information[\"cash\"].append(self.balance)\n self.actions_memory.append(0) # hold\n self.date_memory.append(self._get_date())\n\n if self.balance != 0:\n self.max_draw_down_pct = abs(sum(self.max_draw_downs) / self.balance * 100)\n\n # no action anymore\n self.obs = (\n [self.balance, self.max_draw_down_pct]\n + self.current_holding\n + self.current_draw_downs\n + self.get_observation(self.current_step)\n )\n # Execute one time step within the environment\n self.current_step += 1\n\n return (\n np.array(self.obs).astype(np.float32),\n self.reward,\n done,\n {\"Close\": self.transaction_close_this_step},\n )\n\n def get_observation(self, _step, _iter=0, col=None):\n # print(\"len of cached is\", len(self.cached_data))\n if col is None:\n return self.cached_data[_step]\n if col == \"_day\":\n return self.cached_time_serial[_step][1]\n\n elif col == \"_time\":\n return self.cached_time_serial[_step][0]\n col_pos = -1\n for i, _symbol in enumerate(self.observation_list):\n if _symbol == col:\n col_pos = i\n break\n assert col_pos >= 0\n return self.cached_data[_step][_iter * len(self.observation_list) + col_pos]\n\n def get_observation_vector(self, _dt, cols=None):\n cols = self.observation_list\n v = []\n for a in self.assets:\n subset = self.df.query(\n f'{self.asset_col} == \"{a}\" & {self.time_col} == \"{_dt}\"'\n )\n assert not subset.empty\n v += subset.loc[_dt, cols].tolist()\n assert len(v) == len(self.assets) * len(cols)\n return v\n\n def reset(self):\n print(\"Resetting environment...\")\n # Reset the state of the environment to an initial state\n\n if self.random_start:\n self.current_step = random.choice(range(int(len(self.dt_datetime) * 0.5)))\n else:\n self.current_step = 0\n\n self.equity_list = [0] * len(self.assets)\n self.balance = self.balance_initial\n self.total_equity = self.balance + sum(self.equity_list)\n self.ticket_id = 0\n self.transaction_live = []\n self.transaction_history = []\n self.transaction_limit_order = []\n self.current_draw_downs = [0.0] * len(self.assets)\n self.max_draw_downs = [0.0] * len(self.assets)\n self.max_draw_down_pct = sum(self.max_draw_downs) / self.balance * 100\n self.episode += 1\n self.current_holding = [0] * len(self.assets)\n self.transaction_open_this_step = []\n self.transaction_close_this_step = []\n self.current_day = 0\n self.done_information = \"\"\n self.log_header = True\n self.visualization = False\n self.reward = 0\n self.cached_data = [\n self.get_observation_vector(_dt) for _dt in self.dt_datetime\n ]\n\n self.obs = (\n [self.balance, self.max_draw_down_pct]\n + self.current_holding\n + self.current_draw_downs\n + self.get_observation(self.current_step)\n )\n self.date_memory = [self._get_date()]\n self.actions_memory = [2]\n self.account_information = {\n \"cash\": [self.balance],\n }\n\n _space = (\n [self.balance, self.max_draw_down_pct]\n + [0] * len(self.assets)\n + [0] * len(self.assets)\n + self.get_observation(self.current_step)\n )\n return np.array(_space).astype(np.float32)\n\n def render(self, 
mode=\"human\", title=None, **kwargs):\n # Render the environment to the screen\n if mode in (\"human\", \"file\"):\n printout = mode == \"human\"\n pm = {\n \"log_header\": self.log_header,\n \"log_filename\": self.log_filename,\n \"printout\": printout,\n \"balance\": self.balance,\n \"balance_initial\": self.balance_initial,\n \"transaction_close_this_step\": self.transaction_close_this_step,\n \"done_information\": self.done_information,\n }\n render_to_file(**pm)\n if self.log_header:\n self.log_header = False\n elif mode == \"graph\" and self.visualization:\n print(\"plotting...\")\n p = TradingChart(self.df, self.transaction_history)\n p.plot()\n\n def save_asset_memory(self):\n #TODO: handle multiple tickers\n cash_list = self.account_information[\"cash\"]\n date_list = self.date_memory\n df_asset_memory = pd.DataFrame({\"date\": date_list, \"cash\": cash_list})\n\n return df_asset_memory\n\n def save_action_memory(self):\n #TODO: handle multiple tickers\n date_list = self.date_memory\n action_list = self.actions_memory\n\n df_actions = pd.DataFrame({\"date\": date_list, \"actions\": action_list})\n df_actions = df_actions.set_index(\"date\")\n return df_actions\n def get_sb_env(self):\n e = DummyVecEnv([lambda: self])\n obs = e.reset()\n return e, obs\n"
] | [
[
"numpy.array",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
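The `tgym` docstring in the entry above states (point 10) that ProfitTaken = fraction_action * max_profit_taken + SL, and `_take_action` implements exactly that with `math.floor`/`math.ceil`. The few lines below replay that decoding on a single made-up action value, with hypothetical per-pair config numbers, to make the mapping from the continuous Box action to (Buy/Sell/Nothing, profit target) concrete.

```python
import math

# Hypothetical per-pair config values (in points), standing in for
# cf["symbol"][asset]["profit_taken_max"] and ["stop_loss_max"].
profit_taken_max = 500
stop_loss_max = 200

x = 0.73                      # raw action for one asset, sampled from Box(low=0, high=3)
_action = math.floor(x)       # integer part: 0 = Buy, 1 = Sell, 2 = Nothing
_profit_taken = math.ceil((x - _action) * profit_taken_max) + stop_loss_max

print(_action, _profit_taken)  # 0 (Buy) with a 565-point profit target
```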
kco4776/Self_Explaining_Structures_Improve_NLP_Models | [
"dbc2d852cbe8bffd22b18425e9a4bac00d557eeb"
] | [
"explain/model.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@file : model.py\n@author: zijun\n@contact : [email protected]\n@date : 2020/11/17 14:57\n@version: 1.0\n@desc : \n\"\"\"\nimport torch\nfrom torch import nn\nfrom transformers import AutoModel, AutoConfig\n\nfrom datasets.collate_functions import collate_to_max_length\n\n\nclass ExplainableModel(nn.Module):\n def __init__(self, bert_dir):\n super().__init__()\n self.bert_config = AutoConfig.from_pretrained(bert_dir, output_hidden_states=False, num_labels=3)\n self.intermediate = AutoModel.from_pretrained(bert_dir)\n self.span_info_collect = SICModel(self.bert_config.hidden_size)\n self.interpretation = InterpretationModel(self.bert_config.hidden_size)\n self.output = nn.Linear(self.bert_config.hidden_size, self.bert_config.num_labels)\n\n def forward(self, input_ids, start_indexs, end_indexs, span_masks):\n # generate mask\n attention_mask = (input_ids != 1).long()\n # intermediate layer\n hidden_states = self.intermediate(input_ids, attention_mask=attention_mask).last_hidden_state # output.shape = (bs, length, hidden_size)\n # span info collecting layer(SIC)\n h_ij = self.span_info_collect(hidden_states, start_indexs, end_indexs)\n # interpretation layer\n H, a_ij = self.interpretation(h_ij, span_masks)\n # output layer\n out = self.output(H)\n return out, a_ij\n\n\nclass SICModel(nn.Module):\n def __init__(self, hidden_size):\n super().__init__()\n self.hidden_size = hidden_size\n\n self.W_1 = nn.Linear(hidden_size, hidden_size)\n self.W_2 = nn.Linear(hidden_size, hidden_size)\n self.W_3 = nn.Linear(hidden_size, hidden_size)\n self.W_4 = nn.Linear(hidden_size, hidden_size)\n\n def forward(self, hidden_states, start_indexs, end_indexs):\n W1_h = self.W_1(hidden_states) # (bs, length, hidden_size)\n W2_h = self.W_2(hidden_states)\n W3_h = self.W_3(hidden_states)\n W4_h = self.W_4(hidden_states)\n\n W1_hi_emb = torch.index_select(W1_h, 1, start_indexs) # (bs, span_num, hidden_size)\n W2_hj_emb = torch.index_select(W2_h, 1, end_indexs)\n W3_hi_start_emb = torch.index_select(W3_h, 1, start_indexs)\n W3_hi_end_emb = torch.index_select(W3_h, 1, end_indexs)\n W4_hj_start_emb = torch.index_select(W4_h, 1, start_indexs)\n W4_hj_end_emb = torch.index_select(W4_h, 1, end_indexs)\n\n # [w1*hi, w2*hj, w3(hi-hj), w4(hi⊗hj)]\n span = W1_hi_emb + W2_hj_emb + (W3_hi_start_emb - W3_hi_end_emb) + torch.mul(W4_hj_start_emb, W4_hj_end_emb)\n h_ij = torch.tanh(span)\n return h_ij\n\n\nclass InterpretationModel(nn.Module):\n def __init__(self, hidden_size):\n super().__init__()\n self.h_t = nn.Linear(hidden_size, 1)\n\n def forward(self, h_ij, span_masks):\n o_ij = self.h_t(h_ij).squeeze(-1) # (ba, span_num)\n # mask illegal span\n o_ij = o_ij - span_masks\n # normalize all a_ij, a_ij sum = 1\n a_ij = nn.functional.softmax(o_ij, dim=1)\n # weight average span representation to get H\n H = (a_ij.unsqueeze(-1) * h_ij).sum(dim=1) # (bs, hidden_size)\n return H, a_ij\n"
] | [
[
"torch.nn.functional.softmax",
"torch.tanh",
"torch.nn.Linear",
"torch.mul",
"torch.index_select"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
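The self-explaining model in the entry above stacks a span-information-collecting layer (`SICModel`) and an interpretation layer on top of a transformer encoder. The shape sketch below runs just those two sub-modules on random hidden states to make the tensor flow explicit; it assumes they are importable as `from explain.model import ...` (matching the recorded file path), and the batch, sequence, and hidden sizes are arbitrary.

```python
import torch

from explain.model import InterpretationModel, SICModel  # path recorded in this entry

bs, length, hidden_size = 2, 6, 16
sic = SICModel(hidden_size)
interp = InterpretationModel(hidden_size)

# Enumerate every span (i, j) with i <= j over the sequence.
starts, ends = zip(*[(i, j) for i in range(length) for j in range(i, length)])
start_indexs = torch.tensor(starts)        # (span_num,)
end_indexs = torch.tensor(ends)            # (span_num,)

hidden_states = torch.randn(bs, length, hidden_size)  # stand-in for the encoder output
span_masks = torch.zeros(bs, len(starts))  # 0 = legal span; a large value (e.g. 1e6) masks a span

h_ij = sic(hidden_states, start_indexs, end_indexs)   # (bs, span_num, hidden_size)
H, a_ij = interp(h_ij, span_masks)                    # (bs, hidden_size), (bs, span_num)
print(h_ij.shape, H.shape, a_ij.shape)
```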
dpstart/slowdl | [
"c90d56f9272b791bc2dfcc785100d0a6e2a72bff"
] | [
"slowgrad/utils.py"
] | [
"import numpy as np\n\n\ndef layer_init_uniform(*x):\n ret = np.random.uniform(-1., 1., size=x) / np.sqrt(np.prod(x))\n return ret.astype(np.float32)\n\n\ndef fetch(url):\n import requests, os, hashlib, tempfile\n fp = os.path.join(tempfile.gettempdir(),\n hashlib.md5(url.encode('utf-8')).hexdigest())\n if os.path.isfile(fp) and os.stat(fp).st_size > 0:\n with open(fp, \"rb\") as f:\n dat = f.read()\n else:\n print(\"fetching %s\" % url)\n dat = requests.get(url).content\n with open(fp + \".tmp\", \"wb\") as f:\n f.write(dat)\n os.rename(fp + \".tmp\", fp)\n return dat\n"
] | [
[
"numpy.random.uniform",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
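The slowgrad utilities in the entry above are a fan-in-scaled uniform initializer and a tiny on-disk download cache. A short usage sketch follows, assuming the module is importable as `slowgrad.utils` (matching the recorded path); the URL is purely illustrative of something `fetch` would cache in the temp directory under the MD5 of the URL.

```python
from slowgrad.utils import fetch, layer_init_uniform

# Weight matrix for a 784 -> 128 layer: uniform(-1, 1) scaled by 1/sqrt(784 * 128).
w = layer_init_uniform(784, 128)
print(w.shape, w.dtype)  # (784, 128) float32

# First call downloads and caches; later calls read the cached file back.
dat = fetch("https://example.com/some-dataset.bin")  # illustrative URL only
print(len(dat), "bytes")
```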
H2O-YT/manim | [
"ae65662823a95b1110536fae34e06d5c540cc424"
] | [
"manimlib/mobject/coordinate_systems.py"
] | [
"import numpy as np\nimport numbers\n\nfrom manimlib.constants import *\nfrom manimlib.mobject.functions import ParametricCurve\nfrom manimlib.mobject.geometry import Arrow\nfrom manimlib.mobject.geometry import Line\nfrom manimlib.mobject.geometry import DashedLine\nfrom manimlib.mobject.geometry import Rectangle\nfrom manimlib.mobject.number_line import NumberLine\nfrom manimlib.mobject.svg.tex_mobject import Tex\nfrom manimlib.mobject.types.vectorized_mobject import VGroup\nfrom manimlib.utils.config_ops import digest_config\nfrom manimlib.utils.config_ops import merge_dicts_recursively\nfrom manimlib.utils.simple_functions import binary_search\nfrom manimlib.utils.space_ops import angle_of_vector\nfrom manimlib.utils.space_ops import get_norm\nfrom manimlib.utils.space_ops import rotate_vector\n\nEPSILON = 1e-8\n\n\nclass CoordinateSystem():\n \"\"\"\n Abstract class for Axes and NumberPlane\n \"\"\"\n CONFIG = {\n \"dimension\": 2,\n \"default_x_range\": [-8.0, 8.0, 1.0],\n \"default_y_range\": [-4.0, 4.0, 1.0],\n \"width\": FRAME_WIDTH,\n \"height\": FRAME_HEIGHT,\n \"num_sampled_graph_points_per_tick\": 20,\n }\n\n def __init__(self, **kwargs):\n digest_config(self, kwargs)\n self.x_range = np.array(self.default_x_range)\n self.y_range = np.array(self.default_y_range)\n\n def coords_to_point(self, *coords):\n raise Exception(\"Not implemented\")\n\n def point_to_coords(self, point):\n raise Exception(\"Not implemented\")\n\n def c2p(self, *coords):\n \"\"\"Abbreviation for coords_to_point\"\"\"\n return self.coords_to_point(*coords)\n\n def p2c(self, point):\n \"\"\"Abbreviation for point_to_coords\"\"\"\n return self.point_to_coords(point)\n\n def get_origin(self):\n return self.c2p(*[0] * self.dimension)\n\n def get_axes(self):\n raise Exception(\"Not implemented\")\n\n def get_all_ranges(self):\n raise Exception(\"Not implemented\")\n\n def get_axis(self, index):\n return self.get_axes()[index]\n\n def get_x_axis(self):\n return self.get_axis(0)\n\n def get_y_axis(self):\n return self.get_axis(1)\n\n def get_z_axis(self):\n return self.get_axis(2)\n\n def get_x_axis_label(self, label_tex, edge=RIGHT, direction=DL, **kwargs):\n return self.get_axis_label(\n label_tex, self.get_x_axis(),\n edge, direction, **kwargs\n )\n\n def get_y_axis_label(self, label_tex, edge=UP, direction=DR, **kwargs):\n return self.get_axis_label(\n label_tex, self.get_y_axis(),\n edge, direction, **kwargs\n )\n\n def get_axis_label(self, label_tex, axis, edge, direction, buff=MED_SMALL_BUFF):\n label = Tex(label_tex)\n label.next_to(\n axis.get_edge_center(edge), direction,\n buff=buff\n )\n label.shift_onto_screen(buff=MED_SMALL_BUFF)\n return label\n\n def get_axis_labels(self, x_label_tex=\"x\", y_label_tex=\"y\"):\n self.axis_labels = VGroup(\n self.get_x_axis_label(x_label_tex),\n self.get_y_axis_label(y_label_tex),\n )\n return self.axis_labels\n\n def get_line_from_axis_to_point(self, index, point,\n line_func=DashedLine,\n color=GREY_A,\n stroke_width=2):\n axis = self.get_axis(index)\n line = line_func(axis.get_projection(point), point)\n line.set_stroke(color, stroke_width)\n return line\n\n def get_v_line(self, point, **kwargs):\n return self.get_line_from_axis_to_point(0, point, **kwargs)\n\n def get_h_line(self, point, **kwargs):\n return self.get_line_from_axis_to_point(1, point, **kwargs)\n\n # Useful for graphing\n def get_graph(self, function, x_range=None, **kwargs):\n t_range = np.array(self.x_range, dtype=float)\n if x_range is not None:\n t_range[:len(x_range)] = x_range\n # For axes, 
the third coordinate of x_range indicates\n # tick frequency. But for functions, it indicates a\n # sample frequency\n if x_range is None or len(x_range) < 3:\n t_range[2] /= self.num_sampled_graph_points_per_tick\n\n graph = ParametricCurve(\n lambda t: self.c2p(t, function(t)),\n t_range=t_range,\n **kwargs\n )\n graph.underlying_function = function\n graph.x_range = x_range\n return graph\n\n def get_parametric_curve(self, function, **kwargs):\n dim = self.dimension\n graph = ParametricCurve(\n lambda t: self.coords_to_point(*function(t)[:dim]),\n **kwargs\n )\n graph.underlying_function = function\n return graph\n\n def input_to_graph_point(self, x, graph):\n if hasattr(graph, \"underlying_function\"):\n return self.coords_to_point(x, graph.underlying_function(x))\n else:\n alpha = binary_search(\n function=lambda a: self.point_to_coords(\n graph.quick_point_from_proportion(a)\n )[0],\n target=x,\n lower_bound=self.x_range[0],\n upper_bound=self.x_range[1],\n )\n if alpha is not None:\n return graph.quick_point_from_proportion(alpha)\n else:\n return None\n\n def i2gp(self, x, graph):\n \"\"\"\n Alias for input_to_graph_point\n \"\"\"\n return self.input_to_graph_point(x, graph)\n\n def get_graph_label(self,\n graph,\n label=\"f(x)\",\n x=None,\n direction=RIGHT,\n buff=MED_SMALL_BUFF,\n color=None):\n if isinstance(label, str):\n label = Tex(label)\n if color is None:\n label.match_color(graph)\n if x is None:\n # Searching from the right, find a point\n # whose y value is in bounds\n max_y = FRAME_Y_RADIUS - label.get_height()\n max_x = FRAME_X_RADIUS - label.get_width()\n for x0 in np.arange(*self.x_range)[::-1]:\n pt = self.i2gp(x0, graph)\n if abs(pt[0]) < max_x and abs(pt[1]) < max_y:\n x = x0\n break\n if x is None:\n x = self.x_range[1]\n\n point = self.input_to_graph_point(x, graph)\n angle = self.angle_of_tangent(x, graph)\n normal = rotate_vector(RIGHT, angle + 90 * DEGREES)\n if normal[1] < 0:\n normal *= -1\n label.next_to(point, normal, buff=buff)\n label.shift_onto_screen()\n return label\n\n def get_v_line_to_graph(self, x, graph, **kwargs):\n return self.get_v_line(self.i2gp(x, graph), **kwargs)\n\n def get_h_line_to_graph(self, x, graph, **kwargs):\n return self.get_h_line(self.i2gp(x, graph), **kwargs)\n\n # For calculus\n def angle_of_tangent(self, x, graph, dx=EPSILON):\n p0 = self.input_to_graph_point(x, graph)\n p1 = self.input_to_graph_point(x + dx, graph)\n return angle_of_vector(p1 - p0)\n\n def slope_of_tangent(self, x, graph, **kwargs):\n return np.tan(self.angle_of_tangent(x, graph, **kwargs))\n\n def get_tangent_line(self, x, graph, length=5, line_func=Line, dx=EPSILON, **kwargs):\n line = line_func(LEFT, RIGHT, **kwargs)\n line.set_width(length)\n line.rotate(self.angle_of_tangent(x, graph, dx=dx))\n line.move_to(self.input_to_graph_point(x, graph))\n return line\n\n def get_riemann_rectangles(self,\n graph,\n x_range=None,\n dx=None,\n input_sample_type=\"left\",\n stroke_width=1,\n stroke_color=BLACK,\n fill_opacity=1,\n colors=(BLUE, GREEN),\n show_signed_area=True):\n if x_range is None:\n x_range = self.x_range[:2]\n if dx is None:\n dx = self.x_range[2]\n if len(x_range) < 3:\n x_range = [*x_range, dx]\n\n rects = []\n xs = np.arange(*x_range)\n for x0, x1 in zip(xs, xs[1:]):\n if input_sample_type == \"left\":\n sample = x0\n elif input_sample_type == \"right\":\n sample = x1\n elif input_sample_type == \"center\":\n sample = 0.5 * x0 + 0.5 * x1\n else:\n raise Exception(\"Invalid input sample type\")\n height = get_norm(\n self.i2gp(sample, graph) 
- self.c2p(sample, 0)\n )\n rect = Rectangle(width=x1 - x0, height=height)\n rect.move_to(self.c2p(x0, 0), DL)\n rects.append(rect)\n result = VGroup(*rects)\n result.set_submobject_colors_by_gradient(*colors)\n result.set_style(\n stroke_width=stroke_width,\n stroke_color=stroke_color,\n fill_opacity=fill_opacity,\n )\n return result\n\n\nclass Axes(VGroup, CoordinateSystem):\n CONFIG = {\n \"axis_config\": {\n \"include_tip\": True,\n \"numbers_to_exclude\": [0],\n },\n \"x_axis_config\": {},\n \"y_axis_config\": {\n \"line_to_number_direction\": LEFT,\n },\n \"height\": FRAME_HEIGHT - 2,\n \"width\": FRAME_WIDTH - 2,\n }\n\n def __init__(self,\n x_range=None,\n y_range=None,\n **kwargs):\n CoordinateSystem.__init__(self, **kwargs)\n VGroup.__init__(self, **kwargs)\n\n if x_range is not None:\n self.x_range[:len(x_range)] = x_range\n if y_range is not None:\n self.y_range[:len(y_range)] = y_range\n\n self.x_axis = self.create_axis(\n self.x_range, self.x_axis_config, self.width,\n )\n self.y_axis = self.create_axis(\n self.y_range, self.y_axis_config, self.height\n )\n self.y_axis.rotate(90 * DEGREES, about_point=ORIGIN)\n # Add as a separate group in case various other\n # mobjects are added to self, as for example in\n # NumberPlane below\n self.axes = VGroup(self.x_axis, self.y_axis)\n self.add(*self.axes)\n self.center()\n\n def create_axis(self, range_terms, axis_config, length):\n new_config = merge_dicts_recursively(self.axis_config, axis_config)\n new_config[\"width\"] = length\n axis = NumberLine(range_terms, **new_config)\n axis.shift(-axis.n2p(0))\n return axis\n\n def coords_to_point(self, *coords):\n origin = self.x_axis.number_to_point(0)\n result = origin.copy()\n for axis, coord in zip(self.get_axes(), coords):\n result += (axis.number_to_point(coord) - origin)\n return result\n\n def point_to_coords(self, point):\n return tuple([\n axis.point_to_number(point)\n for axis in self.get_axes()\n ])\n\n def get_axes(self):\n return self.axes\n\n def get_all_ranges(self):\n return [self.x_range, self.y_range]\n\n def add_coordinate_labels(self,\n x_values=None,\n y_values=None,\n **kwargs):\n axes = self.get_axes()\n self.coordinate_labels = VGroup()\n for axis, values in zip(axes, [x_values, y_values]):\n labels = axis.add_numbers(values, **kwargs)\n self.coordinate_labels.add(labels)\n return self.coordinate_labels\n\n\nclass ThreeDAxes(Axes):\n CONFIG = {\n \"dimension\": 3,\n \"x_range\": np.array([-6.0, 6.0, 1.0]),\n \"y_range\": np.array([-5.0, 5.0, 1.0]),\n \"z_range\": np.array([-4.0, 4.0, 1.0]),\n \"z_axis_config\": {},\n \"z_normal\": DOWN,\n \"height\": None,\n \"width\": None,\n \"depth\": None,\n \"num_axis_pieces\": 20,\n \"gloss\": 0.5,\n }\n\n def __init__(self, x_range=None, y_range=None, z_range=None, **kwargs):\n Axes.__init__(self, x_range, y_range, **kwargs)\n if z_range is not None:\n self.z_range[:len(z_range)] = z_range\n\n z_axis = self.create_axis(\n self.z_range,\n self.z_axis_config,\n self.depth,\n )\n z_axis.rotate(-PI / 2, UP, about_point=ORIGIN)\n z_axis.rotate(\n angle_of_vector(self.z_normal), OUT,\n about_point=ORIGIN\n )\n z_axis.shift(self.x_axis.n2p(0))\n self.axes.add(z_axis)\n self.add(z_axis)\n self.z_axis = z_axis\n\n for axis in self.axes:\n axis.insert_n_curves(self.num_axis_pieces - 1)\n\n def get_all_ranges(self):\n return [self.x_range, self.y_range, self.z_range]\n\n\nclass NumberPlane(Axes):\n CONFIG = {\n \"axis_config\": {\n \"stroke_color\": WHITE,\n \"stroke_width\": 2,\n \"include_ticks\": False,\n \"include_tip\": False,\n 
\"line_to_number_buff\": SMALL_BUFF,\n \"line_to_number_direction\": DL,\n },\n \"y_axis_config\": {\n \"line_to_number_direction\": DL,\n },\n \"background_line_style\": {\n \"stroke_color\": BLUE_D,\n \"stroke_width\": 2,\n \"stroke_opacity\": 1,\n },\n \"height\": None,\n \"width\": None,\n # Defaults to a faded version of line_config\n \"faded_line_style\": None,\n \"faded_line_ratio\": 4,\n \"make_smooth_after_applying_functions\": True,\n }\n\n def __init__(self, x_range=None, y_range=None, **kwargs):\n super().__init__(x_range, y_range, **kwargs)\n self.init_background_lines()\n\n def init_background_lines(self):\n if self.faded_line_style is None:\n style = dict(self.background_line_style)\n # For anything numerical, like stroke_width\n # and stroke_opacity, chop it in half\n for key in style:\n if isinstance(style[key], numbers.Number):\n style[key] *= 0.5\n self.faded_line_style = style\n\n self.background_lines, self.faded_lines = self.get_lines()\n self.background_lines.set_style(**self.background_line_style)\n self.faded_lines.set_style(**self.faded_line_style)\n self.add_to_back(\n self.faded_lines,\n self.background_lines,\n )\n\n def get_lines(self):\n x_axis = self.get_x_axis()\n y_axis = self.get_y_axis()\n\n x_lines1, x_lines2 = self.get_lines_parallel_to_axis(x_axis, y_axis)\n y_lines1, y_lines2 = self.get_lines_parallel_to_axis(y_axis, x_axis)\n lines1 = VGroup(*x_lines1, *y_lines1)\n lines2 = VGroup(*x_lines2, *y_lines2)\n return lines1, lines2\n\n def get_lines_parallel_to_axis(self, axis1, axis2):\n freq = axis2.x_step\n ratio = self.faded_line_ratio\n line = Line(axis1.get_start(), axis1.get_end())\n dense_freq = (1 + ratio)\n step = (1 / dense_freq) * freq\n\n lines1 = VGroup()\n lines2 = VGroup()\n inputs = np.arange(axis2.x_min, axis2.x_max + step, step)\n for i, x in enumerate(inputs):\n new_line = line.copy()\n new_line.shift(axis2.n2p(x) - axis2.n2p(0))\n if i % (1 + ratio) == 0:\n lines1.add(new_line)\n else:\n lines2.add(new_line)\n return lines1, lines2\n\n def get_x_unit_size(self):\n return self.get_x_axis().get_unit_size()\n\n def get_y_unit_size(self):\n return self.get_x_axis().get_unit_size()\n\n def get_axes(self):\n return self.axes\n\n def get_vector(self, coords, **kwargs):\n kwargs[\"buff\"] = 0\n return Arrow(self.c2p(0, 0), self.c2p(*coords), **kwargs)\n\n def prepare_for_nonlinear_transform(self, num_inserted_curves=50):\n for mob in self.family_members_with_points():\n num_curves = mob.get_num_curves()\n if num_inserted_curves > num_curves:\n mob.insert_n_curves(num_inserted_curves - num_curves)\n mob.make_smooth_after_applying_functions = True\n return self\n\n\nclass ComplexPlane(NumberPlane):\n CONFIG = {\n \"color\": BLUE,\n \"line_frequency\": 1,\n }\n\n def number_to_point(self, number):\n number = complex(number)\n return self.coords_to_point(number.real, number.imag)\n\n def n2p(self, number):\n return self.number_to_point(number)\n\n def point_to_number(self, point):\n x, y = self.point_to_coords(point)\n return complex(x, y)\n\n def p2n(self, point):\n return self.point_to_number(point)\n\n def get_default_coordinate_values(self, skip_first=True):\n x_numbers = self.get_x_axis().get_tick_range()[1:]\n y_numbers = self.get_y_axis().get_tick_range()[1:]\n y_numbers = [complex(0, y) for y in y_numbers if y != 0]\n return [*x_numbers, *y_numbers]\n\n def add_coordinate_labels(self, numbers=None, skip_first=True, **kwargs):\n if numbers is None:\n numbers = self.get_default_coordinate_values(skip_first)\n\n self.coordinate_labels = 
VGroup()\n for number in numbers:\n z = complex(number)\n if abs(z.imag) > abs(z.real):\n axis = self.get_y_axis()\n value = z.imag\n kwargs[\"unit\"] = \"i\"\n else:\n axis = self.get_x_axis()\n value = z.real\n number_mob = axis.get_number_mobject(value, **kwargs)\n # For i and -i, remove the \"1\"\n if z.imag == 1:\n number_mob.remove(number_mob[0])\n if z.imag == -1:\n number_mob.remove(number_mob[1])\n number_mob[0].next_to(\n number_mob[1], LEFT,\n buff=number_mob[0].get_width() / 4\n )\n self.coordinate_labels.add(number_mob)\n self.add(self.coordinate_labels)\n return self\n"
] | [
[
"numpy.arange",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aligholami/kepler | [
"a272ac427e09892cd44ade70e910272c4f69c638"
] | [
"lib/models/ScanRefer2D/cap_satnt.py"
] | [
"# Show, Attend and Tell Captioning Lightning Module\n# Author: Ali Gholami (https://aligholami.github.io)\nimport torch.nn as nn\nfrom itertools import chain\nimport torch.optim as optim\nfrom torchvision import transforms\nfrom lib.data.vocab import VocabTools\nfrom torch.utils.data import DataLoader\nfrom lib.callbacks import CaptionVisualizationCallback\nfrom lib.data.ScanRefer2DLoader import ScanRefer2DDataset\nfrom lib.utils.common import aggregate_caption_predictions\nfrom lib.models.common.resnet_encoder import ResNet101Encoder\nfrom lib.models.common.lightning_module import LightningModule\nfrom lib.models.common.caption_decoder import AttentiveCaptionDecoder\nfrom lib.metrics.caption_generation import CaptionGenerationEvaluation\nfrom pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor, ModelCheckpoint\nimport os\n\nclass ScanRefer2DShowAttendAndTellCaptioning(LightningModule):\n def __init__(self, hparams, name):\n super().__init__()\n self.hparams = hparams\n self.build_vocab_tools()\n self.build_model()\n self.json_dump_path = os.path.join(self.hparams['experiment_path'], 'wandb', 'latest-run', 'files', 'json_dumps')\n\n def forward(self, image_batch, caption_batch, len_batch, mode):\n \"\"\"\n Calls the Show, Attend and Tell model forward to perform one step of decoding. \n later the entire decoding procedure should be moved to the super module.\n step_output (before argmax): [batch_size, num_word_token_classes] \n step_hidden: [batch_size, hidden_size]\n \"\"\"\n encoded_images = self.show(image_batch)\n predicted_captions = self.attend_and_tell(init_h=encoded_images, labels=caption_batch, lens=len_batch, mode=mode)\n\n return predicted_captions\n\n def decouple_batch(self, batch):\n color, targets = batch\n image_batch = color\n caption_batch = targets['caption']\n len_batch = targets['caption_len']\n id_batch = targets['sample_id']\n\n return image_batch, caption_batch, len_batch, id_batch\n\n def on_train_start(self) -> None:\n self.evaluate_captions = CaptionGenerationEvaluation(self.vocab_tools, self.hparams['preds_path'])\n\n def on_train_epoch_start(self):\n self.dtrain = {\n 'gts': [],\n 'preds': [],\n 'lens': [],\n 'ids': []\n }\n \n def training_step(self, batch, batch_idx):\n \"\"\"\n image_batch: a batch of image tensors in RGB format, normalized with:\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]) and resized to width=224 and height=224.\n image_batch has the shape: [batch_size, 3, 224, 224]\n caption_batch: a batch of captions of shape [batch_size, padded_sequence_length]\n id_batch: merged scene_id, object_id, ann_id in the following format:\n '{}-{}_{}'.format(scene_id, object_id, ann_id)\n \"\"\"\n\n image_batch, caption_batch, len_batch, id_batch = batch\n decode_dict = self.forward(image_batch=image_batch, caption_batch=caption_batch, len_batch=len_batch, mode='train')\n self.store_preds(gts=caption_batch, preds=decode_dict['pred_indices'], lens=len_batch, ids=id_batch, mode='train')\n loss = self.simple_eval(preds=decode_dict['pred_scores'], labels=caption_batch, lens=len_batch, mode='train')\n self.log('train_ce_loss', loss, prog_bar=True, on_epoch=True, on_step=False)\n\n return dict(\n loss=loss\n )\n \n def training_step_end(self, training_step_outputs):\n return {'loss': training_step_outputs['loss'].sum()}\n\n def on_train_epoch_end(self, outputs):\n aggregations = aggregate_caption_predictions(self.dtrain)\n self.decoded_aggregations_train = self.decode_aggregations(aggregations)\n\n def 
on_validation_epoch_start(self):\n self.dval = {\n 'gts': [],\n 'preds': [],\n 'lens': [],\n 'ids': []\n }\n\n def validation_step(self, batch, batch_idx):\n \"\"\"\n image_batch: a batch of image tensors in RGB format, normalized with:\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]) and resized to width=224 and height=224.\n image_batch has the shape: [batch_size, 3, 224, 224]\n caption_batch: a batch of captions of shape [batch_size, padded_sequence_length]\n id_batch: merged scene_id, object_id, ann_id in the following format:\n '{}-{}_{}'.format(scene_id, object_id, ann_id)\n \"\"\"\n # image_batch, caption_batch, len_batch, id_batch = batch\n # decode_dict = self.forward(image_batch=image_batch, caption_batch=caption_batch, len_batch=len_batch, mode='val')\n # self.store_preds(gts=caption_batch, preds=decode_dict['pred_indices'], lens=len_batch, ids=id_batch, mode='val')\n # self.log('validation_nll_score', -1 * decode_dict['pred_log_probs'].mean(), prog_bar=True, on_epoch=True, on_step=False)\n\n def on_validation_epoch_end(self):\n # aggregations = aggregate_caption_predictions(self.dval)\n # self.decoded_aggregations_val = self.decode_aggregations(aggregations)\n # metrics = self.evaluate_captions(preds=self.decoded_aggregations_val['preds'], target=self.decoded_aggregations_val['labels'])\n # self.log_metrics(metrics, 'val')\n return None\n\n def test_step(self, batch, batch_idx):\n # TODO: Required standalone call to forward (because of callback system).\n self.validation_step(batch, batch_idx)\n \n def on_test_epoch_end(self):\n aggregations = aggregate_caption_predictions(self.dtest)\n self.decoded_aggregations_test = self.decode_aggregations(aggregations)\n metrics = self.evaluate_captions(preds=self.decoded_aggregations_test['preds'], target=self.decoded_aggregations_test['labels'])\n self.log_metrics(metrics, 'test')\n self.dump_metrics(metrics, dir_path=self.json_dump_path, file_name='scores.json')\n\n def log_metrics(self, metrics, mode):\n \"\"\"\n This should be moved to the higher level LightningModule.\n \"\"\"\n metrics = {'{}_{}'.format(k, mode): round(v, 3)\n for k, v in metrics.items()}\n\n _ = [self.log(k, v, on_epoch=True, on_step=False,\n prog_bar=True) for k, v in metrics.items()]\n\n def simple_eval(self, preds, labels, lens, mode):\n \"\"\"\n preds: [batch_size, sequence_length, self.vocab_tools.num_unique_tokens]\n labels: [batch_size, sequence_length]\n lens: [batch_size]\n Returns: batch accuracy for the entire unfolded sequence.\n Returns: batch cross entropy for the entire unfolded sequence.\n \"\"\"\n sequence_length = lens[0].item()\n preds = preds[:, 0:sequence_length, :]\n preds = preds.permute(0, 2, 1)\n labels = labels[:, 1:sequence_length+1] # skip sos and eos.\n loss = self.ce_loss(\n input=preds,\n target=labels\n )\n\n return loss\n\n def store_preds(self, gts, preds, lens, ids, mode):\n if mode == 'train':\n aggregator = self.dtrain\n elif mode == 'val':\n aggregator = self.dval\n elif mode == 'test':\n aggregator = self.dtest\n else:\n raise NotImplementedError(\"mode {} is not implemented.\".format(mode))\n\n aggregator['gts'].append(gts.tolist())\n aggregator['preds'].append(preds.tolist())\n aggregator['lens'].append(lens.tolist())\n aggregator['ids'].append(ids)\n\n def decode_aggregations(self, aggregations):\n decoded_aggregations = {}\n for name, aggregation in aggregations.items():\n decoded_aggregation = {}\n for k, v in aggregation.items():\n decoded_aggregation[k] = self.vocab_tools.untokenize(v)\n\n 
decoded_aggregations[name] = decoded_aggregation\n\n return decoded_aggregations\n\n def configure_callbacks(self):\n early_stop_callback = EarlyStopping(\n monitor=self.hparams['best_criteria'],\n mode='max',\n patience=15,\n strict=True,\n verbose=False,\n )\n\n model_checkpoint_callback = ModelCheckpoint(\n monitor=self.hparams['best_criteria'],\n mode='max',\n save_top_k=1,\n dirpath=self.hparams['checkpoints_path'],\n verbose=False,\n )\n\n caption_visualization_callback = CaptionVisualizationCallback(\n online_renders_path=self.hparams['online_renders_path'],\n num_visualize_ids=100,\n model_checkpointer=model_checkpoint_callback\n )\n\n lr_monitor_callback = LearningRateMonitor(\n logging_interval='step'\n )\n\n return None\n # return [lr_monitor_callback, early_stop_callback, model_checkpoint_callback, caption_visualization_callback]\n\n\n def configure_optimizers(self):\n\n if self.hparams['freeze_resnet']:\n for param in self.show.parameters():\n param.requires_grad = False\n params_to_optimize = self.tell.parameters()\n else:\n params_to_optimize = chain(self.tell.parameters(), self.show.parameters())\n \n optimizer = [optim.Adam(params=params_to_optimize, lr=self.hparams['optimizer.lr'])]\n LR_DECAY_STEP = [3, 7, 10, 20, 50, 90]\n LR_DECAY_RATE = 0.6\n lr_scheduler = [{\n 'scheduler': optim.lr_scheduler.MultiStepLR(\n optimizer=optimizer[0],\n milestones=LR_DECAY_STEP,\n gamma=LR_DECAY_RATE\n ),\n 'name': 'learning_rate',\n 'interval':'epoch',\n 'frequency': 1\n }]\n\n return optimizer, lr_scheduler\n\n def build_model(self) -> None:\n print(\"[INFO.MODEL_ARCHITECTURE]\")\n self.show = ResNet101Encoder(\n pretrained=self.hparams['pretrained'],\n show_progress=self.hparams['show_progress']\n )\n \n self.attend_and_tell = AttentiveCaptionDecoder(\n top_down_hidden_size=self.hparams['model.top_down_hidden_size'],\n eos_token=self.vocab_tools.eos_token_index,\n beam_width=self.hparams['beam_width'],\n embedding_size=self.hparams['embedding_size'],\n num_token_classes=self.vocab_tools.num_unique_tokens,\n embedding_weights=self.embeddings, \n decoder_hidden_size=self.hparams['decoder_hidden_size'],\n max_sequence_length=self.hparams['max_sequence_length'],\n visual_encoding_size=self.hparams['visual_encoding_size'],\n teacher_forcing_ratio=self.hparams['teacher_forcing_ratio']\n )\n\n self.ce_loss = nn.CrossEntropyLoss(\n ignore_index=self.vocab_tools.pad_token_index\n # weight=self.word_weights.cuda()\n )\n\n def build_vocab_tools(self) -> None:\n print(\"[INFO.VOCAB_TOOLS]\")\n self.vocab_tools = VocabTools(\n dataset_name='ScanRefer2D',\n train_samples=self.get_target_samples(self.hparams['paths.train_split_json']),\n val_samples=self.get_target_samples(self.hparams['paths.val_split_json']),\n vocab_path=self.hparams['vocab_path'],\n embedding_path=self.hparams['embedding_path'],\n max_length=self.hparams['max_sequence_length']\n )\n self.embeddings = self.vocab_tools.build_and_init_embedding(mode=self.hparams['embedding_type'])\n self.word_weights = self.vocab_tools.build_word_weights(mode=self.hparams['word_weight_mode'])\n\n def prepare_data(self) -> None:\n \n sample_transforms = {\n 'image_transforms': transforms.Compose([\n transforms.ToTensor(),\n transforms.CenterCrop((240, 320)),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n }\n\n if self.hparams['portion'] == 'subset':\n target_train = self.get_target_samples(self.hparams['paths.train_split_json'])[:800]\n target_val = 
self.get_target_samples(self.hparams['paths.val_split_json'])[:500]\n \n elif self.hparams['portion'] == 'full':\n target_train = self.get_target_samples(self.hparams['paths.train_split_json'])\n target_val = self.get_target_samples(self.hparams['paths.val_split_json'])\n else:\n raise NotImplementedError('Dataset Porition {} is Invalid.'.format(self.hparams['portion']))\n\n self.train_dset = ScanRefer2DDataset(\n hparams=self.hparams, \n phase='train',\n target_samples=target_train,\n transforms=sample_transforms,\n vocab_tools=self.vocab_tools\n )\n\n self.val_dset = ScanRefer2DDataset(\n hparams=self.hparams, \n phase='val',\n target_samples=target_val,\n transforms=sample_transforms,\n vocab_tools=self.vocab_tools\n )\n\n def _build_dataloader(self, dset, mode):\n return DataLoader(\n dset,\n batch_size=self.hparams[\"batch_size\"],\n shuffle=mode == \"train\",\n num_workers=self.hparams[\"num_workers\"],\n pin_memory=True,\n drop_last=mode == \"train\",\n collate_fn=dset._collate\n )\n\n def train_dataloader(self):\n return self._build_dataloader(self.train_dset, mode=\"train\")\n\n def val_dataloader(self):\n return self._build_dataloader(self.val_dset, mode=\"val\")\n \n def test_dataloader(self):\n return self._build_dataloader(self.val_dset, mode=\"val\")"
] | [
[
"torch.optim.Adam",
"torch.nn.CrossEntropyLoss",
"torch.utils.data.DataLoader",
"torch.optim.lr_scheduler.MultiStepLR"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Exlsunshine/mlprodict | [
"9ea1a0fc85726822cae82c0b27b23b46f9ef351a",
"9ea1a0fc85726822cae82c0b27b23b46f9ef351a",
"9ea1a0fc85726822cae82c0b27b23b46f9ef351a",
"9ea1a0fc85726822cae82c0b27b23b46f9ef351a",
"9ea1a0fc85726822cae82c0b27b23b46f9ef351a"
] | [
"_unittests/ut_onnxrt/test_onnxrt_validate_benchmark.py",
"mlprodict/onnxrt/ops_cpu/op_min.py",
"_unittests/ut_onnxrt/test_onnxrt_side_by_side.py",
"mlprodict/sklapi/onnx_transformer.py",
"_unittests/ut_onnxrt/test_rt_valid_model_labels.py"
] | [
"\"\"\"\n@brief test log(time=65s)\n\"\"\"\nimport os\nimport unittest\nfrom logging import getLogger\nfrom pandas import DataFrame\nfrom pyquickhelper.loghelper import fLOG\nfrom pyquickhelper.pycode import get_temp_folder, ExtTestCase\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.utils.testing import ignore_warnings\nfrom mlprodict.onnxrt.validate import enumerate_validated_operator_opsets, summary_report\n\n\nclass TestOnnxrtValidateBenchmark(ExtTestCase):\n\n @ignore_warnings(category=(UserWarning, ConvergenceWarning, RuntimeWarning))\n def test_validate_sklearn_operators_benchmark(self):\n fLOG(__file__, self._testMethodName, OutputPrint=__name__ == \"__main__\")\n logger = getLogger('skl2onnx')\n logger.disabled = True\n verbose = 1 if __name__ == \"__main__\" else 0\n temp = get_temp_folder(\n __file__, \"temp_validate_sklearn_operators_benchmark\")\n rows = list(enumerate_validated_operator_opsets(\n verbose, models={\"LinearRegression\"}, opset_min=10,\n benchmark=True, fLOG=fLOG))\n self.assertGreater(len(rows), 1)\n df = DataFrame(rows)\n for col in ['skl', 'batch']:\n self.assertIn('lambda-' + col, df.columns)\n for col in ['1', '10']:\n self.assertIn('time-ratio-N=' + col, df.columns)\n self.assertGreater(df.shape[1], 1)\n self.assertGreater(df.loc[0, \"tostring_time\"], 0)\n piv = summary_report(df)\n self.assertGreater(piv.shape[1], 1)\n self.assertIn('RT/SKL-N=1', piv.columns)\n self.assertNotIn('RT/SKL-N=10', piv.columns)\n self.assertIn('N=10', piv.columns)\n fLOG(\"output results\")\n df.to_excel(os.path.join(\n temp, \"sklearn_opsets_report.xlsx\"), index=False)\n piv.to_excel(os.path.join(\n temp, \"sklearn_opsets_summary.xlsx\"), index=False)\n\n @ignore_warnings(category=(UserWarning, ConvergenceWarning, RuntimeWarning))\n def test_validate_sklearn_operators_benchmark_all(self):\n fLOG(__file__, self._testMethodName, OutputPrint=__name__ == \"__main__\")\n logger = getLogger('skl2onnx')\n logger.disabled = True\n verbose = 11 if __name__ == \"__main__\" else 0\n temp = get_temp_folder(\n __file__, \"temp_validate_sklearn_operators_benchmark\")\n rows = []\n for row in enumerate_validated_operator_opsets(\n verbose, opset_min=10, benchmark=True,\n fLOG=fLOG, runtime=\"onnxruntime1\",\n versions=True):\n rows.append(row)\n if len(rows) > 40:\n break\n self.assertGreater(len(rows), 1)\n df = DataFrame(rows)\n for col in ['skl', 'batch']:\n self.assertIn('lambda-' + col, df.columns)\n for col in ['1', '10']:\n self.assertIn('time-ratio-N=' + col, df.columns)\n self.assertGreater(df.shape[1], 1)\n self.assertGreater(df.loc[0, \"tostring_time\"], 0)\n piv = summary_report(df)\n self.assertGreater(piv.shape[1], 1)\n self.assertIn('RT/SKL-N=1', piv.columns)\n self.assertNotIn('RT/SKL-N=10', piv.columns)\n self.assertIn('N=10', piv.columns)\n fLOG(\"output results\")\n self.assertIn('v_numpy', df.columns)\n df.to_excel(os.path.join(\n temp, \"sklearn_opsets_report.xlsx\"), index=False)\n piv.to_excel(os.path.join(\n temp, \"sklearn_opsets_summary.xlsx\"), index=False)\n self.assertIn('v_numpy', piv.columns)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# -*- encoding: utf-8 -*-\n# pylint: disable=E0203,E1101,C0111\n\"\"\"\n@file\n@brief Runtime operator.\n\"\"\"\nimport numpy\nfrom ._op import OpRunBinaryNum\n\n\nclass Min(OpRunBinaryNum):\n\n def __init__(self, onnx_node, desc=None, **options):\n OpRunBinaryNum.__init__(self, onnx_node, desc=desc, **options)\n\n def _run(self, a, b): # pylint: disable=W0221\n return (numpy.minimum(a, b), )\n",
"\"\"\"\n@brief test log(time=2s)\n\"\"\"\nfrom collections import OrderedDict\nfrom io import StringIO\nimport unittest\nfrom logging import getLogger\nimport numpy\nimport pandas\nfrom sklearn.gaussian_process.kernels import RBF, ConstantKernel as CK, Sum\nfrom pyquickhelper.pycode import ExtTestCase\nfrom pyquickhelper.texthelper.version_helper import compare_module_version\nfrom onnxruntime import __version__ as ort_version\nfrom skl2onnx.common.data_types import FloatTensorType\ntry:\n from skl2onnx.operator_converters.gaussian_process import convert_kernel\nexcept ImportError:\n convert_kernel = None\nfrom mlprodict.onnxrt import OnnxInference\nfrom mlprodict.onnxrt.side_by_side import side_by_side_by_values\n\n\nXtest_ = pandas.read_csv(StringIO(\"\"\"\n1.000000000000000000e+02,1.061277971307766705e+02,1.472195004809226493e+00,2.307125069497626552e-02,4.539948095743629591e-02,2.855191098141335870e-01\n1.000000000000000000e+02,9.417031896832908444e+01,1.249743892709246573e+00,2.370416174339620707e-02,2.613847280316268853e-02,5.097165413593484073e-01\n1.000000000000000000e+02,9.305231488674536422e+01,1.795726729335217264e+00,2.473274733802270642e-02,1.349765645107412620e-02,9.410288840541443378e-02\n1.000000000000000000e+02,7.411264142156210255e+01,1.747723020195752319e+00,1.559695663417645997e-02,4.230394035515055301e-02,2.225492746314280956e-01\n1.000000000000000000e+02,9.326006195761877393e+01,1.738860294343326229e+00,2.280160135767652502e-02,4.883335335161764074e-02,2.806808409247734115e-01\n1.000000000000000000e+02,8.341529291866362428e+01,5.119682123742423929e-01,2.488795768635816003e-02,4.887573336092913834e-02,1.673462179673477768e-01\n1.000000000000000000e+02,1.182436477919874562e+02,1.733516391831658954e+00,1.533520930349476820e-02,3.131213519485807895e-02,1.955345358785769427e-01\n1.000000000000000000e+02,1.228982583299257101e+02,1.115599996405831629e+00,1.929354155079938959e-02,3.056996308544096715e-03,1.197052763998271013e-01\n1.000000000000000000e+02,1.160303269386108838e+02,1.018627021014927303e+00,2.248784981616459844e-02,2.688111547114307651e-02,3.326105131778724355e-01\n1.000000000000000000e+02,1.163414374640396005e+02,6.644299545804077667e-01,1.508088417713602906e-02,4.451836657613789106e-02,3.245643044204808425e-01\n\"\"\".strip(\"\\n\\r \")), header=None).values\n\n\nthreshold = \"0.4.0\"\n\n\nclass TestOnnxrtSideBySide(ExtTestCase):\n\n def setUp(self):\n logger = getLogger('skl2onnx')\n logger.disabled = True\n\n @unittest.skipIf(convert_kernel is None, reason=\"not enough recent version\")\n def test_kernel_ker12_def(self):\n ker = (Sum(CK(0.1, (1e-3, 1e3)), CK(0.1, (1e-3, 1e3)) *\n RBF(length_scale=1, length_scale_bounds=(1e-3, 1e3))))\n onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=numpy.float32)\n model_onnx = onx.to_onnx(\n inputs=[('X', FloatTensorType([None, None]))],\n outputs=[('Y', FloatTensorType([None, None]))])\n sess = OnnxInference(model_onnx.SerializeToString())\n res = sess.run({'X': Xtest_.astype(numpy.float32)})\n m1 = res['Y']\n m2 = ker(Xtest_)\n self.assertEqualArray(m1, m2)\n\n @unittest.skipIf(convert_kernel is None, reason=\"not enough recent version\")\n def test_kernel_ker2_def(self):\n ker = Sum(\n CK(0.1, (1e-3, 1e3)) * RBF(length_scale=10,\n length_scale_bounds=(1e-3, 1e3)),\n CK(0.1, (1e-3, 1e3)) * RBF(length_scale=1,\n length_scale_bounds=(1e-3, 1e3))\n )\n onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=numpy.float32)\n model_onnx = onx.to_onnx(\n inputs=[('X', FloatTensorType([None, None]))],\n outputs=[('Y', 
FloatTensorType([None, None]))])\n sess = OnnxInference(model_onnx.SerializeToString())\n\n res = sess.run({'X': Xtest_.astype(numpy.float32)})\n m1 = res['Y']\n m2 = ker(Xtest_)\n self.assertEqualArray(m1, m2)\n\n res = sess.run({'X': Xtest_.astype(numpy.float32)}, intermediate=True)\n self.assertGreater(len(res), 30)\n self.assertIsInstance(res, OrderedDict)\n\n @unittest.skipIf(convert_kernel is None, reason=\"not enough recent version\")\n @unittest.skipIf(compare_module_version(ort_version, threshold) <= 0,\n reason=\"Node:Scan1 Field 'shape' of type is required but missing.\")\n def test_kernel_ker2_def_ort(self):\n ker = Sum(\n CK(0.1, (1e-3, 1e3)) * RBF(length_scale=10,\n length_scale_bounds=(1e-3, 1e3)),\n CK(0.1, (1e-3, 1e3)) * RBF(length_scale=1,\n length_scale_bounds=(1e-3, 1e3))\n )\n onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=numpy.float32)\n model_onnx = onx.to_onnx(\n inputs=[('X', FloatTensorType([None, None]))],\n outputs=[('Y', FloatTensorType([None, None]))])\n sess = OnnxInference(model_onnx.SerializeToString(),\n runtime=\"onnxruntime2\")\n res = sess.run({'X': Xtest_.astype(numpy.float32)})\n m1 = res['Y']\n m2 = ker(Xtest_)\n self.assertEqualArray(m1, m2, decimal=5)\n\n @unittest.skipIf(convert_kernel is None, reason=\"not enough recent version\")\n @unittest.skipIf(compare_module_version(ort_version, threshold) <= 0,\n reason=\"Node:Scan1 Field 'shape' of type is required but missing.\")\n def test_kernel_ker2_def_ort1(self):\n ker = Sum(\n CK(0.1, (1e-3, 1e3)) * RBF(length_scale=10,\n length_scale_bounds=(1e-3, 1e3)),\n CK(0.1, (1e-3, 1e3)) * RBF(length_scale=1,\n length_scale_bounds=(1e-3, 1e3))\n )\n onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=numpy.float32)\n model_onnx = onx.to_onnx(\n inputs=[('X', FloatTensorType([None, None]))],\n outputs=[('Y', FloatTensorType([None, None]))])\n sess = OnnxInference(model_onnx.SerializeToString(),\n runtime=\"onnxruntime1\")\n\n rows = []\n\n def myprint(*args, **kwargs):\n rows.append(\" \".join(map(str, args)))\n\n res = sess.run({'X': Xtest_.astype(numpy.float32)},\n intermediate=True, verbose=1, fLOG=myprint)\n self.assertGreater(len(rows), 2)\n m1 = res['Y']\n self.assertNotEmpty(m1)\n self.assertGreater(len(res), 2)\n # m2 = ker(Xtest_)\n # self.assertEqualArray(m1, m2, decimal=5)\n\n cpu = OnnxInference(model_onnx.SerializeToString())\n sbs = side_by_side_by_values(\n [cpu, sess], inputs={'X': Xtest_.astype(numpy.float32)})\n self.assertGreater(len(sbs), 2)\n self.assertIsInstance(sbs, list)\n self.assertIsInstance(sbs[0], dict)\n self.assertIn('step', sbs[0])\n self.assertIn('step', sbs[1])\n self.assertIn('metric', sbs[0])\n self.assertIn('metric', sbs[1])\n self.assertIn('cmp', sbs[0])\n self.assertIn('cmp', sbs[1])\n\n sess3 = OnnxInference(model_onnx.SerializeToString(),\n runtime=\"onnxruntime2\")\n sbs = side_by_side_by_values(\n [cpu, sess, sess3], inputs={'X': Xtest_.astype(numpy.float32)})\n self.assertNotEmpty(sbs)\n\n inputs = {'X': Xtest_.astype(numpy.float32)}\n sbs = side_by_side_by_values(\n [(cpu, inputs), (sess, inputs), (sess3, inputs)])\n self.assertNotEmpty(sbs)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# coding: utf-8\n\"\"\"\n@file\n@brief Wraps runtime into a :epkg:`scikit-learn` transformer.\n\"\"\"\nimport numpy\nimport pandas\nfrom onnx import helper\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom skl2onnx.algebra.onnx_operator_mixin import OnnxOperatorMixin\nfrom skl2onnx.proto import TensorProto\nfrom skl2onnx.helpers.onnx_helper import load_onnx_model, enumerate_model_node_outputs\nfrom skl2onnx.helpers.onnx_helper import select_model_inputs_outputs\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom ..onnxrt import OnnxInference\nfrom ..onnxrt.onnx2py_helper import _var_as_dict\n\n\nclass OnnxTransformer(BaseEstimator, TransformerMixin, OnnxOperatorMixin):\n \"\"\"\n Calls :epkg:`onnxruntime` inference following :epkg:`scikit-learn` API\n so that it can be included in a :epkg:`scikit-learn` pipeline.\n\n Parameters\n ----------\n\n onnx_bytes : bytes\n output_name: string\n requested output name or None to request all and\n have method *transform* to store all of them in a dataframe\n enforce_float32 : boolean\n :epkg:`onnxruntime` only supports *float32*,\n :epkg:`scikit-learn` usually uses double floats, this parameter\n ensures that every array of double floats is converted into\n single floats\n \"\"\"\n\n def __init__(self, onnx_bytes, output_name=None, enforce_float32=True,\n runtime='onnxruntime1'):\n BaseEstimator.__init__(self)\n TransformerMixin.__init__(self)\n self.onnx_bytes = (onnx_bytes\n if not hasattr(onnx_bytes, 'SerializeToString')\n else onnx_bytes.SerializeToString())\n self.output_name = output_name\n self.enforce_float32 = enforce_float32\n self.runtime = runtime\n\n def __repr__(self): # pylint: disable=W0222\n \"\"\"\n usual\n \"\"\"\n ob = self.onnx_bytes\n if len(ob) > 20:\n ob = ob[:10] + b\"...\" + ob[-10:]\n return \"{0}(onnx_bytes={1}, output_name={2}, enforce_float32={3}, runtime='{4}')\".format(\n self.__class__.__name__, ob, self.output_name,\n self.enforce_float32, self.runtime)\n\n def fit(self, X=None, y=None, **fit_params):\n \"\"\"\n Loads the :epkg:`ONNX` model.\n\n Parameters\n ----------\n X : unused\n y : unused\n\n Returns\n -------\n self\n \"\"\"\n self.onnxrt_ = OnnxInference(self.onnx_bytes, runtime=self.runtime)\n self.inputs_ = self.onnxrt_.input_names\n return self\n\n def _check_arrays(self, inputs):\n \"\"\"\n Ensures that double floats are converted into single floats\n if *enforce_float32* is True or raises an exception.\n \"\"\"\n for k in inputs:\n v = inputs[k]\n if isinstance(v, numpy.ndarray):\n if v.dtype == numpy.float64:\n if self.enforce_float32:\n inputs[k] = v.astype(numpy.float32)\n else:\n raise TypeError(\n \"onnxunruntime only supports floats. Input '{0}' \"\n \"should be converted.\".format(k))\n\n def transform(self, X, y=None, **inputs):\n \"\"\"\n Runs the predictions. 
If *X* is a dataframe,\n the function assumes every columns is a separate input,\n otherwise, *X* is considered as a first input and *inputs*\n can be used to specify extra inputs.\n\n Parameters\n ----------\n X : iterable, data to process (or first input if several expected)\n y : unused\n inputs: :epkg:`ONNX` graph support multiple inputs,\n each column of a dataframe is converted into as many inputs if\n *X* is a dataframe, otherwise, *X* is considered as the first input\n and *inputs* can be used to specify the other ones\n\n Returns\n -------\n :epkg:`DataFrame`\n \"\"\"\n if not hasattr(self, \"onnxrt_\"):\n raise AttributeError(\n \"Transform OnnxTransformer must be fit first.\")\n rt_inputs = {}\n if isinstance(X, pandas.DataFrame):\n for c in X.columns:\n rt_inputs[c] = X[c]\n elif isinstance(X, numpy.ndarray):\n rt_inputs[self.inputs_[0]] = X\n elif isinstance(X, dict) and len(inputs) == 0:\n for k, v in X.items():\n rt_inputs[k] = v\n elif isinstance(X, list):\n if len(self.inputs_) == 1:\n rt_inputs[self.inputs_[0]] = numpy.array(X)\n else:\n for i in range(len(self.inputs_)):\n rt_inputs[self.inputs_[i]] = [row[i] for row in X]\n\n for k, v in inputs.items():\n rt_inputs[k] = v\n\n names = [self.output_name] if self.output_name else self.onnxrt_.output_names\n self._check_arrays(rt_inputs)\n doutputs = self.onnxrt_.run(rt_inputs)\n outputs = [doutputs[n] for n in names]\n\n if self.output_name or len(outputs) == 1:\n if isinstance(outputs[0], list):\n return pandas.DataFrame(outputs[0])\n else:\n return outputs[0]\n else:\n names = self.output_name if self.output_name else [\n o.name for o in self.onnxrt_.output_names]\n return pandas.DataFrame({k: v for k, v in zip(names, outputs)})\n\n def fit_transform(self, X, y=None, **inputs):\n \"\"\"\n Loads the *ONNX* model and runs the predictions.\n\n Parameters\n ----------\n X : iterable, data to process (or first input if several expected)\n y : unused\n inputs: :epkg:`ONNX` graph support multiple inputs,\n each column of a dataframe is converted into as many inputs if\n *X* is a dataframe, otherwise, *X* is considered as the first input\n and *inputs* can be used to specify the other ones\n\n Returns\n -------\n :epkg:`DataFrame`\n \"\"\"\n return self.fit(X, y=y, **inputs).transform(X, y)\n\n @staticmethod\n def enumerate_create(onnx_bytes, output_names=None, enforce_float32=True):\n \"\"\"\n Creates multiple *OnnxTransformer*,\n one for each requested intermediate node.\n\n onnx_bytes : bytes\n output_names: string\n requested output names or None to request all and\n have method *transform* to store all of them in a dataframe\n enforce_float32 : boolean\n :epkg:`onnxruntime` only supports *float32*,\n :epkg:`scikit-learn` usually uses double floats, this parameter\n ensures that every array of double floats is converted into\n single floats\n :return: iterator on OnnxTransformer *('output name', OnnxTransformer)*\n \"\"\"\n selected = None if output_names is None else set(output_names)\n model = load_onnx_model(onnx_bytes)\n for out in enumerate_model_node_outputs(model):\n m = select_model_inputs_outputs(model, out)\n if selected is None or out in selected:\n tr = OnnxTransformer(m.SerializeToString(),\n enforce_float32=enforce_float32)\n yield out, tr\n\n def onnx_parser(self, inputs=None):\n \"\"\"\n Returns a parser for this model.\n \"\"\"\n if inputs:\n self.parsed_inputs_ = inputs\n\n def parser():\n return self.onnxrt_.output_names\n return parser\n\n def onnx_shape_calculator(self):\n def 
shape_calculator(operator):\n cout = self.onnxrt_.output_names\n if len(operator.outputs) != len(cout):\n raise RuntimeError(\"Mismatched number of outputs: {} != {}.\"\n \"\".format(len(operator.outputs), len(cout)))\n for out_op, out in zip(operator.outputs, self.onnxrt_.obj.graph.output):\n var = _var_as_dict(out)\n if var['type']['kind'] != 'tensor':\n raise NotImplementedError(\n \"Noy yet implemented for output:\\n{}\".format(out))\n shape = var['type']['shape']\n if shape[0] == 0:\n shape = ('None',) + tuple(shape[1:])\n elem = var['type']['elem']\n if elem == 'float':\n out_op.type = FloatTensorType(shape=shape)\n else:\n raise NotImplementedError(\n \"Noy yet implemented for elem_type:\\n{}\".format(elem))\n return shape_calculator\n\n def onnx_converter(self):\n \"\"\"\n Returns a converter for this model.\n If not overloaded, it fetches the converter\n mapped to the first *scikit-learn* parent\n it can find.\n \"\"\"\n def copy_inout(inout, scope, new_name):\n shape = [s.dim_value for s in inout.type.tensor_type.shape.dim]\n value_info = helper.make_tensor_value_info(\n new_name, inout.type.tensor_type.elem_type, shape)\n return value_info\n\n def clean_variable_name(name, scope):\n return scope.get_unique_variable_name(name)\n\n def clean_operator_name(name, scope):\n return scope.get_unique_operator_name(name)\n\n def clean_initializer_name(name, scope):\n return scope.get_unique_variable_name(name)\n\n def converter(scope, operator, container):\n\n op = operator.raw_operator\n\n graph = op.onnxrt_.obj.graph\n name_mapping = {}\n node_mapping = {}\n for node in graph.node:\n name = node.name\n if name is not None:\n node_mapping[node.name] = clean_initializer_name(\n node.name, scope)\n for o in node.input:\n name_mapping[o] = clean_variable_name(o, scope)\n for o in node.output:\n name_mapping[o] = clean_variable_name(o, scope)\n for o in graph.initializer:\n name_mapping[o.name] = clean_operator_name(o.name, scope)\n\n inputs = [copy_inout(o, scope, name_mapping[o.name])\n for o in graph.input]\n outputs = [copy_inout(o, scope, name_mapping[o.name])\n for o in graph.output]\n\n for inp, to in zip(operator.inputs, inputs):\n n = helper.make_node('Identity', [inp.onnx_name], [to.name],\n name=clean_operator_name('Identity', scope))\n container.nodes.append(n)\n\n for inp, to in zip(outputs, operator.outputs):\n n = helper.make_node('Identity', [inp.name], [to.onnx_name],\n name=clean_operator_name('Identity', scope))\n container.nodes.append(n)\n\n for node in graph.node:\n n = helper.make_node(node.op_type,\n [name_mapping[o] for o in node.input],\n [name_mapping[o] for o in node.output],\n name=node_mapping[node.name] if node.name else None)\n n.attribute.extend(node.attribute) # pylint: disable=E1101\n container.nodes.append(n)\n\n for o in graph.initializer:\n as_str = o.SerializeToString()\n tensor = TensorProto()\n tensor.ParseFromString(as_str)\n tensor.name = name_mapping[o.name]\n container.initializers.append(tensor)\n\n return converter\n",
"\"\"\"\n@brief test log(time=9s)\n\"\"\"\nimport unittest\nfrom logging import getLogger\nfrom pyquickhelper.loghelper import fLOG\nfrom pyquickhelper.pycode import ExtTestCase\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.utils.testing import ignore_warnings\nfrom skl2onnx import __version__ as skl2onnx_version\nfrom mlprodict.onnxrt.validate import enumerate_validated_operator_opsets\n\n\nclass TestRtValidateLabels(ExtTestCase):\n\n @ignore_warnings(category=(UserWarning, ConvergenceWarning, RuntimeWarning))\n def test_rt_LabelBinarizer(self):\n fLOG(__file__, self._testMethodName, OutputPrint=__name__ == \"__main__\")\n logger = getLogger('skl2onnx')\n logger.disabled = True\n verbose = 1 if __name__ == \"__main__\" else 0\n\n buffer = []\n\n def myprint(*args, **kwargs):\n buffer.append(\" \".join(map(str, args)))\n\n debug = True\n rows = list(enumerate_validated_operator_opsets(\n verbose, opset_min=11, fLOG=myprint,\n models={\"LabelBinarizer\"},\n runtime='python', debug=debug,\n filter_exp=lambda m, p: \"64\" not in p))\n self.assertGreater(len(rows), 1)\n self.assertGreater(len(buffer), 1 if debug else 0)\n\n @ignore_warnings(category=(UserWarning, ConvergenceWarning, RuntimeWarning))\n def test_rt_LabelEncoder(self):\n fLOG(__file__, self._testMethodName, OutputPrint=__name__ == \"__main__\")\n logger = getLogger('skl2onnx')\n logger.disabled = True\n verbose = 1 if __name__ == \"__main__\" else 0\n\n buffer = []\n\n def myprint(*args, **kwargs):\n buffer.append(\" \".join(map(str, args)))\n\n debug = False\n rows = list(enumerate_validated_operator_opsets(\n verbose, opset_min=11, fLOG=myprint,\n models={\"LabelEncoder\"}, # ,\n runtime='python', debug=debug,\n filter_exp=lambda m, p: \"64\" not in p))\n self.assertGreater(len(rows), 1)\n self.assertGreater(len(buffer), 1 if debug else 0)\n\n @ignore_warnings(category=(UserWarning, ConvergenceWarning, RuntimeWarning))\n def test_rt_FeatureHasher(self):\n fLOG(__file__, self._testMethodName, OutputPrint=__name__ == \"__main__\")\n logger = getLogger('skl2onnx')\n logger.disabled = True\n verbose = 1 if __name__ == \"__main__\" else 0\n\n buffer = []\n\n def myprint(*args, **kwargs):\n buffer.append(\" \".join(map(str, args)))\n\n debug = False\n rows = list(enumerate_validated_operator_opsets(\n verbose, opset_min=11, fLOG=myprint,\n models={\"FeatureHasher\"},\n runtime='python', debug=debug,\n filter_exp=lambda m, p: \"64\" not in p))\n self.assertGreater(len(rows), 1)\n self.assertGreater(len(buffer), 1 if debug else 0)\n\n @ignore_warnings(category=(UserWarning, ConvergenceWarning, RuntimeWarning))\n def test_rt_OneHotEncoder(self):\n fLOG(__file__, self._testMethodName, OutputPrint=__name__ == \"__main__\")\n logger = getLogger('skl2onnx')\n logger.disabled = True\n verbose = 1 if __name__ == \"__main__\" else 0\n\n buffer = []\n\n def myprint(*args, **kwargs):\n buffer.append(\" \".join(map(str, args)))\n\n debug = False\n rows = list(enumerate_validated_operator_opsets(\n verbose, opset_min=11, fLOG=myprint,\n models={\"OneHotEncoder\"},\n runtime='python', debug=debug,\n filter_exp=lambda m, p: \"64\" not in p))\n self.assertGreater(len(rows), 1)\n self.assertGreater(len(buffer), 1 if debug else 0)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"sklearn.utils.testing.ignore_warnings",
"pandas.DataFrame"
],
[
"numpy.minimum"
],
[
"sklearn.gaussian_process.kernels.ConstantKernel",
"sklearn.gaussian_process.kernels.RBF"
],
[
"numpy.array",
"sklearn.base.BaseEstimator.__init__",
"sklearn.base.TransformerMixin.__init__",
"pandas.DataFrame"
],
[
"sklearn.utils.testing.ignore_warnings"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Enyokid/keras-gan-style | [
"956072649be0b4ef39a3c18c2c2f2510db87a445"
] | [
"train-gan.py"
] | [
"import os, sys\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\r\nimport numpy as np\r\nimport cv2 as cv\r\nimport tensorflow as tf\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, TensorBoard\r\nfrom net import *\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom sklearn.utils import shuffle\r\nimport time\r\nfrom tensorflow.keras.utils import multi_gpu_model\r\n\r\n\r\nh,w = 256, 256\r\ninshape = (h, w, 3)\r\nepochs = 100\r\nbatch_size = 4\r\n\r\ndef COMPILE(inshape,D_model,G_model):\r\n real_img = G_model.input\r\n anie_gray = layers.Input(shape=inshape,name = 'anie_gray')\r\n gen_img = G_model(real_img)\r\n gen_img_logit = D_model(gen_img)\r\n\r\n vgg_model = vgg_net(inshape)\r\n\r\n real_vgg_map = vgg_model(real_img)\r\n anie_vgg_map = vgg_model(anie_gray)\r\n gen_vgg_map = vgg_model(gen_img)\r\n\r\n output=layers.concatenate([real_vgg_map,gen_vgg_map,anie_vgg_map], axis=-1)\r\n output2=layers.concatenate([real_img,gen_img], axis=-1)\r\n return tf.keras.models.Model(inputs=[real_img,anie_gray], outputs=[output,output2,gen_img_logit])\r\n\r\n#compile model\r\nwith tf.device('/cpu:0'):\r\n train_D_model = train_D_net(inshape)\r\n train_D_model.compile(Adam(lr=4e-5), loss=['mse','mse','mse','mse'],loss_weights=[2,2,1,2])\r\n D_model = tf.keras.models.Model(inputs=train_D_model.layers[4].input,outputs=train_D_model.layers[4].output)\r\n\r\n G_model = G_net(inshape)\r\n G_model.load_weights('models/G_weights_pre.h5')\r\n gan_model = COMPILE(inshape,D_model,G_model)\r\n gan_model.summary()\r\n gan_model.layers[3].trainable=False#VGG-net\r\n gan_model.layers[5].trainable=False#D—net\r\n gan_model.compile(Adam(lr=2e-5), loss=[style_loss,color_loss,'mse'],loss_weights=[1,10,250])\r\n \r\n D_model.summary()\r\n\r\n#img_list\r\nrealdir = 'dataset/train_photo'\r\naniedir = 'dataset/Target/style'\r\nsmoothdir = 'dataset/Target/smooth'\r\nreal_list = os.listdir(realdir)\r\nanie_list = os.listdir(aniedir)\r\nsmooth_list = os.listdir(smoothdir)\r\n\r\n#output\r\ngen_logit_mask = np.ones((batch_size, h//4, w//4 ,1))\r\ng_out_mask1 = np.zeros((batch_size, h//4, w//4 ,1536))\r\ng_out_mask2 = np.zeros((batch_size, h, w ,6))\r\n\r\nd_out_mask1 = np.ones((batch_size, h//4, w//4 ,1))\r\nd_out_mask2 = np.zeros((batch_size, h//4, w//4 ,1))\r\nd_out_mask3 = np.zeros((batch_size, h//4, w//4 ,1))\r\nd_out_mask4 = np.zeros((batch_size, h//4, w//4 ,1))\r\n\r\n#g_input\r\nreal_img = np.zeros((batch_size, h, w ,3))\r\nanie_gray = np.zeros((batch_size, h, w ,3))\r\nanie_img = np.zeros((batch_size, h, w ,3))\r\nanie_smooth = np.zeros((batch_size, h, w ,3))\r\n\r\ngen_img_logit = np.zeros((batch_size, h//4, w//4 ,1))\r\n\r\nfor epoch in range(epochs):\r\n for i in range(0,len(anie_list)-5,batch_size):\r\n start_time = time.time()\r\n real_list = shuffle(real_list)\r\n\r\n #img data load\r\n for j in range(batch_size):\r\n real_path = realdir + '/' + real_list[i+j]\r\n real_src = cv.imread(real_path)\r\n\r\n anie_path = aniedir + '/' + anie_list[i+j]\r\n anie_src = cv.imread(anie_path)\r\n anie_src_gray = cv.cvtColor(anie_src, cv.COLOR_BGR2GRAY)\r\n\r\n anie_src = anie_src.astype(np.float64)\r\n \r\n gray_src = cv.merge([anie_src_gray,anie_src_gray,anie_src_gray])\r\n\r\n smooth_path = smoothdir + '/' + smooth_list[i+j]\r\n smooth_src = cv.imread(smooth_path)\r\n\r\n #load to [-1,1]\r\n real_src = 1/127.5 * real_src -1\r\n anie_src = 1/127.5 * anie_src -1\r\n gray_src = 1/127.5 * gray_src -1\r\n smooth_src = 1/127.5 * smooth_src 
-1\r\n real_img[j,...] = real_src\r\n anie_img[j,...] = anie_src\r\n anie_gray[j,...] = gray_src\r\n anie_smooth[j,...] = smooth_src\r\n \r\n \r\n # Train D\r\n D_model.trainable=True\r\n gen_img = G_model.predict(real_img)\r\n d_loss = train_D_model.train_on_batch([anie_img,anie_gray,anie_smooth,gen_img], [d_out_mask1,d_out_mask2,d_out_mask3,d_out_mask4])\r\n # ---------------------\r\n \r\n # Train G\r\n D_model.trainable=False\r\n g_loss = gan_model.train_on_batch([real_img,anie_gray], [g_out_mask1,g_out_mask2, gen_logit_mask])\r\n\r\n # -----------------\r\n elapsed_time = time.time() - start_time\r\n\r\n print (\"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] time: %s\" \r\n % (epoch,epochs,i, len(anie_list),d_loss[0],g_loss[0],elapsed_time))\r\n\r\n \r\n D_model.save_weights('models/D_weights_' + str(epoch) + '.h5')\r\n G_model.save_weights('models/G_weights_' + str(epoch) + '.h5')"
] | [
[
"tensorflow.device",
"tensorflow.keras.models.Model",
"sklearn.utils.shuffle",
"tensorflow.keras.layers.concatenate",
"numpy.ones",
"tensorflow.keras.optimizers.Adam",
"numpy.zeros",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
brendancol/xarray-spatial | [
"36d53b75086b760cab5100a12fcbda946dd85a25",
"36d53b75086b760cab5100a12fcbda946dd85a25"
] | [
"xrspatial/gpu_rtx/mesh_utils.py",
"xrspatial/tests/test_zonal.py"
] | [
"import numba as nb\nimport numpy as np\nimport cupy\n\n\[email protected]\ndef _triangulate_terrain_kernel(verts, triangles, data, H, W, scale, stride):\n global_id = stride + nb.cuda.grid(1)\n if global_id < W*H:\n h = global_id // W\n w = global_id % W\n mesh_map_index = h * W + w\n\n val = data[h, -w]\n\n offset = 3*mesh_map_index\n verts[offset] = w\n verts[offset+1] = h\n verts[offset+2] = val * scale\n\n if w != W - 1 and h != H - 1:\n offset = 6*(h * (W-1) + w)\n triangles[offset+0] = np.int32(mesh_map_index + W)\n triangles[offset+1] = np.int32(mesh_map_index + W + 1)\n triangles[offset+2] = np.int32(mesh_map_index)\n triangles[offset+3] = np.int32(mesh_map_index + W + 1)\n triangles[offset+4] = np.int32(mesh_map_index + 1)\n triangles[offset+5] = np.int32(mesh_map_index)\n\n\[email protected](parallel=True)\ndef triangulate_cpu(verts, triangles, data, H, W, scale):\n for h in nb.prange(H):\n for w in range(W):\n mesh_map_index = h * W + w\n\n val = data[h, w]\n\n offset = 3*mesh_map_index\n verts[offset] = w\n verts[offset+1] = h\n verts[offset+2] = val * scale\n\n if w != W - 1 and h != H - 1:\n offset = 6*(h*(W-1) + w)\n triangles[offset+0] = np.int32(mesh_map_index + W)\n triangles[offset+1] = np.int32(mesh_map_index + W+1)\n triangles[offset+2] = np.int32(mesh_map_index)\n triangles[offset+3] = np.int32(mesh_map_index + W+1)\n triangles[offset+4] = np.int32(mesh_map_index + 1)\n triangles[offset+5] = np.int32(mesh_map_index)\n\n\ndef triangulate_terrain(verts, triangles, terrain, scale=1):\n H, W = terrain.shape\n if isinstance(terrain.data, np.ndarray):\n triangulate_cpu(verts, triangles, terrain.data, H, W, scale)\n if isinstance(terrain.data, cupy.ndarray):\n job_size = H*W\n blockdim = 1024\n griddim = (job_size + blockdim - 1) // 1024\n d = 100\n offset = 0\n while job_size > 0:\n batch = min(d, griddim)\n _triangulate_terrain_kernel[batch, blockdim](\n verts, triangles, terrain.data, H, W, scale, offset)\n offset += batch*blockdim\n job_size -= batch*blockdim\n return 0\n\n\[email protected](nopython=True)\ndef _fill_contents(content, verts, triangles, num_tris):\n v = np.empty(12, np.float32)\n pad = np.zeros(2, np.int8)\n offset = 0\n for i in range(num_tris):\n t0 = triangles[3*i+0]\n t1 = triangles[3*i+1]\n t2 = triangles[3*i+2]\n v[3*0+0] = 0\n v[3*0+1] = 0\n v[3*0+2] = 0\n v[3*1+0] = verts[3*t0+0]\n v[3*1+1] = verts[3*t0+1]\n v[3*1+2] = verts[3*t0+2]\n v[3*2+0] = verts[3*t1+0]\n v[3*2+1] = verts[3*t1+1]\n v[3*2+2] = verts[3*t1+2]\n v[3*3+0] = verts[3*t2+0]\n v[3*3+1] = verts[3*t2+1]\n v[3*3+2] = verts[3*t2+2]\n\n offset = 50*i\n content[offset:offset+48] = v.view(np.uint8)\n content[offset+48:offset+50] = pad\n\n\ndef write(name, verts, triangles):\n \"\"\"\n Save a triangulated raster to a standard STL file.\n Windows has a default STL viewer and probably all 3D viewers have native\n support for it because of its simplicity. 
Can be used to verify the\n correctness of the algorithm or to visualize the mesh to get a notion of\n the size/complexity etc.\n @param name - The name of the mesh file we're going to save.\n Should end in .stl\n @param verts - A numpy array containing all the vertices of the mesh.\n Format is 3 float32 per vertex (vertex buffer)\n @param triangles - A numpy array containing all the triangles of the mesh.\n Format is 3 int32 per triangle (index buffer)\n \"\"\"\n ib = triangles\n vb = verts\n if isinstance(ib, cupy.ndarray):\n ib = cupy.asnumpy(ib)\n if isinstance(vb, cupy.ndarray):\n vb = cupy.asnumpy(vb)\n\n header = np.zeros(80, np.uint8)\n nf = np.empty(1, np.uint32)\n num_tris = triangles.shape[0] // 3\n nf[0] = num_tris\n f = open(name, 'wb')\n f.write(header)\n f.write(nf)\n\n # size of 1 triangle in STL is 50 bytes\n # 12 floats (each 4 bytes) for a total of 48\n # And additional 2 bytes for padding\n content = np.empty(num_tris*(50), np.uint8)\n _fill_contents(content, vb, ib, num_tris)\n f.write(content)\n f.close()\n",
"import numpy as np\nimport pandas as pd\nimport xarray as xr\nimport dask.array as da\nimport dask.dataframe as dd\n\nfrom xrspatial import zonal_stats as stats\nfrom xrspatial import zonal_apply as apply\nfrom xrspatial import zonal_crosstab as crosstab\nfrom xrspatial import suggest_zonal_canvas\nfrom xrspatial import trim\nfrom xrspatial import crop\n\n\nfrom xrspatial.zonal import regions\n\n\ndef create_zones_values(backend):\n zones_val = np.array([[0, 0, 1, 1, 2, 2, 3, 3],\n [0, 0, 1, 1, 2, 2, 3, 3],\n [0, 0, 1, 1, 2, np.nan, 3, 3]])\n zones = xr.DataArray(zones_val)\n\n values_val_2d = np.asarray([\n [0, 0, 1, 1, 2, 2, 3, np.inf],\n [0, 0, 1, 1, 2, np.nan, 3, 0],\n [np.inf, 0, 1, 1, 2, 2, 3, 3]\n ])\n values_2d = xr.DataArray(values_val_2d)\n\n values_val_3d = np.ones(4*3*8).reshape(3, 8, 4)\n values_3d = xr.DataArray(\n values_val_3d,\n dims=['lat', 'lon', 'race']\n )\n values_3d['race'] = ['cat1', 'cat2', 'cat3', 'cat4']\n\n if 'dask' in backend:\n zones.data = da.from_array(zones.data, chunks=(3, 3))\n values_2d.data = da.from_array(values_2d.data, chunks=(3, 3))\n values_3d.data = da.from_array(values_3d.data, chunks=(3, 3, 1))\n\n return zones, values_2d, values_3d\n\n\ndef check_results(df_np, df_da, expected_results_dict):\n # numpy case\n assert isinstance(df_np, pd.DataFrame)\n assert len(df_np.columns) == len(expected_results_dict)\n\n # zone column\n assert (df_np['zone'] == expected_results_dict['zone']).all()\n\n for col in df_np.columns[1:]:\n assert np.isclose(\n df_np[col], expected_results_dict[col], equal_nan=True\n ).all()\n\n if df_da is not None:\n # dask case\n assert isinstance(df_da, dd.DataFrame)\n df_da = df_da.compute()\n assert isinstance(df_da, pd.DataFrame)\n\n # numpy results equal dask results, ignoring their indexes\n assert np.array_equal(df_np.values, df_da.values, equal_nan=True)\n\n\ndef test_stats():\n # expected results\n default_stats_results = {\n 'zone': [0, 1, 2, 3],\n 'mean': [0, 1, 2, 2.4],\n 'max': [0, 1, 2, 3],\n 'min': [0, 1, 2, 0],\n 'sum': [0, 6, 8, 12],\n 'std': [0, 0, 0, 1.2],\n 'var': [0, 0, 0, 1.44],\n 'count': [5, 6, 4, 5]\n }\n\n # numpy case\n zones_np, values_np, _ = create_zones_values(backend='numpy')\n # default stats_funcs\n df_np = stats(zones=zones_np, values=values_np)\n\n # dask case\n zones_da, values_da, _ = create_zones_values(backend='dask')\n df_da = stats(zones=zones_da, values=values_da)\n check_results(df_np, df_da, default_stats_results)\n\n # expected results\n stats_results_zone_0_3 = {\n 'zone': [0, 3],\n 'mean': [0, 2.4],\n 'max': [0, 3],\n 'min': [0, 0],\n 'sum': [0, 12],\n 'std': [0, 1.2],\n 'var': [0, 1.44],\n 'count': [5, 5]\n }\n\n # numpy case\n df_np_zone_0_3 = stats(zones=zones_np, values=values_np, zone_ids=[0, 3])\n\n # dask case\n df_da_zone_0_3 = stats(zones=zones_da, values=values_da, zone_ids=[0, 3])\n\n check_results(df_np_zone_0_3, df_da_zone_0_3, stats_results_zone_0_3)\n\n # ---- custom stats (NumPy only) ----\n # expected results\n custom_stats_results = {\n 'zone': [1, 2],\n 'double_sum': [12, 16],\n 'range': [0, 0],\n }\n\n def _double_sum(values):\n return values.sum() * 2\n\n def _range(values):\n return values.max() - values.min()\n\n custom_stats = {\n 'double_sum': _double_sum,\n 'range': _range,\n }\n\n # numpy case\n df_np = stats(\n zones=zones_np, values=values_np, stats_funcs=custom_stats,\n zone_ids=[1, 2], nodata_values=0\n )\n # dask case\n df_da = None\n check_results(df_np, df_da, custom_stats_results)\n\n\ndef test_crosstab_2d():\n # count agg, expected results\n 
crosstab_2d_results = {\n 'zone': [1, 2, 3],\n 0: [0, 0, 1],\n 1: [6, 0, 0],\n 2: [0, 4, 0],\n }\n\n # numpy case\n zones_np, values_np, _ = create_zones_values(backend='numpy')\n df_np = crosstab(\n zones=zones_np, values=values_np,\n zone_ids=[1, 2, 3], cat_ids=[0, 1, 2],\n )\n # dask case\n zones_da, values_da, _ = create_zones_values(backend='dask')\n df_da = crosstab(\n zones=zones_da, values=values_da, zone_ids=[1, 2, 3], nodata_values=3\n )\n check_results(df_np, df_da, crosstab_2d_results)\n\n # percentage agg, expected results\n\n crosstab_2d_percentage_results = {\n 'zone': [1, 2],\n 1: [100, 0],\n 2: [0, 100],\n }\n\n # numpy case\n df_np = crosstab(\n zones=zones_np, values=values_np, zone_ids=[1, 2], cat_ids=[1, 2],\n nodata_values=3, agg='percentage'\n )\n # dask case\n df_da = crosstab(\n zones=zones_da, values=values_da, zone_ids=[1, 2], cat_ids=[1, 2],\n nodata_values=3, agg='percentage'\n )\n check_results(df_np, df_da, crosstab_2d_percentage_results)\n\n\ndef test_crosstab_3d():\n # expected results\n crosstab_3d_results = {\n 'zone': [1, 2, 3],\n 'cat1': [6, 5, 6],\n 'cat2': [6, 5, 6],\n 'cat3': [6, 5, 6],\n 'cat4': [6, 5, 6],\n }\n\n # numpy case\n zones_np, _, values_np = create_zones_values(backend='numpy')\n df_np = crosstab(\n zones=zones_np, values=values_np, zone_ids=[1, 2, 3], layer=-1\n )\n # dask case\n zones_da, _, values_da = create_zones_values(backend='dask')\n df_da = crosstab(\n zones=zones_da, values=values_da, zone_ids=[1, 2, 3],\n cat_ids=['cat1', 'cat2', 'cat3', 'cat4'], layer=-1\n )\n check_results(df_np, df_da, crosstab_3d_results)\n\n # ----- no values case ------\n crosstab_3d_novalues_results = {\n 'zone': [1, 2, 3],\n 'cat1': [0, 0, 0],\n 'cat2': [0, 0, 0],\n 'cat3': [0, 0, 0],\n 'cat4': [0, 0, 0],\n }\n\n # numpy case\n zones_np, _, values_np = create_zones_values(backend='numpy')\n df_np = crosstab(\n zones=zones_np, values=values_np, layer=-1,\n zone_ids=[1, 2, 3], nodata_values=1\n )\n # dask case\n zones_da, _, values_da = create_zones_values(backend='dask')\n df_da = crosstab(\n zones=zones_da, values=values_da, layer=-1,\n zone_ids=[1, 2, 3], nodata_values=1\n )\n check_results(df_np, df_da, crosstab_3d_novalues_results)\n\n\ndef test_apply():\n\n def func(x):\n return 0\n\n zones_val = np.zeros((3, 3), dtype=int)\n # define some zones\n zones_val[1] = 1\n zones_val[2] = 2\n zones = xr.DataArray(zones_val)\n\n values_val = np.array([[0, 1, 2],\n [3, 4, 5],\n [6, 7, np.nan]])\n values = xr.DataArray(values_val)\n\n values_copy = values.copy()\n apply(zones, values, func, nodata=2)\n\n # agg.shape remains the same\n assert values.shape == values_copy.shape\n\n values_val = values.values\n # values within zones are all 0s\n assert (values_val[0] == [0, 0, 0]).all()\n assert (values_val[1] == [0, 0, 0]).all()\n # values outside zones remain\n assert np.isclose(\n values_val[2], values_copy.values[2], equal_nan=True\n ).all()\n\n\ndef test_suggest_zonal_canvas():\n # crs: Geographic\n x_range = (0, 20)\n y_range = (0, 10)\n smallest_area = 2\n min_pixels = 2\n height, width = suggest_zonal_canvas(x_range=x_range, y_range=y_range,\n smallest_area=smallest_area,\n crs='Geographic',\n min_pixels=min_pixels)\n assert height == 10\n assert width == 20\n\n # crs: Mercator\n x_range = (-1e6, 1e6)\n y_range = (0, 1e6)\n smallest_area = 2e9\n min_pixels = 20\n height, width = suggest_zonal_canvas(x_range=x_range, y_range=y_range,\n smallest_area=smallest_area,\n crs='Mercator',\n min_pixels=min_pixels)\n assert height == 100\n assert width == 
200\n\n\ndef create_test_arr(arr):\n n, m = arr.shape\n raster = xr.DataArray(arr, dims=['y', 'x'])\n raster['y'] = np.linspace(0, n, n)\n raster['x'] = np.linspace(0, m, m)\n return raster\n\n\ndef test_regions_four_pixel_connectivity_int():\n arr = np.array([[0, 0, 0, 0],\n [0, 4, 0, 0],\n [1, 4, 4, 0],\n [1, 1, 1, 0],\n [0, 0, 0, 0]], dtype=np.int64)\n raster = create_test_arr(arr)\n raster_regions = regions(raster, neighborhood=4)\n assert len(np.unique(raster_regions.data)) == 3\n assert raster.shape == raster_regions.shape\n\n\ndef test_regions_four_pixel_connectivity_float():\n arr = np.array([[0, 0, 0, np.nan],\n [0, 4, 0, 0],\n [1, 4, 4, 0],\n [1, 1, 1, 0],\n [0, 0, 0, 0]], dtype=np.float64)\n raster = create_test_arr(arr)\n raster_regions = regions(raster, neighborhood=4)\n assert len(np.unique(raster_regions.data)) == 4\n assert raster.shape == raster_regions.shape\n\n\ndef test_regions_eight_pixel_connectivity_int():\n arr = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [0, 0, 0, 1]], dtype=np.int64)\n raster = create_test_arr(arr)\n raster_regions = regions(raster, neighborhood=8)\n assert len(np.unique(raster_regions.data)) == 2\n assert raster.shape == raster_regions.shape\n\n\ndef test_regions_eight_pixel_connectivity_float():\n arr = np.array([[1, 0, 0, np.nan],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [0, 0, 0, 1]], dtype=np.float64)\n raster = create_test_arr(arr)\n raster_regions = regions(raster, neighborhood=8)\n assert len(np.unique(raster_regions.data)) == 3\n assert raster.shape == raster_regions.shape\n\n\ndef test_trim():\n arr = np.array([[0, 0, 0, 0],\n [0, 4, 0, 0],\n [0, 4, 4, 0],\n [0, 1, 1, 0],\n [0, 0, 0, 0]], dtype=np.int64)\n raster = create_test_arr(arr)\n trimmed_raster = trim(raster, values=(0,))\n assert trimmed_raster.shape == (3, 2)\n\n trimmed_arr = np.array([[4, 0],\n [4, 4],\n [1, 1]], dtype=np.int64)\n\n compare = trimmed_arr == trimmed_raster.data\n assert compare.all()\n\n\ndef test_trim_left_top():\n arr = np.array([[0, 0, 0, 0],\n [0, 4, 0, 3],\n [0, 4, 4, 3],\n [0, 1, 1, 3],\n [0, 1, 1, 3]], dtype=np.int64)\n\n raster = create_test_arr(arr)\n trimmed_raster = trim(raster, values=(0,))\n assert trimmed_raster.shape == (4, 3)\n\n trimmed_arr = np.array([[4, 0, 3],\n [4, 4, 3],\n [1, 1, 3],\n [1, 1, 3]], dtype=np.int64)\n\n compare = trimmed_arr == trimmed_raster.data\n assert compare.all()\n\n\ndef test_trim_right_top():\n arr = np.array([[0, 0, 0, 0],\n [4, 0, 3, 0],\n [4, 4, 3, 0],\n [1, 1, 3, 0],\n [1, 1, 3, 0]], dtype=np.int64)\n\n raster = create_test_arr(arr)\n trimmed_raster = trim(raster, values=(0,))\n assert trimmed_raster.shape == (4, 3)\n\n trimmed_arr = np.array([[4, 0, 3],\n [4, 4, 3],\n [1, 1, 3],\n [1, 1, 3]], dtype=np.int64)\n\n compare = trimmed_arr == trimmed_raster.data\n assert compare.all()\n\n\ndef test_trim_left_bottom():\n arr = np.array([[4, 0, 3, 0],\n [4, 4, 3, 0],\n [1, 1, 3, 0],\n [1, 1, 3, 0],\n [0, 0, 0, 0]], dtype=np.int64)\n\n raster = create_test_arr(arr)\n trimmed_raster = trim(raster, values=(0,))\n assert trimmed_raster.shape == (4, 3)\n\n trimmed_arr = np.array([[4, 0, 3],\n [4, 4, 3],\n [1, 1, 3],\n [1, 1, 3]], dtype=np.int64)\n\n compare = trimmed_arr == trimmed_raster.data\n assert compare.all()\n\n\ndef test_trim_right_bottom():\n arr = np.array([[0, 4, 0, 3],\n [0, 4, 4, 3],\n [0, 1, 1, 3],\n [0, 1, 1, 3],\n [0, 0, 0, 0]], dtype=np.int64)\n\n raster = create_test_arr(arr)\n trimmed_raster = trim(raster, values=(0,))\n assert trimmed_raster.shape == (4, 3)\n\n 
trimmed_arr = np.array([[4, 0, 3],\n [4, 4, 3],\n [1, 1, 3],\n [1, 1, 3]], dtype=np.int64)\n\n compare = trimmed_arr == trimmed_raster.data\n assert compare.all()\n\n\ndef test_crop():\n arr = np.array([[0, 4, 0, 3],\n [0, 4, 4, 3],\n [0, 1, 1, 3],\n [0, 1, 1, 3],\n [0, 0, 0, 0]], dtype=np.int64)\n\n raster = create_test_arr(arr)\n result = crop(raster, raster, zones_ids=(1, 3))\n assert result.shape == (4, 3)\n\n trimmed_arr = np.array([[4, 0, 3],\n [4, 4, 3],\n [1, 1, 3],\n [1, 1, 3]], dtype=np.int64)\n\n compare = trimmed_arr == result.data\n assert compare.all()\n\n\ndef test_crop_nothing_to_crop():\n arr = np.array([[0, 4, 0, 3],\n [0, 4, 4, 3],\n [0, 1, 1, 3],\n [0, 1, 1, 3],\n [0, 0, 0, 0]], dtype=np.int64)\n\n raster = create_test_arr(arr)\n result = crop(raster, raster, zones_ids=(0,))\n assert result.shape == arr.shape\n compare = arr == result.data\n assert compare.all()\n"
] | [
[
"numpy.int32",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.array_equal",
"numpy.linspace",
"numpy.asarray",
"numpy.unique",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Joshuawadd/rlcard | [
"de2da05a9b32a39e0d8bbfc1cd589d88a6580c56"
] | [
"rlcard/utils/logger.py"
] | [
"import os\nimport csv\n\n\nclass Logger(object):\n ''' Logger saves the running results and helps make plots from the results\n '''\n\n def __init__(self, log_dir):\n ''' Initialize the labels, legend and paths of the plot and log file.\n\n Args:\n log_path (str): The path the log files\n '''\n self.log_dir = log_dir\n self.txt_path = os.path.join(log_dir, 'log.txt')\n self.csv_path = os.path.join(log_dir, 'performance.csv')\n self.fig_path = os.path.join(log_dir, 'fig.png')\n\n self.txt_path_win = os.path.join(log_dir, 'win_log.txt')\n self.csv_path_win = os.path.join(log_dir, 'win_performance.csv')\n self.fig_path_win = os.path.join(log_dir, 'win_fig.png')\n self.fig_path_win_easy = os.path.join(log_dir, 'win_fig_easy.png')\n self.fig_path_win_medium = os.path.join(log_dir, 'win_fig_medium.png')\n self.fig_path_win_hard = os.path.join(log_dir, 'win_fig_hard.png')\n self.fig_path_win_all = os.path.join(log_dir, 'win_fig_all.png')\n\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n self.txt_file = open(self.txt_path, 'a')\n self.csv_file = open(self.csv_path, 'a')\n #self.txt_file_win = open(self.txt_path_win, 'w')\n self.csv_file_win = open(self.csv_path_win, 'a')\n fieldnames = ['episodes', 'reward']\n fieldnames_win = ['episodes', 'win rate', 'win rate easy', 'win rate medium', 'win rate hard']\n self.writer = csv.DictWriter(self.csv_file, fieldnames=fieldnames)\n self.writer_win = csv.DictWriter(self.csv_file_win, fieldnames=fieldnames_win)\n self.writer.writeheader()\n self.writer_win.writeheader()\n\n def log(self, text):\n ''' Write the text to log file then print it.\n Args:\n text(string): text to log\n '''\n self.txt_file.write(text+'\\n')\n self.txt_file.flush()\n print(text)\n\n def log_performance(self, episodes, reward, win_rate):\n ''' Log a point in the curve\n Args:\n episodes (int): the episodes of the current point\n reward (float): the reward of the current point\n '''\n\n self.txt_file = open(self.txt_path, 'a')\n self.csv_file = open(self.csv_path, 'a')\n self.csv_file_win = open(self.csv_path_win, 'a')\n\n fieldnames = ['episodes', 'reward']\n fieldnames_win = ['episodes', 'win rate', 'win rate easy', 'win rate medium', 'win rate hard']\n self.writer = csv.DictWriter(self.csv_file, fieldnames=fieldnames)\n self.writer_win = csv.DictWriter(self.csv_file_win, fieldnames=fieldnames_win)\n\n self.writer.writerow({'episodes': episodes, 'reward': reward})\n self.writer_win.writerow({'episodes': episodes, 'win rate': win_rate[0], 'win rate easy': win_rate[1] , 'win rate medium': win_rate[2], 'win rate hard': win_rate[3]})\n print('')\n self.log('----------------------------------------')\n self.log(' episodes | ' + str(episodes))\n self.log(' reward | ' + str(reward))\n self.log(' win rate | ' + str(win_rate[0]))\n self.log(' win rate easy | ' + str(win_rate[1]))\n self.log(' win rate medium | ' + str(win_rate[2]))\n self.log(' win rate hard | ' + str(win_rate[3]))\n self.log('----------------------------------------')\n\n def plot(self, algorithm):\n plot(self.csv_path, self.fig_path, algorithm)\n plot_win(self.csv_path_win, self.fig_path_win, 'win rate', algorithm)\n plot_win(self.csv_path_win, self.fig_path_win_easy, 'win rate easy', algorithm)\n plot_win(self.csv_path_win, self.fig_path_win_medium, 'win rate medium', algorithm)\n plot_win(self.csv_path_win, self.fig_path_win_hard, 'win rate hard', algorithm)\n plot_win_all(self.csv_path_win, self.fig_path_win_all, algorithm)\n\n def close_files(self):\n ''' Close the created file objects\n '''\n if 
self.txt_path is not None:\n self.txt_file.close()\n if self.csv_path is not None:\n self.csv_file.close()\n if self.csv_path_win is not None:\n self.csv_file_win.close()\n\ndef plot(csv_path, save_path, algorithm):\n ''' Read data from csv file and plot the results\n '''\n import matplotlib.pyplot as plt\n with open(csv_path) as csvfile:\n #print(csv_path)\n reader = csv.DictReader(csvfile)\n xs = []\n ys = []\n for row in reader:\n xs.append(int(row['episodes']))\n ys.append(float(row['reward']))\n fig, ax = plt.subplots()\n ax.plot(xs, ys, label=algorithm)\n ax.set(xlabel='episodes', ylabel='reward')\n ax.legend()\n ax.grid()\n\n save_dir = os.path.dirname(save_path)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n fig.savefig(save_path)\n plt.close(fig)\n\ndef plot_win(csv_path, save_path, row_name, algorithm):\n ''' Read data from csv file and plot the results\n '''\n import matplotlib.pyplot as plt\n with open(csv_path) as csvfile:\n #print(csv_path)\n reader = csv.DictReader(csvfile)\n xs = []\n ys = []\n for row in reader:\n xs.append(int(row['episodes']))\n ys.append(float(row[row_name]))\n fig, ax = plt.subplots()\n ax.plot(xs, ys, label=algorithm)\n ax.set(xlabel='episodes', ylabel='win rate')\n ax.legend()\n ax.grid()\n\n save_dir = os.path.dirname(save_path)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n fig.savefig(save_path)\n plt.close(fig)\n\ndef plot_win_all(csv_path, save_path, algorithm):\n ''' Read data from csv file and plot the results\n '''\n import matplotlib.pyplot as plt\n with open(csv_path) as csvfile:\n #print(csv_path)\n reader = csv.DictReader(csvfile)\n xs = []\n ys1 = []\n ys2 = []\n ys3 = []\n for row in reader:\n xs.append(int(row['episodes']))\n ys1.append(float(row['win rate easy']))\n ys2.append(float(row['win rate medium']))\n ys3.append(float(row['win rate hard']))\n fig, ax = plt.subplots()\n ax.plot(xs, ys1, label='Easy')\n ax.plot(xs, ys2, label='Medium')\n ax.plot(xs, ys3, label='Hard')\n ax.set(xlabel='episodes', ylabel='win rate')\n ax.legend()\n ax.grid()\n\n save_dir = os.path.dirname(save_path)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n fig.savefig(save_path)\n plt.close(fig)\n"
] | [
[
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.close"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mwcraig/pyspeckit | [
"6d6c09aac29549a8c094d97fb385c9283422bb82",
"6d6c09aac29549a8c094d97fb385c9283422bb82"
] | [
"pyspeckit/cubes/mapplot.py",
"pyspeckit/cubes/cubes.py"
] | [
"\"\"\"\nMapPlot\n-------\n\nMake plots of the cube and interactively connect them to spectrum plotting.\nThis is really an interactive component of the package; nothing in here is\nmeant for publication-quality plots, but more for user interactive analysis.\n\nThat said, the plotter makes use of `APLpy <https://github.com/aplpy/aplpy>`_,\nso it is possible to make publication-quality plots.\n\n:author: Adam Ginsburg\n:date: 03/17/2011\n\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\"\"\"\nfrom __future__ import print_function\nimport matplotlib\nimport matplotlib.pyplot\nimport matplotlib.figure\nimport numpy as np\nimport copy\nimport itertools\nfrom astropy.extern import six\ntry:\n import astropy.wcs as pywcs\n import astropy.io.fits as pyfits\n pywcsOK = True\nexcept ImportError:\n try:\n import pyfits\n import pywcs\n pywcsOK = True\n except ImportError:\n pywcsOK = False\ntry:\n import aplpy\n icanhasaplpy = True\nexcept: # aplpy fails with generic exceptions instead of ImportError\n icanhasaplpy = False\n\nfrom . import cubes\n\nclass MapPlotter(object):\n \"\"\"\n Class to plot a spectrum\n\n See `mapplot` for use documentation; this docstring is only for\n initialization.\n \"\"\"\n\n def __init__(self, Cube=None, figure=None, doplot=False, **kwargs):\n \"\"\"\n Create a map figure for future plotting\n \"\"\"\n # figure out where to put the plot\n if isinstance(figure,matplotlib.figure.Figure):\n self.figure = figure\n elif type(figure) is int:\n self.figure = matplotlib.pyplot.figure(figure)\n else:\n self.figure = None\n self.axis = None\n self.FITSFigure = None\n self._click_marks = []\n self._circles = []\n self._clickX = None\n self._clickY = None\n\n self.overplot_colorcycle = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y'])\n self.overplot_linestyle = '-'\n \n self.Cube = Cube\n if self.Cube is not None:\n self.header = cubes.flatten_header(self.Cube.header, delete=True)\n if pywcsOK:\n self.wcs = pywcs.WCS(self.header)\n\n if doplot: self.mapplot(**kwargs)\n\n def __call__(self, **kwargs):\n \"\"\" see mapplot \"\"\"\n return self.mapplot(**kwargs)\n\n def mapplot(self, convention='calabretta', colorbar=True, useaplpy=True,\n vmin=None, vmax=None, cmap=None, plotkwargs={}, **kwargs):\n \"\"\"\n Plot up a map based on an input data cube.\n\n The map to be plotted is selected using `makeplane`.\n The `estimator` keyword argument is passed to that function.\n\n The plotted map, once shown, is interactive. You can click on it with any \n of the three mouse buttons. \n\n Button 1 or keyboard '1':\n Plot the selected pixel's spectrum in another window. Mark the\n clicked pixel with an 'x'\n Button 2 or keyboard 'o':\n Overplot a second (or third, fourth, fifth...) spectrum in the\n external plot window\n Button 3:\n Disconnect the interactive viewer\n\n You can also click-and-drag with button 1 to average over a circular\n region. 
This same effect can be achieved by using the 'c' key to\n set the /c/enter of a circle and the 'r' key to set its /r/adius (i.e.,\n hover over the center and press 'c', then hover some distance away and\n press 'r').\n\n\n Parameters\n ----------\n convention : 'calabretta' or 'griesen'\n The default projection to assume for Galactic data when plotting\n with aplpy.\n colorbar : bool\n Whether to show a colorbar\n plotkwargs : dict, optional\n A dictionary of keyword arguments to pass to aplpy.show_colorscale\n or matplotlib.pyplot.imshow\n useaplpy : bool\n Use aplpy if a FITS header is available\n vmin, vmax: float or None\n Override values for the vmin/vmax values. Will be automatically\n determined if left as None\n\n .. todo:\n Allow mapplot in subfigure\n \"\"\"\n if self.figure is None:\n self.figure = matplotlib.pyplot.figure()\n else:\n self._disconnect()\n self.figure.clf()\n\n # this is where the map is created; everything below this is just plotting\n self.makeplane(**kwargs)\n\n # have tot pop out estimator so that kwargs can be passed to imshow\n if 'estimator' in kwargs:\n kwargs.pop('estimator')\n\n # Below here is all plotting stuff\n\n if vmin is None: vmin = self.plane[self.plane==self.plane].min()\n if vmax is None: vmax = self.plane[self.plane==self.plane].max()\n\n if icanhasaplpy and useaplpy:\n self.fitsfile = pyfits.PrimaryHDU(data=self.plane,header=self.header)\n self.FITSFigure = aplpy.FITSFigure(self.fitsfile,figure=self.figure,convention=convention)\n self.FITSFigure.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap, **plotkwargs)\n self.axis = self.FITSFigure._ax1\n if colorbar:\n try:\n self.FITSFigure.add_colorbar()\n except Exception as ex:\n print(\"ERROR: Could not create colorbar! Error was %s\" % str(ex))\n self._origin = 0 # FITS convention\n # TODO: set _origin to 1 if using PIXEL units, not real wcs\n else:\n self.axis = self.figure.add_subplot(111)\n if hasattr(self,'colorbar') and self.colorbar is not None:\n if self.colorbar.ax in self.axis.figure.axes:\n self.axis.figure.delaxes(self.colorbar.ax)\n self.axis.imshow(self.plane, vmin=vmin, vmax=vmax, cmap=cmap, **plotkwargs)\n if colorbar: \n try:\n self.colorbar = matplotlib.pyplot.colorbar(self.axis.images[0])\n except Exception as ex:\n print(\"ERROR: Could not create colorbar! Error was %s\" % str(ex))\n self._origin = 0 # normal convention \n\n self.canvas = self.axis.figure.canvas\n\n self._connect()\n\n def _connect(self):\n \"\"\" Connect click, click up (release click), and key press to events \"\"\"\n self.clickid = self.canvas.callbacks.connect('button_press_event',self.click)\n self.clickupid = self.canvas.callbacks.connect('button_release_event',self.plot_spectrum)\n self.keyid = self.canvas.callbacks.connect('key_press_event',self.plot_spectrum)\n\n def _disconnect(self):\n \"\"\" Disconnect click, click up (release click), and key press from events \"\"\"\n if hasattr(self,'canvas'):\n self.canvas.mpl_disconnect(self.clickid)\n self.canvas.mpl_disconnect(self.clickupid)\n self.canvas.mpl_disconnect(self.keyid)\n\n def makeplane(self, estimator=np.nanmean):\n \"\"\"\n Create a \"plane\" view of the cube, either by slicing or projecting it\n or by showing a slice from the best-fit model parameter cube.\n\n Parameters\n ----------\n\n estimator : [ function | 'max' | 'int' | FITS filename | integer | slice ]\n A non-pythonic, non-duck-typed variable. 
If it's a function, apply that function\n along the cube's spectral axis to obtain an estimate (e.g., mean, min, max, etc.).\n 'max' will do the same thing as passing np.max\n 'int' will attempt to integrate the image (which is why I didn't duck-type)\n (integrate means sum and multiply by dx)\n a .fits filename will be read using pyfits (so you can make your own cover figure)\n an integer will get the n'th slice in the parcube if it exists\n If it's a slice, slice the input data cube along the Z-axis with this slice\n\n \"\"\"\n # THIS IS A HACK!!! isinstance(a function, function) must be a thing...\n FUNCTION = type(np.max)\n\n # estimator is NOT duck-typed\n if type(estimator) is FUNCTION:\n self.plane = estimator(self.Cube.cube,axis=0)\n elif isinstance(estimator, six.string_types):\n if estimator == 'max':\n self.plane = self.Cube.cube.max(axis=0)\n elif estimator == 'int':\n dx = np.abs(self.Cube.xarr[1:] - self.Cube.xarr[:-1])\n dx = np.concatenate([dx,[dx[-1]]])\n self.plane = (self.Cube.cube * dx[:,np.newaxis,np.newaxis]).sum(axis=0)\n elif estimator[-5:] == \".fits\":\n self.plane = pyfits.getdata(estimator)\n elif type(estimator) is slice:\n self.plane = self.Cube.cube[estimator,:,:]\n elif type(estimator) is int:\n if hasattr(self.Cube,'parcube'):\n self.plane = self.Cube.parcube[estimator,:,:]\n \n if self.plane is None:\n raise ValueError(\"Invalid estimator %s\" % (str(estimator)))\n\n if np.sum(np.isfinite(self.plane)) == 0:\n raise ValueError(\"Map is all NaNs or infs. Check your estimator or your input cube.\")\n\n def click(self,event):\n \"\"\"\n Record location of downclick\n \"\"\"\n if event.inaxes:\n self._clickX = np.round(event.xdata) - self._origin\n self._clickY = np.round(event.ydata) - self._origin\n\n def plot_spectrum(self, event, plot_fit=True):\n \"\"\"\n Connects map cube to Spectrum...\n \"\"\"\n self.event = event\n if event.inaxes:\n clickX = np.round(event.xdata) - self._origin\n clickY = np.round(event.ydata) - self._origin\n \n # grab toolbar info so that we don't do anything if a tool is selected\n tb = self.canvas.toolbar\n if tb.mode != '':\n return\n elif event.key is not None:\n if event.key == 'c':\n self._center = (clickX-1,clickY-1)\n self._remove_circle()\n self._add_click_mark(clickX,clickY,clear=True)\n elif event.key == 'r':\n x,y = self._center\n self._add_circle(x,y,clickX,clickY)\n self.circle(x,y,clickX-1,clickY-1)\n elif event.key == 'o':\n clickX,clickY = round(clickX),round(clickY)\n print(\"OverPlotting spectrum from point %i,%i\" % (clickX-1,clickY-1))\n color=self.overplot_colorcycle.next()\n self._add_click_mark(clickX,clickY,clear=False, color=color)\n self.Cube.plot_spectrum(clickX-1,clickY-1,clear=False, color=color, linestyle=self.overplot_linestyle)\n elif event.key in ('1','2'):\n event.button = int(event.key)\n event.key = None\n self.plot_spectrum(event)\n elif (hasattr(event,'button') and event.button in (1,2) \n and not (self._clickX == clickX and self._clickY == clickY)):\n if event.button == 1:\n self._remove_circle()\n clear=True\n color = 'k'\n linestyle = 'steps-mid'\n else:\n color = self.overplot_colorcycle.next()\n linestyle = self.overplot_linestyle\n clear=False\n rad = ( (self._clickX-clickX)**2 + (self._clickY-clickY)**2 )**0.5\n print(\"Plotting circle from point %i,%i to %i,%i (r=%f)\" % (self._clickX-1,self._clickY-1,clickX-1,clickY-1,rad))\n self._add_circle(self._clickX,self._clickY,clickX,clickY)\n 
self.circle(self._clickX-1,self._clickY-1,clickX-1,clickY-1,clear=clear,linestyle=linestyle,color=color)\n elif hasattr(event,'button') and event.button is not None:\n if event.button==1:\n clickX,clickY = round(clickX),round(clickY)\n print(\"Plotting spectrum from point %i,%i\" % (clickX-1,clickY-1))\n self._remove_circle()\n self._add_click_mark(clickX,clickY,clear=True)\n self.Cube.plot_spectrum(clickX-1,clickY-1,clear=True)\n if plot_fit: self.Cube.plot_fit(clickX-1, clickY-1, silent=True)\n elif event.button==2:\n clickX,clickY = round(clickX),round(clickY)\n print(\"OverPlotting spectrum from point %i,%i\" % (clickX-1,clickY-1))\n color=self.overplot_colorcycle.next()\n self._add_click_mark(clickX,clickY,clear=False, color=color)\n self.Cube.plot_spectrum(clickX-1,clickY-1,clear=False, color=color, linestyle=self.overplot_linestyle)\n elif event.button==3:\n print(\"Disconnecting GAIA-like tool\")\n self._disconnect()\n else:\n print(\"Call failed for some reason: \")\n print(\"event: \",event)\n else:\n pass\n # never really needed... warn(\"Click outside of axes\")\n\n def _add_click_mark(self,x,y,clear=False,color='k'):\n \"\"\"\n Add an X at some position\n \"\"\"\n if clear:\n self._clear_click_marks()\n if self.FITSFigure is not None:\n label = 'xmark%i' % (len(self._click_marks)+1)\n x,y = self.FITSFigure.pixel2world(x,y)\n self.FITSFigure.show_markers(x,y,marker='x',c=color,layer=label)\n self._click_marks.append( label )\n else:\n self._click_marks.append( self.axis.plot(x,y,'kx') )\n self.refresh()\n\n def _clear_click_marks(self):\n \"\"\"\n Remove all marks added by previous clicks\n \"\"\"\n if self.FITSFigure is not None:\n for mark in self._click_marks:\n if mark in self.FITSFigure._layers:\n self.FITSFigure.remove_layer(mark)\n else:\n for mark in self._click_marks:\n self._click_marks.remove(mark)\n if mark in self.axis.lines:\n self.axis.lines.remove(mark)\n self.refresh()\n\n def _add_circle(self,x,y,x2,y2,**kwargs):\n \"\"\"\n \"\"\"\n if self.FITSFigure is not None:\n x,y = self.FITSFigure.pixel2world(x,y)\n x2,y2 = self.FITSFigure.pixel2world(x2,y2)\n r = (np.linalg.norm(np.array([x,y])-np.array([x2,y2])))\n #self.FITSFigure.show_markers(x,y,s=r,marker='o',facecolor='none',edgecolor='black',layer='circle')\n layername = \"circle%02i\" % len(self._circles)\n self.FITSFigure.show_circles(x,y,r,edgecolor='black',facecolor='none',layer=layername,**kwargs)\n self._circles.append(layername)\n else:\n r = np.linalg.norm(np.array([x,y])-np.array([x2,y2]))\n circle = matplotlib.patches.Circle([x,y],radius=r,**kwargs)\n self._circles.append( circle )\n self.axis.patches.append(circle)\n self.refresh()\n\n def _remove_circle(self):\n \"\"\"\n \"\"\"\n if self.FITSFigure is not None:\n for layername in self._circles:\n if layername in self.FITSFigure._layers:\n self.FITSFigure.remove_layer(layername)\n else:\n for circle in self._circles:\n if circle in self.axis.patches:\n self.axis.patches.remove(circle)\n self._circles.remove(circle)\n self.refresh()\n\n def refresh(self):\n if self.axis is not None:\n self.axis.figure.canvas.draw()\n\n def circle(self,x1,y1,x2,y2,**kwargs):\n \"\"\"\n Plot the spectrum of a circular aperture\n \"\"\"\n\n r = (np.linalg.norm(np.array([x1,y1])-np.array([x2,y2])))\n self.Cube.plot_apspec([x1,y1,r],**kwargs)\n #self.Cube.data = cubes.extract_aperture( self.Cube.cube, [x1,y1,r] , coordsys=None )\n #self.Cube.plotter()\n\n def copy(self, parent=None):\n \"\"\"\n Create a copy of the map plotter with blank (uninitialized) axis & figure\n\n [ 
parent ] \n A spectroscopic axis instance that is the parent of the specfit\n instance. This needs to be specified at some point, but defaults\n to None to prevent overwriting a previous plot.\n \"\"\"\n\n newmapplot = copy.copy(self)\n newmapplot.Cube = parent\n newmapplot.axis = None\n newmapplot.figure = None\n\n return newmapplot\n",
"\"\"\"\n~~~~~~~~\ncubes.py\n~~~~~~~~\n\nFrom `agpy <http://code.google.com/p/agpy/source/browse/trunk/agpy/cubes.py>`_,\ncontains functions to perform various transformations on data cubes and their\nheaders.\n\n\n\"\"\"\nfrom __future__ import print_function\nfrom astropy.extern.six.moves import xrange\nfrom numpy import sqrt,repeat,indices,newaxis,pi,cos,sin,array,mean,nansum\nfrom math import acos,atan2,tan\nimport numpy\nimport numpy as np\nimport copy\nimport os\nimport astropy.io.fits as fits\nimport astropy.wcs as pywcs\nimport tempfile\nimport warnings\nfrom astropy import coordinates\nfrom astropy import log\ntry:\n from AG_fft_tools import smooth\n smoothOK = True\nexcept ImportError:\n smoothOK = False\ntry:\n from scipy.interpolate import UnivariateSpline\n scipyOK = True\nexcept ImportError:\n scipyOK = False\n\nfrom . import posang # agpy code\nfrom ..parallel_map import parallel_map\nfrom ..spectrum import smooth\n\ndtor = pi/180.0\n\n\ndef blfunc_generator(x=None, polyorder=None, splineorder=None,\n sampling=1):\n \"\"\"\n Generate a function that will fit a baseline (polynomial or spline) to a\n data set. Either ``splineorder`` or ``polyorder`` must be set\n\n Parameters\n ----------\n x : np.ndarray or None\n The X-axis of the fitted array. Will be set to\n ``np.arange(len(data))`` if not specified\n polyorder : None or int\n The polynomial order.\n splineorder : None or int\n sampling : int\n The sampling rate to use for the data. Can set to higher numbers to\n effectively downsample the data before fitting\n \"\"\"\n def blfunc(args, x=x):\n yfit,yreal = args\n if hasattr(yfit,'mask'):\n mask = ~yfit.mask\n else:\n mask = np.isfinite(yfit)\n\n if x is None:\n x = np.arange(yfit.size, dtype=yfit.dtype)\n\n ngood = np.count_nonzero(mask)\n if polyorder is not None:\n if ngood < polyorder:\n return yreal\n else:\n endpoint = ngood - (ngood % sampling)\n y = np.mean([yfit[mask][ii:endpoint:sampling]\n for ii in range(sampling)], axis=0)\n polypars = np.polyfit(x[mask][sampling/2:endpoint:sampling],\n y, polyorder)\n return yreal-np.polyval(polypars, x).astype(yreal.dtype)\n\n elif splineorder is not None and scipyOK:\n if splineorder < 1 or splineorder > 4:\n raise ValueError(\"Spline order must be in {1,2,3,4}\")\n elif ngood <= splineorder:\n return yreal\n else:\n log.debug(\"splinesampling: {0} \"\n \"splineorder: {1}\".format(sampling, splineorder))\n endpoint = ngood - (ngood % sampling)\n y = np.mean([yfit[mask][ii:endpoint:sampling]\n for ii in range(sampling)], axis=0)\n if len(y) <= splineorder:\n raise ValueError(\"Sampling is too sparse. Use finer sampling or \"\n \"decrease the spline order.\")\n spl = UnivariateSpline(x[mask][sampling/2:endpoint:sampling],\n y,\n k=splineorder,\n s=0)\n return yreal-spl(x)\n else:\n raise ValueError(\"Must provide polyorder or splineorder\")\n\n return blfunc\n\n\ndef baseline_cube(cube, polyorder=None, cubemask=None, splineorder=None,\n numcores=None, sampling=1):\n \"\"\"\n Given a cube, fit a polynomial to each spectrum\n\n Parameters\n ----------\n cube: np.ndarray\n An ndarray with ndim = 3, and the first dimension is the spectral axis\n polyorder: int\n Order of the polynomial to fit and subtract\n cubemask: boolean ndarray\n Mask to apply to cube. Values that are True will be ignored when\n fitting.\n numcores : None or int\n Number of cores to use for parallelization. 
If None, will be set to\n the number of available cores.\n \"\"\"\n x = np.arange(cube.shape[0], dtype=cube.dtype)\n #polyfitfunc = lambda y: np.polyfit(x, y, polyorder)\n blfunc = blfunc_generator(x=x,\n splineorder=splineorder,\n polyorder=polyorder,\n sampling=sampling)\n\n reshaped_cube = cube.reshape(cube.shape[0], cube.shape[1]*cube.shape[2]).T\n\n if cubemask is None:\n log.debug(\"No mask defined.\")\n fit_cube = reshaped_cube\n else:\n if cubemask.dtype != 'bool':\n raise TypeError(\"Cube mask *must* be a boolean array.\")\n if cubemask.shape != cube.shape:\n raise ValueError(\"Mask shape does not match cube shape\")\n log.debug(\"Masking cube with shape {0} \"\n \"with mask of shape {1}\".format(cube.shape, cubemask.shape))\n masked_cube = cube.copy()\n masked_cube[cubemask] = np.nan\n fit_cube = masked_cube.reshape(cube.shape[0], cube.shape[1]*cube.shape[2]).T\n\n\n baselined = np.array(parallel_map(blfunc, zip(fit_cube,reshaped_cube), numcores=numcores))\n blcube = baselined.T.reshape(cube.shape)\n return blcube\n\n\n\ndef flatten_header(header,delete=False):\n \"\"\"\n Attempt to turn an N-dimensional fits header into a 2-dimensional header\n Turns all CRPIX[>2] etc. into new keywords with suffix 'A'\n\n header must be a fits.Header instance\n \"\"\"\n\n if not isinstance(header,fits.Header):\n raise Exception(\"flatten_header requires a fits.Header instance\")\n\n newheader = header.copy()\n\n for key in newheader.keys():\n try:\n if delete and int(key[-1]) >= 3 and key[:2] in ['CD','CR','CT','CU','NA']:\n newheader.pop(key)\n elif (int(key[-1]) >= 3 or int(key[2])>=3) and key[:2] in ['CD','CR','CT','CU','NA','PC']:\n newheader.rename_keyword(key,'A'+key,force=True)\n if delete and (int(key[4]) >= 3 or int(key[7]) >= 3) and key[:2]=='PC' and key in newheader:\n newheader.pop(key)\n except ValueError:\n # if key[-1] is not an int\n pass\n except IndexError:\n # if len(key) < 2\n pass\n newheader['NAXIS'] = 2\n if header.get('WCSAXES'):\n newheader['WCSAXES'] = 2\n\n return newheader\n\ndef speccen_header(header, lon=None, lat=None, proj='TAN', system='celestial',\n spectral_axis=3, celestial_axes=[1,2]):\n \"\"\"\n Turn a cube header into a spectrum header, retaining RA/Dec vals where possible\n (speccen is like flatten; spec-ify would be better but, specify? 
nah)\n\n Assumes 3rd axis is velocity\n \"\"\"\n newheader = header.copy()\n new_spectral_axis = 1\n newheader['CRVAL{0}'.format(new_spectral_axis)] = header.get('CRVAL{0}'.format(spectral_axis))\n newheader['CRPIX{0}'.format(new_spectral_axis)] = header.get('CRPIX{0}'.format(spectral_axis))\n if 'CD{0}_{0}'.format(new_spectral_axis) in header:\n newheader.rename_keyword('CD{0}_{0}'.format(new_spectral_axis),\n 'OLDCD{0}_{0}'.format(new_spectral_axis))\n elif 'CDELT{0}'.format(new_spectral_axis) in header:\n newheader.rename_keyword('CDELT{0}'.format(new_spectral_axis),'OLDCDEL{0}'.format(new_spectral_axis))\n if 'CD{0}_{0}'.format(spectral_axis) in header:\n newheader['CDELT{0}'.format(new_spectral_axis)] = header.get('CD{0}_{0}'.format(spectral_axis))\n elif 'CDELT{0}'.format(spectral_axis) in header:\n newheader['CDELT{0}'.format(new_spectral_axis)] = header.get('CDELT{0}'.format(spectral_axis))\n newheader['CTYPE{0}'.format(new_spectral_axis)] = 'VRAD'\n if header.get('CUNIT{0}'.format(spectral_axis)):\n newheader['CUNIT{0}'.format(new_spectral_axis)] = header.get('CUNIT{0}'.format(spectral_axis))\n else:\n print(\"Assuming CUNIT3 is km/s in speccen_header\")\n newheader['CUNIT{0}'.format(new_spectral_axis)] = 'km/s'\n newheader['CRPIX2'] = 1\n newheader['CRPIX{0}'.format(spectral_axis)] = 1\n if system == 'celestial':\n c2 = 'RA---'\n c3 = 'DEC--'\n elif system == 'galactic':\n c2 = 'GLON-'\n c3 = 'GLAT-'\n elif system == 'PIXEL':\n c2 = 'PIX--'\n c3 = 'PIX--'\n newheader['CTYPE2'] = c2+proj\n newheader['CTYPE{0}'.format(spectral_axis)] = c3+proj\n\n if lon is not None:\n newheader['CRVAL2'] = lon\n if lat is not None:\n newheader['CRVAL{0}'.format(spectral_axis)] = lat\n\n if 'CD2_2' in header:\n newheader.rename_keyword('CD2_2','OLDCD2_2')\n if 'CD{0}_{0}'.format(spectral_axis) in header:\n newheader.rename_keyword('CD{0}_{0}'.format(spectral_axis),\n 'OLDCD{0}_{0}'.format(spectral_axis))\n if 'CROTA2' in header:\n newheader.rename_keyword('CROTA2','OLDCROT2')\n\n return newheader\n\ndef extract_aperture(cube, ap, r_mask=False, wcs=None,\n coordsys='galactic', wunit='arcsec', debug=False,\n method='mean'):\n \"\"\"\n Extract an aperture from a data cube. E.g. to acquire a spectrum\n of an outflow that is extended.\n\n Cube should have shape [z,y,x], e.g.\n cube = fits.getdata('datacube.fits')\n\n Apertures are specified in PIXEL units with an origin of 0,0 (NOT the 1,1\n fits standard!) unless wcs and coordsys are specified\n\n Parameters\n ----------\n ap : list\n For a circular aperture, len(ap)=3:\n ap = [xcen,ycen,radius]\n For an elliptical aperture, len(ap)=5:\n ap = [xcen,ycen,height,width,PA]\n wcs : wcs\n a pywcs.WCS instance associated with the data cube\n coordsys : str\n the coordinate system the aperture is specified in.\n Options are 'celestial' and 'galactic'. Default is 'galactic'\n wunit : str\n units of width/height. 
default 'arcsec', options 'arcmin' and 'degree'\n method : str\n 'mean' or 'sum' (average over spectra, or sum them)\n or 'error' for sqrt(sum-of-squares / n)\n\n Other Parameters\n ----------------\n r_mask : bool\n return mask in addition to spectrum (for error checking?)\n \"\"\"\n warnings.warn(\"SpectralCube can do what subimage_integ does much more easily!\",\n DeprecationWarning)\n\n if wcs is not None and coordsys is not None:\n if debug:\n print(\"Converting aperture \",ap,)\n ap = aper_world2pix(ap,wcs,coordsys=coordsys,wunit=wunit)\n if debug:\n print(\" to \",ap)\n\n if len(ap) == 3:\n sh = cube.shape\n yind,xind = indices(sh[1:3]) # recall that python indices are backwards\n dis = sqrt((xind-ap[0])**2+(yind-ap[1])**2)\n mask = dis < ap[2]\n elif len(ap) == 5:\n yinds,xinds = indices(cube.shape[1:3])\n th = (ap[4])*dtor\n xindr = (xinds-ap[0])*cos(th) + (yinds-ap[1])*sin(th)\n yindr = (xinds-ap[0])*-sin(th) + (yinds-ap[1])*cos(th)\n ratio = max(ap[2:4])/min(ap[2:4])\n mask = ((xindr*ratio)**2 + yindr**2)**0.5 < max(ap[2:4])\n else:\n raise Exception(\"Wrong number of parameters. Need either 3 parameters \"\n \"for a circular aperture or 5 parameters for an \"\n \"elliptical aperture.\")\n\n npixinmask = mask.sum()\n mask3d = repeat(mask[newaxis,:,:],cube.shape[0],axis=0)\n if method == 'mean':\n specsum = nansum(nansum((cube*mask3d),axis=2),axis=1)\n spec = specsum / npixinmask\n elif method == 'error':\n specsum = nansum(nansum((cube*mask3d)**2,axis=2),axis=1)\n spec = (specsum)**0.5 / npixinmask\n else:\n spec = nansum(nansum((cube*mask3d),axis=2),axis=1)\n\n if r_mask:\n return spec,mask\n else:\n return spec\n\ndef integ(file,vrange,xcen=None,xwidth=None,ycen=None,ywidth=None,**kwargs):\n \"\"\"\n wrapper of subimage_integ that defaults to using the full image\n \"\"\"\n if isinstance(file,fits.PrimaryHDU):\n header = file.header\n cube = file.data\n elif isinstance(file,fits.HDUList):\n header = file[0].header\n cube = file[0].data\n else:\n file = fits.open(file)\n header = file[0].header\n cube = file[0].data\n\n if None in [xcen,xwidth,ycen,ywidth]:\n xcen = header['NAXIS1'] / 2\n xwidth = xcen + header['NAXIS1'] % 2\n ycen = header['NAXIS2'] / 2\n ywidth = ycen + header['NAXIS2'] % 2\n\n return subimage_integ(cube,xcen,xwidth,ycen,ywidth,vrange,header=header,**kwargs)\n\ndef subimage_integ(cube, xcen, xwidth, ycen, ywidth, vrange, header=None,\n average=mean, dvmult=False, return_HDU=False,\n units=\"pixels\", zunits=None):\n \"\"\"\n Returns a sub-image from a data cube integrated over the specified velocity range\n\n NOTE: With `spectral_cube <spectral-cube.rtfd.org>`_, subcube features can\n be easily applied with the `.subcube` method, and integration is handled\n separately.\n\n Parameters\n ----------\n cube : np.ndarray\n A 3-dimensional numpy array with dimensions (velocity, y, x)\n xcen,ycen : float\n The center in the X,Y-dimension. See `units` below for unit information\n xwidth,ywidth : float\n The width in the X,Y-dimension. See `units` below for unit information\n xwidth and ywidth are \"radius\" values, i.e. half the length that will be extracted\n vrange : (float,float)\n The velocity range to integrate over. 
See `zunits` below for unit information\n header : `astropy.io.fits.Header` or None\n If specified, will allow the use of WCS units\n average : function\n The function to apply when 'integrating' over the subcube\n dvmult : bool\n If dvmult is set, multiply the average by DV (this is useful if you set\n average=sum and dvmul=True to get an integrated value, e.g. K km/s or\n Jy km/s)\n return_hdu : bool\n If specified, will return an HDU object, otherwise will return the\n array and header\n units : 'pixels' or 'wcs'\n If 'pixels', all units (xcen, ycen, xwidth, ywidth) will be in pixels.\n If 'wcs', the values will be converted from WCS units to pixel units\n using the WCS specified by the `header`\n zunits : 'pixels' or 'wcs' or None\n If None, will be set to be the same as `units`\n\n Returns\n -------\n subim, hdu : tuple\n A tuple (integrated array, header) if ``return_hdu`` is ``False``, or an HDU if\n it is True\n \"\"\"\n\n if header:\n flathead = flatten_header(header.copy())\n wcs = pywcs.WCS(header=flathead)\n if header.get('CD3_3'): CD3 = header.get('CD3_3')\n else: CD3 = header.get('CDELT3')\n\n if units==\"pixels\":\n xlo = int( max([xcen-xwidth,0]) )\n ylo = int( max([ycen-ywidth,0]) )\n xhi = int( min([xcen+xwidth,cube.shape[2]]) )\n yhi = int( min([ycen+ywidth,cube.shape[1]]) )\n elif units==\"wcs\" and header:\n newxcen,newycen = wcs.wcs_world2pix(xcen,ycen,0)\n try:\n newxwid,newywid = xwidth / abs(wcs.wcs.cd[0,0]), ywidth / abs(wcs.wcs.cd[1,1])\n except AttributeError:\n newxwid,newywid = xwidth / abs(wcs.wcs.cdelt[0]), ywidth / abs(wcs.wcs.cdelt[1])\n xlo = int( max([newxcen-newxwid,0]) )\n ylo = int( max([newycen-newywid,0]) )\n xhi = int( min([newxcen+newxwid,cube.shape[2]]) )\n yhi = int( min([newycen+newywid,cube.shape[1]]) )\n else:\n print(\"Can only use wcs if you pass a header.\")\n\n if zunits is None:\n zunits = units\n if zunits == 'pixels':\n zrange = vrange\n if zunits == 'wcs':\n zrange = ( array(vrange)-header.get('CRVAL3') ) / CD3 - 1 + header.get('CRPIX3')\n\n subim = average(cube[zrange[0]:zrange[1],ylo:yhi,xlo:xhi],axis=0)\n if dvmult and CD3: subim *= CD3\n elif dvmult:\n print(\"Error: could not multiply by dv; CD3=\",CD3)\n\n if header is None:\n return subim\n else:\n # Cannot set crval2 != 0 for Galactic coordinates: therefore, probably\n # wrong approach in general\n #crv1,crv2 = wcs.wcs_pix2world(xlo,ylo,0)\n\n #try:\n # flathead['CRVAL1'] = crv1[0]\n # flathead['CRVAL2'] = crv2[0]\n #except IndexError:\n # flathead['CRVAL1'] = crv1.item() # np 0-d arrays are not scalar\n # flathead['CRVAL2'] = crv2.item() # np 0-d arrays are not scalar\n\n # xlo, ylo have been forced to integers already above\n flathead['CRPIX1'] = flathead['CRPIX1'] - xlo\n flathead['CRPIX2'] = flathead['CRPIX2'] - ylo\n\n if return_HDU:\n return fits.PrimaryHDU(data=subim,header=flathead)\n else:\n return subim,flathead\n\ndef subcube(cube, xcen, xwidth, ycen, ywidth, header=None,\n dvmult=False, return_HDU=False, units=\"pixels\",\n widthunits=\"pixels\"):\n \"\"\"\n Crops a data cube\n\n All units assumed to be pixel units\n\n cube has dimensions (velocity, y, x)\n\n xwidth and ywidth are \"radius\" values, i.e. 
half the length that will be extracted\n\n if dvmult is set, multiple the average by DV (this is useful if you set\n average=sum and dvmul=True to get an integrated value)\n\n \"\"\"\n\n if header:\n newheader = header.copy()\n flathead = flatten_header(header.copy())\n wcs = pywcs.WCS(header=flathead)\n\n if widthunits == \"pixels\":\n newxwid, newywid = xwidth, ywidth\n elif widthunits == \"wcs\":\n try:\n newxwid,newywid = xwidth / abs(wcs.wcs.cd[0,0]), ywidth / abs(wcs.wcs.cd[1,1])\n except AttributeError:\n newxwid,newywid = xwidth / abs(wcs.wcs.cdelt[0]), ywidth / abs(wcs.wcs.cdelt[1])\n else:\n raise Exception(\"widthunits must be either 'wcs' or 'pixels'\")\n\n if units==\"pixels\":\n newxcen,newycen = xcen,ycen\n elif units==\"wcs\" and header:\n newxcen,newycen = wcs.wcs_world2pix(xcen,ycen,0)\n else:\n raise Exception(\"units must be either 'wcs' or 'pixels'\")\n\n x1 = int( numpy.floor( max([newxcen-newxwid,0]) ) )\n y1 = int( numpy.floor( max([newycen-newywid,0]) ) )\n x2 = int( numpy.ceil( min([newxcen+newxwid,cube.shape[2]]) ) )\n y2 = int( numpy.ceil( min([newycen+newywid,cube.shape[1]]) ) )\n\n xhi = max(x1,x2)\n xlo = min(x1,x2)\n yhi = max(y1,y2)\n ylo = min(y1,y2)\n\n subim = cube[:,ylo:yhi,xlo:xhi]\n\n if return_HDU:\n\n xmid_sky,ymid_sky = wcs.wcs_pix2world(xlo+xwidth,ylo+ywidth,0)\n\n try:\n newheader['CRVAL1'] = xmid_sky[0]\n newheader['CRVAL2'] = ymid_sky[0]\n except IndexError:\n newheader['CRVAL1'] = float(xmid_sky)\n newheader['CRVAL2'] = float(ymid_sky)\n newheader['CRPIX1'] = 1+xwidth\n newheader['CRPIX2'] = 1+ywidth\n\n newHDU = fits.PrimaryHDU(data=subim,header=newheader)\n if newHDU.header.get('NAXIS1') == 0 or newHDU.header.get('NAXIS2') == 0:\n raise Exception(\"Cube has been cropped to 0 in one dimension\")\n\n return newHDU\n else:\n return subim\n\ndef aper_world2pix(ap,wcs,coordsys='galactic',wunit='arcsec'):\n \"\"\"\n Converts an elliptical aperture (x,y,width,height,PA) from\n WCS to pixel coordinates given an input wcs (an instance\n of the pywcs.WCS class). Must be a 2D WCS header.\n\n\n \"\"\"\n convopt = {'arcsec':3600.0,'arcmin':60.0,'degree':1.0}\n try:\n conv = convopt[wunit]\n except:\n raise Exception(\"Must specify wunit='arcsec','arcmin', or 'degree'\")\n\n if len(wcs.wcs.cdelt) != 2:\n raise Exception(\"WCS header is not strictly 2-dimensional. 
Look for 3D keywords.\")\n if '' in wcs.wcs.ctype:\n raise Exception(\"WCS header has no CTYPE.\")\n\n if coordsys.lower() == 'galactic':\n pos = coordinates.SkyCoord(ap[0],ap[1],unit=('deg','deg'), frame='galactic')\n elif coordsys.lower() in ('radec','fk5','icrs','celestial'):\n pos = coordinates.SkyCoord(ap[0],ap[1],unit=('deg','deg'), frame='fk5')\n\n if wcs.wcs.ctype[0][:2] == 'RA':\n ra,dec = pos.icrs.ra.deg,pos.icrs.dec.deg\n elif wcs.wcs.ctype[0][:4] == 'GLON':\n ra,dec = pos.galactic.l.deg,pos.galactic.b.deg\n else:\n raise Exception(\"WCS CTYPE has no match.\")\n # workaround for a broken wcs.wcs_sky2pix\n try:\n radif = (wcs.wcs.crval[0]-ra)*dtor\n gamma = acos(cos(dec*dtor)*cos(wcs.wcs.crval[1]*dtor)*cos(radif)+sin(dec*dtor)*sin(wcs.wcs.crval[1]*dtor)) / dtor\n theta = atan2( sin(radif) , ( tan(dec*dtor)*cos(wcs.wcs.crval[1]*dtor)-sin(wcs.wcs.crval[1]*dtor)*cos(radif) ) )\n x = -gamma * sin(theta) / wcs.wcs.cd[0,0] + wcs.wcs.crpix[0]\n y = gamma * cos(theta) / wcs.wcs.cd[1,1] + wcs.wcs.crpix[1]\n except:\n radif = (wcs.wcs.crval[0]-ra)*dtor\n gamma = acos(cos(dec*dtor)*cos(wcs.wcs.crval[1]*dtor)*cos(radif)+sin(dec*dtor)*sin(wcs.wcs.crval[1]*dtor)) / dtor\n theta = atan2( sin(radif) , ( tan(dec*dtor)*cos(wcs.wcs.crval[1]*dtor)-sin(wcs.wcs.crval[1]*dtor)*cos(radif) ) )\n x = -gamma * sin(theta) / wcs.wcs.cdelt[0] + wcs.wcs.crpix[0]\n y = gamma * cos(theta) / wcs.wcs.cdelt[1] + wcs.wcs.crpix[1]\n\n #print \"DEBUG: x,y from math (vectors): \",x,y\n #x,y = wcs.wcs_world2pix(ra,dec,0) # convert WCS coordinate to pixel coordinate (0 is origin, do not use fits convention)\n #print \"DEBUG: x,y from wcs: \",x,y\n try:\n x=x[0] - 1 # change from FITS to python convention\n y=y[0] - 1 # change from FITS to python convention\n #print \"DEBUG: x,y from math: \",x,y\n except:\n pass\n # cd is default, cdelt is backup\n if len(ap) > 3:\n try:\n width = ap[2] / conv / abs(wcs.wcs.cd[0,0]) # first is width, second is height in DS9 PA convention\n height = ap[3] / conv / abs(wcs.wcs.cd[0,0])\n except:\n width = ap[2] / conv / abs(wcs.wcs.cdelt[0]) # first is width, second is height in DS9 PA convention\n height = ap[3] / conv / abs(wcs.wcs.cdelt[0])\n apold = copy.copy(ap)\n if len(ap) == 5:\n PA = ap[4]\n ap = [x,y,width,height,PA]\n else:\n ap = [x,y,width,height]\n elif len(ap) == 3:\n try:\n width = ap[2] / conv / abs(wcs.wcs.cd[0,0]) # first is width, second is height in DS9 PA convention\n except:\n width = ap[2] / conv / abs(wcs.wcs.cdelt[0]) # first is width, second is height in DS9 PA convention\n apold = copy.copy(ap)\n ap = [x,y,width]\n else:\n raise TypeError(\"Aperture length is incorrect.\")\n\n return ap\n\n\ndef getspec(lon,lat,rad,cube,header,r_fits=True,inherit=True,wunit='arcsec'):\n \"\"\"\n Given a longitude, latitude, aperture radius (arcsec), and a cube file,\n return a .fits file or a spectrum.\n\n Parameters\n ----------\n lon: float\n lat: float\n longitude and latitude center of a circular aperture in WCS coordinates\n must be in coordinate system of the file\n rad: float\n radius (default degrees) of aperture\n \"\"\"\n\n convopt = {'arcsec':1.0,'arcmin':60.0,'degree':3600.0}\n\n flathead = flatten_header(header)\n wcs = pywcs.WCS(flathead)\n if wcs.wcs.ctype[0][:2] == 'RA':\n coordsys='celestial'\n elif wcs.wcs.ctype[0][:4] == 'GLON':\n coordsys='galactic'\n spec = extract_aperture(cube,[lon,lat,rad],wcs=wcs,\n coordsys=coordsys,wunit=wunit)\n\n if nansum(spec) == 0:\n print(\"Total of extracted spectrum was zero. 
lon,lat,rad: \",lon,lat,rad)\n #import pdb; pdb.set_trace()\n\n if r_fits:\n if inherit:\n newhead = header.copy()\n else:\n newhead = fits.Header()\n try:\n newhead['CD1_1'] = header['CD3_3']\n except KeyError:\n newhead['CD1_1'] = header['CDELT3']\n newhead['CRPIX1'] = header['CRPIX3']\n newhead['CRVAL1'] = header['CRVAL3']\n try:\n newhead['CTYPE1'] = header['CTYPE3']\n except KeyError:\n newhead['CTYPE1'] = \"VRAD\"\n try:\n newhead['CUNIT1'] = header['CUNIT3']\n except KeyError:\n print(\"Header did not contain CUNIT3 keyword. Defaulting to km/s\")\n newhead['CUNIT1'] = \"km/s\"\n newhead['BUNIT'] = header['BUNIT']\n newhead['APGLON'] = lon\n newhead['APGLAT'] = lat\n newhead['APRAD'] = (rad*convopt[wunit],'arcseconds') # radius in arcsec\n newfile = fits.PrimaryHDU(data=spec,header=newhead)\n return newfile\n else:\n return spec\n\ndef getspec_reg(cubefilename,region,**kwargs):\n \"\"\"\n Aperture extraction from a cube using a pyregion circle region\n\n The region must be in the same coordinate system as the cube header\n\n .. warning:: The second argument of getspec_reg requires a pyregion region list,\n and therefore this code depends on `pyregion`_.\n \"\"\"\n\n ds9tocoords = {'fk5':'celestial','galactic':'galactic','icrs':'celestial'}\n\n if region.name != 'circle':\n raise Exception(\"Only circular apertures are implemented so far\")\n\n l,b,r = region.coord_list\n #pos = coords.Position([l,b],system=ds9tocoords[region.coord_format])\n if isinstance(cubefilename,fits.HDUList):\n cubefile = cubefilename\n else:\n cubefile = fits.open(cubefilename)\n header = cubefile[0].header\n cube = cubefile[0].data\n if len(cube.shape) == 4: cube = cube[0,:,:,:]\n\n sp = getspec(l,b,r,cube,header,wunit='degree',**kwargs)\n\n return sp\n\ndef coords_in_image(fitsfile,lon,lat,system='galactic'):\n \"\"\"\n Determine whether the coordinates are inside the image\n \"\"\"\n if not isinstance(fitsfile,fits.HDUList):\n fitsfile = fits.open(fitsfile)\n\n wcs = pywcs.WCS(flatten_header(fitsfile[0].header))\n\n if 'RA' in wcs.wcs.ctype[0]:\n pos = coords.Position((lon,lat),system=system)\n lon,lat = pos.j2000()\n if 'GLON' in wcs.wcs.ctype[0]:\n pos = coords.Position((lon,lat),system=system)\n lon,lat = pos.galactic()\n\n x,y = wcs.wcs_world2pix(lon,lat,0)\n #DEBUG print x,y,wcs.naxis1,wcs.naxis2\n if (0 < x < wcs.naxis1) and (0 < y < wcs.naxis2):\n return True\n else:\n return False\n\ndef spectral_smooth(cube, smooth_factor, downsample=True, parallel=True,\n numcores=None, **kwargs):\n \"\"\"\n Smooth the cube along the spectral direction\n \"\"\"\n\n yy,xx = numpy.indices(cube.shape[1:])\n\n if downsample:\n newshape = cube[::smooth_factor,:,:].shape\n else:\n newshape = cube.shape\n\n # need to make the cube \"flat\" along dims 1&2 for iteration in the \"map\"\n flatshape = (cube.shape[0],cube.shape[1]*cube.shape[2])\n\n Ssmooth = lambda x: smooth.smooth(x, smooth_factor, downsample=downsample, **kwargs)\n if parallel:\n newcube = numpy.array(parallel_map(Ssmooth, cube.reshape(flatshape).T, numcores=numcores)).T.reshape(newshape)\n else:\n newcube = numpy.array(map(Ssmooth, cube.reshape(flatshape).T)).T.reshape(newshape)\n\n #naive, non-optimal version\n # for (x,y) in zip(xx.flat,yy.flat):\n # newcube[:,y,x] = smooth.smooth(cube[:,y,x], smooth_factor,\n # downsample=downsample, **kwargs)\n\n return newcube\n\ndef plane_smooth(cube,cubedim=0,parallel=True,numcores=None,**kwargs):\n \"\"\"\n parallel-map the smooth function\n\n Parameters\n ----------\n parallel: bool\n defaults True. 
Set to false if you want serial (for debug purposes?)\n numcores: int\n pass to parallel_map (None = use all available)\n \"\"\"\n if not smoothOK:\n return\n\n if cubedim != 0:\n cube = cube.swapaxes(0,cubedim)\n\n cubelist = [cube[ii,:,:] for ii in xrange(cube.shape[0])]\n\n Psmooth = lambda C: smooth(C,**kwargs)\n\n if parallel:\n smoothcube = array(parallel_map(Psmooth,cubelist,numcores=numcores))\n else:\n smoothcube = array(map(Psmooth,cubelist))\n\n if cubedim != 0:\n smoothcube = smoothcube.swapaxes(0,cubedim)\n\n return smoothcube\n\n\ntry:\n import montage\n\n def rotcrop_cube(x1, y1, x2, y2, cubename, outname, xwidth=25, ywidth=25,\n in_system='galactic', out_system='equatorial',\n clobber=True, newheader=None, xcen=None, ycen=None):\n \"\"\"\n Crop a data cube and then rotate it with montage\n\n \"\"\"\n\n cubefile = fits.open(cubename)\n\n if xcen is None and ycen is None:\n pos1 = coords.Position([x1,y1],system=in_system)\n pos2 = coords.Position([x2,y2],system=in_system)\n\n if cubefile[0].header.get('CTYPE1')[:2] == 'RA':\n x1,y1 = pos1.j2000()\n x2,y2 = pos2.j2000()\n coord_system = 'celestial'\n elif cubefile[0].header.get('CTYPE1')[:4] == 'GLON':\n x1,y1 = pos1.galactic()\n x2,y2 = pos2.galactic()\n coord_system = 'galactic'\n\n xcen = (x1+x2)/2.0\n ycen = (y1+y2)/2.0\n print(xcen,ycen,xwidth,ywidth,coord_system)\n else:\n coord_system = in_system\n\n sc = subcube(cubefile[0].data, xcen, xwidth, ycen, ywidth,\n widthunits='pixels', units=\"wcs\", header=cubefile[0].header,\n return_HDU=True)\n # note: there should be no security risk here because fits' writeto\n # will not overwrite by default\n tempcube = tempfile.mktemp(suffix='.fits')\n sc.writeto(tempcube)\n\n pa = posang.posang(x1,y1,x2,y2,system=coord_system) - 90\n\n if newheader is None:\n newheader = sc.header.copy()\n cd11 = newheader.get('CDELT1') if newheader.get('CDELT1') else newheader.get('CD1_1')\n cd22 = newheader.get('CDELT2') if newheader.get('CDELT2') else newheader.get('CD2_2')\n cd12 = newheader.get('CD1_2') if newheader.get('CD1_2') else 0.0\n cd21 = newheader.get('CD2_1') if newheader.get('CD2_1') else 0.0\n cdelt = numpy.sqrt(cd11**2+cd12**2)\n\n tempheader = tempfile.mktemp(suffix='.hdr')\n ycensign = \"+\" if numpy.sign(ycen) >= 0 else \"-\"\n montage.mHdr(\"%s %1s%s\" % (xcen, ycensign, numpy.abs(ycen)), xwidth*cdelt,\n tempheader, system=out_system, height=ywidth*cdelt,\n pix_size=cdelt*3600.0, rotation=pa)\n os.system(\"sed -i bck '/END/d' %s\" % (tempheader))\n newheader2 = fits.Header()\n newheader2.fromTxtFile(tempheader)\n #newheader2.fromtextfile(tempheader)\n for key in ('CRPIX3','CRVAL3','CDELT3','CD3_3','CUNIT3','WCSTYPE3','CTYPE3'):\n if newheader.get(key):\n newheader2[key] = newheader.get(key)\n if newheader.get('CD3_3') and newheader2.get('CDELT3') is None:\n newheader2['CDELT3'] = newheader.get('CD3_3')\n newheader2.toTxtFile(tempheader,clobber=True)\n #if newheader2.get('CDELT3') is None:\n # raise Exception(\"No CD3_3 or CDELT3 in header.\")\n else:\n if isinstance(newheader,str):\n newheader2 = fits.Header()\n newheader2.fromTxtFile(newheader)\n tempheader = tempfile.mktemp(suffix='.hdr')\n newheader2.toTxtFile(tempheader,clobber=True)\n\n\n montage.wrappers.reproject_cube(tempcube,outname,header=tempheader,clobber=clobber)\n #print \"\\n\",outname\n #os.system('imhead %s | grep CDELT' % outname)\n\n # AWFUL hack because montage removes CDELT3\n tempcube = fits.open(outname)\n tempcube.header = newheader2\n #if tempcube.header.get('CDELT3') is None:\n # raise Exception(\"No 
CD3_3 or CDELT3 in header.\")\n #print tempcube.header.get('CDELT3')\n tempcube.writeto(outname,clobber=True)\n #print tempcube.get('CDELT3')\n #print \"\\n\",outname\n #os.system('imhead %s | grep CDELT' % outname)\n\n\n return\n\n def resample_cube(cubefilename, header):\n inhdr = fits.getheader(cubefilename)\n\nexcept:\n pass\n"
] | [
[
"numpy.abs",
"numpy.isfinite",
"matplotlib.patches.Circle",
"numpy.round",
"matplotlib.pyplot.colorbar",
"numpy.concatenate",
"numpy.array",
"matplotlib.pyplot.figure"
],
[
"numpy.polyfit",
"scipy.interpolate.UnivariateSpline",
"numpy.sqrt",
"numpy.isfinite",
"numpy.abs",
"numpy.arange",
"numpy.indices",
"numpy.cos",
"numpy.sin",
"numpy.sign",
"numpy.nansum",
"numpy.count_nonzero",
"numpy.repeat",
"numpy.array",
"numpy.polyval"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
hsjoe/violentpython | [
"e4ecaa29088a0bfc205e3339ae88410d831766ee"
] | [
"stock/test.py"
] | [
"# coding=utf-8\nimport pandas as pd\nimport numpy as np\nimport sys\nimport os\nimport tushare as ts\nimport matplotlib.pyplot as plt\n\nts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000))\n\nts = ts.cumsum()\nts.plot()\nts.figure()"
] | [
[
"numpy.random.randn",
"pandas.date_range"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
HansjoergW/Deep-Reinforcement-Learning-Hands-On-Second-Edition | [
"5d27d6f9057bb5526e944274fb803dc8a3a8395b",
"5d27d6f9057bb5526e944274fb803dc8a3a8395b"
] | [
"Chapter04/01_cartpole.py",
"Chapter08/lib/dqn_model.py"
] | [
"#!/usr/bin/env python3\nimport gym\nfrom collections import namedtuple\nimport numpy as np\nfrom tensorboardX import SummaryWriter\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n\nHIDDEN_SIZE = 128\nBATCH_SIZE = 16\nPERCENTILE = 70\n\n\nclass Net(nn.Module):\n def __init__(self, obs_size, hidden_size, n_actions):\n super(Net, self).__init__()\n self.net = nn.Sequential(\n nn.Linear(obs_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, n_actions)\n )\n\n def forward(self, x):\n return self.net(x)\n\n\nEpisode = namedtuple('Episode', field_names=['reward', 'steps'])\nEpisodeStep = namedtuple('EpisodeStep', field_names=['observation', 'action'])\n\n\ndef iterate_batches(env, net, batch_size):\n batch = []\n episode_reward = 0.0\n episode_steps = []\n obs = env.reset()\n sm = nn.Softmax(dim=1)\n while True:\n obs_v = torch.FloatTensor([obs])\n act_probs_v = sm(net(obs_v))\n act_probs = act_probs_v.data.numpy()[0]\n action = np.random.choice(len(act_probs), p=act_probs)\n next_obs, reward, is_done, _ = env.step(action)\n episode_reward += reward\n step = EpisodeStep(observation=obs, action=action)\n episode_steps.append(step)\n if is_done:\n e = Episode(reward=episode_reward, steps=episode_steps)\n batch.append(e)\n episode_reward = 0.0\n episode_steps = []\n next_obs = env.reset()\n if len(batch) == batch_size:\n yield batch\n batch = []\n obs = next_obs\n\n\ndef filter_batch(batch, percentile):\n rewards = list(map(lambda s: s.reward, batch))\n reward_bound = np.percentile(rewards, percentile)\n reward_mean = float(np.mean(rewards))\n\n train_obs = []\n train_act = []\n for reward, steps in batch:\n if reward < reward_bound:\n continue\n train_obs.extend(map(lambda step: step.observation, steps))\n train_act.extend(map(lambda step: step.action, steps))\n\n train_obs_v = torch.FloatTensor(train_obs)\n train_act_v = torch.LongTensor(train_act)\n return train_obs_v, train_act_v, reward_bound, reward_mean\n\n\nif __name__ == \"__main__\":\n env = gym.make(\"CartPole-v0\")\n #env = gym.wrappers.Monitor(env, directory=\"mon\", force=True)\n obs_size = env.observation_space.shape[0]\n n_actions = env.action_space.n\n\n net = Net(obs_size, HIDDEN_SIZE, n_actions)\n\n objective = nn.CrossEntropyLoss()\n optimizer = optim.Adam(params=net.parameters(), lr=0.01)\n writer = SummaryWriter(comment=\"-cartpole\")\n\n for iter_no, batch in enumerate(iterate_batches(\n env, net, BATCH_SIZE)):\n obs_v, acts_v, reward_b, reward_m = \\\n filter_batch(batch, PERCENTILE)\n optimizer.zero_grad()\n action_scores_v = net(obs_v)\n loss_v = objective(action_scores_v, acts_v)\n loss_v.backward()\n optimizer.step()\n print(\"%d: loss=%.3f, reward_mean=%.1f, rw_bound=%.1f\" % (\n iter_no, loss_v.item(), reward_m, reward_b))\n writer.add_scalar(\"loss\", loss_v.item(), iter_no)\n writer.add_scalar(\"reward_bound\", reward_b, iter_no)\n writer.add_scalar(\"reward_mean\", reward_m, iter_no)\n if reward_m > 199:\n print(\"Solved!\")\n break\n writer.close()\n",
"import torch\nimport torch.nn as nn\n\nimport numpy as np\n\n\nclass DQN(nn.Module):\n def __init__(self, input_shape, n_actions):\n super(DQN, self).__init__()\n\n self.conv = nn.Sequential(\n nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),\n nn.ReLU(),\n nn.Conv2d(32, 64, kernel_size=4, stride=2),\n nn.ReLU(),\n nn.Conv2d(64, 64, kernel_size=3, stride=1),\n nn.ReLU()\n )\n\n conv_out_size = self._get_conv_out(input_shape)\n\n self.fc = nn.Sequential(\n nn.Linear(conv_out_size, 512),\n nn.ReLU(),\n nn.Linear(512, n_actions)\n )\n\n def _get_conv_out(self, shape):\n o = self.conv(torch.zeros(1, *shape))\n return int(np.prod(o.size()))\n\n def forward(self, x):\n fx = x.float() / 256\n conv_out = self.conv(fx).view(fx.size()[0], -1)\n return self.fc(conv_out)\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.CrossEntropyLoss",
"torch.LongTensor",
"numpy.percentile",
"torch.nn.Linear",
"torch.FloatTensor",
"numpy.mean",
"torch.nn.ReLU"
],
[
"torch.nn.Linear",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Yuanhang8605/CNN-fro-cifar10 | [
"296ecf39579cfae541af83159bc2c9403027d95a"
] | [
"network/inception.py"
] | [
"\"\"\" Inception model.\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nimport six\nfrom net import Net\nimport ops\n\nclass Inception(Net):\n \"\"\" Inception model. \"\"\"\n\n def __init__(self, cfgs, images, labels, mode='train', multi_gpu_mode=False):\n \"\"\" ResNet constructor. \"\"\"\n Net.__init__(self, cfgs, images, labels, mode)\n self._relu_leakiness = cfgs['RELU_LEAKINESS']\n self._weight_decay_rate = cfgs['WEIGHT_DECAY_RATE']\n self.multi_gpu_mode = multi_gpu_mode\n \n def _stride_arr(self, stride):\n \"\"\"Map a stride scalar to the stride array for tf.nn.conv2d.\"\"\"\n return [1, stride, stride, 1] \n\n def inference(self):\n \"\"\" Build the core model within the gragh. \n return:\n loggits before classifier\n \"\"\"\n batch_size = self.images.get_shape()[0] \n\n with tf.variable_scope('init'):\n x = self.images\n x = ops.conv('init_conv1', x, 3, 3, 16, self._stride_arr(1), 'SAME', self.multi_gpu_mode)\n x, train_op = ops.batch_norm('bn1', x, self.mode, 0.001, self.multi_gpu_mode)\n self.extra_train_ops.extend(train_op)\n x = ops.relu(x, self._relu_leakiness)\n\n x = ops.conv('init_conv2', x ,3, 16, 16, self._stride_arr(1), 'SAME', self.multi_gpu_mode)\n x, train_op = ops.batch_norm('bn2', x, self.mode, 0.001, self.multi_gpu_mode)\n self.extra_train_ops.extend(train_op)\n x = ops.relu(x, self._relu_leakiness)\n \n with tf.variable_scope('unit_1_0'):\n x = tf.nn.max_pool(x, [1,2,2,1] ,self._stride_arr(2), 'VALID', name='max_pool')\n x = ops.conv('conv', x, 3, 16, 32, self._stride_arr(1), 'SAME', self.multi_gpu_mode)\n x, train_op = ops.batch_norm('bn', x, self.mode, 0.001, self.multi_gpu_mode)\n self.extra_train_ops.extend(train_op)\n x = ops.relu(x, self._relu_leakiness)\n for i in six.moves.range(1, 2):\n with tf.variable_scope('unit_1_%d' % i):\n x = ops.conv('conv', x, 3, 32, 32, self._stride_arr(1), 'SAME', self.multi_gpu_mode)\n x, train_op = ops.batch_norm('bn', x, self.mode, 0.001, self.multi_gpu_mode)\n self.extra_train_ops.extend(train_op)\n x = ops.relu(x, self._relu_leakiness)\n \n with tf.variable_scope('unit_2_0'):\n x = tf.nn.max_pool(x, [1,2,2,1] ,self._stride_arr(2), 'VALID', name='max_pool')\n x = ops.conv('conv', x, 3, 32, 64, self._stride_arr(1), 'SAME', self.multi_gpu_mode)\n x, train_op = ops.batch_norm('bn', x, self.mode, 0.001, self.multi_gpu_mode)\n self.extra_train_ops.extend(train_op)\n x = ops.relu(x, self._relu_leakiness)\n for i in six.moves.range(1, 4):\n with tf.variable_scope('unit_2_%d' % i):\n x = ops.conv('conv', x, 3, 64, 64, self._stride_arr(1), 'SAME', self.multi_gpu_mode)\n x, train_op = ops.batch_norm('bn', x, self.mode, 0.001, self.multi_gpu_mode)\n self.extra_train_ops.extend(train_op)\n x = ops.relu(x, self._relu_leakiness)\n \n with tf.variable_scope('unit_3_0'):\n x = tf.nn.max_pool(x, [1,2,2,1] ,self._stride_arr(2), 'VALID', name='max_pool')\n x = ops.conv('conv', x, 3, 64, 128, self._stride_arr(1), 'SAME', self.multi_gpu_mode)\n x, train_op = ops.batch_norm('bn', x, self.mode, 0.001, self.multi_gpu_mode)\n self.extra_train_ops.extend(train_op)\n x = ops.relu(x, self._relu_leakiness)\n for i in six.moves.range(1, 4):\n with tf.variable_scope('unit_3_%d' % i):\n x = ops.conv('conv', x, 3, 128, 128, self._stride_arr(1), 'SAME', self.multi_gpu_mode)\n x, train_op = ops.batch_norm('bn', x, self.mode, 0.001, self.multi_gpu_mode)\n self.extra_train_ops.extend(train_op)\n x = ops.relu(x, self._relu_leakiness)\n \n with tf.variable_scope('unit_last'):\n x = ops.global_avg_pool(x)\n \n with tf.variable_scope('logit'):\n logits 
= ops.fc('fc1', x, batch_size, self.num_classes, self.multi_gpu_mode)\n #self.predictions = tf.nn.softmax(logits)\n self.logits = logits\n\n\n def loss(self):\n logits = self.logits\n with tf.variable_scope('loss'):\n ls = tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=self.labels\n )\n ls = tf.reduce_mean(ls, name='ls')\n ls += ops.weight_decay(self._weight_decay_rate)\n # tf.summary.scalar('loss', ls)\n return ls\n"
] | [
[
"tensorflow.variable_scope",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.reduce_mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
Gaurav3963/AGS-intern-files | [
"ee4db12755cf680c0575b3c1a6a6146ee415743a",
"ee4db12755cf680c0575b3c1a6a6146ee415743a"
] | [
"Nikhil/assignment6.py",
"Gaurav/Assignment 7/faster thread.py"
] | [
"import pandas as pd\nimport pyodbc\n\ndata = pd.read_csv (r'C:\\Users\\Nikhil\\Desktop\\ags_intern\\dataset.csv')\nstudio = pyodbc.connect('Driver={SQL Server};''Server=DESKTOP-9T4GEVI;''Database=patil;''Trusted_Connection=yes;')\n\ncursor = studio.cursor()\n\n#data = pd.DataFrame(data, columns= ['ID','Name'] )\n\nfor row in data.itertuples():\n var = str(row.ID)\n if var[0] == '4':\n discount = int(row.TotalAmount) - (int(row.TotalAmount) * 2 / 100)\n cursor.execute('INSERT INTO Table_01(ID, Name, TotalAmount, discount)VALUES (?,?,?,?)',row.ID, row.Name, row.TotalAmount, int(row.TotalAmount) * 2 / 100)\n print(discount)\n \n\n elif var[0] == '5':\n TotalAmount = int(row.TotalAmount) - (int(row.TotalAmount) * 3 / 100)\n cursor.execute('INSERT INTO Table_02(ID, Name, TotalAmount, discount)VALUES (?,?,?,?)',row.ID, row.Name, row.TotalAmount, int(row.TotalAmount) * 3 / 100)\n \n \n elif var[0] == '6': \n TotalAmount = int(row.TotalAmount) - (int(row.TotalAmount) * 4 / 100) \n cursor.execute('INSERT INTO Table_03(ID, Name, TotalAmount, discount)VALUES (?,?,?,?)',row.ID, row.Name, row.TotalAmount, int(row.TotalAmount) * 4 / 100)\n\n\n else:\n print(\"pass\")\n\ncursor.close()\n\nprint(\"Table_01\")\ncursor1 = studio.cursor()\ncursor1.execute(\"select * from Table_01\")\nfor row in cursor1:\n print(row)\ncursor1.close()\n\n\nprint(\"Table_02\")\ncursor2 = studio.cursor()\ncursor2.execute(\"select * from Table_02\")\nfor row in cursor2:\n print(row)\ncursor2.close()\n\n\nprint(\"Table_03\")\ncursor3 = studio.cursor()\ncursor3.execute(\"select * from Table_03\")\nfor row in cursor3:\n print(row)\ncursor3.close()\nstudio.close()\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 3 16:07:39 2021\n\n@author: patil\n\"\"\"\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 2 17:47:33 2021\n\n@author: patil\n\"\"\"\nimport pyodbc as db\nimport pandas as pd\nimport time as t\nimport _thread\n\nconn = db.connect('Driver={SQL Server};''Server=DESKTOP-VI5MRAI\\GAURAVPATIL;''Database=sample;''Trusted_Connection=yes;')\nc = conn.cursor()\n\ndef checktable(table_name):\n c = conn.cursor()\n try:\n #c.execute(\"SELECT 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = N'\"+table_name+\"'\")\n c.execute(\"SELECT * from \"+table_name)\n except:\n print('Table Dosent exists.\\n\\n')\n new_table = conn.cursor()\n new_table.execute(\"CREATE TABLE \"+table_name+\"(NAME varchar(50),ID bigint,Price float,Discount float);\");\n new_table.commit()\n new_table.close()\n finally:\n c.close()\n\n#checktable(\"Table_1\")\n#checktable(\"Table_2\")\n#checktable(\"Table_3\")\n\ndef executeNew1(dataset) :\n con = db.connect('Driver={SQL Server};''Server=DESKTOP-VI5MRAI\\GAURAVPATIL;''Database=sample;''Trusted_Connection=yes;')\n\n print(\"Thread 1 starting time : \",t.time())\n data1 = pd.read_csv(\"C:\\Office\\AGS - Internship\\AGS-intern-files\\Gaurav\\Assignment 6\\\\\"+dataset+\".csv\")\n \n cursor = con.cursor()\n old = t.time()\n \n for row in data1.itertuples():\n b = str(row.ID)\n if(b[0]=='4'):\n cursor.execute(\"INSERT into Table_1 values(?,?,?,?)\",row.NAME,row.ID,row.Total_Amount,row.Total_Amount*0.02)\n elif(b[0]=='5'):\n cursor.execute(\"INSERT into Table_2 values(?,?,?,?)\",row.NAME,row.ID,row.Total_Amount,row.Total_Amount*0.03)\n else:\n cursor.execute(\"INSERT into Table_3 values(?,?,?,?)\",row.NAME,row.ID,row.Total_Amount,row.Total_Amount*0.04)\n cursor.commit()\n \n cursor.close()\n required = t.time()-old\n '''\n print(\"\\n\\nTable 1\")\n sql_query = pd.read_sql_query('select * from Table_1',conn)\n print(sql_query)\n \n print(\"\\n\\nTable 2\")\n sql_query = pd.read_sql_query('select * from Table_2',conn)\n print(sql_query)\n \n print(\"\\n\\nTable 3\")\n sql_query = pd.read_sql_query('select * from Table_3',conn)\n print(sql_query)\n '''\n print(\"\\n\\nTime Required for Thread 1 : \",required)\n con.close()\n\ndef executeNew2(dataset) :\n conn = db.connect('Driver={SQL Server};''Server=DESKTOP-VI5MRAI\\GAURAVPATIL;''Database=sample;''Trusted_Connection=yes;')\n print(\"Thread 2 starting time : \",t.time())\n data = pd.read_csv(\"C:\\Office\\AGS - Internship\\AGS-intern-files\\Gaurav\\Assignment 6\\\\\"+dataset+\".csv\")\n \n curso = conn.cursor()\n old = t.time()\n \n for row in data.itertuples():\n b = str(row.ID)\n if(b[0]=='4'):\n curso.execute(\"INSERT into Table_1 values(?,?,?,?)\",row.NAME,row.ID,row.Total_Amount,row.Total_Amount*0.02)\n elif(b[0]=='5'):\n curso.execute(\"INSERT into Table_2 values(?,?,?,?)\",row.NAME,row.ID,row.Total_Amount,row.Total_Amount*0.03)\n else:\n curso.execute(\"INSERT into Table_3 values(?,?,?,?)\",row.NAME,row.ID,row.Total_Amount,row.Total_Amount*0.04)\n curso.commit()\n \n curso.close()\n required = t.time()-old\n '''\n print(\"\\n\\nTable 1\")\n sql_query = pd.read_sql_query('select * from Table_1',conn)\n print(sql_query)\n \n print(\"\\n\\nTable 2\")\n sql_query = pd.read_sql_query('select * from Table_2',conn)\n print(sql_query)\n \n print(\"\\n\\nTable 3\")\n sql_query = pd.read_sql_query('select * from Table_3',conn)\n print(sql_query)\n '''\n print(\"\\n\\nTime Required for Thread 2: \",required)\n conn.close()\n \ne = t.time()\n#t1 = 
td.Thread(target=executeNew1(\"Book1\"))\n#t2 = td.Thread(target=executeNew2(\"Book2\"))\n_thread.start_new_thread( executeNew1,(\"Book1\",) )\n_thread.start_new_thread( executeNew2,(\"Book2\",) )\n#p1 = multiprocessing.Process(target=executeNew1(\"Book1\"))\n#p2 = multiprocessing.Process(target=executeNew2, args=(\"Book2\"))\n # starting thread 1\n\n#t1.start()\n # starting thread 2\n#t2.start()\n \n # wait until thread 1 is completely executed\n#t1.join()\n # wait until thread 2 is completely executed\n#t2.join()\nprint(\"time needed is \",t.time()-e)\nconn.close()\n # both threads completely executed\nprint(\"Done!\")"
] | [
[
"pandas.read_csv"
],
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
eribean/GIRTH | [
"daf22773aa9cd1c819bf732e1061ebf5cc4dc40e"
] | [
"common/polyserial.py"
] | [
"import numpy as np\nfrom scipy.stats import norm\nfrom scipy.optimize import fminbound\n\n\n__all__ = [\"polyserial_correlation\"]\n\n\ndef polyserial_correlation(continuous, ordinal):\n \"\"\"Computes the polyserial correlation.\n \n Estimates the correlation value based on a bivariate\n normal distribution.\n \n Args:\n continuous: Continuous Measurement\n ordinal: Ordinal Measurement\n \n Returns:\n polyserial_correlation: converged value\n \n Notes:\n User must handle missing data\n \"\"\"\n # Get the number of ordinal values\n values, counts = np.unique(ordinal, return_counts=True)\n \n # Compute the thresholds (tau's)\n thresholds = norm.isf(1 - counts.cumsum() / counts.sum())[:-1]\n \n # Standardize the continuous variable\n standardized_continuous = ((continuous - continuous.mean())\n / continuous.std(ddof=1))\n\n def _min_func(correlation):\n denominator = np.sqrt(1 - correlation * correlation)\n k = standardized_continuous * correlation\n log_likelihood = 0\n \n for ndx, value in enumerate(values):\n mask = ordinal == value\n \n if ndx == 0:\n numerator = thresholds[ndx] - k[mask]\n probabilty = norm.cdf(numerator / denominator)\n \n elif ndx == (values.size -1):\n numerator = thresholds[ndx-1] - k[mask]\n probabilty = (1 - norm.cdf(numerator / denominator))\n \n else:\n numerator1 = thresholds[ndx] - k[mask]\n numerator2 = thresholds[ndx-1] - k[mask]\n probabilty = (norm.cdf(numerator1 / denominator)\n - norm.cdf(numerator2 / denominator))\n \n log_likelihood -= np.log(probabilty).sum()\n \n return log_likelihood\n \n rho = fminbound(_min_func, -.99, .99)\n \n return rho"
] | [
[
"numpy.log",
"scipy.stats.norm.cdf",
"numpy.sqrt",
"numpy.unique",
"scipy.optimize.fminbound"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
cbeiraod/grafica | [
"6f47a47f47625b4bdb83dd15e2fcbaa9c8ca2d05"
] | [
"grafica/PlotlyFigure.py"
] | [
"from .figure import Figure\nfrom .traces import Scatter, ErrorBand, Histogram, Heatmap, Contour, KDE\nimport plotly.graph_objects as go\nimport plotly\nimport numpy as np\nimport warnings\n\nclass PlotlyFigure(Figure):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.plotly_figure = go.Figure()\n\t\n\t# Methods that must be overridden ----------------------------------\n\t\n\tdef show(self):\n\t\t# Overriding this method as specified in the class Figure.\n\t\tself.plotly_figure.show()\n\t\n\tdef save(self, file_name=None, include_plotlyjs='cdn', auto_open=False, **kwargs):\n\t\t# Overriding this method as specified in the class Figure.\n\t\tif file_name is None:\n\t\t\tfile_name = self.title\n\t\tif file_name is None: # If it is still None...\n\t\t\traise ValueError(f'Please provide a name for saving the figure to a file by the <file_name> argument.')\n\t\tif file_name[-5:] != '.html':\n\t\t\tfile_name += '.html'\n\t\tplotly.offline.plot(\n\t\t\tself.plotly_figure,\n\t\t\tfilename = file_name,\n\t\t\tauto_open = auto_open,\n\t\t\tinclude_plotlyjs = include_plotlyjs,\n\t\t\t**kwargs\n\t\t)\n\t\n\tdef draw_layout(self):\n\t\t# Overriding this method as specified in the class Figure.\n\t\tif self.show_title == True and self.title != None:\n\t\t\tself.plotly_figure.update_layout(title = self.title)\n\t\tself.plotly_figure.update_layout(\n\t\t\txaxis_title = self.xlabel,\n\t\t\tyaxis_title = self.ylabel,\n\t\t)\n\t\t# Axes scale:\n\t\tif self.xscale in [None, 'lin']:\n\t\t\tpass\n\t\telif self.xscale == 'log':\n\t\t\tself.plotly_figure.update_layout(xaxis_type = 'log')\n\t\tif self.yscale in [None, 'lin']:\n\t\t\tpass\n\t\telif self.yscale == 'log':\n\t\t\tself.plotly_figure.update_layout(yaxis_type = 'log')\n\t\t\n\t\tif self.aspect == 'equal':\n\t\t\tself.plotly_figure.update_yaxes(\n\t\t\t\tscaleanchor = \"x\",\n\t\t\t\tscaleratio = 1,\n\t\t\t)\n\t\t\n\t\tif self.subtitle != None:\n\t\t\tself.plotly_figure.add_annotation(\n\t\t\t\ttext = self.subtitle.replace('\\n','<br>'),\n\t\t\t\txref = \"paper\", \n\t\t\t\tyref = \"paper\",\n\t\t\t\tx = .5, \n\t\t\t\ty = 1,\n\t\t\t\talign = 'left',\n\t\t\t\tarrowcolor=\"#ffffff\",\n\t\t\t\tfont=dict(\n\t\t\t\t\tfamily=\"Courier New, monospace\",\n\t\t\t\t\tcolor=\"#999999\"\n\t\t\t\t),\n\t\t\t)\n\t\n\tdef draw_trace(self, trace):\n\t\t# Overriding this method as specified in the class Figure.\n\t\ttraces_drawing_methods = {\n\t\t\tScatter: self._draw_scatter,\n\t\t\tErrorBand: self._draw_errorband,\n\t\t\tHistogram: self._draw_histogram,\n\t\t\tHeatmap: self._draw_heatmap,\n\t\t\tContour: self._draw_contour,\n\t\t\tKDE: self._draw_scatter,\n\t\t}\n\t\tif type(trace) not in traces_drawing_methods:\n\t\t\traise RuntimeError(f\"Don't know how to draw a <{type(trace)}> trace...\")\n\t\ttraces_drawing_methods[type(trace)](trace)\n\t\n\t# Methods that draw each of the traces (for internal use only) -----\n\t\n\tdef _draw_scatter(self, scatter: Scatter):\n\t\tif not isinstance(scatter, Scatter):\n\t\t\traise TypeError(f'<scatter> must be an instance of {Scatter}, received object of type {type(scatter)}.')\n\t\tself.plotly_figure.add_trace(\n\t\t\tgo.Scatter(\n\t\t\t\tx = scatter.x,\n\t\t\t\ty = scatter.y,\n\t\t\t\tname = scatter.label,\n\t\t\t\topacity = scatter.alpha,\n\t\t\t\tmode = translate_marker_and_linestyle_to_Plotly_mode(scatter.marker, scatter.linestyle),\n\t\t\t\tmarker_symbol = map_marker_to_Plotly_markers(scatter.marker),\n\t\t\t\tshowlegend = True if scatter.label is not None else False,\n\t\t\t\tline = dict(\n\t\t\t\t\tdash = 
map_linestyle_to_Plotly_linestyle(scatter.linestyle),\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\t\tself.plotly_figure['data'][-1]['marker']['color'] = rgb2hexastr_color(scatter.color)\n\t\tself.plotly_figure['data'][-1]['line']['width'] = scatter.linewidth\n\t\n\tdef _draw_errorband(self, errorband: ErrorBand):\n\t\tif not isinstance(errorband, ErrorBand):\n\t\t\traise TypeError(f'<errorband> must be an instance of {ErrorBand}, received object of type {type(errorband)}.')\n\t\tx = errorband.x\n\t\ty1 = errorband.y + errorband.higher\n\t\ty2 = errorband.y - errorband.lower\n\t\tlegendgroup = str(np.random.rand(3))\n\t\t# Draw the error band ---\n\t\tself.plotly_figure.add_trace(\n\t\t\tgo.Scatter(\n\t\t\t\tx = list(x) + list(x)[::-1],\n\t\t\t\ty = list(y1) + list(y2)[::-1],\n\t\t\t\topacity = errorband.alpha/2,\n\t\t\t\tmode = 'lines',\n\t\t\t\tname = errorband.label,\n\t\t\t\tlegendgroup = legendgroup,\n\t\t\t\tshowlegend = False,\n\t\t\t\tline = dict(\n\t\t\t\t\tcolor = rgb2hexastr_color(errorband.color),\n\t\t\t\t),\n\t\t\t)\n\t\t)\n\t\tself.plotly_figure['data'][-1]['fill'] = 'toself'\n\t\tself.plotly_figure['data'][-1]['hoveron'] = 'points'\n\t\tself.plotly_figure['data'][-1]['line']['width'] = 0\n\t\t# Draw the trace itself ---\n\t\tself.plotly_figure.add_trace(\n\t\t\tgo.Scatter(\n\t\t\t\tx = errorband.x,\n\t\t\t\ty = errorband.y,\n\t\t\t\tname = errorband.label,\n\t\t\t\topacity = errorband.alpha,\n\t\t\t\tmode = translate_marker_and_linestyle_to_Plotly_mode(errorband.marker, errorband.linestyle),\n\t\t\t\tmarker_symbol = map_marker_to_Plotly_markers(errorband.marker),\n\t\t\t\tshowlegend = True if errorband.label is not None else False,\n\t\t\t\tline = dict(\n\t\t\t\t\tdash = map_linestyle_to_Plotly_linestyle(errorband.linestyle),\n\t\t\t\t\tcolor = rgb2hexastr_color(errorband.color),\n\t\t\t\t\twidth = errorband.linewidth,\n\t\t\t\t),\n\t\t\t\tlegendgroup = legendgroup,\n\t\t\t)\n\t\t)\n\t\n\tdef _draw_histogram(self, histogram):\n\t\tif not isinstance(histogram, Histogram):\n\t\t\traise TypeError(f'<histogram> must be an instance of {Histogram}, received object of type {type(histogram)}.')\n\t\tx = np.array(histogram.x) # Make a copy to avoid touching the original data.\n\t\tx[0] = x[1] - (x[3]-x[1]) # Plotly does not plot points in infinity.\n\t\tx[-1] = x[-2] + (x[-2]-x[-4]) # Plotly does not plot points in infinity.\n\t\tlegendgroup = str(np.random.rand(3))\n\t\t# The following trace is the histogram lines ---\n\t\tself.plotly_figure.add_traces(\n\t\t\tgo.Scatter(\n\t\t\t\tx = x, \n\t\t\t\ty = histogram.y,\n\t\t\t\topacity = histogram.alpha,\n\t\t\t\tmode = 'lines',\n\t\t\t\tline = dict(\n\t\t\t\t\tdash = map_linestyle_to_Plotly_linestyle(histogram.linestyle),\n\t\t\t\t),\n\t\t\t\tlegendgroup = legendgroup,\n\t\t\t\tshowlegend = False,\n\t\t\t\thoverinfo='skip',\n\t\t\t)\n\t\t)\n\t\tself.plotly_figure['data'][-1]['marker']['color'] = rgb2hexastr_color(histogram.color)\n\t\tself.plotly_figure['data'][-1]['line']['width'] = histogram.linewidth\n\t\t# The following trace adds the markers in the middle of each bin ---\n\t\tif histogram.marker is not None:\n\t\t\tself.plotly_figure.add_traces(\n\t\t\t\tgo.Scatter(\n\t\t\t\t\tx = [x[2*i] + (x[2*i+1]-x[2*i])/2 for i in range(int(len(x)/2))],\n\t\t\t\t\ty = histogram.y[::2],\n\t\t\t\t\tname = histogram.label,\n\t\t\t\t\tmode = 'markers',\n\t\t\t\t\tmarker_symbol = map_marker_to_Plotly_markers(histogram.marker),\n\t\t\t\t\topacity = histogram.alpha,\n\t\t\t\t\tline = dict(\n\t\t\t\t\t\tdash = 
map_linestyle_to_Plotly_linestyle(histogram.linestyle),\n\t\t\t\t\t),\n\t\t\t\t\tlegendgroup = legendgroup,\n\t\t\t\t\thoverinfo = 'skip',\n\t\t\t\t\tshowlegend = False,\n\t\t\t\t)\n\t\t\t)\n\t\t\tself.plotly_figure['data'][-1]['marker']['color'] = rgb2hexastr_color(histogram.color)\n\t\t# The following trace adds the hover texts ---\n\t\tself.plotly_figure.add_traces(\n\t\t\tgo.Scatter(\n\t\t\t\tx = [x[2*i] + (x[2*i+1]-x[2*i])/2 for i in range(int(len(x)/2))],\n\t\t\t\ty = histogram.y[::2],\n\t\t\t\tname = histogram.label,\n\t\t\t\tmode = 'lines',\n\t\t\t\tmarker_symbol = map_marker_to_Plotly_markers(histogram.marker),\n\t\t\t\topacity = histogram.alpha,\n\t\t\t\tline = dict(\n\t\t\t\t\tdash = map_linestyle_to_Plotly_linestyle(histogram.linestyle),\n\t\t\t\t),\n\t\t\t\tlegendgroup = legendgroup,\n\t\t\t\tshowlegend = False,\n\t\t\t\ttext = [f'Bin: (-∞, {histogram.bin_edges[0]})<br>Count: {histogram.bin_counts[0]}'] + [f'Bin: [{histogram.bin_edges[i]}, {histogram.bin_edges[i+1]})<br>Count: {histogram.bin_counts[i+1]}' for i in range(len(histogram.bin_edges)-1)] + [f'Bin: [{histogram.bin_edges[-1]},∞)<br>Count: {histogram.bin_counts[-1]}'],\n\t\t\t\thovertemplate = \"%{text}\",\n\t\t\t)\n\t\t)\n\t\tself.plotly_figure['data'][-1]['marker']['color'] = rgb2hexastr_color(histogram.color)\n\t\tself.plotly_figure['data'][-1]['line']['width'] = 0\n\t\t# The following trace is to add the item in the legend ---\n\t\tself.plotly_figure.add_traces(\n\t\t\tgo.Scatter(\n\t\t\t\tx = [float('NaN')],\n\t\t\t\ty = [float('NaN')],\n\t\t\t\tname = histogram.label,\n\t\t\t\tmode = translate_marker_and_linestyle_to_Plotly_mode(histogram.marker, histogram.linestyle),\n\t\t\t\tmarker_symbol = map_marker_to_Plotly_markers(histogram.marker),\n\t\t\t\topacity = histogram.alpha,\n\t\t\t\tshowlegend = True if histogram.label != None else False,\n\t\t\t\tline = dict(\n\t\t\t\t\tdash = map_linestyle_to_Plotly_linestyle(histogram.linestyle),\n\t\t\t\t),\n\t\t\t\tlegendgroup = legendgroup,\n\t\t\t)\n\t\t)\n\t\tself.plotly_figure['data'][-1]['marker']['color'] = rgb2hexastr_color(histogram.color)\n\t\tself.plotly_figure['data'][-1]['line']['width'] = histogram.linewidth\n\t\n\tdef _draw_heatmap(self, heatmap):\n\t\tif not isinstance(heatmap, Heatmap):\n\t\t\traise TypeError(f'<heatmap> must be an instance of {Heatmap}, received object of type {type(heatmap)}.')\n\t\tx = heatmap.x\n\t\ty = heatmap.y\n\t\tz = heatmap.z\n\t\tif heatmap.zscale == 'log' and (z<=0).any():\n\t\t\twarnings.warn('Warning: log color scale was selected and there are <z> values <= 0. 
In the plot you will see them as NaN.')\n\t\t\twith warnings.catch_warnings():\n\t\t\t\twarnings.filterwarnings(\"ignore\", message=\"invalid value encountered in log\")\n\t\t\t\tz = np.log(z)\n\t\tself.plotly_figure.add_trace(\n\t\t\tgo.Heatmap(\n\t\t\t\tx = x,\n\t\t\t\ty = y,\n\t\t\t\tz = z,\n\t\t\t\topacity = heatmap.alpha,\n\t\t\t\tzmin = heatmap.zlim[0] if heatmap.zlim is not None else None,\n\t\t\t\tzmax = heatmap.zlim[1] if heatmap.zlim is not None else None,\n\t\t\t\tcolorbar = dict(\n\t\t\t\t\ttitle = ('log ' if heatmap.zscale == 'log' else '') + (heatmap.zlabel if heatmap.zlabel is not None else ''),\n\t\t\t\t\ttitleside = 'right',\n\t\t\t\t),\n\t\t\t\thovertemplate = f'{(self.xlabel if self.xlabel is not None else \"x\")}: %{{x}}<br>{(self.ylabel if self.ylabel is not None else \"y\")}: %{{y}}<br>{(heatmap.zlabel if heatmap.zlabel is not None else \"color scale\")}: %{{z}}<extra></extra>', # https://community.plotly.com/t/heatmap-changing-x-y-and-z-label-on-tooltip/23588/6\n\t\t\t)\n\t\t)\n\t\tself.plotly_figure.update_layout(legend_orientation=\"h\")\n\t\n\tdef _draw_contour(self, contour):\n\t\tif not isinstance(contour, Contour):\n\t\t\traise TypeError(f'<contour> must be an instance of {Contour}, received object of type {type(contour)}.')\n\t\tx = contour.x\n\t\ty = contour.y\n\t\tz = contour.z\n\t\tif contour.zscale == 'log' and (z<=0).any():\n\t\t\twarnings.warn('Warning: log color scale was selected and there are <z> values <= 0. In the plot you will see them as NaN.')\n\t\t\twith warnings.catch_warnings():\n\t\t\t\twarnings.filterwarnings(\"ignore\", message=\"invalid value encountered in log\")\n\t\t\t\tz = np.log(z)\n\t\tlowest_contour = contour.zlim[0] if contour.zlim is not None else contour.z.min()\n\t\thighest_contour = contour.zlim[1] if contour.zlim is not None else contour.z.max()\n\t\tif hasattr(contour.contours, '__iter__'):\n\t\t\traise NotImplementedError(f'An iterable specifying which contours to use was not yet implemented. 
Only implemented an integer number specifying number of equidistant contours.')\n\t\tn_contours = contour.contours\n\t\tself.plotly_figure.add_trace(\n\t\t\tgo.Contour(\n\t\t\t\tx = x,\n\t\t\t\ty = y,\n\t\t\t\tz = z,\n\t\t\t\topacity = contour.alpha,\n\t\t\t\tzmin = contour.zlim[0] if contour.zlim is not None else None,\n\t\t\t\tzmax = contour.zlim[1] if contour.zlim is not None else None,\n\t\t\t\tcolorbar = dict(\n\t\t\t\t\ttitle = ('log ' if contour.zscale == 'log' else '') + (contour.zlabel if contour.zlabel is not None else ''),\n\t\t\t\t\ttitleside = 'right',\n\t\t\t\t),\n\t\t\t\thovertemplate = f'{(self.xlabel if self.xlabel is not None else \"x\")}: %{{x}}<br>{(self.ylabel if self.ylabel is not None else \"y\")}: %{{y}}<br>{(contour.zlabel if contour.zlabel is not None else \"color scale\")}: %{{z}}<extra></extra>',\n\t\t\t\tcontours = dict(\n\t\t\t\t\tcoloring = 'heatmap',\n\t\t\t\t\tshowlabels = True, # show labels on contours\n\t\t\t\t\tlabelfont = dict( # label font properties\n\t\t\t\t\t\tcolor = 'black',\n\t\t\t\t\t),\n\t\t\t\t\tstart = lowest_contour,\n\t\t\t\t\tend = highest_contour,\n\t\t\t\t\tsize = (highest_contour-lowest_contour)/(n_contours),\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\t\tself.plotly_figure.update_layout(legend_orientation=\"h\")\n\t\t\ndef translate_marker_and_linestyle_to_Plotly_mode(marker, linestyle):\n\t\"\"\"<marker> and <linestyle> are each one and only one of the valid\n\toptions for each object.\"\"\"\n\tif marker is None and linestyle != 'none':\n\t\tmode = 'lines'\n\telif marker is not None and linestyle != 'none':\n\t\tmode = 'lines+markers'\n\telif marker is not None and linestyle == 'none':\n\t\tmode = 'markers'\n\telse:\n\t\tmode = 'lines'\n\treturn mode\n\ndef map_marker_to_Plotly_markers(marker):\n\tmarkers_map = {\n\t\t'.': 'circle',\n\t\t'+': 'cross',\n\t\t'x': 'x',\n\t\t'o': 'circle-open',\n\t\t'*': 'star',\n\t\tNone: None\n\t}\n\treturn markers_map[marker]\n\ndef map_linestyle_to_Plotly_linestyle(linestyle):\n\tlinestyle_map = {\n\t\t'solid': None,\n\t\tNone: None,\n\t\t'none': None,\n\t\t'dashed': 'dash',\n\t\t'dotted': 'dot',\n\t}\n\treturn linestyle_map[linestyle]\n\ndef rgb2hexastr_color(rgb_color: tuple):\n\t# Assuming that <rgb_color> is a (r,g,b) tuple.\n\tcolor_str = '#'\n\tfor rgb in rgb_color:\n\t\tcolor_hex_code = hex(int(rgb*255))[2:]\n\t\tif len(color_hex_code) < 2:\n\t\t\tcolor_hex_code = f'0{color_hex_code}'\n\t\tcolor_str += color_hex_code\n\treturn color_str\n"
] | [
[
"numpy.log",
"numpy.array",
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Dorcoh4/BARTScore | [
"e24fd22b80a01ef142ce43e24ec585f1ee8c1ff2"
] | [
"train/eli5.py"
] | [
"print(\"importing\")\n\nfrom datasets import load_dataset\nfrom datasets import load_metric\nfrom transformers import AutoTokenizer, AutoModelForSequenceClassification\nfrom transformers import TrainingArguments, DefaultFlowCallback, PrinterCallback\nfrom transformers import Trainer\nimport torch\nfrom torch import nn\nimport numpy as np\nimport pickle\nfrom sklearn.preprocessing import StandardScaler\nimport random\nimport json\n\nsep_token = \"[SEP]\" # FORDOR maybe many special tokens\npretrained_model_name = \"roberta-base\" # 'bert-base-cased'\n\n\n\nclass my_Bert(nn.Module):\n def __init__(self, bert):\n super().__init__()\n self.bert = bert\n\n\n\n def forward(self,input_ids,attention_mask=None,labels=None,**kwargs):\n res = self.bert.forward(input_ids,attention_mask,labels=labels,**kwargs)\n print(f\"FORDOR-input_ids {input_ids}\")\n print(f\"FORDOR-inputss {tokenizer.decode(input_ids[0])}\")\n print(f\"FORDOR-inputss {tokenizer.decode(input_ids[1])}\")\n print(f\"FORDOR-labels {labels}\")\n print(f\"FORDOR-res {res}\")\n return res\n\n\n\nprint(\"starting load\")\n\n\n# for i in range(len(dataset[\"train_eli5\"])):\n# print(f'train= {dataset[\"train_eli5\"][i][\"answers\"]}')\n# print(f'valid= {dataset[\"validation_eli5\"][i][\"answers\"]}')\n# print(f'test= {dataset[\"test_eli5\"][i][\"answers\"]}')\n\n\n\nclass ELI5MetricDataset(torch.utils.data.Dataset):\n def __init__(self, encodings, labels):\n self.encodings = encodings\n self.labels = labels\n\n def __getitem__(self, idx):\n item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n item['labels'] = torch.tensor(self.labels[idx])\n return item\n\n def __len__(self):\n return len(self.labels)\n\n\ndef tokenize_function(examples):\n return tokenizer(examples[\"text\"], padding=\"max_length\", truncation=True)\n \n \ndef changeArr(input1):\n \n # Copy input array into newArray\n newArray = input1.copy()\n \n # Sort newArray[] in ascending order\n newArray.sort()\n \n # Dictionary to store the rank of\n # the array element\n ranks = {}\n \n rank = 1\n \n for index in range(len(newArray)):\n element = newArray[index];\n \n # Update rank of element\n if element not in ranks:\n ranks[element] = rank\n rank += 1\n \n # Assign ranks to elements\n for index in range(len(input1)):\n element = input1[index]\n input1[index] = float(ranks[input1[index]])\n\nmy_dataset = {}\n\n\n\n\nif False:# try:\n with open(\"my_dataset.pickle\", \"rb\" ) as f:\n my_dataset = pickle.load(f)\nelse: # except IOError:\n print(\"could not load my_dataset - preprocessing\")\n raw_datasets = load_dataset(\"eli5\")\n tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name)\n \n def preprocess_data(split_name):\n with open(f'{split_name}.json', 'a') as the_file:\n \n inputs = []\n labels = []\n cnt = 0\n for example in raw_datasets[split_name]:\n\n question = example[\"title\"]+ example[\"selftext\"] #FORDOR add special sep token?\n for i in range (len (example[\"answers\"][\"a_id\"])):\n answer = example[\"answers\"][\"text\"][i]\n# question = question.replace('\"','\\\\\"')\n# answer = answer.replace('\"','\\\\\"')\n the_file.write(f'{{\"text\": {json.dumps(question)}, \"summary\": {json.dumps(answer)} }}\\n')\n# inputs.append(question + sep_token + answer)\n # print (f'FORDOR float - {float(example[\"answers\"][\"score\"][i])} {example[\"answers\"][\"score\"][i]}')\n# labels.append(float(example[\"answers\"][\"score\"][i]))\n cnt = cnt+1\n if cnt > 200000:\n break\n\n # tokenized_datasets = raw_datasets.map(tokenize_function, 
batched=True)\n \n #shuffle data\n# c = list(zip(inputs, labels))\n# random.seed(42)\n# random.shuffle(c)\n# inputs, labels = zip(*c)\n# inputs = list(inputs)\n# labels = list(labels)\n\n \n# encodings = tokenizer(inputs, padding=\"max_length\", truncation=True)\n# encodings2 = tokenizer(inputs, padding=\"max_length\", truncation=False)\n# for i in range(len(encodings)):\n# if len(encodings[i]) != len( encodings2[i]):\n# print (print(f\"encoding and length {encodings[i]}, {len(encodings[i])} no truncation = {encodings2[i]}, {len(encodings2[i])}\"))\n# \n \n \n# tensor_labels = torch.as_tensor(labels).reshape(-1,1)\n# scaler = StandardScaler()\n# scaler.fit(tensor_labels)\n# scaled_labels = scaler.transform(tensor_labels).astype(np.float32)\n# changeArr(labels)\n\n \n# my_dataset[split_name] = ELI5MetricDataset(encodings, scaled_labels)\n# print (f\"FORDOR lens {len(encodings)}=={len(labels)}\")\n # assert len(encodings) == len(labels)\n \n preprocess_data(\"train_eli5\")\n preprocess_data(\"validation_eli5\")\n# pickle.dump( my_dataset, open( \"my_dataset.pickle\", \"wb\" ) )\n\n# metric = load_metric(\"spearmanr\")\n# def compute_metrics(eval_pred):\n# logits, labels = eval_pred\n# print(f'logits- {max(logits)}, {min(logits)}')\n# print(f'labels- {max(labels)}, {min(labels)}')\n# return metric.compute(predictions=logits, references=labels)\n\n# model = AutoModelForSequenceClassification.from_pretrained(pretrained_model_name, num_labels=1)\n# # freezing bert parameters leaving only regression layer\n# # for param in model.bert.parameters():\n# # param.requires_grad = False\n# # model = my_Bert(model)\n# # print (f\"FORDOR model = {str(model)}\")\n# # print (f'FORDOR debug {raw_datasets[\"train_eli5\"][0][\"answers\"]} =:= {model(input_ids=my_dataset[\"train_eli5\"][0][\"input_ids\"].unsqueeze(0), attention_mask=my_dataset[\"train_eli5\"][0][\"attention_mask\"].unsqueeze(0), token_type_ids=my_dataset[\"train_eli5\"][0][\"token_type_ids\"].unsqueeze(0))}')\n# training_args = TrainingArguments(\"test_trainer\", evaluation_strategy=\"steps\", eval_steps=10000, save_steps=10000, per_device_train_batch_size=8, per_device_eval_batch_size=8)\n# trainer = Trainer(model=model, args=training_args, train_dataset=my_dataset[\"train_eli5\"], eval_dataset=my_dataset[\"validation_eli5\"], compute_metrics=compute_metrics,\n# callbacks = [\n# DefaultFlowCallback(),\n# PrinterCallback()\n# ],\n# )\n# #, max_steps=3000 \n# trainer.train()\n\n# # model.eval()\n# # print (f'FORDOR2 debug {raw_datasets[\"train_eli5\"][0][\"answers\"]} =:= {model(input_ids=my_dataset[\"train_eli5\"][0][\"input_ids\"].unsqueeze(0).cuda(), attention_mask=my_dataset[\"train_eli5\"][0][\"attention_mask\"].unsqueeze(0).cuda(), token_type_ids=my_dataset[\"train_eli5\"][0][\"token_type_ids\"].unsqueeze(0).cuda())}')\n# # print (f'FORDOR3 debug {raw_datasets[\"train_eli5\"][0][\"answers\"]} =:= {model(input_ids=my_dataset[\"train_eli5\"][1][\"input_ids\"].unsqueeze(0).cuda(), attention_mask=my_dataset[\"train_eli5\"][1][\"attention_mask\"].unsqueeze(0).cuda(), token_type_ids=my_dataset[\"train_eli5\"][1][\"token_type_ids\"].unsqueeze(0).cuda())}')\n# # print (f'FORDOR4 debug {raw_datasets[\"train_eli5\"][1][\"answers\"]} =:= {model(input_ids=my_dataset[\"train_eli5\"][4][\"input_ids\"].unsqueeze(0).cuda(), attention_mask=my_dataset[\"train_eli5\"][4][\"attention_mask\"].unsqueeze(0).cuda(), token_type_ids=my_dataset[\"train_eli5\"][4][\"token_type_ids\"].unsqueeze(0).cuda())}')\n\n\n# print (\"evaluation starting\")\n# print 
(trainer.evaluate())\n"
] | [
[
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alexis-thual/audio-signal-analysis | [
"226e654d7482869c0976199babebac1c67c3803a"
] | [
"test.py"
] | [
"import numpy as np\n\na = np.array([1,2,3,4,5])\nb = np.argmax(a[1:2])\nb\n"
] | [
[
"numpy.array",
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Pedestrian1671022/keras_yolov3 | [
"1d66375f67d87ed7d3967364373ec8cdea776ef1"
] | [
"yolo.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nClass definition of YOLO_v3 style detection model on image and video\n\"\"\"\n\nimport colorsys\nimport os\nfrom timeit import default_timer as timer\n\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom keras.layers import Input\nfrom PIL import Image, ImageFont, ImageDraw\n\nfrom yolo3.model import yolo_eval, yolo_body, tiny_yolo_body\nfrom yolo3.utils import letterbox_image\nimport os\nfrom keras.utils import multi_gpu_model\n\nclass YOLO(object):\n _defaults = {\n \"model_path\": 'model_data/yolov3.h5',\n \"anchors_path\": 'model_data/yolo_anchors.txt',\n \"classes_path\": 'model_data/coco_classes.txt',\n \"score\" : 0.3,\n \"iou\" : 0.45,\n \"model_image_size\" : (416, 416),\n \"gpu_num\" : 1,\n }\n\n @classmethod\n def get_defaults(cls, n):\n if n in cls._defaults:\n return cls._defaults[n]\n else:\n return \"Unrecognized attribute name '\" + n + \"'\"\n\n def __init__(self, **kwargs):\n self.__dict__.update(self._defaults) # set up default values\n self.__dict__.update(kwargs) # and update with user overrides\n self.class_names = self._get_class()\n self.anchors = self._get_anchors()\n self.sess = K.get_session()\n self.boxes, self.scores, self.classes = self.generate()\n\n def _get_class(self):\n classes_path = os.path.expanduser(self.classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n def _get_anchors(self):\n anchors_path = os.path.expanduser(self.anchors_path)\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n\n def generate(self):\n model_path = os.path.expanduser(self.model_path)\n assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'\n\n # Load model, or construct model and load weights.\n num_anchors = len(self.anchors)\n num_classes = len(self.class_names)\n is_tiny_version = num_anchors==6 # default setting\n try:\n self.yolo_model = load_model(model_path, compile=False)\n except:\n self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \\\n if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)\n self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match\n else:\n assert self.yolo_model.layers[-1].output_shape[-1] == \\\n num_anchors/len(self.yolo_model.output) * (num_classes + 5), \\\n 'Mismatch between model and given anchor and class sizes'\n\n print('{} model, anchors, and classes loaded.'.format(model_path))\n\n # Generate colors for drawing bounding boxes.\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\n for x in range(len(self.class_names))]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n self.colors))\n np.random.seed(10101) # Fixed seed for consistent colors across runs.\n np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.\n np.random.seed(None) # Reset seed to default.\n\n # Generate output tensor targets for filtered bounding boxes.\n self.input_image_shape = K.placeholder(shape=(2, ))\n if self.gpu_num>=2:\n self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)\n boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,\n len(self.class_names), self.input_image_shape,\n 
score_threshold=self.score, iou_threshold=self.iou)\n return boxes, scores, classes\n\n def detect_image(self, image):\n start = timer()\n\n if self.model_image_size != (None, None):\n assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'\n assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'\n boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))\n else:\n new_image_size = (image.width - (image.width % 32),\n image.height - (image.height % 32))\n boxed_image = letterbox_image(image, new_image_size)\n image_data = np.array(boxed_image, dtype='float32')\n\n print(image_data.shape)\n image_data /= 255.\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n\n out_boxes, out_scores, out_classes = self.sess.run(\n [self.boxes, self.scores, self.classes],\n feed_dict={\n self.yolo_model.input: image_data,\n self.input_image_shape: [image.size[1], image.size[0]],\n K.learning_phase(): 0\n })\n\n print('Found {} boxes for {}'.format(len(out_boxes), 'img'))\n\n font = ImageFont.truetype(font='font/FiraMono-Medium.otf',\n size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n thickness = (image.size[0] + image.size[1]) // 300\n\n for i, c in reversed(list(enumerate(out_classes))):\n predicted_class = self.class_names[c]\n box = out_boxes[i]\n score = out_scores[i]\n\n label = '{} {:.2f}'.format(predicted_class, score)\n draw = ImageDraw.Draw(image)\n label_size = draw.textsize(label, font)\n\n top, left, bottom, right = box\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n print(label, (left, top), (right, bottom))\n\n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n # My kingdom for a good redistributable image drawing library.\n for i in range(thickness):\n draw.rectangle(\n [left + i, top + i, right - i, bottom - i],\n outline=self.colors[c])\n draw.rectangle(\n [tuple(text_origin), tuple(text_origin + label_size)],\n fill=self.colors[c])\n draw.text(text_origin, label, fill=(0, 0, 0), font=font)\n del draw\n\n end = timer()\n print(end - start)\n return image\n\n def close_session(self):\n self.sess.close()\n\ndef detect_video(yolo, video_path, output_path=\"\"):\n import cv2\n vid = cv2.VideoCapture(video_path)\n if not vid.isOpened():\n raise IOError(\"Couldn't open webcam or video\")\n video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))\n video_fps = vid.get(cv2.CAP_PROP_FPS)\n video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),\n int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n isOutput = True if output_path != \"\" else False\n if isOutput:\n print(\"!!! 
TYPE:\", type(output_path), type(video_FourCC), type(video_fps), type(video_size))\n out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)\n accum_time = 0\n curr_fps = 0\n fps = \"FPS: ??\"\n prev_time = timer()\n while True:\n return_value, frame = vid.read()\n image = Image.fromarray(frame)\n image = yolo.detect_image(image)\n result = np.asarray(image)\n curr_time = timer()\n exec_time = curr_time - prev_time\n prev_time = curr_time\n accum_time = accum_time + exec_time\n curr_fps = curr_fps + 1\n if accum_time > 1:\n accum_time = accum_time - 1\n fps = \"FPS: \" + str(curr_fps)\n curr_fps = 0\n cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.50, color=(255, 0, 0), thickness=2)\n cv2.namedWindow(\"result\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"result\", result)\n if isOutput:\n out.write(result)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n yolo.close_session()\n\n"
] | [
[
"numpy.expand_dims",
"numpy.random.seed",
"numpy.asarray",
"numpy.random.shuffle",
"numpy.floor",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pbotros/TRACER | [
"269e17ca27fe0fb78c30484d8685d119caab5dbb",
"269e17ca27fe0fb78c30484d8685d119caab5dbb"
] | [
"tracer/index_tracker.py",
"tracer/probes_insertion.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 14 13:31:04 2020\n\n@author: jacopop\n\"\"\"\n\n# Class to scroll the atlas slices (coronal, sagittal and horizontal) \nclass IndexTracker(object):\n def __init__(self, ax, X, pixdim, p, S=None):\n self.ax = ax\n self.plane = p.lower()\n self.ax.set_title('Atlas viewer')\n print('\\nuse scroll wheel to navigate the atlas \\n')\n\n self.X = X\n if self.plane == 'c':\n rows, self.slices, cols = X.shape\n if S is None:\n self.ind = 653\n else:\n self.ind = S\n self.im = self.ax.imshow(self.X[:, self.ind, :].T, origin=\"lower\", extent=[0, 512*pixdim, 0, 512*pixdim], cmap='gray') #cmap='gist_yarg' if white background wanted\n elif self.plane == 's':\n self.slices, rows, cols = X.shape\n if S is None:\n self.ind = 246\n else:\n self.ind = S\n self.im = self.ax.imshow(self.X[self.ind, :, :].T, origin=\"lower\", extent=[0 ,1024*pixdim, 0, 512*pixdim], cmap='gray')\n elif self.plane == 'h': \n rows, cols, self.slices = X.shape\n if S is None:\n self.ind = 440\n else:\n self.ind = S\n self.im = self.ax.imshow(self.X[:, :, self.ind].T, origin=\"lower\", extent=[0, 512*pixdim, 0, 1024*pixdim], cmap='gray')\n self.update()\n\n def onscroll(self, event):\n if event.button == 'up':\n self.ind = (self.ind + 1) % self.slices\n else:\n self.ind = (self.ind - 1) % self.slices\n self.update()\n\n def update(self):\n if self.plane == 'c':\n self.im.set_data(self.X[:, self.ind, :].T)\n elif self.plane == 's':\n self.im.set_data(self.X[self.ind, :, :].T)\n elif self.plane == 'h':\n self.im.set_data(self.X[:, :, self.ind].T)\n self.ax.set_ylabel('slice %d' % self.ind)\n self.im.axes.figure.canvas.draw() \n \n# Class to scroll the overlayed atlas slices (coronal, sagittal and horizontal) \nclass IndexTracker_g(object):\n def __init__(self, ax, X, pixdim, p, S):\n self.ax = ax\n self.plane = p.lower()\n ax.set_title('Atlas viewer')\n print('\\nuse scroll wheel to navigate the atlas \\n')\n \n self.X = X\n if self.plane == 'c':\n rows, self.slices, cols = X.shape\n self.ind = S\n self.im = ax.imshow(self.X[:, self.ind, :], origin=\"lower\", alpha=0.5, extent=[0, 512*pixdim, 512*pixdim, 0], cmap='gray')\n elif self.plane == 's':\n self.slices, rows, cols = X.shape\n self.ind = S \n self.im = ax.imshow(self.X[self.ind, :, :].T, origin=\"lower\", extent=[0 ,1024*pixdim, 512*pixdim , 0], cmap='gray') \n elif self.plane == 'h': \n rows, cols, self.slices = X.shape\n self.ind = S \n self.im = ax.imshow(self.X[:, :, self.ind].T, origin=\"lower\", extent=[0, 512*pixdim, 1024*pixdim, 0], cmap='gray') \n self.update()\n\n def onscroll(self, event):\n if event.button == 'up':\n self.ind = (self.ind + 1) % self.slices\n else:\n self.ind = (self.ind - 1) % self.slices\n self.update() \n \n def update(self):\n if self.plane == 'c':\n self.im.set_data(self.X[:, self.ind, :])\n elif self.plane == 's':\n self.im.set_data(self.X[self.ind, :, :])\n elif self.plane == 'h':\n self.im.set_data(self.X[:, :, self.ind])\n self.ax.set_ylabel('slice %d' % self.ind)\n self.im.axes.figure.canvas.draw() \n\n# Class to scroll the atlas slices with probe (coronal, sagittal and horizontal) \nclass IndexTracker_p(object):\n def __init__(self, ax, X, pixdim, p, S):\n self.ax = ax\n self.plane = p.lower()\n ax.set_title('Atlas viewer')\n print('\\nuse scroll wheel to navigate the atlas \\n')\n\n self.X = X\n if self.plane == 'c':\n rows, self.slices, cols = X.shape\n self.ind = S\n self.im = ax.imshow(self.X[:, self.ind, :].T, origin=\"lower\", extent=[0, 512*pixdim, 0, 
512*pixdim], cmap='gray')\n elif self.plane == 's':\n self.slices, rows, cols = X.shape\n self.ind = S \n self.im = ax.imshow(self.X[self.ind, :, :].T, origin=\"lower\", extent=[0 ,1024*pixdim, 0, 512*pixdim], cmap='gray') \n elif self.plane == 'h': \n rows, cols, self.slices = X.shape\n self.ind = S \n self.im = ax.imshow(self.X[:, :, self.ind].T, origin=\"lower\", extent=[0, 512*pixdim, 0, 1024*pixdim], cmap='gray') \n self.update()\n\n def onscroll(self, event):\n if event.button == 'up':\n self.ind = (self.ind + 1) % self.slices\n else:\n self.ind = (self.ind - 1) % self.slices\n self.update()\n\n def update(self):\n if self.plane == 'c':\n self.im.set_data(self.X[:, self.ind, :].T)\n elif self.plane == 's':\n self.im.set_data(self.X[self.ind, :, :].T)\n elif self.plane == 'h':\n self.im.set_data(self.X[:, :, self.ind].T)\n self.ax.set_ylabel('slice %d' % self.ind)\n self.im.axes.figure.canvas.draw() \n \n \n# Class to scroll the boundaries atlas slices (coronal, sagittal and horizontal) \nclass IndexTracker_b(object):\n def __init__(self, ax, X, pixdim, p, S):\n self.ax = ax\n self.plane = p.lower()\n ax.set_title('Atlas viewer')\n print('\\nuse scroll wheel to navigate the atlas \\n')\n \n self.X = X\n if self.plane == 'c':\n rows, self.slices, cols = X.shape\n self.ind = S\n self.im = ax.imshow(self.X[:, self.ind, :].T, origin=\"lower\", alpha=0.5, extent=[0, 512*pixdim, 0, 512*pixdim,], cmap='gray')\n elif self.plane == 's':\n self.slices, rows, cols = X.shape\n self.ind = S \n self.im = ax.imshow(self.X[:, :, self.ind].T, origin=\"lower\", alpha=0.5, extent=[0 ,1024*pixdim, 0, 512*pixdim], cmap='gray') \n elif self.plane == 'h': \n rows, cols, self.slices = X.shape\n self.ind = S \n self.im = ax.imshow(self.X[self.ind, :, :].T, origin=\"lower\", alpha=0.5, extent=[0, 512*pixdim, 0, 1024*pixdim], cmap='gray') \n self.update()\n\n def onscroll(self, event):\n if event.button == 'up':\n self.ind = (self.ind + 1) % self.slices\n else:\n self.ind = (self.ind - 1) % self.slices\n self.update() \n \n def update(self):\n if self.plane == 'c':\n self.im.set_data(self.X[:, self.ind, :])\n elif self.plane == 's':\n self.im.set_data(self.X[:, :, self.ind])\n elif self.plane == 'h':\n self.im.set_data(self.X[self.ind, :, :])\n self.ax.set_ylabel('slice %d' % self.ind)\n self.im.axes.figure.canvas.draw() \n \n\n# Class to scroll the color atlas slices (coronal, sagittal and horizontal) \nclass IndexTracker_c(object):\n def __init__(self, ax, X, pixdim, p, S):\n self.ax = ax\n self.plane = p.lower()\n ax.set_title('Atlas viewer')\n print('\\nuse scroll wheel to navigate the atlas \\n')\n \n self.X = X\n if self.plane == 'c':\n rows, self.slices, cols, color = X.shape\n self.ind = S\n self.L = self.X.transpose((2,1,0,3))\n self.im = ax.imshow(self.L[:, self.ind, :], origin=\"lower\", alpha=0.5, extent=[0, 512*pixdim, 0, 512*pixdim ], cmap='gray')\n elif self.plane == 's':\n self.slices, rows, cols, color= X.shape\n self.ind = S\n self.L = self.X.transpose((0,2,1,3)) \n self.im = ax.imshow(self.L[self.ind, :, :], origin=\"lower\", alpha=0.5, extent=[0, 1024*pixdim, 0, 512*pixdim], cmap='gray') \n elif self.plane == 'h': \n rows, cols, self.slices, color = X.shape\n self.ind = S\n self.L = self.X.transpose((1,0,2,3))\n self.im = ax.imshow(self.L[:, :, self.ind], origin=\"lower\", alpha=0.5, extent=[0, 512*pixdim, 0, 1024*pixdim ], cmap='gray') \n self.update()\n\n def onscroll(self, event):\n if event.button == 'up':\n self.ind = (self.ind + 1) % self.slices\n else:\n self.ind = (self.ind - 1) % 
self.slices\n self.update() \n \n def update(self):\n if self.plane == 'c':\n self.im.set_data(self.L[:, self.ind, :])\n elif self.plane == 's':\n self.im.set_data(self.L[self.ind, :, :])\n elif self.plane == 'h':\n self.im.set_data(self.L[:, :, self.ind])\n self.ax.set_ylabel('slice %d' % self.ind)\n self.im.axes.figure.canvas.draw() \n \n \nimport numpy as np \nimport matplotlib.pyplot as plt \ndef lighten_color(color, amount=0.5):\n \"\"\"\n Lightens the given color by multiplying (1-luminosity) by the given amount.\n Input can be matplotlib color string, hex string, or RGB tuple.\n \"\"\"\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])\n# Class to scroll the atlas slices with probe (coronal, sagittal and horizontal) \nclass IndexTracker_pi(object):\n\n def __init__(self, ax, X, pixdim, p, S, unique_slice, probe_x, probe_y, probe_colors, probe_selecter, line_fit):\n \n self.ax = ax\n self.plane = p.lower()\n self.unique_slice = unique_slice\n self.probe_x = probe_x\n self.probe_y = probe_y\n self.probe_colors = probe_colors\n self.probe_selecter = probe_selecter\n self.line_fit = line_fit \n ax.set_title('Atlas viewer')\n print('\\nuse scroll wheel to navigate the atlas \\n')\n\n self.X = X\n if self.plane == 'c':\n rows, self.slices, cols = X.shape\n self.ind = S\n self.im = ax.imshow(self.X[:, self.ind, :].T, origin=\"lower\", extent=[0, 512*pixdim, 0, 512*pixdim], cmap='gray')\n # plot the probe\n if self.line_fit.direction[0] == 0:\n self.line = plt.plot(np.array(sorted(self.probe_x)), np.array(sorted(self.probe_y)),color=self.probe_colors[self.probe_selecter], linestyle='dashed', linewidth=0.8); \n else:\n self.m, self.b = np.polyfit(self.probe_x, self.probe_y, 1)\n self.line = plt.plot(np.array(sorted(self.probe_x)), self.m*np.array(sorted(self.probe_x)) + self.b,color=self.probe_colors[self.probe_selecter], linestyle='dashed', linewidth=0.8); \n elif self.plane == 's':\n self.slices, rows, cols = X.shape\n self.ind = S \n self.im = ax.imshow(self.X[self.ind, :, :].T, origin=\"lower\", extent=[0 ,1024*pixdim, 0, 512*pixdim], cmap='gray') \n # plot the probe\n if self.line_fit.direction[1] == 0:\n self.line = plt.plot(np.array(sorted(self.probe_x)), np.array(sorted(self.probe_y)),color=self.probe_colors[self.probe_selecter], linestyle='dashed', linewidth=0.8); \n else:\n self.m, self.b = np.polyfit(self.probe_x, self.probe_y, 1)\n self.line = plt.plot(np.array(sorted(self.probe_x)), self.m*np.array(sorted(self.probe_x)) + self.b,color=self.probe_colors[self.probe_selecter], linestyle='dashed', linewidth=0.8); \n elif self.plane == 'h': \n rows, cols, self.slices = X.shape\n self.ind = S \n self.im = ax.imshow(self.X[:, :, self.ind].T, origin=\"lower\", extent=[0, 512*pixdim, 0, 1024*pixdim], cmap='gray') \n # plot the probe\n if self.line_fit.direction[0] == 0:\n self.line = plt.plot(np.array(sorted(self.probe_x)), np.array(sorted(self.probe_y)),color=self.probe_colors[self.probe_selecter], linestyle='dashed', linewidth=0.8); \n else:\n self.m, self.b = np.polyfit(self.probe_x, self.probe_y, 1)\n self.line = plt.plot(np.array(sorted(self.probe_x)), self.m*np.array(sorted(self.probe_x)) + self.b,color=self.probe_colors[self.probe_selecter], linestyle='dashed', linewidth=0.8); \n self.points = [plt.scatter(self.probe_x[i], self.probe_y[i], color=self.probe_colors[self.probe_selecter], s=2) for i in range(len(self.probe_x))] 
\n self.update()\n\n def onscroll(self, event):\n if event.button == 'up':\n self.ind = (self.ind + 1) % self.slices\n #self.points.remove()\n self.line.pop(0).remove()\n if self.ind not in self.unique_slice:\n self.points = [plt.scatter(self.probe_x[i], self.probe_y[i], color=lighten_color(self.probe_colors[self.probe_selecter], 0.4), s=2) for i in range(len(self.probe_x))]\n if (self.line_fit.direction[0] == 0 and (self.plane == 'c' or self.plane == 'h')) or (self.line_fit.direction[1] == 0 and self.plane == 's'):\n self.line = plt.plot(np.array(sorted(self.probe_x)), np.array(sorted(self.probe_y)),color=lighten_color(self.probe_colors[self.probe_selecter], 0.4), linestyle='dashed', linewidth=0.8)\n else:\n self.line = plt.plot(np.array(sorted(self.probe_x)), self.m*np.array(sorted(self.probe_x)) + self.b,color=lighten_color(self.probe_colors[self.probe_selecter], 0.4), linestyle='dashed', linewidth=0.8)\n else:\n self.points = [plt.scatter(self.probe_x[i], self.probe_y[i], color=self.probe_colors[self.probe_selecter], s=2) for i in range(len(self.probe_x))]\n if (self.line_fit.direction[0] == 0 and (self.plane == 'c' or self.plane == 'h')) or (self.line_fit.direction[1] == 0 and self.plane == 's'):\n self.line = plt.plot(np.array(sorted(self.probe_x)), np.array(sorted(self.probe_y)),color=self.probe_colors[self.probe_selecter], linestyle='dashed', linewidth=0.8); \n else:\n # plot the probe\n self.line = plt.plot(np.array(sorted(self.probe_x)), self.m*np.array(sorted(self.probe_x)) + self.b,color=self.probe_colors[self.probe_selecter], linestyle='dashed', linewidth=0.8); \n else:\n self.ind = (self.ind - 1) % self.slices\n #self.points.remove()\n self.line.pop(0).remove()\n if self.ind not in self.unique_slice: \n self.points = [plt.scatter(self.probe_x[i], self.probe_y[i], color=lighten_color(self.probe_colors[self.probe_selecter], 0.4), s=2) for i in range(len(self.probe_x))]\n if (self.line_fit.direction[0] == 0 and (self.plane == 'c' or self.plane == 'h')) or (self.line_fit.direction[1] == 0 and self.plane == 's'):\n self.line = plt.plot(np.array(sorted(self.probe_x)), np.array(sorted(self.probe_y)),color=lighten_color(self.probe_colors[self.probe_selecter], 0.4), linestyle='dashed', linewidth=0.8)\n else:\n self.line = plt.plot(np.array(sorted(self.probe_x)), self.m*np.array(sorted(self.probe_x)) + self.b,color=lighten_color(self.probe_colors[self.probe_selecter], 0.4), linestyle='dashed', linewidth=0.8)\n else:\n self.points = [plt.scatter(self.probe_x[i], self.probe_y[i], color=self.probe_colors[self.probe_selecter], s=2) for i in range(len(self.probe_x))]\n # plot the probe\n if (self.line_fit.direction[0] == 0 and (self.plane == 'c' or self.plane == 'h')) or (self.line_fit.direction[1] == 0 and self.plane == 's'):\n self.line = plt.plot(np.array(sorted(self.probe_x)), np.array(sorted(self.probe_y)),color=self.probe_colors[self.probe_selecter], linestyle='dashed', linewidth=0.8); \n else:\n self.line = plt.plot(np.array(sorted(self.probe_x)), self.m*np.array(sorted(self.probe_x)) + self.b,color=self.probe_colors[self.probe_selecter], linestyle='dashed', linewidth=0.8); \n self.update()\n\n def update(self):\n if self.plane == 'c':\n self.im.set_data(self.X[:, self.ind, :].T)\n elif self.plane == 's':\n self.im.set_data(self.X[self.ind, :, :].T)\n elif self.plane == 'h':\n self.im.set_data(self.X[:, :, self.ind].T)\n self.ax.set_ylabel('slice %d' % self.ind)\n self.im.axes.figure.canvas.draw() \n\n\n# Class to scroll the atlas slices with probe (coronal, sagittal and horizontal) 
and color of selected regions \nclass IndexTracker_pi_col(object):\n\n def __init__(self, ax, X, edges, pixdim, p, S, probe_x, probe_y, line_fit):\n self.ax = ax\n self.plane = p.lower()\n self.probe_x = probe_x\n self.probe_y = probe_y\n self.line_fit = line_fit\n\n self.X = X\n self.edges =edges\n if self.plane == 'c':\n rows, self.slices, cols, color = X.shape\n self.ind = S\n self.L = self.X.transpose((2,1,0,3))\n self.im = ax.imshow(self.L[:, self.ind, :], origin=\"lower\", extent=[0, 512*pixdim, 0, 512*pixdim]) \n self.im2 = ax.imshow(self.edges[:, self.ind, :], origin=\"lower\", alpha=0.5, extent=[0, 512*pixdim, 0, 512*pixdim,], cmap='gray')\n # plot the probe\n if self.line_fit.direction[0] == 0:\n self.line = plt.plot(np.array(sorted(self.probe_x)), np.array(sorted(self.probe_y)), linewidth=1.5); \n else:\n self.m, self.b = np.polyfit(self.probe_x, self.probe_y, 1)\n self.line = plt.plot(np.array(sorted(self.probe_x)), self.m*np.array(sorted(self.probe_x)) + self.b,color='black', linewidth=1.5); \n elif self.plane == 's':\n self.slices, rows, cols, color= X.shape\n self.ind = S\n self.L = self.X.transpose((0,2,1,3)) \n self.im = ax.imshow(self.L[self.ind, :, :], origin=\"lower\", extent=[0 ,1024*pixdim, 0, 512*pixdim]) \n self.im2 = ax.imshow(self.edges[:, :, self.ind], origin=\"lower\", alpha=0.5, extent=[0, 1024*pixdim, 0, 512*pixdim,], cmap='gray')\n # plot the probe\n if self.line_fit.direction[1] == 0:\n self.line = plt.plot(np.array(sorted(self.probe_x)), np.array(sorted(self.probe_y)), linewidth=1.5); \n else:\n self.m, self.b = np.polyfit(self.probe_x, self.probe_y, 1)\n self.line = plt.plot(np.array(sorted(self.probe_x)), self.m*np.array(sorted(self.probe_x)) + self.b,color='black', linewidth=1.5); \n elif self.plane == 'h': \n rows, cols, self.slices, color = X.shape\n self.ind = S\n self.L = self.X.transpose((1,0,2,3)) \n self.im = ax.imshow(self.L[:, :, self.ind], origin=\"lower\", extent=[0, 512*pixdim, 0, 1024*pixdim]) \n self.im2 = ax.imshow(self.edges[self.ind, :, :], origin=\"lower\", alpha=0.5, extent=[0, 512*pixdim, 0, 1024*pixdim], cmap='gray')\n # plot the probe\n if self.line_fit.direction[0] == 0:\n self.line = plt.plot(np.array(sorted(self.probe_x)), np.array(sorted(self.probe_y)), linewidth=1.5); \n else:\n self.m, self.b = np.polyfit(self.probe_x, self.probe_y, 1)\n self.line = plt.plot(np.array(sorted(self.probe_x)), self.m*np.array(sorted(self.probe_x)) + self.b,color='black', linewidth=1.5); \n self.ax.set_ylabel('slice %d' % self.ind)\n self.im.axes.figure.canvas.draw() \n \n ",
"# Import libraries\nimport math \nimport os\nimport os.path\nimport numpy as np\nimport matplotlib\n# matplotlib.use('Qt5Agg')\nimport matplotlib.pyplot as plt\nimport pickle\nfrom collections import OrderedDict\nfrom tabulate import tabulate\nimport mplcursors\nfrom scipy.spatial import distance\nfrom six.moves import input\n\n# fit the probe\nfrom skspatial.objects import Line\n\n\nfrom .ObjSave import probe_obj, save_probe_insertion\nfrom .index_tracker import IndexTracker, IndexTracker_g, IndexTracker_pi, IndexTracker_b, IndexTracker_c, IndexTracker_pi_col\n\n\n\nclass ProbesInsertion(object):\n \"\"\"\n Purpose\n -------------\n To insert probes, pre experiment.\n\n Inputs\n -------------\n atlas :\n probe_folder :\n \n \"\"\"\n\n def __init__(self, atlas, probe_folder):\n \n self.atlas = atlas\n self.probe_folder = probe_folder\n\n \n if not os.path.exists(self.probe_folder):\n raise Exception('Please give the correct folder.')\n \n self.probe_colors = ['purple', 'blue', 'yellow', 'orange', 'red', 'green']\n \n # PROBE\n self.max_probe_length = 10 # maximum length of probe shank is 10mm\n self.probe_widht = 0.07\n self.probe_thickness = 0.024\n self.probe_tip_length = 0.175\n self.total_electrodes = 960 # total number of recording sites\n self.electrode = 0.012 # Electrode size is 12x12 micron\n self.vert_el_dist = 0.02\n # There are 2 electrodes every 0.02 mm\n\n # Lists for the points clicked in atlas and histology\n self.coords_atlas = []\n self.coords_probe_temp_w = []\n self.coords_probe_temp_g = []\n self.coords_probe_temp_p = []\n self.coords_probe_temp_b = []\n self.coords_probe_temp_y = []\n self.coords_probe_temp_o = []\n self.coords_probe_temp_r = []\n # Object for clicked probes\n self.coords_probe = probe_obj()\n # List of probe points\n self.p_probe = []\n # Initialize probe counter and selecter\n self.probe_counter = 0\n self.probe_selecter = 0\n self.probe_selecter_u = 0\n\n self.Pp = []\n\n self.flag_color = 0\n self.flag_boundaries = 0\n self.flag_names = 0\n self.flag = 0\n \n \n self.plane = str(input('Select the plane: coronal (c), sagittal (s), or horizontal (h): ')).lower()\n # Check if the input is correct\n while self.plane != 'c' and self.plane != 's' and self.plane != 'h':\n print('Error: Wrong plane name \\n')\n self.plane = str(input('Select the plane: coronal (c), sagittal (s), or horizontal (h): ')).lower()\n \n \n print('\\nControls: \\n')\n print('--------------------------- \\n')\n print('scroll: move between slices \\n')\n print('g: add/remove gridlines \\n')\n print('b: add/remove name of current region \\n')\n print('u: add/remove viewing boundaries \\n')\n print('v: add/remove atlas color \\n')\n print('r: toggle mode where clicks are logged for probe \\n')\n print('n: trace a new probe \\n')\n print('e: save probes \\n')\n print('w: enable/disable probe viewer mode for current probe \\n')\n print('c: delete most recent probe point \\n')\n print('--------------------------- \\n')\n \n \n \n # Display the ATLAS\n # resolution\n self.dpi_atl = 25.4 / self.atlas.pixdim\n # Bregma coordinates\n self.textstr = 'Bregma (mm): c = %.3f, h = %.3f, s = %.3f \\nBregma (voxels): c = 653, h = 440, s = 246' % (653 * self.atlas.pixdim, 440 * self.atlas.pixdim, 246 * self.atlas.pixdim)\n # these are matplotlib.patch.Patch properties\n self.props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n # Figure\n self.fig, self.ax = plt.subplots(1, 1) #, figsize=(float(d1)/dpi_atl,float(d2)/dpi_atl), dpi=dpi_atl)\n # scroll cursor\n self.tracker = 
IndexTracker(self.ax, self.atlas.atlas_data, self.atlas.pixdim, self.plane)\n self.fig.canvas.mpl_connect('scroll_event', self.tracker.onscroll)\n\n\n\n # place a text box with bregma coordinates in bottom left in axes coords\n self.ax.text(0.03, 0.03, self.textstr, transform=self.ax.transAxes, fontsize=6, verticalalignment='bottom', bbox=self.props)\n if self.plane == 'c':\n # dimensions\n self.d1 = 512\n self.d2 = 512\n self.ax.format_coord = self.format_coord\n elif self.plane == 's':\n # dimensions\n self.d2 = 1024\n self.d1 = 512\n self.ax.format_coord = self.format_coord\n elif self.plane == 'h':\n # dimensions\n self.d2 = 512\n self.d1 = 1024\n self.ax.format_coord = self.format_coord\n plt.show()\n # Fix size and location of the figure window\n self.mngr = plt.get_current_fig_manager()\n self.mngr.window.setGeometry(600, 200, self.d2 * 2, self.d1 * 2)\n self.fig.canvas.mpl_connect('key_press_event', self.on_key)\n\n\n def re_load_probes(self, probe_name):\n print('\\nLoad probe')\n c_file = open(os.path.join(self.probe_folder, probe_name + '.pkl'), \"rb\")\n tdata = pickle.load(c_file)\n self.Pp.append(tdata)\n c_file.close()\n self.flag = 1\n print('Probe loaded')\n\n \n def format_coord(self, x, y):\n # display the coordinates relative to the bregma when hovering with the cursor\n if self.plane == 'c':\n AP = self.tracker.ind * self.atlas.pixdim - 653 * self.atlas.pixdim\n ML = x - 246 * self.atlas.pixdim\n Z = y - 440 * self.atlas.pixdim\n if ML > 0:\n return 'AP=%1.4f, ML=R%1.4f, z=%1.4f' % (AP, abs(ML), Z)\n else:\n return 'AP=%1.4f, ML=L%1.4f, z=%1.4f' % (AP, abs(ML), Z)\n elif self.plane == 's':\n AP = x - 653 * self.atlas.pixdim\n ML = self.tracker.ind * self.atlas.pixdim - 246 * self.atlas.pixdim\n Z = y - 440 * self.atlas.pixdim\n if ML > 0:\n return 'AP=%1.4f, ML=R%1.4f, z=%1.4f' % (AP, abs(ML), Z)\n else:\n return 'AP=%1.4f, ML=L%1.4f, z=%1.4f' % (AP, abs(ML), Z)\n elif self.plane == 'h':\n AP = y - 653 * self.atlas.pixdim\n ML = x - 246 * self.atlas.pixdim\n Z = self.tracker.ind * self.atlas.pixdim - 440 * self.atlas.pixdim\n if ML > 0:\n return 'AP=%1.4f, ML=R%1.4f, z=%1.4f' % (AP, abs(ML), Z)\n else:\n return 'AP=%1.4f, ML=L%1.4f, z=%1.4f' % (AP, abs(ML), Z)\n\n def show_annotation(self, sel):\n if self.flag_names == 1:\n sel.annotation.set_visible(True)\n elif self.flag_names == 0:\n sel.annotation.set_visible(False)\n xi, yi = sel.target / self.atlas.pixdim\n if self.plane == 'c':\n if np.argwhere(np.all(self.atlas.labels_index == self.atlas.segmentation_data[int(math.modf(xi)[1]), self.tracker.ind, int(math.modf(yi)[1])], axis=1)).size:\n self.Text = self.atlas.labels_name[np.argwhere(np.all(self.atlas.labels_index == self.atlas.segmentation_data[\n int(math.modf(xi)[1]), self.tracker.ind, int(math.modf(yi)[1])], axis=1))[0, 0]]\n else:\n # display nothing\n self.Text = ' '\n elif self.plane == 's':\n if np.argwhere(np.all(self.atlas.labels_index == self.atlas.segmentation_data[self.tracker.ind, int(math.modf(xi)[1]), int(math.modf(yi)[1])], axis=1)).size:\n self.Text = self.atlas.labels_name[np.argwhere(np.all(self.atlas.labels_index == self.atlas.segmentation_data[\n self.tracker.ind, int(math.modf(xi)[1]), int(math.modf(yi)[1])], axis=1))[0, 0]]\n else:\n # display nothing\n self.Text = ' '\n elif self.plane == 'h':\n if np.argwhere(np.all(self.atlas.labels_index == self.atlas.segmentation_data[int(math.modf(xi)[1]), int(math.modf(yi)[1]), self.tracker.ind], axis=1)).size:\n self.Text = self.atlas.labels_name[np.argwhere(np.all(self.atlas.labels_index == 
self.atlas.segmentation_data[\n int(math.modf(xi)[1]), int(math.modf(yi)[1]), self.tracker.ind], axis=1))[0, 0]]\n else:\n # display nothing\n self.Text = ' '\n sel.annotation.set_text(self.Text)\n\n \n def onclick_probe(self, event):\n px, py = event.xdata, event.ydata\n # assign global variable to access outside of function\n if self.probe_counter == 0:\n self.coords_probe_temp_w.append((px, py, self.tracker.ind))\n self.p_probe.extend(\n self.ax.plot(event.xdata, event.ydata, color=self.probe_colors[self.probe_counter], marker='o', markersize=2))\n setattr(self.coords_probe, self.probe_colors[self.probe_counter], self.coords_probe_temp_w)\n elif self.probe_counter == 1:\n self.coords_probe_temp_g.append((px, py, self.tracker.ind))\n self.p_probe.extend(\n self.ax.plot(event.xdata, event.ydata, color=self.probe_colors[self.probe_counter], marker='o', markersize=2))\n setattr(self.coords_probe, self.probe_colors[self.probe_counter], self.coords_probe_temp_g)\n elif self.probe_counter == 2:\n self.coords_probe_temp_p.append((px, py, self.tracker.ind))\n self.p_probe.extend(\n self.ax.plot(event.xdata, event.ydata, color=self.probe_colors[self.probe_counter], marker='o', markersize=2))\n setattr(self.coords_probe, self.probe_colors[self.probe_counter], self.coords_probe_temp_p)\n elif self.probe_counter == 3:\n self.coords_probe_temp_b.append((px, py, self.tracker.ind))\n self.p_probe.extend(\n self.ax.plot(event.xdata, event.ydata, color=self.probe_colors[self.probe_counter], marker='o', markersize=2))\n setattr(self.coords_probe, self.probe_colors[self.probe_counter], self.coords_probe_temp_b)\n elif self.probe_counter == 4:\n self.coords_probe_temp_y.append((px, py, self.tracker.ind))\n self.p_probe.extend(\n self.ax.plot(event.xdata, event.ydata, color=self.probe_colors[self.probe_counter], marker='o', markersize=2))\n setattr(self.coords_probe, self.probe_colors[self.probe_counter], self.coords_probe_temp_y)\n elif self.probe_counter == 5:\n self.coords_probe_temp_o.append((px, py, self.tracker.ind))\n self.p_probe.extend(\n self.ax.plot(event.xdata, event.ydata, color=self.probe_colors[self.probe_counter], marker='o', markersize=2))\n setattr(self.coords_probe, self.probe_colors[self.probe_counter], self.coords_probe_temp_o)\n self.fig.canvas.draw()\n return\n\n def on_key2(self, event):\n if event.key == 'n':\n # add a new probe\n if self.probe_counter + 1 < len(self.probe_colors):\n self.probe_counter += 1\n print('probe %d added (%s)' % (self.probe_counter + 1, self.probe_colors[self.probe_counter]))\n else:\n print('Cannot add more probes')\n self.probe_counter = len(self.probe_colors)\n \n elif event.key == 'c':\n print('Delete clicked probe point')\n if len(getattr(self.coords_probe, self.probe_colors[0])) != 0:\n if len(getattr(self.coords_probe, self.probe_colors[self.probe_counter])) != 0:\n getattr(self.coords_probe, self.probe_colors[self.probe_counter]).pop(-1) # remove the point from the list\n self.p_probe[-1].remove() # remove the point from the plot\n self.fig.canvas.draw()\n self.p_probe.pop(-1)\n elif len(getattr(self.coords_probe, self.probe_colors[self.probe_counter])) == 0:\n self.probe_counter -= 1\n try:\n getattr(self.coords_probe, self.probe_colors[self.probe_counter]).pop(-1) # remove the point from the list\n self.p_probe[-1].remove() # remove the point from the plot\n self.fig.canvas.draw()\n self.p_probe.pop(-1)\n \n except:\n pass\n\n \n def on_key(self, event):\n if event.key == 'b':\n # Show the names of the regions\n self.cursor = 
mplcursors.cursor(hover=True)\n self.cursor.connect('add', lambda sel: self.show_annotation(sel))\n if self.flag_names == 0:\n print(\"Show region's name on\")\n self.flag_names = 1\n elif self.flag_names == 1:\n print(\"Show region's name off\")\n self.flag_names = 0\n elif event.key == 'u':\n if self.flag_boundaries == 0:\n print('View boundaries on')\n self.tracker4 = IndexTracker_b(self.ax, self.atlas.Edges, self.atlas.pixdim, self.plane, self.tracker.ind)\n # print(self.atlas.Edges[250, self.tracker.ind, 250])\n self.fig.canvas.mpl_connect('scroll_event', self.tracker4.onscroll)\n # self.tracker4 = IndexTracker_c(self.ax, self.atlas.cv_plot, self.atlas.pixdim, self.plane, self.tracker.ind)\n # self.fig.canvas.mpl_connect('scroll_event', self.tracker4.onscroll)\n plt.show()\n self.flag_boundaries = 1\n elif self.flag_boundaries == 1:\n print('View boundaries off')\n self.fig.delaxes(self.ax)\n self.ax.clear()\n plt.draw()\n self.fig.add_axes(self.ax)\n plt.draw()\n self.tracker = IndexTracker(self.ax, self.atlas.atlas_data, self.atlas.pixdim, self.plane, self.tracker.ind)\n print(self.atlas.atlas_data[250, self.tracker.ind, 250])\n self.fig.canvas.mpl_connect('scroll_event', self.tracker.onscroll)\n plt.show()\n self.flag_boundaries = 0\n elif event.key == 'v':\n if self.flag_color == 0:\n print('View colors on')\n self.tracker3 = IndexTracker_c(self.ax, self.atlas.cv_plot, self.atlas.pixdim, self.plane, self.tracker.ind)\n self.fig.canvas.mpl_connect('scroll_event', self.tracker3.onscroll)\n plt.show()\n self.flag_color = 1\n elif self.flag_color == 1:\n print('View colors off')\n self.fig.delaxes(self.ax)\n self.ax.clear()\n plt.draw()\n self.fig.add_axes(self.ax)\n plt.draw()\n self.tracker = IndexTracker(self.ax, self.atlas.atlas_data, self.atlas.pixdim, self.plane, self.tracker.ind)\n self.fig.canvas.mpl_connect('scroll_event', self.tracker.onscroll)\n # self.fig.canvas.mpl_connect(\"motion_notify_event\", self.show_annotation)\n plt.show()\n self.flag_color = 0\n\n elif event.key == 'r':\n print('Register probe %d' % self.probe_counter)\n # Call click func\n self.fig.canvas.mpl_connect('button_press_event', self.onclick_probe)\n self.fig.canvas.mpl_connect('key_press_event', self.on_key2)\n elif event.key == 'e':\n print('\\n Save probe')\n # Create and save slice, clicked probes\n print(self.coords_probe)\n print(self.probe_counter)\n P = save_probe_insertion(self.coords_probe, self.plane, self.probe_counter) # Saving the object\n probe_name = 'Probe%d.pkl' % self.probe_counter\n file_name = os.path.join(self.probe_folder, probe_name)\n a_file = open(file_name, \"wb\")\n pickle.dump(P, a_file)\n a_file.close()\n\n print('Probe saved')\n \n \n elif event.key == 'w':\n # if the probe if uploaded from a file\n if self.flag == 1:\n # If I have several probes\n for j in range(len(self.probe_colors)):\n for k in range(len(self.Pp)):\n try:\n PC = getattr(self.Pp[k].Probe, self.probe_colors[j])\n p_x = []\n p_y = []\n probe_slice = []\n for i in range(len(PC)):\n p_x.append(PC[i][0])\n p_y.append(PC[i][1])\n probe_slice.append(PC[i][2])\n unique_slice = list(OrderedDict.fromkeys(probe_slice))\n # get the probe coordinates and the region's names\n probe_x = []\n probe_y = []\n probe_z = []\n if self.Pp[k].Plane == 'c':\n for i in range(len(PC)):\n probe_x.append(PC[i][0])\n probe_y.append(PC[i][2] * self.atlas.pixdim)\n probe_z.append(PC[i][1])\n elif self.Pp[k].Plane == 's':\n for i in range(len(PC)):\n probe_x.append(PC[i][2] * self.atlas.pixdim)\n probe_y.append(PC[i][0])\n 
probe_z.append(PC[i][1])\n elif self.Pp[k].Plane == 'h':\n for i in range(len(PC)):\n probe_x.append(PC[i][0])\n probe_y.append(PC[i][1])\n probe_z.append(PC[i][2] * self.atlas.pixdim)\n pts = np.array((probe_x, probe_y, probe_z)).T\n line_fit = Line.best_fit(pts)\n # display the probe in a separate window\n self.fig_probe, self.ax_probe = plt.subplots(1, 1)\n self.trackerp = IndexTracker_pi(self.ax_probe, self.atlas.atlas_data, self.atlas.pixdim, self.Pp[k].Plane, probe_slice[0], unique_slice, p_x, p_y, self.probe_colors, self.probe_selecter_u, line_fit)\n self.fig_probe.canvas.mpl_connect('scroll_event', self.trackerp.onscroll)\n self.ax_probe.text(0.05, 0.95, self.textstr, transform=self.ax_probe.transAxes, fontsize=6, verticalalignment='bottom', bbox=self.props)\n self.ax_probe.format_coord = self.format_coord\n self.ax_probe.set_title(\"Probe %d viewer\" % (self.probe_selecter_u + 1))\n plt.show()\n self.mngr_probe = plt.get_current_fig_manager()\n self.mngr_probe.window.setGeometry(650, 250, self.d2 * 2, self.d1 * 2)\n # get the probe coordinates and the region's names\n \n # if no inclination in z direction\n if line_fit.direction[2] == 0:\n # if there is NOT inclination in the x direction\n if line_fit.direction[0] == 0:\n # line equations, to derive the send point of the line (aka probe)\n z2 = pts[0,2]\n x2 = pts[-1,0]\n y2 = pts[0,1]\n deg_lat = math.degrees(math.atan(line_fit.direction[0]))\n deg_ant = math.degrees(math.atan(line_fit.direction[1]))\n # position_at_bregma_depth\n z0 = 440 * self.atlas.pixdim # correspond at the position of the bregma DV=0\n x0 = pts[0,0]\n y0 = pts[-1,1]\n ML_position = (x0 - 246 * self.atlas.pixdim)\n AP_position = (y0 - 653 * self.atlas.pixdim)\n X0 = np.array([x0, y0, z0])\n X2 = np.array([x2, y2, z2])\n # start point for visualization (the first clicked point)\n z1 = z2\n x1 = pts[0,0]\n y1 = pts[0,1]\n X1 = np.array([x1, y1, z1])\n # end point minus tip length\n d = (self.probe_tip_length)\n xt = x2\n yt = y2-d\n zt = z2\n Xt = np.array([xt, yt, zt])\n # get lenght of the probe\n dist = np.linalg.norm(X0 - X2)\n dist_check = np.linalg.norm(X0 - Xt)\n # check kthat the new end point is before the end of the tip and not after\n if dist_check > dist:\n xt = x2\n yt = y2+d\n zt = z2\n Xt = np.array([xt, yt, zt])\n regions = []\n point_along_line = []\n s = int(math.modf(X1[1] / self.atlas.pixdim)[1]) # starting point\n f = int(math.modf(Xt[1] / self.atlas.pixdim)[1]) # ending point\n for y in range(min(s,f), max(s,f)):\n x = pts[0,0] / self.atlas.pixdim\n z = pts[0,2] / self.atlas.pixdim\n if int(math.modf(x)[1]) > 512 or int(math.modf(y)[1]) > 1024 or int(math.modf(z)[1]) > 512:\n regions.append('Clear Label')\n else:\n regions.append(self.atlas.labels_name[np.argwhere(np.all(self.atlas.labels_index == self.atlas.segmentation_data[int(math.modf(x)[1]),int(math.modf(y)[1]),int(math.modf(z)[1])], axis=1))[0,0]])\n point_along_line.append([x,y,z])\n # if there is inclination in the x direction\n else:\n # line equations, to derive the send point of the line (aka probe)\n z2 = pts[0,2]\n x2 = pts[-1,0]\n y2 = line_fit.point[1] + ((x2 - line_fit.point[0]) / line_fit.direction[0])*line_fit.direction[1]\n deg_lat = math.degrees(math.atan(line_fit.direction[0]))\n deg_ant = math.degrees(math.atan(line_fit.direction[1]))\n # position_at_bregma_depth\n z0 = 440 * self.atlas.pixdim # correspond at the position of the bregma DV=0\n x0 = pts[0,0]\n y0 = line_fit.point[1]+((x0-line_fit.point[0])/line_fit.direction[0])*line_fit.direction[1]\n ML_position = 
(x0-246 * self.atlas.pixdim)\n AP_position = (y0-653 * self.atlas.pixdim)\n X0 = np.array([x0,y0,z0])\n X2 = np.array([x2,y2,z2])\n # start point for visualization (the first clicked point)\n z1 = z2\n x1 = pts[0,0]\n y1 = line_fit.point[1]+((x1-line_fit.point[0])/line_fit.direction[0])*line_fit.direction[1]\n X1 = np.array([x1,y1,z1])\n # end point minus tip length\n dq = (self.probe_tip_length)**2\n div = 1 + (line_fit.direction[1]/line_fit.direction[0])**2\n xt = x2 + math.sqrt(dq/div)\n yt = line_fit.point[1]+((xt-line_fit.point[0])/line_fit.direction[0])*line_fit.direction[1]\n zt = z2\n Xt = np.array([xt,yt,zt])\n # get lenght of the probe\n dist = np.linalg.norm(X0-X2)\n dist_check = np.linalg.norm(X0-Xt)\n # check kthat the new end point is before the end of the tip and not after\n if dist_check > dist:\n xt = x2 - math.sqrt(dq/div)\n yt = line_fit.point[1]+((xt-line_fit.point[0])/line_fit.direction[0])*line_fit.direction[1]\n zt = z2\n Xt = np.array([xt,yt,zt])\n regions = []\n point_along_line = []\n s = int(math.modf(X1[0] / self.atlas.pixdim)[1]) # starting point\n f = int(math.modf(Xt[0] / self.atlas.pixdim)[1]) # ending point\n for x in range(min(s,f), max(s,f)):\n y = line_fit.point[1]/self.atlas.pixdim+((x-line_fit.point[0]/self.atlas.pixdim)/line_fit.direction[0])*line_fit.direction[1]\n z = pts[0,2] / self.atlas.pixdim\n if int(math.modf(x)[1]) > 512 or int(math.modf(y)[1]) > 1024 or int(math.modf(z)[1]) > 512:\n regions.append('Clear Label')\n else:\n regions.append(self.atlas.labels_name[np.argwhere(np.all(self.atlas.labels_index == self.atlas.segmentation_data[int(math.modf(x)[1]),int(math.modf(y)[1]),int(math.modf(z)[1])], axis=1))[0,0]])\n point_along_line.append([x,y,z])\n else:\n # line equations, to derive the end point of the line (aka probe)\n # the last of the clicked points represent the end point of the line\n z2 = pts[-1,2]\n x2 = line_fit.point[0]+((z2-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[0]\n y2 = line_fit.point[1]+((z2-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[1]\n deg_lat = math.degrees(math.atan(line_fit.direction[0]))\n deg_ant = math.degrees(math.atan(line_fit.direction[1]))\n # position_at_bregma_depth\n z0 = 440 * self.atlas.pixdim # correspond at the position of the bregma DV=0\n x0 = line_fit.point[0]+((z0-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[0]\n y0 = line_fit.point[1]+((z0-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[1]\n ML_position = (x0 - 246 * self.atlas.pixdim)\n AP_position = (y0 - 653 * self.atlas.pixdim)\n X0 = np.array([x0,y0,z0])\n X2 = np.array([x2,y2,z2])\n # start point for visualization (the first clicked point)\n z1 = pts[0,2]\n x1 = line_fit.point[0]+((z1-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[0]\n y1 = line_fit.point[1]+((z1-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[1]\n X1 = np.array([x1,y1,z1])\n # end point minus tip length\n dq = (self.probe_tip_length)**2\n div = 1 + (line_fit.direction[0]/line_fit.direction[2])**2 + (line_fit.direction[1]/line_fit.direction[2])**2\n zt = z2 + math.sqrt(dq/div)\n xt = line_fit.point[0]+((zt-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[0]\n yt = line_fit.point[1]+((zt-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[1]\n Xt = np.array([xt,yt,zt])\n # get lenght of the probe\n dist = np.linalg.norm(X0-X2)\n dist_check = np.linalg.norm(X0-Xt)\n # check kthat the new end point is before the end of the tip and not after\n if dist_check > dist:\n 
zt = z2 - math.sqrt(dq/div)\n xt = line_fit.point[0]+((zt-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[0]\n yt = line_fit.point[1]+((zt-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[1]\n Xt = np.array([xt,yt,zt])\n regions = []\n point_along_line = []\n s = int(math.modf(X1[2] / self.atlas.pixdim)[1]) # starting point\n f = int(math.modf(Xt[2] / self.atlas.pixdim)[1]) # ending point\n for z in range(min(s,f),max(s,f)):\n x = line_fit.point[0]/self.atlas.pixdim+((z-line_fit.point[2]/self.atlas.pixdim)/line_fit.direction[2])*line_fit.direction[0]\n y = line_fit.point[1]/self.atlas.pixdim+((z-line_fit.point[2]/self.atlas.pixdim)/line_fit.direction[2])*line_fit.direction[1]\n regions.append(self.atlas.labels_name[np.argwhere(np.all(self.atlas.labels_index == self.atlas.segmentation_data[int(math.modf(x)[1]),int(math.modf(y)[1]),int(math.modf(z)[1])], axis=1))[0,0]])\n point_along_line.append([x,y,z])\n # avoid repetions and reverse the order\n regioni = list(OrderedDict.fromkeys(regions))[::-1]\n if 'Clear Label' in regioni:\n regioni.remove('Clear Label')\n num_el = []\n indici = []\n for re in regioni:\n # store the index o the region to print only the color of the regions of interest\n indici.append(self.atlas.labels_name.index(re))\n # in the case in dont exit and then enter again the region\n position = [i for i,x in enumerate(regions) if x == re]\n # if there is only one point in the region\n if len(position) == 1:\n regional_dist = self.atlas.pixdim\n else:\n # first point along the line in the region\n start = [element * self.atlas.pixdim for element in point_along_line[position[0]]]\n # last point along the line in the region\n end = [element * self.atlas.pixdim for element in point_along_line[position[-1]]]\n # length of the part of the probe in the region\n regional_dist = distance.euclidean(start, end)\n # Number of electrodes in the region\n num_el.append(round(regional_dist/self.vert_el_dist)*2)\n # print insertion coordinates\n print('\\n---Estimated probe insertion---')\n if ML_position > 0:\n testo = ' ---Estimated probe insertion--- \\nEntry position at DV = 0: AP = %.2f mm, ML = R%.2f mm \\nInsertion distance from the above position: %.2f mm \\n%.2f degrees in the anterior direction \\n%.2f degrees in the lateral direction ' %(AP_position, abs(ML_position), dist, deg_ant, deg_lat)\n print('Entry position at DV = 0: AP = %.2f mm, ML = R%.2f mm' % (AP_position, abs(ML_position)))\n else:\n testo = ' ---Estimated probe insertion--- \\nEntry position at DV = 0: AP = %.2f mm, ML = L%.2f mm \\nInsertion distance from the above position: %.2f mm \\n%.2f degrees in the anterior direction \\n%.2f degrees in the lateral direction ' %(AP_position, abs(ML_position), dist, deg_ant, deg_lat)\n print('Entry position at DV = 0: AP = %.2f mm, ML = L%.2f fmm'\n % (AP_position, abs(ML_position)))\n print('Insertion distance from the above position: %.2f mm' % dist)\n print('%.2f degrees in the anterior direction' % deg_ant)\n print('%.2f degrees in the lateral direction\\n' % deg_lat)\n # print regions and channels\n LL = [regioni, num_el]\n headers = [' Regions traversed', 'Channels']\n numpy_array = np.array(LL)\n transpose_ll = numpy_array.T\n transpose_list = transpose_ll.tolist()\n print(tabulate(transpose_list, headers, floatfmt=\".2f\"))\n if self.plane == 'c':\n regioni.insert(0,' ---Regions traversed---')\n if len(regioni)>16:\n self.ax_probe.text(0.01, 0.26, testo, transform=self.ax_probe.transAxes, fontsize=6.5 ,verticalalignment='top', color = 'w')\n 
B = regioni[:len(regioni)//2]\n C = regioni[len(regioni)//2:]\n self.ax_probe.text(0.41, 0.26, \"\\n\".join(B), transform=self.ax_probe.transAxes, fontsize=6.5 ,verticalalignment='top', color = 'w')\n self.ax_probe.text(0.76, 0.26, \"\\n\".join(C), transform=self.ax_probe.transAxes, fontsize=6.5 ,verticalalignment='top', color = 'w')\n else:\n self.ax_probe.text(0.01, 0.26, testo, transform=self.ax_probe.transAxes, fontsize=9 ,verticalalignment='top', color = 'w')\n self.ax_probe.text(0.51, 0.26, \"\\n\".join(regioni), transform=self.ax_probe.transAxes, fontsize=9 ,verticalalignment='top', color = 'w')\n elif self.plane == 's':\n self.ax_probe.text(0.15, 0.20, testo, transform=self.ax_probe.transAxes, fontsize=11 ,verticalalignment='top', color = 'w')\n regioni.insert(0,' ---Regions traversed---')\n # if there are too many regions to print\n if len(regioni) > 7:\n B = regioni[:len(regioni)//2]\n C = regioni[len(regioni)//2:]\n self.ax_probe.text(0.5, 0.25, \"\\n\".join(B), transform=self.ax_probe.transAxes, fontsize=9.5 ,verticalalignment='top', color = 'w')\n self.ax_probe.text(0.74, 0.25, \"\\n\".join(C), transform=self.ax_probe.transAxes, fontsize=9.5 ,verticalalignment='top', color = 'w')\n else:\n self.ax_probe.text(0.51, 0.25, \"\\n\".join(regioni), transform=self.ax_probe.transAxes, fontsize=11 ,verticalalignment='top', color = 'w')\n elif self.plane == 'h':\n \n regioni.insert(0,' ---Regions traversed---')\n # if there are too many regions to print\n if len(regioni) > 7:\n self.ax_probe.text(0.17, 0.22, testo, transform=self.ax_probe.transAxes, fontsize=8 ,verticalalignment='top', color = 'w')\n B = regioni[:len(regioni)//2]\n C = regioni[len(regioni)//2:]\n self.ax_probe.text(0.01, 0.15, \"\\n\".join(B), transform=self.ax_probe.transAxes, fontsize=6.5 ,verticalalignment='top', color = 'w')\n self.ax_probe.text(0.49, 0.15, \"\\n\".join(C), transform=self.ax_probe.transAxes, fontsize=6.4 ,verticalalignment='top', color = 'w')\n else:\n self.ax_probe.text(0.17, 0.22, testo, transform=self.ax_probe.transAxes, fontsize=9 ,verticalalignment='top', color = 'w')\n self.ax_probe.text(0.17, 0.13, \"\\n\".join(regioni), transform=self.ax_probe.transAxes, fontsize=9 ,verticalalignment='top', color = 'w')\n # here I only color the region of interest\n for i in range(len(self.atlas.labels_index)):\n if i in indici:\n coord = np.where(self.atlas.segmentation_data == self.atlas.labels_index[i][0])\n self.atlas.cv_plot_display[coord[0],coord[1],coord[2],:] = self.atlas.labels_color[i]\n # Plot\n self.fig_color, self.ax_color = plt.subplots(1, 1) # to plot the region interested with colors\n print(self.Pp[k].Plane)\n IndexTracker_pi_col(self.ax_color, self.atlas.cv_plot_display/255, self.atlas.Edges, self.atlas.pixdim, self.Pp[k].Plane, probe_slice[0], p_x, p_y, line_fit)\n plt.show()\n self.mngr_col = plt.get_current_fig_manager()\n self.mngr_col.window.setGeometry(650, 250, self.d2 * 2, self.d1 * 2)\n self.probe_selecter_u += 1\n except:\n pass\n else:\n try:\n print('\\nProbe %d view mode' % (self.probe_selecter + 1))\n L = getattr(self.coords_probe, self.probe_colors[self.probe_selecter])\n p_x = []\n p_y = []\n probe_slice = []\n for i in range(len(L)):\n p_x.append(L[i][0])\n p_y.append(L[i][1])\n probe_slice.append(L[i][2])\n unique_slice = list(OrderedDict.fromkeys(probe_slice))\n # get the probe coordinates and the region's names\n probe_x = []\n probe_y = []\n probe_z = []\n if self.plane == 'c':\n for i in range(len(L)):\n probe_x.append(L[i][0])\n probe_y.append(L[i][2] * 
self.atlas.pixdim)\n probe_z.append(L[i][1])\n elif self.plane == 's':\n for i in range(len(L)):\n probe_x.append(L[i][2] * self.atlas.pixdim)\n probe_y.append(L[i][0])\n probe_z.append(L[i][1])\n elif self.plane == 'h':\n for i in range(len(L)):\n probe_x.append(L[i][0])\n probe_y.append(L[i][1])\n probe_z.append(L[i][2] * self.atlas.pixdim)\n pts = np.array((probe_x, probe_y, probe_z)).T\n # fit the probe\n line_fit = Line.best_fit(pts)\n # display the probe in a separate window\n self.fig_probe, self.ax_probe = plt.subplots(1, 1)\n self.trackerp = IndexTracker_pi(self.ax_probe, self.atlas.atlas_data, self.atlas.pixdim, self.plane, self.tracker.ind, unique_slice, p_x, p_y, self.probe_colors, self.probe_selecter, line_fit)\n self.fig_probe.canvas.mpl_connect('scroll_event', self.trackerp.onscroll)\n self.ax_probe.text(0.05, 0.95, self.textstr, transform=self.ax_probe.transAxes, fontsize=6, verticalalignment='bottom', bbox=self.props)\n self.ax_probe.format_coord = self.format_coord\n self.ax_probe.set_title(\"Probe %d viewer\" % (self.probe_selecter + 1))\n plt.show()\n self.mngr_probe = plt.get_current_fig_manager()\n self.mngr_probe.window.setGeometry(650, 250, self.d2 * 2, self.d1 * 2)\n \n # if no inclination in z direction\n if line_fit.direction[2] == 0:\n # if there is NOT inclination in the x direction\n if line_fit.direction[0] == 0:\n # line equations, to derive the send point of the line (aka probe)\n z2 = pts[0,2]\n x2 = pts[-1,0]\n y2 = pts[0,1]\n deg_lat = math.degrees(math.atan(line_fit.direction[0]))\n deg_ant = math.degrees(math.atan(line_fit.direction[1]))\n # position_at_bregma_depth\n z0 = 440 * self.atlas.pixdim # correspond at the position of the bregma DV=0\n x0 = pts[0,0]\n y0 = pts[-1,1]\n ML_position = (x0 - 246 * self.atlas.pixdim)\n AP_position = (y0 - 653 * self.atlas.pixdim)\n X0 = np.array([x0,y0,z0])\n X2 = np.array([x2,y2,z2])\n # start point for visualization (the first clicked point)\n z1 = z2\n x1 = pts[0,0]\n y1 = pts[0,1]\n X1 = np.array([x1,y1,z1])\n # end point minus tip length\n d = (self.probe_tip_length)\n xt = x2\n yt = y2-d\n zt = z2\n Xt = np.array([xt,yt,zt])\n # get lenght of the probe\n dist = np.linalg.norm(X0-X2)\n dist_check = np.linalg.norm(X0-Xt)\n # check kthat the new end point is before the end of the tip and not after\n if dist_check > dist:\n xt = x2\n yt = y2+d\n zt = z2\n Xt = np.array([xt,yt,zt])\n regions = []\n point_along_line = []\n s = int(math.modf(X1[1] / self.atlas.pixdim)[1]) # starting point\n f = int(math.modf(Xt[1] / self.atlas.pixdim)[1]) # ending point\n for y in range(min(s,f), max(s,f)):\n x = pts[0,0] / self.atlas.pixdim\n z = pts[0,2] / self.atlas.pixdim\n if int(math.modf(x)[1]) > 512 or int(math.modf(y)[1]) > 1024 or int(math.modf(z)[1]) > 512:\n regions.append('Clear Label')\n else:\n regions.append(self.atlas.labels_name[np.argwhere(np.all(self.atlas.labels_index == self.atlas.segmentation_data[int(math.modf(x)[1]),int(math.modf(y)[1]),int(math.modf(z)[1])], axis=1))[0,0]])\n point_along_line.append([x,y,z])\n # if there is inclination in the x direction\n else:\n # line equations, to derive the send point of the line (aka probe)\n z2 = pts[0,2]\n x2 = pts[-1,0]\n y2 = line_fit.point[1]+((x2-line_fit.point[0])/line_fit.direction[0])*line_fit.direction[1]\n deg_lat = math.degrees(math.atan(line_fit.direction[0]))\n deg_ant = math.degrees(math.atan(line_fit.direction[1]))\n # position_at_bregma_depth\n z0 = 440 * self.atlas.pixdim # correspond at the position of the bregma DV=0\n x0 = pts[0,0]\n y0 = 
line_fit.point[1]+((x0-line_fit.point[0])/line_fit.direction[0])*line_fit.direction[1]\n ML_position = (x0-246 * self.atlas.pixdim)\n AP_position = (y0-653 * self.atlas.pixdim)\n X0 = np.array([x0,y0,z0])\n X2 = np.array([x2,y2,z2])\n # start point for visualization (the first clicked point)\n z1 = z2\n x1 = pts[0,0]\n y1 = line_fit.point[1]+((x1-line_fit.point[0])/line_fit.direction[0])*line_fit.direction[1]\n X1 = np.array([x1,y1,z1])\n # end point minus tip length\n dq = (self.probe_tip_length)**2\n div = 1 + (line_fit.direction[1]/line_fit.direction[0])**2\n xt = x2 + math.sqrt(dq/div)\n yt = line_fit.point[1]+((xt-line_fit.point[0])/line_fit.direction[0])*line_fit.direction[1]\n zt = z2\n Xt = np.array([xt,yt,zt])\n # get lenght of the probe\n dist = np.linalg.norm(X0-X2)\n dist_check = np.linalg.norm(X0-Xt)\n # check kthat the new end point is before the end of the tip and not after\n if dist_check > dist:\n xt = x2 - math.sqrt(dq/div)\n yt = line_fit.point[1]+((xt-line_fit.point[0])/line_fit.direction[0])*line_fit.direction[1]\n zt = z2\n Xt = np.array([xt,yt,zt])\n regions = []\n point_along_line = []\n s = int(math.modf(X1[0]/self.atlas.pixdim)[1]) # starting point\n f = int(math.modf(Xt[0]/self.atlas.pixdim)[1]) # ending point\n for x in range(min(s,f), max(s,f)):\n y = line_fit.point[1]/self.atlas.pixdim+((x-line_fit.point[0]/self.atlas.pixdim)/line_fit.direction[0])*line_fit.direction[1]\n z = pts[0,2] / self.atlas.pixdim\n if int(math.modf(x)[1]) > 512 or int(math.modf(y)[1]) > 1024 or int(math.modf(z)[1]) > 512:\n regions.append('Clear Label')\n else:\n regions.append(self.atlas.labels_name[np.argwhere(np.all(self.atlas.labels_index == self.atlas.segmentation_data[int(math.modf(x)[1]),int(math.modf(y)[1]),int(math.modf(z)[1])], axis=1))[0,0]])\n point_along_line.append([x,y,z])\n else:\n # line equations, to derive the point of the line (aka probe)\n # the last of the clicked points represent the end point of the line\n z2 = pts[-1,2]\n x2 = line_fit.point[0]+((z2-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[0]\n y2 = line_fit.point[1]+((z2-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[1]\n deg_lat = math.degrees(math.atan(line_fit.direction[0]))\n deg_ant = math.degrees(math.atan(line_fit.direction[1]))\n # position_at_bregma_depth\n z0 = 440 * self.atlas.pixdim # correspond at the position of the bregma DV=0\n x0 = line_fit.point[0]+((z0-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[0]\n y0 = line_fit.point[1]+((z0-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[1]\n ML_position = (x0 - 246 * self.atlas.pixdim)\n AP_position = (y0 - 653 * self.atlas.pixdim)\n X0 = np.array([x0,y0,z0])\n X2 = np.array([x2,y2,z2])\n # start point for visualization (the first clicked point)\n z1 = pts[0,2]\n x1 = line_fit.point[0]+((z1-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[0]\n y1 = line_fit.point[1]+((z1-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[1]\n X1 = np.array([x1,y1,z1])\n # end point minus tip length\n dq = (self.probe_tip_length)**2\n div = 1 + (line_fit.direction[0]/line_fit.direction[2])**2 + (line_fit.direction[1]/line_fit.direction[2])**2\n zt = z2 + math.sqrt(dq/div)\n xt = line_fit.point[0]+((zt-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[0]\n yt = line_fit.point[1]+((zt-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[1]\n Xt = np.array([xt,yt,zt])\n # get lenght of the probe\n dist = np.linalg.norm(X0-X2)\n dist_check = np.linalg.norm(X0-Xt)\n # 
check kthat the new end point is before the end of the tip and not after\n if dist_check > dist:\n zt = z2 - math.sqrt(dq/div)\n xt = line_fit.point[0]+((zt-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[0]\n yt = line_fit.point[1]+((zt-line_fit.point[2])/line_fit.direction[2])*line_fit.direction[1]\n Xt = np.array([xt,yt,zt])\n regions = []\n point_along_line = []\n s = int(math.modf(X1[2] / self.atlas.pixdim)[1]) # starting point\n f = int(math.modf(Xt[2] / self.atlas.pixdim)[1]) # ending point\n for z in range(min(s,f),max(s,f)):\n x = line_fit.point[0]/self.atlas.pixdim+((z-line_fit.point[2]/self.atlas.pixdim)/line_fit.direction[2])*line_fit.direction[0]\n y = line_fit.point[1]/self.atlas.pixdim+((z-line_fit.point[2]/self.atlas.pixdim)/line_fit.direction[2])*line_fit.direction[1]\n if int(math.modf(x)[1]) > 512 or int(math.modf(y)[1]) > 1024 or int(math.modf(z)[1]) > 512:\n regions.append('Clear Label')\n else:\n regions.append(self.atlas.labels_name[np.argwhere(np.all(self.atlas.labels_index == self.atlas.segmentation_data[int(math.modf(x)[1]),int(math.modf(y)[1]),int(math.modf(z)[1])], axis=1))[0,0]])\n point_along_line.append([x,y,z])\n # avoid repetions and reverse the order\n regioni = list(OrderedDict.fromkeys(regions))[::-1]\n if 'Clear Label' in regioni:\n regioni.remove('Clear Label')\n num_el = []\n indici = []\n for re in regioni:\n # store the index o the region to print only the color of the regions of interest\n indici.append(self.atlas.labels_name.index(re))\n # in the case in dont exit and then enter again the region\n position = [i for i,x in enumerate(regions) if x == re]\n # if there is only one point in the region\n if len(position) == 1:\n regional_dist = self.atlas.pixdim\n else:\n # first point along the line in the region\n start = [element * self.atlas.pixdim for element in point_along_line[position[0]]]\n # last point along the line in the region\n end = [element * self.atlas.pixdim for element in point_along_line[position[-1]]]\n # length of the part of the probe in the region\n regional_dist = distance.euclidean(start, end)\n # Number of electrodes in the region\n num_el.append(round(regional_dist/self.vert_el_dist)*2)\n # print insertion coordinates\n print('\\n---Estimated probe insertion---')\n if ML_position > 0:\n testo = ' ---Estimated probe insertion--- \\nEntry position at DV = 0: AP = %.2f mm, ML = R%.2f mm \\nInsertion distance from the above position: %.2f mm \\n%.2f degrees in the anterior direction \\n%.2f degrees in the lateral direction ' % (AP_position, abs(ML_position), dist, deg_ant, deg_lat)\n print('Entry position at DV = 0: AP = %.2f mm, ML = R%.2f mm' % (AP_position, abs(ML_position)))\n else:\n testo = ' ---Estimated probe insertion--- \\nEntry position at DV = 0: AP = %.2f mm, ML = L%.2f mm \\nInsertion distance from the above position: %.2f mm \\n%.2f degrees in the anterior direction \\n%.2f degrees in the lateral direction ' % (AP_position, abs(ML_position), dist, deg_ant, deg_lat)\n print('Entry position at DV = 0: AP = %.2f mm, ML = L%.2f fmm' % (AP_position, abs(ML_position)))\n print('Insertion distance from the above position: %.2f mm' % dist)\n print('%.2f degrees in the anterior direction' % deg_ant)\n print('%.2f degrees in the lateral direction\\n' % deg_lat)\n # print regions and number of channels\n LL = [regioni, num_el]\n headers = [' Regions traversed', 'Channels']\n numpy_array = np.array(LL)\n transpose_ll = numpy_array.T\n transpose_list = transpose_ll.tolist()\n print(tabulate(transpose_list, 
headers, floatfmt=\".2f\"))\n if self.plane == 'c':\n # list of regions\n regioni.insert(0,' ---Regions traversed---')\n if len(regioni) > 16:\n self.ax_probe.text(0.01, 0.26, testo, transform=self.ax_probe.transAxes, fontsize=6.5, verticalalignment='top', color='w')\n B = regioni[:len(regioni)//2]\n C = regioni[len(regioni)//2:]\n self.ax_probe.text(0.41, 0.26, \"\\n\".join(B), transform=self.ax_probe.transAxes, fontsize=6.5, verticalalignment='top', color='w')\n self.ax_probe.text(0.76, 0.26, \"\\n\".join(C), transform=self.ax_probe.transAxes, fontsize=6.5, verticalalignment='top', color='w')\n else:\n self.ax_probe.text(0.01, 0.26, testo, transform=self.ax_probe.transAxes, fontsize=9, verticalalignment='top', color='w')\n self.ax_probe.text(0.51, 0.26, \"\\n\".join(regioni), transform=self.ax_probe.transAxes, fontsize=9, verticalalignment='top', color='w')\n elif self.plane == 's':\n self.ax_probe.text(0.15, 0.20, testo, transform=self.ax_probe.transAxes, fontsize=11, verticalalignment='top', color='w')\n regioni.insert(0,' ---Regions traversed---')\n # if there are too many regions to print\n if len(regioni) > 7:\n B = regioni[:len(regioni)//2]\n C = regioni[len(regioni)//2:]\n self.ax_probe.text(0.5, 0.25, \"\\n\".join(B), transform=self.ax_probe.transAxes, fontsize=9.5, verticalalignment='top', color='w')\n self.ax_probe.text(0.74, 0.25, \"\\n\".join(C), transform=self.ax_probe.transAxes, fontsize=9.5, verticalalignment='top', color='w')\n else:\n self.ax_probe.text(0.51, 0.25, \"\\n\".join(regioni), transform=self.ax_probe.transAxes, fontsize=11, verticalalignment='top', color='w')\n elif self.plane == 'h':\n regioni.insert(0,' ---Regions traversed---')\n # if there are too many regions to print\n if len(regioni) > 7:\n self.ax_probe.text(0.17, 0.22, testo, transform=self.ax_probe.transAxes, fontsize=8, verticalalignment='top', color='w')\n B = regioni[:len(regioni) // 2]\n C = regioni[len(regioni) // 2:]\n self.ax_probe.text(0.01, 0.15, \"\\n\".join(B), transform=self.ax_probe.transAxes, fontsize=6.5, verticalalignment='top', color='w')\n self.ax_probe.text(0.49, 0.15, \"\\n\".join(C), transform=self.ax_probe.transAxes, fontsize=6.4, verticalalignment='top', color='w')\n else:\n self.ax_probe.text(0.17, 0.22, testo, transform=self.ax_probe.transAxes, fontsize=9, verticalalignment='top', color='w')\n self.ax_probe.text(0.17, 0.13, \"\\n\".join(regioni), transform=self.ax_probe.transAxes, fontsize=9, verticalalignment='top', color='w')\n \n # here I only color the region of interest\n for i in range(len(self.atlas.labels_index)):\n if i in indici:\n coord = np.where(self.atlas.segmentation_data == self.atlas.labels_index[i][0])\n self.atlas.cv_plot_display[coord[0],coord[1],coord[2],:] = self.atlas.labels_color[i]\n # Plot\n self.fig_color, self.ax_color = plt.subplots(1, 1) # to plot the region interested with colors\n IndexTracker_pi_col(self.ax_color, self.atlas.cv_plot_display / 255, self.atlas.Edges, self.atlas.pixdim, self.plane, self.tracker.ind, p_x, p_y, line_fit)\n plt.show()\n self.mngr_col = plt.get_current_fig_manager()\n self.mngr_col.window.setGeometry(650, 250, self.d2 * 2, self.d1 * 2)\n self.probe_selecter += 1\n except:\n print('No more probes to visualize')\n pass\n \n \n\n\n\n\n\n\n"
] | [
[
"numpy.polyfit",
"matplotlib.colors.to_rgb",
"matplotlib.pyplot.scatter"
],
[
"matplotlib.pyplot.subplots",
"numpy.linalg.norm",
"matplotlib.pyplot.draw",
"scipy.spatial.distance.euclidean",
"matplotlib.pyplot.get_current_fig_manager",
"numpy.array",
"numpy.where",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
google-research/pathdreamer | [
"dc607faf3a6d3011ddd2e4723d53122235774167"
] | [
"utils/utils_test.py"
] | [
"# Copyright 2021 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for pathdreamer.utils.utils.\"\"\"\n\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom pathdreamer.utils import utils\n\n\nclass UtilsTest(tf.test.TestCase, parameterized.TestCase):\n \"\"\"Tests on the image_models file.\"\"\"\n\n @parameterized.parameters((1, 5, 128), (2, 3, 256))\n def test_compute_sequence_iou(self, batch_size, seq_len, image_size):\n \"\"\"Tests that the sequence IOU returns valid values.\"\"\"\n num_classes = 41\n test_pred = tf.random.uniform((batch_size, seq_len, image_size, image_size),\n maxval=num_classes, dtype=tf.int32)\n test_pred_one_hot = tf.cast(tf.one_hot(test_pred, num_classes), tf.float32)\n test_gt = tf.random.uniform((batch_size, seq_len, image_size, image_size),\n maxval=num_classes, dtype=tf.int32)\n test_gt_one_hot = tf.cast(tf.one_hot(test_gt, num_classes), tf.float32)\n test_mask = tf.random.uniform(\n (batch_size, seq_len), maxval=1, dtype=tf.int32)\n test_mask = tf.cast(test_mask, tf.float32)\n seq_iou, miou = utils.compute_sequence_iou(\n test_pred_one_hot, test_gt_one_hot, test_mask)\n self.assertAllGreaterEqual(seq_iou, 0)\n self.assertEqual(seq_iou.shape, (batch_size, seq_len))\n self.assertGreaterEqual(miou, 0)\n\n def test_iou_zero_mask(self):\n \"\"\"Tests that the sequence IOU returns 0s with an empty mask.\"\"\"\n batch_size, seq_len, image_size, num_classes = 2, 5, 128, 41\n test_pred = tf.random.uniform((batch_size, seq_len, image_size, image_size),\n maxval=num_classes, dtype=tf.int32)\n test_pred_one_hot = tf.cast(tf.one_hot(test_pred, num_classes), tf.float32)\n test_mask = tf.zeros((batch_size, seq_len))\n seq_iou, miou = utils.compute_sequence_iou(\n test_pred_one_hot, test_pred_one_hot, test_mask)\n self.assertAllClose(seq_iou, test_mask) # Check that everything is 0.\n self.assertAlmostEqual(miou.numpy(), 0)\n\n @parameterized.parameters((1, 32, 16), (2, 16, 8))\n def test_compute_kld(self, batch_size, image_size, dims):\n \"\"\"Tests that the KLD function returns valid values.\"\"\"\n test_mu1 = tf.random.normal((batch_size, image_size, image_size, dims))\n test_logvar1 = tf.random.normal((batch_size, image_size, image_size, dims))\n test_mu2 = tf.random.normal((batch_size, image_size, image_size, dims))\n test_logvar2 = tf.random.normal((batch_size, image_size, image_size, dims))\n kld = utils.compute_kl(test_mu1, test_logvar1, test_mu2, test_logvar2)\n self.assertEqual(kld.shape, test_mu1.shape)\n self.assertAllGreaterEqual(kld, 0)\n\n # Test that KLD is equal for the same distribution.\n kld_same = utils.compute_kl(test_mu1, test_logvar1, test_mu1, test_logvar1)\n self.assertAllEqual(kld_same, tf.zeros_like(kld_same))\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.zeros",
"tensorflow.cast",
"tensorflow.random.uniform",
"tensorflow.test.main",
"tensorflow.zeros_like",
"tensorflow.one_hot",
"tensorflow.random.normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Shetty4L/malware_classification | [
"3a71f257011efd1eca96696d79202040857f909d"
] | [
"model.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nNUM_CLASSES = 6\n\nIMAGE_SIZE = 144\nIMAGE_PIXELS = 500\n\ndef _variable_summaries(var, name):\n\twith tf.variable_scope('summaries'):\n\t\tmean = tf.reduce_mean(var)\n\t\ttf.scalar_summary('mean/' + name, mean)\n\n\t\t# with tf.variable_scope('stddev'):\n\t\t# \tstddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n\n\t\t# tf.scalar_summary('stddev/' + name, mean)\n\t\t# tf.scalar_summary('max/' + name, tf.reduce_max(var))\n\t\t# tf.scalar_summary('min/' + name, tf.reduce_min(var))\n\t\ttf.histogram_summary(name, var)\n\ndef inference(images, hidden1_units, hidden2_units, hidden3_units, hidden4_units, keep_prob=0.5):\n\n\twith tf.variable_scope('hidden1'):\n\t\tweights = tf.get_variable('weights', shape=[IMAGE_PIXELS,hidden1_units],\n\t\t\tinitializer=tf.contrib.layers.xavier_initializer())\n\t\tbiases = tf.get_variable('biases', shape=[hidden1_units],\n\t\t\tinitializer=tf.constant_initializer(0.1))\n\t\thidden1 = tf.nn.relu(tf.matmul(images,weights) + biases)\n\t\t_variable_summaries(hidden1, 'hidden1')\n\n\twith tf.variable_scope('dropout1'):\n\t\thidden1_drop = tf.nn.dropout(hidden1, keep_prob)\n\n\twith tf.variable_scope('hidden2'):\n\t\tweights = tf.get_variable('weights', shape=[hidden1_units,hidden2_units],\n\t\t\tinitializer=tf.contrib.layers.xavier_initializer())\n\t\tbiases = tf.get_variable('biases', shape=[hidden2_units],\n\t\t\tinitializer=tf.constant_initializer(0.1))\n\t\thidden2 = tf.nn.relu(tf.matmul(hidden1_drop,weights) + biases)\n\t\t_variable_summaries(hidden2, 'hidden2')\n\n\twith tf.variable_scope('dropout2'):\n\t\thidden2_drop = tf.nn.dropout(hidden2, keep_prob)\n\n\twith tf.variable_scope('hidden3'):\n\t\tweights = tf.get_variable('weights', shape=[hidden2_units,hidden3_units],\n\t\t\tinitializer=tf.contrib.layers.xavier_initializer())\n\t\tbiases = tf.get_variable('biases', shape=[hidden3_units],\n\t\t\tinitializer=tf.constant_initializer(0.1))\n\t\thidden3 = tf.nn.relu(tf.matmul(hidden2_drop,weights) + biases)\n\t\t_variable_summaries(hidden3, 'hidden3')\n\n\twith tf.variable_scope('dropout3'):\n\t\thidden3_drop = tf.nn.dropout(hidden3, keep_prob)\n\n\twith tf.variable_scope('hidden4'):\n\t\tweights = tf.get_variable('weights', shape=[hidden3_units,hidden4_units],\n\t\t\tinitializer=tf.contrib.layers.xavier_initializer())\n\t\tbiases = tf.get_variable('biases', shape=[hidden4_units],\n\t\t\tinitializer=tf.constant_initializer(0.1))\n\t\thidden4 = tf.nn.relu(tf.matmul(hidden3_drop,weights) + biases)\n\t\t_variable_summaries(hidden4, 'hidden4')\n\n\twith tf.variable_scope('dropout4'):\n\t\thidden4_drop = tf.nn.dropout(hidden4, keep_prob)\n\n\twith tf.variable_scope('softmax'):\n\t\tweights = tf.get_variable('weights', shape=[hidden4_units,NUM_CLASSES],\n\t\t\tinitializer=tf.contrib.layers.xavier_initializer())\n\t\tbiases = tf.get_variable('biases', shape=[NUM_CLASSES],\n\t\t\tinitializer=tf.constant_initializer(0.1))\n\t\tlogits = tf.matmul(hidden4_drop, weights) + biases\n\t\t_variable_summaries(logits, 'softmax')\n\n\treturn logits\n\ndef loss(logits, labels):\n\n\tlabels = tf.to_int64(labels)\n\tcross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n\t\tlogits, labels, name='cross-entropy')\n\tloss = tf.reduce_mean(cross_entropy, name='cross-entropy-mean')\n\t_variable_summaries(loss, 'loss')\n\n\treturn loss\n\ndef training(loss, learning_rate):\n\n\toptimizer = 
tf.train.AdamOptimizer(learning_rate)\n\n\tglobal_step = tf.Variable(0, name='global_step', trainable=False)\n\n\ttrain_op = optimizer.minimize(loss, global_step=global_step)\n\n\treturn train_op\n\ndef evaluation(logits, labels):\n\n\twith tf.name_scope('accuracy'):\n\t\twith tf.name_scope('correct_prediction'):\n\t\t\tcorrect_prediction = tf.equal(tf.to_int64(labels), tf.argmax(logits, 1))\n\t\twith tf.name_scope('accuracy'):\n\t\t\taccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\t\ttf.scalar_summary('accuracy', accuracy)\n\n\treturn accuracy\n\ndef predict(logits):\n\ty = tf.nn.softmax(logits)\n\treturn tf.argmax(y, 1)"
] | [
[
"tensorflow.to_int64",
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.Variable",
"tensorflow.reduce_mean",
"tensorflow.scalar_summary",
"tensorflow.cast",
"tensorflow.constant_initializer",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.name_scope",
"tensorflow.train.AdamOptimizer",
"tensorflow.histogram_summary",
"tensorflow.variable_scope",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.argmax",
"tensorflow.nn.dropout"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
GindaChen/MLOS | [
"342d321bd4e15ca3bfb29be00d5a19ccd618b217"
] | [
"source/Mlos.Python/mlos/Optimizers/BayesianOptimizer.py"
] | [
"#\r\n# Copyright (c) Microsoft Corporation.\r\n# Licensed under the MIT License.\r\n#\r\nimport pandas as pd\r\n\r\nfrom mlos.Logger import create_logger\r\n\r\nfrom mlos.Optimizers.BayesianOptimizerConfigStore import bayesian_optimizer_config_store\r\nfrom mlos.Optimizers.BayesianOptimizerConvergenceState import BayesianOptimizerConvergenceState\r\nfrom mlos.Optimizers.OptimizerBase import OptimizerBase\r\nfrom mlos.Optimizers.OptimizationProblem import OptimizationProblem\r\nfrom mlos.Optimizers.ExperimentDesigner.ExperimentDesigner import ExperimentDesigner\r\nfrom mlos.Optimizers.RegressionModels.GoodnessOfFitMetrics import DataSetType\r\nfrom mlos.Optimizers.RegressionModels.HomogeneousRandomForestRegressionModel import HomogeneousRandomForestRegressionModel\r\nfrom mlos.Tracer import trace\r\nfrom mlos.Spaces import Point\r\n\r\n\r\n\r\nclass BayesianOptimizer(OptimizerBase):\r\n \"\"\"Generic Bayesian Optimizer based on regresson model\r\n\r\n Uses extra trees as surrogate model and confidence bound acquisition function by default.\r\n\r\n Attributes\r\n ----------\r\n logger : Logger\r\n optimization_problem : OptimizationProblem\r\n surrogate_model : HomogeneousRandomForestRegressionModel\r\n optimizer_config : Point\r\n experiment_designer: ExperimentDesigner\r\n\r\n \"\"\"\r\n def __init__(\r\n self,\r\n optimization_problem: OptimizationProblem,\r\n optimizer_config: Point,\r\n logger=None\r\n ):\r\n if logger is None:\r\n logger = create_logger(\"BayesianOptimizer\")\r\n self.logger = logger\r\n # Let's initialize the optimizer.\r\n #\r\n assert len(optimization_problem.objectives) == 1, \"For now this is a single-objective optimizer.\"\r\n OptimizerBase.__init__(self, optimization_problem)\r\n\r\n assert optimizer_config in bayesian_optimizer_config_store.parameter_space, \"Invalid config.\"\r\n self.optimizer_config = optimizer_config\r\n\r\n # Now let's put together the surrogate model.\r\n #\r\n assert self.optimizer_config.surrogate_model_implementation == HomogeneousRandomForestRegressionModel.__name__, \"TODO: implement more\"\r\n self.surrogate_model = HomogeneousRandomForestRegressionModel(\r\n model_config=self.optimizer_config.homogeneous_random_forest_regression_model_config,\r\n input_space=self.optimization_problem.parameter_space, # TODO: change to feature space\r\n output_space=self.optimization_problem.objective_space,\r\n logger=self.logger\r\n )\r\n\r\n # Now let's put together the experiment designer that will suggest parameters for each experiment.\r\n #\r\n assert self.optimizer_config.experiment_designer_implementation == ExperimentDesigner.__name__\r\n self.experiment_designer = ExperimentDesigner(\r\n designer_config=self.optimizer_config.experiment_designer_config,\r\n optimization_problem=self.optimization_problem,\r\n surrogate_model=self.surrogate_model,\r\n logger=self.logger\r\n )\r\n\r\n self._optimizer_convergence_state = BayesianOptimizerConvergenceState(\r\n surrogate_model_fit_state=self.surrogate_model.fit_state\r\n )\r\n\r\n # Also let's make sure we have the dataframes we need for the surrogate model.\r\n # TODO: this will need a better home - either a DataSet class or the surrogate model itself.\r\n self._feature_values_df = pd.DataFrame(columns=[dimension.name for dimension in self.optimization_problem.parameter_space.dimensions])\r\n self._target_values_df = pd.DataFrame(columns=[dimension.name for dimension in self.optimization_problem.objective_space.dimensions])\r\n\r\n @property\r\n def num_observed_samples(self):\r\n return 
len(self._feature_values_df.index)\r\n\r\n def get_optimizer_convergence_state(self):\r\n return self._optimizer_convergence_state\r\n\r\n def get_all_observations(self):\r\n return self._feature_values_df.copy(), self._target_values_df.copy()\r\n\r\n @trace()\r\n def suggest(self, random=False, context=None):\r\n # TODO: pass context to the suggest method\r\n random = random or self.num_observed_samples < self.optimizer_config.min_samples_required_for_guided_design_of_experiments\r\n suggested_config = self.experiment_designer.suggest(random=random)\r\n assert suggested_config in self.optimization_problem.parameter_space\r\n return suggested_config\r\n\r\n @trace()\r\n def register(self, feature_values_pandas_frame, target_values_pandas_frame):\r\n # TODO: add to a Dataset and move on. The surrogate model should have a reference to the same dataset\r\n # TODO: and should be able to refit automatically.\r\n\r\n self._feature_values_df = self._feature_values_df.append(feature_values_pandas_frame, ignore_index=True)\r\n self._target_values_df = self._target_values_df.append(target_values_pandas_frame, ignore_index=True)\r\n\r\n # TODO: ascertain that min_samples_required ... is more than min_samples to fit the model\r\n if self.num_observed_samples >= self.optimizer_config.min_samples_required_for_guided_design_of_experiments:\r\n self.surrogate_model.fit(\r\n feature_values_pandas_frame=self._feature_values_df,\r\n target_values_pandas_frame=self._target_values_df,\r\n iteration_number=len(self._feature_values_df.index)\r\n )\r\n self.surrogate_model.compute_goodness_of_fit(features_df=self._feature_values_df, target_df=self._target_values_df, data_set_type=DataSetType.TRAIN)\r\n self.cached_predictions_for_observations = None\r\n\r\n @trace()\r\n def predict(self, feature_values_pandas_frame, t=None):\r\n return self.surrogate_model.predict(feature_values_pandas_frame)\r\n\r\n def focus(self, subspace):\r\n ...\r\n\r\n def reset_focus(self):\r\n ...\r\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
hohe12ly/inundation-mapping | [
"d133addd4d730b5c468dcf1a8f7dfab35c55cbd7",
"dcf414f5655ecafbf8bb62cd219aef405e55f0a2"
] | [
"tools/gms_tools/compile_computational_stats.py",
"tools/generate_categorical_fim_mapping.py"
] | [
  "#!/usr/bin/env python3\n\nimport argparse\nimport pandas as pd\nimport numpy as np\nfrom glob import iglob\nfrom os.path import join\n\n# desired output for branches\n# dataframe columns: HUC, branch_id, exit status, time, ram\n\ndef compile_summary(gms_output_dir, output=None):\n\n    # Read the tab-delimited unit- and branch-level GMS summary logs\n    unit_summary = join(gms_output_dir, 'logs', 'summary_gms_unit.log')\n    branch_summary = join(gms_output_dir, 'logs', 'summary_gms_branch.log')\n\n    unit_summary = pd.read_csv(unit_summary, sep='\\t')\n    branch_summary = pd.read_csv(branch_summary, sep='\\t')\n\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser(description='Compile computational statistics for GMS runs')\n    parser.add_argument('-d','--gms-output-dir', help='Directory containing GMS outputs', required=True)\n    parser.add_argument('-o','--output', help='Path to write the compiled summary', required=True)\n\n    args = vars(parser.parse_args())\n\n    compile_summary(**args)\n\n",
"#!/usr/bin/env python3\n\nimport sys\nimport os\nfrom multiprocessing import Pool\nimport argparse\nimport traceback\nimport rasterio\nimport geopandas as gpd\nimport pandas as pd\nimport shutil\nfrom rasterio.features import shapes\nfrom shapely.geometry.polygon import Polygon\nfrom shapely.geometry.multipolygon import MultiPolygon\nfrom inundation import inundate\nsys.path.append('/foss_fim/src')\nfrom utils.shared_variables import PREP_PROJECTION,VIZ_PROJECTION\nfrom utils.shared_functions import getDriver\n\nINPUTS_DIR = r'/data/inputs'\nmagnitude_list = ['action', 'minor', 'moderate','major', 'record']\n\n# Define necessary variables for inundation()\nhucs, hucs_layerName = os.path.join(INPUTS_DIR, 'wbd', 'WBD_National.gpkg'), 'WBDHU8'\nmask_type, catchment_poly = 'huc', ''\n\n\ndef generate_categorical_fim(fim_run_dir, source_flow_dir, output_cat_fim_dir, number_of_jobs, depthtif, log_file):\n\n no_data_list = []\n procs_list = []\n\n source_flow_dir_list = os.listdir(source_flow_dir)\n output_flow_dir_list = os.listdir(fim_run_dir)\n\n # Log missing hucs\n missing_hucs = list(set(source_flow_dir_list) - set(output_flow_dir_list))\n missing_hucs = [huc for huc in missing_hucs if \".\" not in huc]\n if len(missing_hucs) > 0:\n f = open(log_file, 'a+')\n f.write(f\"Missing hucs from output directory: {', '.join(missing_hucs)}\\n\")\n f.close()\n\n # Loop through matching huc directories in the source_flow directory\n matching_hucs = list(set(output_flow_dir_list) & set(source_flow_dir_list))\n for huc in matching_hucs:\n\n if \".\" not in huc:\n\n # Get list of AHPS site directories\n ahps_site_dir = os.path.join(source_flow_dir, huc)\n ahps_site_dir_list = os.listdir(ahps_site_dir)\n\n # Map paths to HAND files needed for inundation()\n fim_run_huc_dir = os.path.join(fim_run_dir, huc)\n rem = os.path.join(fim_run_huc_dir, 'rem_zeroed_masked.tif')\n catchments = os.path.join(fim_run_huc_dir, 'gw_catchments_reaches_filtered_addedAttributes.tif')\n hydroTable = os.path.join(fim_run_huc_dir, 'hydroTable.csv')\n\n exit_flag = False # Default to False.\n\n # Check if necessary data exist; set exit_flag to True if they don't exist\n for f in [rem, catchments, hydroTable]:\n if not os.path.exists(f):\n no_data_list.append(f)\n exit_flag = True\n\n # Log missing data\n if exit_flag == True:\n f = open(log_file, 'a+')\n f.write(f\"Missing data for: {fim_run_huc_dir}\\n\")\n f.close()\n\n # Map path to huc directory inside out output_cat_fim_dir\n cat_fim_huc_dir = os.path.join(output_cat_fim_dir, huc)\n if not os.path.exists(cat_fim_huc_dir):\n os.mkdir(cat_fim_huc_dir)\n\n # Loop through AHPS sites\n for ahps_site in ahps_site_dir_list:\n # map parent directory for AHPS source data dir and list AHPS thresholds (act, min, mod, maj)\n ahps_site_parent = os.path.join(ahps_site_dir, ahps_site)\n thresholds_dir_list = os.listdir(ahps_site_parent)\n\n # Map parent directory for all inundation output filesoutput files.\n cat_fim_huc_ahps_dir = os.path.join(cat_fim_huc_dir, ahps_site)\n if not os.path.exists(cat_fim_huc_ahps_dir):\n os.mkdir(cat_fim_huc_ahps_dir)\n\n # Loop through thresholds/magnitudes and define inundation output files paths\n for magnitude in thresholds_dir_list:\n\n if \".\" not in magnitude:\n\n magnitude_flows_csv = os.path.join(ahps_site_parent, magnitude, 'ahps_' + ahps_site + '_huc_' + huc + '_flows_' + magnitude + '.csv')\n\n if os.path.exists(magnitude_flows_csv):\n\n output_extent_grid = os.path.join(cat_fim_huc_ahps_dir, ahps_site + '_' + magnitude + '_extent.tif')\n\n 
if depthtif:\n output_depth_grid = os.path.join(cat_fim_huc_ahps_dir, ahps_site + '_' + magnitude + '_depth.tif')\n else:\n output_depth_grid = None\n\n # Append necessary variables to list for multiprocessing.\n procs_list.append([rem, catchments, magnitude_flows_csv, huc, hydroTable, output_extent_grid, output_depth_grid, ahps_site, magnitude, log_file])\n\n # Initiate multiprocessing\n print(f\"Running inundation for {len(procs_list)} sites using {number_of_jobs} jobs\")\n with Pool(processes=number_of_jobs) as pool:\n pool.map(run_inundation, procs_list)\n\n\ndef run_inundation(args):\n\n rem = args[0]\n catchments = args[1]\n magnitude_flows_csv = args[2]\n huc = args[3]\n hydroTable = args[4]\n output_extent_grid = args[5]\n output_depth_grid = args[6]\n ahps_site = args[7]\n magnitude = args[8]\n log_file = args[9]\n\n try:\n inundate(rem,catchments,catchment_poly,hydroTable,magnitude_flows_csv,mask_type,hucs=hucs,hucs_layerName=hucs_layerName,\n subset_hucs=huc,num_workers=1,aggregate=False,inundation_raster=output_extent_grid,inundation_polygon=None,\n depths=output_depth_grid,out_raster_profile=None,out_vector_profile=None,quiet=True\n )\n\n except:\n # Log errors and their tracebacks\n f = open(log_file, 'a+')\n f.write(f\"{output_extent_grid} - inundation error: {traceback.format_exc()}\\n\")\n f.close()\n\n #Inundation.py appends the huc code to the supplied output_extent_grid.\n #Modify output_extent_grid to match inundation.py saved filename.\n #Search for this file, if it didn't create, send message to log file.\n base_file_path,extension = os.path.splitext(output_extent_grid)\n saved_extent_grid_filename = \"{}_{}{}\".format(base_file_path,huc,extension)\n if not os.path.exists(saved_extent_grid_filename):\n with open(log_file, 'a+') as f:\n f.write('FAILURE_huc_{}:{}:{} map failed to create\\n'.format(huc,ahps_site,magnitude))\n\n\ndef post_process_cat_fim_for_viz(number_of_jobs, output_cat_fim_dir, nws_lid_attributes_filename, log_file):\n\n # Create workspace\n gpkg_dir = os.path.join(output_cat_fim_dir, 'gpkg')\n if not os.path.exists(gpkg_dir):\n os.mkdir(gpkg_dir)\n\n # Find the FIM version\n fim_version = os.path.basename(output_cat_fim_dir)\n merged_layer = os.path.join(output_cat_fim_dir, 'catfim_library.shp')\n\n if not os.path.exists(merged_layer): # prevents appending to existing output\n\n huc_ahps_dir_list = os.listdir(output_cat_fim_dir)\n skip_list=['errors','logs','gpkg',merged_layer]\n\n for magnitude in magnitude_list:\n\n procs_list = []\n\n # Loop through all categories\n for huc in huc_ahps_dir_list:\n\n if huc not in skip_list:\n\n huc_dir = os.path.join(output_cat_fim_dir, huc)\n ahps_dir_list = os.listdir(huc_dir)\n\n # Loop through ahps sites\n for ahps_lid in ahps_dir_list:\n ahps_lid_dir = os.path.join(huc_dir, ahps_lid)\n\n extent_grid = os.path.join(ahps_lid_dir, ahps_lid + '_' + magnitude + '_extent_' + huc + '.tif')\n\n if os.path.exists(extent_grid):\n procs_list.append([ahps_lid, extent_grid, gpkg_dir, fim_version, huc, magnitude, nws_lid_attributes_filename])\n\n else:\n try:\n f = open(log_file, 'a+')\n f.write(f\"Missing layers: {extent_gpkg}\\n\")\n f.close()\n except:\n pass\n\n # Multiprocess with instructions\n with Pool(processes=number_of_jobs) as pool:\n pool.map(reformat_inundation_maps, procs_list)\n\n # Merge all layers\n print(f\"Merging {len(os.listdir(gpkg_dir))} layers...\")\n\n for layer in os.listdir(gpkg_dir):\n\n diss_extent_filename = os.path.join(gpkg_dir, layer)\n\n # Open diss_extent\n diss_extent = 
gpd.read_file(diss_extent_filename)\n diss_extent['viz'] = 'yes'\n\n # Write/append aggregate diss_extent\n if os.path.isfile(merged_layer):\n diss_extent.to_file(merged_layer,driver=getDriver(merged_layer),index=False, mode='a')\n else:\n diss_extent.to_file(merged_layer,driver=getDriver(merged_layer),index=False)\n\n del diss_extent\n\n shutil.rmtree(gpkg_dir)\n\n else:\n print(f\"{merged_layer} already exists.\")\n\n\ndef reformat_inundation_maps(args):\n\n try:\n lid = args[0]\n grid_path = args[1]\n gpkg_dir = args[2]\n fim_version = args[3]\n huc = args[4]\n magnitude = args[5]\n nws_lid_attributes_filename = args[6]\n\n # Convert raster to to shapes\n with rasterio.open(grid_path) as src:\n image = src.read(1)\n mask = image > 0\n\n # Aggregate shapes\n results = ({'properties': {'extent': 1}, 'geometry': s} for i, (s, v) in enumerate(shapes(image, mask=mask,transform=src.transform)))\n\n # Convert list of shapes to polygon\n extent_poly = gpd.GeoDataFrame.from_features(list(results), crs=PREP_PROJECTION)\n\n # Dissolve polygons\n extent_poly_diss = extent_poly.dissolve(by='extent')\n\n # Update attributes\n extent_poly_diss = extent_poly_diss.reset_index(drop=True)\n extent_poly_diss['ahps_lid'] = lid\n extent_poly_diss['magnitude'] = magnitude\n extent_poly_diss['version'] = fim_version\n extent_poly_diss['huc'] = huc\n\n # Project to Web Mercator\n extent_poly_diss = extent_poly_diss.to_crs(VIZ_PROJECTION)\n\n # Join attributes\n nws_lid_attributes_table = pd.read_csv(nws_lid_attributes_filename, dtype={'huc':str})\n nws_lid_attributes_table = nws_lid_attributes_table.loc[(nws_lid_attributes_table.magnitude==magnitude) & (nws_lid_attributes_table.nws_lid==lid)]\n\n\n extent_poly_diss = extent_poly_diss.merge(nws_lid_attributes_table, left_on=['ahps_lid','magnitude','huc'], right_on=['nws_lid','magnitude','huc'])\n\n extent_poly_diss = extent_poly_diss.drop(columns='nws_lid')\n\n # Save dissolved multipolygon\n handle = os.path.split(grid_path)[1].replace('.tif', '')\n\n diss_extent_filename = os.path.join(gpkg_dir, handle + \"_dissolved.gpkg\")\n\n extent_poly_diss[\"geometry\"] = [MultiPolygon([feature]) if type(feature) == Polygon else feature for feature in extent_poly_diss[\"geometry\"]]\n\n if not extent_poly_diss.empty:\n\n extent_poly_diss.to_file(diss_extent_filename,driver=getDriver(diss_extent_filename),index=False)\n\n except Exception as e:\n # Log and clean out the gdb so it's not merged in later\n try:\n f = open(log_file, 'a+')\n f.write(str(diss_extent_filename) + \" - dissolve error: \" + str(e))\n f.close()\n except:\n pass\n\n\nif __name__ == '__main__':\n\n # Parse arguments\n parser = argparse.ArgumentParser(description='Categorical inundation mapping for FOSS FIM.')\n parser.add_argument('-r','--fim-run-dir',help='Name of directory containing outputs of fim_run.sh',required=True)\n parser.add_argument('-s', '--source-flow-dir',help='Path to directory containing flow CSVs to use to generate categorical FIM.',required=True, default=\"\")\n parser.add_argument('-o', '--output-cat-fim-dir',help='Path to directory where categorical FIM outputs will be written.',required=True, default=\"\")\n parser.add_argument('-j','--number-of-jobs',help='Number of processes to use. 
Default is 1.',required=False, default=\"1\",type=int)\n parser.add_argument('-depthtif','--write-depth-tiff',help='Using this option will write depth TIFFs.',required=False, action='store_true')\n\n args = vars(parser.parse_args())\n\n fim_run_dir = args['fim_run_dir']\n source_flow_dir = args['source_flow_dir']\n output_cat_fim_dir = args['output_cat_fim_dir']\n number_of_jobs = int(args['number_of_jobs'])\n depthtif = args['write_depth_tiff']\n\n # Create output directory\n if not os.path.exists(output_cat_fim_dir):\n os.mkdir(output_cat_fim_dir)\n\n # Create log directory\n log_dir = os.path.join(output_cat_fim_dir, 'logs')\n if not os.path.exists(log_dir):\n os.mkdir(log_dir)\n\n # Create error log path\n log_file = os.path.join(log_dir, 'errors.log')\n\n # Map path to points with attributes\n nws_lid_attributes_filename = os.path.join(source_flow_dir, 'nws_lid_attributes.csv')\n\n print(\"Generating Categorical FIM\")\n generate_categorical_fim(fim_run_dir, source_flow_dir, output_cat_fim_dir, number_of_jobs, depthtif,log_file)\n\n print(\"Aggregating Categorical FIM\")\n post_process_cat_fim_for_viz(number_of_jobs, output_cat_fim_dir,nws_lid_attributes_filename,log_file)\n"
] | [
[
"pandas.read_csv"
],
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
drewrisinger/pyGSTi | [
"dd4ad669931c7f75e026456470cf33ac5b682d0d",
"dd4ad669931c7f75e026456470cf33ac5b682d0d",
"dd4ad669931c7f75e026456470cf33ac5b682d0d",
"dd4ad669931c7f75e026456470cf33ac5b682d0d",
"dd4ad669931c7f75e026456470cf33ac5b682d0d"
] | [
"test/unit/util.py",
"pygsti/report/workspace.py",
"test/test_packages/mpi/testmpiMain.py",
"pygsti/report/plothelpers.py",
"test/unit/objects/test_spamvec.py"
] | [
"\"\"\"Utilities shared by unit tests\"\"\"\nimport sys\nimport numpy as np\nimport numbers\nimport functools\nimport types\nfrom contextlib import contextmanager\nimport os\nimport warnings\nfrom pathlib import Path\nfrom unittest import mock\nfrom tempfile import TemporaryDirectory\nimport unittest\n\n\ndef needs_cvxpy(fn):\n \"\"\"Shortcut decorator for skipping tests that require CVXPY\"\"\"\n return unittest.skipIf('SKIP_CVXPY' in os.environ, \"skipping cvxpy tests\")(fn)\n\n\ndef needs_deap(fn):\n \"\"\"Shortcut decorator for skipping tests that require deap\"\"\"\n return unittest.skipIf('SKIP_DEAP' in os.environ, \"skipping deap tests\")(fn)\n\n\ndef needs_matplotlib(fn):\n \"\"\"Shortcut decorator for skipping tests that require matplotlib\"\"\"\n return unittest.skipIf('SKIP_MATPLOTLIB' in os.environ, \"skipping matplotlib tests\")(fn)\n\n\ndef with_temp_path(fn):\n \"\"\"Decorator version of ``BaseCase.temp_path``\"\"\"\n @functools.wraps(fn)\n def inner(self, *args, **kwargs):\n with self.temp_path() as tmp_path:\n return fn(self, tmp_path, *args, **kwargs)\n return inner\n\n\nclass BaseCase(unittest.TestCase):\n def assertArraysAlmostEqual(self, a, b, **kwargs):\n \"\"\"Assert that two arrays are equal to within a certain precision.\n\n Internally, this just wraps a call to\n ``unittest.assertAlmostEqual`` with the operand difference\n norm and zero.\n\n Parameters\n ----------\n a, b: matrices or vectors\n The two operands to compare\n **kwargs:\n Additional arguments to pass to ``unittest.assertAlmostEqual``\n \"\"\"\n self.assertAlmostEqual(np.linalg.norm(a - b), 0, **kwargs)\n\n def assertArraysEqual(self, a, b, **kwargs):\n \"\"\"Assert that two arrays are exactly equal.\n\n Internally, this just wraps a call to ``numpy.array_equal``\n in an assertion.\n\n Parameters\n ----------\n a, b: matrices or vectors\n The two operands to compare\n **kwargs:\n Additional arguments to pass to ``unittest.assertTrue``\n \"\"\"\n self.assertTrue(np.array_equal(a, b), **kwargs)\n\n @contextmanager\n def assertNoWarns(self, category=Warning):\n \"\"\"Asserts that nothing in the enclosed context generates a warning\n\n Parameters\n ----------\n category: ``Warning``, optional\n This assertion will fail only if a warning descended from\n this type is generated in the context. Since all warnings\n are derived from ``Warning``, by default this will fail on\n any warning.\n \"\"\"\n\n with warnings.catch_warnings(record=True) as warns:\n yield # yield to context\n\n for w in warns:\n if issubclass(w.category, category):\n self.fail(\"{} was triggered\".format(category.__name__))\n\n @contextmanager\n def temp_path(self, filename=None):\n \"\"\"Provides a context with the path of a temporary file.\n\n This is distinct from the contexts provided by tempfile in\n that this method yields the path of the temporary file, so the\n underlying file may be opened or closed inside the context as\n the caller pleases.\n\n Under the hood, this actually creates the file in a temporary\n directory. This directory will be cleaned up when the context\n closes, including the returned file and any other siblings.\n\n Parameters\n ----------\n filename: str, optional\n Optionally, the name of the file. 
By default, one will be\n randomly generated.\n\n Yields\n ------\n str\n The path of the temporary file.\n\n See Also\n --------\n ``test.unit.util.with_temp_file`` : decorator version\n \"\"\"\n\n filename = filename or \"temp_file\" # yeah looks random to me\n with TemporaryDirectory() as tmpdir:\n tmp_path = Path(tmpdir) / filename\n # Yield to context with temporary path\n yield str(tmp_path)\n # TemporaryDirectory will be cleaned up on close\n\n def debug(self, debugger=None):\n \"\"\"Helper factory for debugger breakpoints.\n\n This sets up certain useful debugging environment things, then returns a function to embed a debugger.\n\n To use this method, call the returned function, like this:\n\n self.debug()()\n\n This method is used in a weird way so that the debugger starts in the caller's stack frame, since you probably\n don't care about debugging this method itself.\n\n By default, if the `bpython` package is installed, this will use `bpdb`, the bpython debugger. bpython is an\n enhanced python interpreter that offers a number of advantages over IPython. If bpython is not installed, this\n will try the IPython embedded debugger, and if that's not installed either, we default to the built-in\n debugger. Alternatively, if the `debugger` argument is given, we'll use that as the debugger.\n\n Parameters\n ----------\n debugger : str, optional\n The debugger to use; one of ('bpdb', 'ipython', 'pdb'). By default, tries bpdb, falls back on ipython, then\n finally falls back on pdb if neither of the previous are available.\n\n Returns\n -------\n function\n Entry point to the debugger. In most cases you'll want to call this immediately, like this:\n\n self.debug()()\n \"\"\"\n\n np.set_printoptions(precision=4, # usually better for debugging\n linewidth=120, # this isn't the 40s, grandpa, we have 1080p now\n suppress=True) # fixed-point notation gets hard to read\n\n def debug_bpython():\n import bpdb\n return bpdb.set_trace\n\n def debug_ipython():\n import IPython\n return IPython.embed\n\n def debug_pdb():\n import pdb\n return pdb.set_trace\n\n if debugger is not None:\n return {\n 'bpython': debug_bpython,\n 'bpdb': debug_bpython,\n 'ipython': debug_ipython,\n 'pdb': debug_pdb,\n 'default': debug_pdb\n }[debugger.lower()]()\n else:\n # Try bpython, fall back to ipython, then to pdb\n try:\n debug = debug_bpython()\n except ModuleNotFoundError:\n try:\n debug = debug_ipython()\n except ModuleNotFoundError:\n debug = debug_pdb()\n return debug\n\n\nclass Namespace(object):\n \"\"\"Namespace for shared test fixtures.\n\n This is included as an alternative to ``types.SimpleNamespace``,\n which may be absent from earlier python versions.\n\n This implementation is included for convenience and does not\n implicitly protect members from modification. When using a\n ``Namespace`` for module- or package-level fixtures, take care\n that any mutable members are used safely.\n\n Parameters\n ----------\n **kwargs\n Initial members of the namespace. 
Members may also be assigned\n after initialization, either directly or annotated via the\n ``Namespace.property`` and ``Namespace.memo`` decorators.\n \"\"\"\n\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n self.__ns_props__ = {}\n\n def __getattr__(self, name):\n try:\n return object.__getattribute__(self, name)\n except AttributeError as err:\n if name in self.__ns_props__:\n return self.__ns_props__[name](self)\n else:\n raise err\n\n def property(self, fn):\n \"\"\"Dynamic namespace property\"\"\"\n self.__ns_props__[fn.__name__] = fn\n\n def memo(self, fn):\n \"\"\"Memoized namespace property\n\n Memoized properties may be used to efficiently compose\n namespace members from other memoized members, which could\n otherwise be prohibitively expensive to repeatedly generate.\n\n Memoization should only be used when you want to reuse\n previously computed values. Accordingly, it doesn't make sense\n to memoize functions with side-effects, or impure functions\n like time().\n \"\"\"\n fn.__memo__ = None\n @functools.wraps(fn)\n def inner(self):\n if fn.__memo__ is None:\n fn.__memo__ = fn(self)\n return fn.__memo__\n self.property(inner)\n",
"\"\"\" Defines the Workspace class and supporting functionality.\"\"\"\n#***************************************************************************************************\n# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).\n# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights\n# in this software.\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.\n#***************************************************************************************************\n\nimport itertools as _itertools\nimport collections as _collections\nimport os as _os\nimport shutil as _shutil\nimport numpy as _np\n#import uuid as _uuid\nimport random as _random\nimport inspect as _inspect\nimport pickle as _pickle\n\nimport subprocess as _subprocess\n\nfrom .. import objects as _objs\nfrom ..tools import compattools as _compat\nfrom ..objects.smartcache import CustomDigestError as _CustomDigestError\n\nfrom . import plotly_plot_ex as _plotly_ex\nfrom . import merge_helpers as _merge\n\nfrom pprint import pprint as _pprint\n\n_PYGSTI_WORKSPACE_INITIALIZED = False\nDEFAULT_PLOTLY_TEMPLATE = 'none'\n\n\ndef in_ipython_notebook():\n \"\"\"Returns true if called from within an IPython/jupyter notebook\"\"\"\n try:\n # 'ZMQInteractiveShell' in a notebook, 'TerminalInteractiveShell' in IPython REPL, and fails elsewhere.\n shell = get_ipython().__class__.__name__\n return shell == 'ZMQInteractiveShell'\n except NameError:\n return False\n\n\ndef display_ipynb(content):\n \"\"\"Render HTML content to an IPython notebook cell display\"\"\"\n from IPython.core.display import display, HTML\n display(HTML(content))\n\n\ndef enable_plotly_pickling():\n \"\"\"\n Hacks the plotly python library so that figures may be pickled and\n un-pickled. 
This hack should be used only temporarily - so all pickling\n and un-pickling should be done between calls to\n :func:`enable_plotly_pickling` and :func:`disable_plotly_pickling`.\n \"\"\"\n import plotly\n\n if int(plotly.__version__.split(\".\")[0]) >= 3: # plotly version 3 or higher\n BLT = plotly.basedatatypes.BaseLayoutType\n\n def fix_getattr(self, prop):\n if '_subplotid_props' not in self.__dict__:\n self._subplotid_props = set()\n return self.__saved_getattr__(prop)\n if hasattr(BLT, '__getattr__'):\n BLT.__saved_getattr__ = BLT.__getattr__\n del BLT.__getattr__\n BLT.__getattr__ = fix_getattr\n\n else:\n def setitem(self, key, value, _raise=True):\n \"\"\"Sets an item of a dict using the standard dict's __setitem__\n to restore normal dict behavior\"\"\"\n return dict.__setitem__(self, key, value)\n\n plotlyDictClass = plotly.graph_objs.Figure.__bases__[0]\n if hasattr(plotlyDictClass, '__setitem__'):\n plotlyDictClass.__saved_setitem__ = plotlyDictClass.__setitem__\n if hasattr(plotlyDictClass, '__getattr__'):\n plotlyDictClass.__saved_getattr__ = plotlyDictClass.__getattr__\n del plotlyDictClass.__getattr__\n if hasattr(plotlyDictClass, '__setattr__'):\n plotlyDictClass.__saved_setattr__ = plotlyDictClass.__setattr__\n del plotlyDictClass.__setattr__\n plotlyDictClass.__setitem__ = setitem\n\n\ndef disable_plotly_pickling():\n \"\"\" Reverses the effect of :func:`enable_plotly_pickling` \"\"\"\n import plotly\n\n if int(plotly.__version__.split(\".\")[0]) >= 3: # plotly version 3 or higher\n BLT = plotly.basedatatypes.BaseLayoutType\n if hasattr(BLT, '__saved_getattr__'):\n BLT.__getattr__ = BLT.__saved_getattr__\n del BLT.__saved_getattr__\n\n else:\n plotlyDictClass = plotly.graph_objs.Figure.__bases__[0]\n if hasattr(plotlyDictClass, '__saved_setitem__'):\n plotlyDictClass.__setitem__ = plotlyDictClass.__saved_setitem__\n del plotlyDictClass.__saved_setitem__\n if hasattr(plotlyDictClass, '__saved_getattr__'):\n plotlyDictClass.__getattr__ = plotlyDictClass.__saved_getattr__\n del plotlyDictClass.__saved_getattr__\n if hasattr(plotlyDictClass, '__saved_setattr__'):\n plotlyDictClass.__setattr__ = plotlyDictClass.__saved_setattr__\n del plotlyDictClass.__saved_setattr__\n\n\ndef ws_custom_digest(md5, v):\n \"\"\" A \"digest\" function for hashing several special types\"\"\"\n if isinstance(v, NotApplicable):\n md5.update(\"NOTAPPLICABLE\".encode('utf-8'))\n elif isinstance(v, SwitchValue):\n md5.update(v.base.tostring()) # don't recurse to parent switchboard\n else:\n raise _CustomDigestError()\n\n\ndef randomID():\n \"\"\" Returns a random DOM ID \"\"\"\n return str(int(1000000 * _random.random()))\n #return str(_uuid.uuid4().hex) #alternative\n\n\nclass Workspace(object):\n \"\"\"\n Central to data analysis, Workspace objects facilitate the building\n of reports and dashboards. 
In particular, they serve as a:\n\n - factory for tables, plots, and other types of output\n - cache manager to optimize the construction of such output\n - serialization manager for saving and loading analysis variables\n\n Workspace objects are typically used either 1) within an ipython\n notebook to interactively build a report/dashboard, or 2) within\n a script to build a hardcoded (\"fixed\") report/dashboard.\n \"\"\"\n\n def __init__(self, cachefile=None):\n \"\"\"\n Initialize a Workspace object.\n\n Parameters\n ----------\n cachefile : str, optional\n filename with cached workspace results\n \"\"\"\n self._register_components(False)\n self.smartCache = _objs.SmartCache()\n if cachefile is not None:\n self.load_cache(cachefile)\n self.smartCache.add_digest(ws_custom_digest)\n\n def save_cache(self, cachefile, showUnpickled=False):\n \"\"\"\n Save this Workspace's cache to a file.\n\n Parameters\n ----------\n cachefile : str\n The filename to save the cache to.\n\n showUnpickled : bool, optional\n Whether to print quantities (keys) of cache that could not be\n saved because they were not pickle-able.\n\n Returns\n -------\n None\n \"\"\"\n with open(cachefile, 'wb') as outfile:\n enable_plotly_pickling()\n _pickle.dump(self.smartCache, outfile)\n disable_plotly_pickling()\n if showUnpickled:\n print('Unpickled keys:')\n _pprint(self.smartCache.unpickleable)\n\n def load_cache(self, cachefile):\n \"\"\"\n Load this Workspace's cache from `cachefile`.\n\n Parameters\n ----------\n cachefile : str\n The filename to load the cache from.\n\n Returns\n -------\n None\n \"\"\"\n with open(cachefile, 'rb') as infile:\n enable_plotly_pickling()\n oldCache = _pickle.load(infile).cache\n disable_plotly_pickling()\n for v in oldCache.values():\n if isinstance(v, WorkspaceOutput): # hasattr(v,'ws') == True for plotly dicts (why?)\n print('Updated {} object to set ws to self'.format(type(v)))\n v.ws = self\n self.smartCache.cache.update(oldCache)\n\n def __getstate__(self):\n return {'smartCache': self.smartCache}\n\n def __setstate__(self, state_dict):\n self._register_components(False)\n self.smartCache = state_dict['smartCache']\n\n def _makefactory(self, cls, autodisplay): # , printer=_objs.VerbosityPrinter(1)):\n # XXX this indirection is so wild -- can we please rewrite directly?\n #Manipulate argument list of cls.__init__\n argspec = _inspect.getargspec(cls.__init__)\n argnames = argspec[0]\n assert(argnames[0] == 'self' and argnames[1] == 'ws'), \\\n \"__init__ must begin with (self, ws, ...)\"\n\n factoryfn_argnames = argnames[2:] # strip off self & ws args\n newargspec = (factoryfn_argnames,) + argspec[1:]\n\n #Define a new factory function with appropriate signature\n signature = _inspect.formatargspec(\n formatvalue=lambda val: \"\", *newargspec)\n signature = signature[1:-1] # strip off parenthesis from ends of \"(signature)\"\n\n if autodisplay:\n factory_func_def = (\n 'def factoryfn(%(signature)s):\\n'\n ' ret = cls(self, %(signature)s); ret.display(); return ret' %\n {'signature': signature})\n else:\n factory_func_def = (\n 'def factoryfn(%(signature)s):\\n'\n ' return cls(self, %(signature)s)' %\n {'signature': signature})\n\n #print(\"FACTORY FN DEF = \\n\",new_func)\n exec_globals = {'cls': cls, 'self': self}\n exec(factory_func_def, exec_globals)\n factoryfn = exec_globals['factoryfn']\n\n #Copy cls.__init__ info over to factory function\n factoryfn.__name__ = cls.__init__.__name__\n factoryfn.__doc__ = cls.__init__.__doc__\n factoryfn.__module__ = 
cls.__init__.__module__\n factoryfn.__dict__ = cls.__init__.__dict__\n factoryfn.__defaults__ = cls.__init__.__defaults__\n\n return factoryfn\n\n def _register_components(self, autodisplay):\n # \"register\" components\n from . import workspacetables as _wt\n from . import workspaceplots as _wp\n from . import workspacetexts as _wtxt\n\n def makefactory(cls): return self._makefactory(cls, autodisplay)\n\n self.Switchboard = makefactory(Switchboard)\n self.NotApplicable = makefactory(NotApplicable)\n\n #Tables\n # Circuits\n self.CircuitTable = makefactory(_wt.CircuitTable)\n\n # Spam & Gates\n self.SpamTable = makefactory(_wt.SpamTable)\n self.SpamParametersTable = makefactory(_wt.SpamParametersTable)\n self.GatesTable = makefactory(_wt.GatesTable)\n self.ChoiTable = makefactory(_wt.ChoiTable)\n\n # Spam & Gates vs. a target\n self.SpamVsTargetTable = makefactory(_wt.SpamVsTargetTable)\n self.ModelVsTargetTable = makefactory(_wt.ModelVsTargetTable)\n self.GatesVsTargetTable = makefactory(_wt.GatesVsTargetTable)\n self.GatesSingleMetricTable = makefactory(_wt.GatesSingleMetricTable)\n self.GateEigenvalueTable = makefactory(_wt.GateEigenvalueTable)\n self.ErrgenTable = makefactory(_wt.ErrgenTable)\n self.GaugeRobustErrgenTable = makefactory(_wt.GaugeRobustErrgenTable)\n self.GaugeRobustModelTable = makefactory(_wt.GaugeRobustModelTable)\n self.GaugeRobustMetricTable = makefactory(_wt.GaugeRobustMetricTable)\n self.NQubitErrgenTable = makefactory(_wt.NQubitErrgenTable)\n self.StandardErrgenTable = makefactory(_wt.StandardErrgenTable)\n\n # Specific to 1Q gates\n self.GateDecompTable = makefactory(_wt.GateDecompTable)\n self.old_GateDecompTable = makefactory(_wt.old_GateDecompTable)\n self.old_RotationAxisVsTargetTable = makefactory(_wt.old_RotationAxisVsTargetTable)\n self.old_RotationAxisTable = makefactory(_wt.old_RotationAxisTable)\n\n # goodness of fit\n self.FitComparisonTable = makefactory(_wt.FitComparisonTable)\n self.WildcardBudgetTable = makefactory(_wt.WildcardBudgetTable)\n\n #Specifically designed for reports\n self.BlankTable = makefactory(_wt.BlankTable)\n self.DataSetOverviewTable = makefactory(_wt.DataSetOverviewTable)\n self.GaugeOptParamsTable = makefactory(_wt.GaugeOptParamsTable)\n self.MetadataTable = makefactory(_wt.MetadataTable)\n self.SoftwareEnvTable = makefactory(_wt.SoftwareEnvTable)\n self.ProfilerTable = makefactory(_wt.ProfilerTable)\n self.ExampleTable = makefactory(_wt.ExampleTable)\n\n #Plots\n self.ColorBoxPlot = makefactory(_wp.ColorBoxPlot)\n self.BoxKeyPlot = makefactory(_wp.BoxKeyPlot)\n self.MatrixPlot = makefactory(_wp.MatrixPlot)\n self.GateMatrixPlot = makefactory(_wp.GateMatrixPlot)\n self.PolarEigenvaluePlot = makefactory(_wp.PolarEigenvaluePlot)\n self.ProjectionsBoxPlot = makefactory(_wp.ProjectionsBoxPlot)\n self.ChoiEigenvalueBarPlot = makefactory(_wp.ChoiEigenvalueBarPlot)\n self.GramMatrixBarPlot = makefactory(_wp.GramMatrixBarPlot)\n self.FitComparisonBarPlot = makefactory(_wp.FitComparisonBarPlot)\n self.FitComparisonBoxPlot = makefactory(_wp.FitComparisonBoxPlot)\n self.DatasetComparisonHistogramPlot = makefactory(_wp.DatasetComparisonHistogramPlot)\n self.DatasetComparisonSummaryPlot = makefactory(_wp.DatasetComparisonSummaryPlot)\n self.RandomizedBenchmarkingPlot = makefactory(_wp.RandomizedBenchmarkingPlot)\n\n #Text blocks\n self.StdoutText = makefactory(_wtxt.StdoutText)\n\n #Extras\n from ..extras import idletomography as _idt\n self.IdleTomographyIntrinsicErrorsTable = makefactory(_idt.IdleTomographyIntrinsicErrorsTable)\n 
self.IdleTomographyObservedRatePlot = makefactory(_idt.IdleTomographyObservedRatePlot)\n self.IdleTomographyObservedRatesTable = makefactory(_idt.IdleTomographyObservedRatesTable)\n self.IdleTomographyObservedRatesForIntrinsicRateTable = makefactory(\n _idt.IdleTomographyObservedRatesForIntrinsicRateTable)\n\n from ..extras.drift import driftreport as _driftrpt\n self.DriftSummaryTable = makefactory(_driftrpt.DriftSummaryTable)\n self.DriftDetailsTable = makefactory(_driftrpt.DriftDetailsTable)\n self.PowerSpectraPlot = makefactory(_driftrpt.PowerSpectraPlot)\n self.ProbTrajectoriesPlot = makefactory(_driftrpt.ProbTrajectoriesPlot)\n self.GermFiducialProbTrajectoriesPlot = makefactory(_driftrpt.GermFiducialProbTrajectoriesPlot)\n self.GermFiducialPowerSpectraPlot = makefactory(_driftrpt.GermFiducialPowerSpectraPlot)\n\n def init_notebook_mode(self, connected=False, autodisplay=False):\n \"\"\"\n Initialize this Workspace for use in an iPython notebook environment.\n\n This function should be called prior to using the Workspace when\n working within an iPython notebook.\n\n Parameters\n ----------\n connected : bool (optional)\n Whether to assume you are connected to the internet. If you are,\n then setting this to `True` allows initialization to rely on web-\n hosted resources which will reduce the overall size of your\n notebook.\n\n autodisplay : bool (optional)\n Whether to automatically display workspace objects after they are\n created.\n\n Returns\n -------\n None\n \"\"\"\n if not in_ipython_notebook():\n raise ValueError('Only run `init_notebook_mode` from inside an IPython Notebook.')\n\n global _PYGSTI_WORKSPACE_INITIALIZED\n\n script = \"\"\n\n if not connected:\n _merge.rsync_offline_dir(_os.getcwd())\n\n #If offline, add JS to head that will load local requireJS and/or\n # jQuery if needed (jupyter-exported html files always use CDN\n # for these).\n if not connected:\n script += \"<script src='offline/jupyterlibload.js'></script>\\n\"\n\n #Load our custom plotly extension functions\n script += _merge.insert_resource(connected, None, \"pygsti_plotly_ex.js\")\n script += \"<script type='text/javascript'> window.plotman = new PlotManager(); </script>\"\n\n # Load style sheets for displaying tables\n script += _merge.insert_resource(connected, None, \"pygsti_dataviz.css\")\n\n #jQueryUI_CSS = \"https://code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css\"\n jQueryUI_CSS = \"https://code.jquery.com/ui/1.12.1/themes/smoothness/jquery-ui.css\"\n script += _merge.insert_resource(connected, jQueryUI_CSS, \"smoothness-jquery-ui.css\")\n\n #To fix the UI tooltips within Jupyter (b/c they use an old/custom JQueryUI css file)\n if connected:\n imgURL = \"https://code.jquery.com/ui/1.12.1/themes/smoothness/images/ui-icons_222222_256x240.png\"\n else:\n imgURL = \"offline/images/ui-icons_222222_256x240.png\"\n script += \"<style>\\n\" + \\\n \".tooltipbuttons .ui-button { padding: 0; border: 0; background: transparent; }\\n\" + \\\n \".tooltipbuttons .ui-icon { background-image: url(\\\"%s\\\"); margin-top: 0; }\\n\" % imgURL + \\\n \"</style>\"\n\n # Note: within a jupyter notebook, the requireJS base path appears\n # to be \"/static\", so just setting the path to \"offline/myfile\"\n # would attempt to load \"/static/offline/myfile.js\" which points\n # somewhere like .../site-packages/notebook/static/offline/myfile\".\n # So:\n # - when in a notebook, the path needs to be \"../files\" followed\n # by the notebook's path, which we can obtain via the notebook JS\n # object.\n # - when 
*not* in a notebook, the requireJS base defaults to the\n # current page, so just using \"offline/myfile\" works fine then.\n\n #Tell require.js where jQueryUI and Katex are\n if connected:\n reqscript = (\n \"<script>\"\n \"console.log('ONLINE - using CDN paths');\"\n \"requirejs.config({{ \"\n \" paths: {{ 'jquery-UI': ['{jqueryui}'],\"\n \" 'katex': ['{katex}'],\"\n \" 'autorender': ['{auto}'] }},\"\n \"}});\"\n \"require(['jquery', 'jquery-UI'],function($,ui) {{\"\n \" window.jQueryUI=ui; console.log('jquery-UI loaded'); }});\"\n \"require(['katex', 'autorender'],function(katex,auto) {{\"\n \" window.katex=katex; console.log('Katex loaded'); }});\"\n \"</script>\"\n ).format(jqueryui=\"https://code.jquery.com/ui/1.12.1/jquery-ui.min\",\n katex=\"https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.7.1/katex.min.js\",\n auto=\"https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.7.1/contrib/auto-render.min.js\")\n\n else:\n reqscript = (\n \"<script>\"\n \"var pth;\"\n \"if(typeof IPython !== 'undefined') {{\"\n \" var nb = IPython.notebook;\"\n \" var relpath = nb.notebook_path.substr(0, nb.notebook_path.lastIndexOf('/') + 1 );\"\n \" jqueryui_pth = '../files' + nb.base_url + relpath + '{jqueryui}';\"\n \" katex_pth = '../files' + nb.base_url + relpath + '{katex}';\"\n \" auto_pth = '../files' + nb.base_url + relpath + '{auto}';\"\n \" console.log('IPYTHON DETECTED - using path ' + jqueryui_pth);\"\n \"}}\"\n \"else {{\"\n \" jqueryui_pth = '{jqueryui}';\"\n \" katex_pth = '{katex}';\"\n \" auto_pth = '{auto}';\"\n \" console.log('NO IPYTHON DETECTED - using path ' + jqueryui_pth);\"\n \"}}\"\n \"requirejs.config({{ \"\n \" paths: {{ 'jquery-UI': [jqueryui_pth], 'katex': [katex_pth], 'autorender': [auto_pth] }},\"\n \"}});\"\n \"require(['jquery', 'jquery-UI'],function($,ui) {{\"\n \" window.jQueryUI=ui; console.log('jquery & jquery-UI loaded'); }});\"\n \"require(['katex', 'autorender'],function(katex,auto) {{\"\n \" window.katex=katex; console.log('Katex loaded'); }});\"\n \"</script>\"\n ).format(jqueryui=\"offline/jquery-ui.min\",\n katex=\"offline/katex.min\",\n auto=\"offline/auto-render.min\")\n script += reqscript\n\n #Initialize Katex as a fallback if MathJax is unavailable (offline), OR,\n # if MathJax is loaded, wait for plotly to load before rendering SVG text\n # so math shows up properly in plots (maybe we could just use a require\n # statement for this instead of polling?)\n script += _merge.insert_resource(\n connected, \"https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.7.1/katex.min.css\",\n \"katex.css\")\n\n script += (\n \"\\n<script>\"\n \"require(['jquery','katex','autorender'],function($,katex,renderMathInElement) {\\n\"\n \" var mathjaxTimer = setInterval( function() {\\n\"\n \" if(document.readyState === 'complete' || document.readyState === 'loaded') {\\n\"\n \" clearInterval(mathjaxTimer);\\n\"\n \" if(typeof MathJax === 'undefined') {\\n\"\n \" console.log('MATHJAX not found - attempting to typeset with Katex');\\n\"\n \" renderMathInElement(document.body, { delimiters: [\\n\"\n \" {left: '$$', right: '$$', display: true},\\n\"\n \" {left: '$', right: '$', display: false},\\n\"\n \" ] } );\\n\"\n \" }\\n\"\n \" else { //Mathjax is alive - wait for plotly\\n\"\n \" var waitForPlotly = setInterval( function() {\\n\"\n \" if( typeof(window.Plotly) !== 'undefined' ){\\n\"\n \" MathJax.Hub.Config({ SVG: { font: 'STIX-Web' }, displayAlign: 'center' });\\n\"\n \" MathJax.Hub.Queue(['setRenderer', MathJax.Hub, 'SVG']);\\n\"\n \" clearInterval(waitForPlotly);\\n\"\n \" }\\n\"\n \" 
}, 500 );\\n\"\n \" }\\n\"\n \" } //end readyState check \\n\"\n \" }, 500); //end setInterval \\n\"\n \"});\\n\"\n '</script>\\n')\n\n # Initialize Plotly libraries\n script += _plotly_ex.init_notebook_mode_ex(connected)\n\n # Perform check to see what has been loaded\n script += (\n \"<div id='notebook_load_status' style='font-style:italic;color:blue'>Loading...</div>\\n\"\n \"<script type='text/javascript'>\\n\"\n \" require(['jquery','jquery-UI','plotly','katex', 'autorender'],\\n\"\n \" function($,ui,Plotly,katex,auto) {\\n\"\n \" $(document).ready( function() {\\n\"\n \" var txt = '';\\n\"\n \" if( typeof($('#notebook_load_status').resizable) === 'undefined') {\\n\"\n \" txt += '<span class=\\\"failmsg\\\">JQueryUI not loaded correctly</span><br>';\\n\"\n \" }\\n\"\n \" if( typeof(Plotly.newPlot) === 'undefined') {\\n\"\n \" txt += '<span class=\\\"failmsg\\\">Plotly not loaded correctly</span><br>';\\n\"\n \" }\\n\"\n \" if(txt.length == 0) {\\n\"\n \" txt += '<span class=\\\"successmsg\\\">Notebook Initialization Complete</span>';\\n\"\n \" if( typeof MathJax !== 'undefined') {\\n\"\n \" txt += '<span class=\\\"successmsg2\\\"> (+MathJax)</span>';\\n\"\n \" } else {\\n\"\n \" txt += '<span class=\\\"successmsg2\\\"> (+KaTeX)</span>';\\n\"\n \" }\\n\"\n \" }\\n\"\n \" $('#notebook_load_status').html(txt);\\n\"\n \" }); });\\n\"\n \"</script>\\n\")\n\n display_ipynb(script) # single call to display keeps things simple\n\n _PYGSTI_WORKSPACE_INITIALIZED = True\n\n self._register_components(autodisplay)\n return\n\n def switchedCompute(self, fn, *args):\n \"\"\"\n Computes a function, given its name and arguments, when some or all of\n those arguments are SwitchedValue objects.\n\n Caching is employed to avoid duplicating function evaluations which have\n the same arguments. Note that the function itself doesn't need to deal\n with SwitchValue objects, as this routine resolves such objects into a\n series of function evaluations using the underlying value(s) within the\n SwitchValue. This routine is primarily used internally for the\n computation of tables and plots.\n\n if any of the arguments is an instance of `NotApplicable` then `fn`\n is *not* evaluated and the instance is returned as the evaluation\n result. If multiple arguments are `NotApplicable` instances, the\n first is used as the result.\n\n Parameters\n ----------\n fn : function\n The function to evaluate\n\n args : list\n The function's arguments\n\n Returns\n -------\n fn_values : list\n The function return values for all relevant sets of arguments.\n Denote the length of this list by N.\n switchboards : list\n A list of all the relevant Switchboards used during the function\n evaluation. Denote the length of this list by M.\n switchboard_switch_indices : list\n A list of length M whose elements are tuples containing the 0-based\n indices of the relevant switches (i.e. those used by any of the\n arguments) for each switchboard (element of `switchboards`).\n switchpos_map : dict\n A dictionary whose keys are switch positions, and whose values are\n integers between 0 and N which index the element of `fn_values`\n corresponding to the given switch positions. Each\n \"switch positions\" key is a tuple of length M whose elements (one\n per switchboard) are tuples of 0-based switch-position indices\n indicating the position of the relevant switches of that\n switchboard. 
Thus,\n `len(key[i]) = len(switchboard_switch_indices[i])`, where `key`\n is a dictionary key.\n \"\"\"\n # Computation functions get stripped-down *value* args\n # (strip SwitchedValue stuff away)\n\n switchboards = []\n switchBdInfo = []\n nonSwitchedArgs = []\n\n switchpos_map = {}\n storedKeys = {}\n resultValues = []\n\n for i, arg in enumerate(args):\n if isinstance(arg, SwitchValue):\n isb = None\n for j, sb in enumerate(switchboards):\n if arg.parent is sb:\n isb = j; break\n else:\n isb = len(switchboards)\n switchboards.append(arg.parent)\n switchBdInfo.append({\n 'argument indices': [], # indices of arguments that are children of this switchboard\n 'value names': [], # names of switchboard value correspond to each argument index\n 'switch indices': set() # indices of the switches that are actually used by the args\n })\n assert(isb is not None)\n info = switchBdInfo[isb]\n\n info['argument indices'].append(i)\n info['value names'].append(arg.name)\n info['switch indices'].update(arg.dependencies)\n else:\n nonSwitchedArgs.append((i, arg))\n\n #print(\"DB: %d arguments\" % len(args))\n #print(\"DB: found %d switchboards\" % len(switchboards))\n #print(\"DB: switchBdInfo = \", switchBdInfo)\n #print(\"DB: nonSwitchedArgs = \", nonSwitchedArgs)\n\n #Create a list of lists, each list holding all of the relevant switch positions for each board\n switch_positions = []\n for isb, sb in enumerate(switchboards):\n info = switchBdInfo[isb]\n info['switch indices'] = list(info['switch indices']) # set -> list so definite order\n\n switch_ranges = [list(range(len(sb.positionLabels[i])))\n for i in info['switch indices']]\n sb_switch_positions = list(_itertools.product(*switch_ranges))\n # a list of all possible positions for the switches being\n # used for the *single* board sb\n switch_positions.append(sb_switch_positions)\n\n #loop over all relevant switch configurations (across multiple switchboards)\n for pos in _itertools.product(*switch_positions):\n # pos[i] gives the switch configuration for the i-th switchboard\n\n #fill in the arguments for our function call\n argVals = [None] * len(args)\n\n #first, iterate over all the switchboards\n for sw_pos, sb, info in zip(pos, switchboards, switchBdInfo):\n # sw_pos is a tuple of the info['switch indices'] switch positions for sb\n sis = info['switch indices']\n for nm, j in zip(info[\"value names\"], info[\"argument indices\"]):\n value_swpos = [sw_pos[sis.index(k)] for k in sb[nm].dependencies]\n # potentially a subset of sw_pos, contains only the switch positions\n # relevant to the particular SwitchedValue named nm (also the j-th argument)\n argVals[j] = sb[nm][tuple(value_swpos)] # tuple needed for proper indexing\n\n #next, fill in the non-switched arguments\n for j, arg in nonSwitchedArgs:\n argVals[j] = arg\n\n for v in argVals:\n if isinstance(v, NotApplicable):\n key = \"NA\"; result = v; break\n else:\n key, result = self.smartCache.cached_compute(fn, argVals)\n\n if key not in storedKeys or key == 'INEFFECTIVE':\n switchpos_map[pos] = len(resultValues)\n storedKeys[key] = len(resultValues)\n resultValues.append(result)\n else:\n switchpos_map[pos] = storedKeys[key]\n\n switchboard_switch_indices = [info['switch indices'] for info in switchBdInfo]\n return resultValues, switchboards, switchboard_switch_indices, switchpos_map\n\n\nclass Switchboard(_collections.OrderedDict):\n \"\"\"\n Encapsulates a render-able set of user-interactive switches\n for controlling visualized output.\n\n Outwardly a Switchboard looks like a 
dictionary of SwitchValue\n objects, which in turn look like appropriately sized numpy arrays\n of values for some quantity. Different switch positions select\n different values and thereby what data is visualized in various\n outputs (e.g. tables and plots).\n \"\"\"\n\n def __init__(self, ws, switches, positions, types, initial_pos=None,\n descriptions=None, show=\"all\", ID=None, use_loadable_items=False):\n \"\"\"\n Create a new Switchboard.\n\n Parameters\n ----------\n switches : list\n A list of switch names. The length of this list is\n the number of switches.\n\n positions : list\n Elements are lists of position labels, one per switch.\n Length must be equal to `len(switches)`.\n\n types : list of {'buttons','dropdown','slider','numslider'}\n A list of switch-type strings specifying what type of switch\n each switch is.\n\n - 'buttons': a set of toggle buttons\n - 'dropdown': a drop-down (or combo-box)\n - 'slider': a horizontal slider (equally spaced items)\n - 'numslider': a horizontal slider (spaced by numeric value)\n\n initial_pos : list or None (optional)\n A list of 0-based integer indices giving the initial\n position of each of the `len(switches)` switches. None\n defaults to the first (0-th) position for each switch.\n\n descriptions : list (optional)\n A string description for each of the `len(switches)` switches.\n\n show : list (optional)\n A list of boolean (one for each of the `len(switches)` switches)\n indicating whether or not that switch should be rendered. The\n special values \"all\" and \"none\" show all or none of the switches,\n respectively.\n\n ID : str (optional)\n A DOM identifier to use when rendering this Switchboard to HTML.\n Usually leaving this value as `None` is best, in which case a\n random identifier is created.\n \"\"\"\n # Note: intentionally leave off ws argument desc. in docstring\n assert(len(switches) == len(positions))\n\n self.ID = randomID() if (ID is None) else ID\n self.ws = ws # Workspace\n self.switchNames = switches\n self.switchTypes = types\n self.switchIDs = [\"switchbd%s_%d\" % (self.ID, i)\n for i in range(len(switches))]\n self.positionLabels = positions\n self.use_loadable_items = use_loadable_items\n if initial_pos is None:\n self.initialPositions = _np.array([0] * len(switches), _np.int64)\n else:\n assert(len(initial_pos) == len(switches))\n self.initialPositions = _np.array(initial_pos, _np.int64)\n\n self.descriptions = descriptions\n\n if show == \"all\":\n self.show = [True] * len(switches)\n elif show == \"none\":\n self.show = [False] * len(switches)\n else:\n assert(len(show) == len(switches))\n self.show = show\n\n self.widget = None\n super(Switchboard, self).__init__([])\n\n def add(self, varname, dependencies):\n \"\"\"\n Adds a new switched-value to this Switchboard.\n\n Parameters\n ----------\n varname : str\n A name for the variable being added. This name will be used to\n access the new variable (as either a dictionary key or as an\n object member).\n\n dependencies : list or tuple\n The (0-based) switch-indices specifying which switch positions\n the new variable is dependent on. 
For example, if the Switchboard\n has two switches, one for \"amplitude\" and one for \"frequency\", and\n this value is only dependent on frequency, then `dependencies`\n should be set to `(1,)` or `[1]`.\n\n Returns\n -------\n None\n \"\"\"\n super(Switchboard, self).__setitem__(varname, SwitchValue(self, varname, dependencies))\n\n def add_unswitched(self, varname, value):\n \"\"\"\n Adds a new non-switched-value to this Switchboard.\n\n This can be convenient for attaching related non-switched data to\n a :class:`Switchboard`.\n\n Parameters\n ----------\n varname : str\n A name for the variable being added. This name will be used to\n access the new variable (as either a dictionary key or as an\n object member).\n\n value : object\n The un-switched value to associate with `varname`.\n\n Returns\n -------\n None\n \"\"\"\n super(Switchboard, self).__setitem__(varname, value)\n\n def __setitem__(self, key, val):\n raise KeyError(\"Use add(...) to add an item to this swichboard\")\n\n def render(self, typ=\"html\"):\n \"\"\"\n Render this Switchboard into the requested format.\n\n The returned string(s) are intended to be used to embedded a\n visualization of this object within a larger document.\n\n Parameters\n ----------\n typ : {\"html\"}\n The format to render as. Currently only HTML is supported.\n\n Returns\n -------\n dict\n A dictionary of strings whose keys indicate which portion of\n the embeddable output the value is. Keys will vary for different\n `typ`. For `\"html\"`, keys are `\"html\"` and `\"js\"` for HTML and\n and Javascript code, respectively.\n \"\"\"\n return self._render_base(typ, None, self.show)\n\n def _render_base(self, typ, view_suffix, show):\n \"\"\"\n Break off this implementation so SwitchboardViews can use.\n \"\"\"\n assert(typ == \"html\"), \"Can't render Switchboards as anything but HTML\"\n\n switch_html = []; switch_js = []\n for name, baseID, styp, posLbls, ipos, bShow in zip(\n self.switchNames, self.switchIDs, self.switchTypes,\n self.positionLabels, self.initialPositions, show):\n\n ID = (baseID + view_suffix) if view_suffix else baseID\n style = \"\" if bShow else \" style='display: none'\"\n\n if styp == \"buttons\":\n html = \"<div class='switch_container'%s>\\n\" % style\n html += \"<fieldset id='%s'>\\n\" % ID\n if name:\n html += \"<legend>%s: </legend>\\n\" % name\n for k, lbl in enumerate(posLbls):\n checked = \" checked='checked'\" if k == ipos else \"\"\n html += \"<label for='%s-%d'>%s</label>\\n\" % (ID, k, lbl)\n html += \"<input type='radio' name='%s' id='%s-%d' value=%d%s>\\n\" \\\n % (ID, ID, k, k, checked)\n html += \"</fieldset></div>\\n\"\n js = \" $('#%s > input').checkboxradio({ icon: false });\" % ID\n\n if view_suffix:\n js += \"\\n\".join((\n \"function connect_%s_to_base(){\" % ID,\n \" if( $('#%s').hasClass('initializedSwitch') ) {\" % baseID, # \"if base switch is ready\"\n \" $('#%s').on('change', function(event, ui) {\" % baseID,\n \" var v = $(\\\"#%s > input[name='%s']:checked\\\").val();\" % (baseID, baseID),\n \" var el = $(\\\"#%s > input[name='%s'][value=\\\" + v + \\\"]\\\");\" % (ID, ID),\n \" if( el.is(':checked') == false ) { \",\n \" el.click();\",\n \" }\",\n \" });\"\n \" $('#%s').on('change', function(event, ui) {\" % ID,\n \" var v = $(\\\"#%s > input[name='%s']:checked\\\").val();\" % (ID, ID),\n \" var el = $(\\\"#%s > input[name='%s'][value=\\\" + v + \\\"]\\\");\" % (baseID, baseID),\n \" if( el.is(':checked') == false ) { \",\n \" el.click();\",\n \" }\",\n \" });\",\n \" 
$('#%s').trigger('change');\" % baseID,\n \" }\",\n \" else {\", # need to wait for base switch\n \" setTimeout(connect_%s_to_base, 500);\" % ID,\n \" console.log('%s base NOT initialized: Waiting...');\" % ID,\n \" }\",\n \"};\",\n \"connect_%s_to_base();\" % ID # start trying to connect\n ))\n\n elif styp == \"dropdown\":\n html = \"<div class='switch_container'%s><fieldset>\\n\" % style\n if name:\n html += \"<label for='%s'>%s</label>\\n\" % (ID, name)\n html += \"<select name='%s' id='%s'>\\n\" % (ID, ID)\n for k, lbl in enumerate(posLbls):\n selected = \" selected='selected'\" if k == ipos else \"\"\n html += \"<option value=%d%s>%s</option>\\n\" % (k, selected, lbl)\n html += \"</select>\\n</fieldset></div>\\n\"\n js = \" $('#%s').selectmenu();\" % ID\n\n if view_suffix:\n js += \"\\n\".join((\n \"function connect_%s_to_base(){\" % ID,\n \" if( $('#%s').hasClass('initializedSwitch') ) {\" % baseID, # \"if base switch is ready\"\n \" $('#%s').on('selectmenuchange', function(event, ui) {\" % baseID,\n \" var v = $('#%s').val();\" % baseID,\n \" var el = $('#%s');\" % ID,\n \" if( el.val() != v ) { \",\n \" el.val(v).selectmenu('refresh');\",\n \" }\",\n \" });\"\n \" $('#%s').on('selectmenuchange', function(event, ui) {\" % ID,\n \" var v = $('#%s').val();\" % ID,\n \" var el = $('#%s');\" % baseID,\n \" if( el.val() != v ) { \",\n \" el.val(v).selectmenu('refresh').trigger('selectmenuchange');\",\n \" }\",\n \" });\",\n \" $('#%s').trigger('selectmenuchange');\" % baseID,\n \" console.log('%s connected to base');\\n\" % ID,\n \" }\",\n \" else {\", # need to wait for base switch\n \" setTimeout(connect_%s_to_base, 500);\" % ID,\n \" console.log('%s base NOT initialized: Waiting...');\" % ID,\n \" }\",\n \"};\",\n \"connect_%s_to_base();\" % ID # start trying to connect\n ))\n\n elif styp == \"slider\" or styp == \"numslider\":\n\n if styp == \"numslider\":\n float_vals = list(map(float, posLbls))\n m, M = min(float_vals), max(float_vals)\n else:\n float_vals = list(range(len(posLbls)))\n m, M = 0, len(posLbls) - 1\n\n #ml = max(list(map(len,posLbls)))\n w = 3.0 # 1.0*ml\n\n html = \"<div id='%s-container' class='switch_container'%s>\\n\" \\\n % (ID, style)\n html += \"<fieldset>\\n\"\n if name:\n html += \"<label for='%s' class='pygsti-slider-label'>%s</label>\\n\" % (ID, name)\n html += \"<div name='%s' id='%s'>\\n\" % (ID, ID)\n html += \"<div id='%s-handle' class='ui-slider-handle'></div>\" % ID\n html += \"</div>\\n</fieldset></div>\\n\"\n # \" $('#%s-container').css({'margin-top':'%fem'});\" % (ID,1.7/2),\n\n js = \"\"\n if view_suffix is None:\n js = \"var %s_float_values = [\" % ID + \\\n \",\".join(map(str, float_vals)) + \"];\\n\"\n js += \"var %s_str_values = [\" % ID + \\\n \",\".join([\"'%s'\" % s for s in posLbls]) + \"];\\n\"\n js += \"window.%s_float_values = %s_float_values;\\n\" % (ID, ID) # ensure declared globally\n js += \"window.%s_str_values = %s_str_values;\\n\" % (ID, ID) # ensure declared globally\n\n js += \"\\n\".join((\n \"function findNearest_%s(includeLeft, includeRight, value) {\" % ID,\n \" var nearest = null;\",\n \" var diff = null;\",\n \" for (var i = 0; i < %s_float_values.length; i++) {\" % ID,\n \" if ((includeLeft && %s_float_values[i] <= value) ||\" % ID,\n \" (includeRight && %s_float_values[i] >= value)) {\" % ID,\n \" var newDiff = Math.abs(value - %s_float_values[i]);\" % ID,\n \" if (diff == null || newDiff < diff) {\",\n \" nearest = i;\",\n \" diff = newDiff;\",\n \" }\",\n \" }\",\n \" }\",\n \" return nearest;\",\n \"}\",\n 
\"window.findNearest_%s = findNearest_%s;\\n\" % (ID, ID)))\n\n #allow ipos = something (e.g. -1) when there aren't any position labels\n if len(posLbls) == 0:\n float_val = 0.0; posLabel = \"--\"\n else:\n float_val = float_vals[ipos]\n posLabel = posLbls[ipos]\n\n js += \"\\n\".join((\n \" $('#%s').slider({\" % ID,\n \" orientation: 'horizontal', range: false,\",\n \" min: %f, max: %f, step: %f,\" % (m, M, (M - m) / 100.0),\n \" value: %f,\" % float_val,\n \" create: function() {\",\n \" $('#%s-handle').text('%s');\" % (ID, posLabel),\n \" $('#%s-handle').css({'width':'%fem','height':'%fem'});\" % (ID, w, 1.7),\n \" $('#%s-handle').css({'margin-left':'%fem','top':'%fem'});\" % (ID, -w / 2, -1.7 / 2 + 0.4),\n \" $('#%s-handle').css({'text-align':'center','line-height':'1.5em'});\" % ID,\n \" $('#%s').css({'margin-left':'%fem', 'margin-top':'0.4em'});\" % (ID, w / 2),\n \" },\",\n \" slide: function(event, ui) {\",\n \" var includeLeft = event.keyCode != $.ui.keyCode.RIGHT;\",\n \" var includeRight = event.keyCode != $.ui.keyCode.LEFT;\",\n \" var iValue = findNearest_%s(includeLeft, includeRight, ui.value);\" % baseID,\n \" if($('#%s').slider('value') != %s_float_values[iValue]) {\" % (ID, baseID),\n \" $('#%s-handle').text(%s_str_values[iValue]);\" % (baseID, baseID),\n \" $('#%s').slider('value', %s_float_values[iValue]);\" % (baseID, baseID),\n \" }\"\n \" return false;\"\n \" },\",\n \" });\",\n ))\n\n if view_suffix:\n # slide events always change *base* (non-view) slider (see above),\n # which causes a change event to fire. Views handle this event\n # to update their own slider values.\n js += \"\\n\".join((\n \"function connect_%s_to_base(){\" % ID,\n \" if( $('#%s').hasClass('initializedSwitch') ) {\" % baseID, # \"if base switch is ready\"\n \" $('#%s').on('slidechange', function(event, ui) {\" % baseID,\n \" $('#%s').slider('value', ui.value);\" % ID,\n \" $('#%s-handle').text( $('#%s-handle').text() );\" % (ID, baseID),\n \" });\",\n \" var mock_ui = { value: $('#%s').slider('value') };\" % baseID, # b/c handler uses ui.value\n \" $('#%s').trigger('slidechange', mock_ui);\" % baseID,\n \" }\",\n \" else {\", # need to wait for base switch\n \" setTimeout(connect_%s_to_base, 500);\" % ID,\n \" console.log('%s base NOT initialized: Waiting...');\" % ID,\n \" }\",\n \"};\",\n \"connect_%s_to_base();\" % ID # start trying to connect\n ))\n\n else:\n raise ValueError(\"Unknown switch type: %s\" % styp)\n\n js += \"$('#%s').addClass('initializedSwitch');\\n\" % ID\n\n switch_html.append(html)\n switch_js.append(js)\n\n html = \"\\n\".join(switch_html)\n if not self.use_loadable_items: # run JS as soon as the document is ready\n js = \"$(document).ready(function() {\\n\" + \\\n \"\\n\".join(switch_js) + \"\\n});\"\n else: # in a report, where we have a 'loadable' parent and might not want to load right away\n js = \"$(document).ready(function() {\\n\" + \\\n \"$('#%s').closest('.loadable').on('load_loadable_item', function(){\\n\" % ID + \\\n \"\\n\".join(switch_js) + \"\\n}); });\"\n\n return {'html': html, 'js': js}\n\n def get_switch_change_handlerjs(self, switchIndex):\n \"\"\"\n Returns the Javascript needed to begin an on-change handler\n for a particular switch.\n\n Parameters\n ----------\n switchIndex : int\n The 0-based index of which switch to get handler JS for.\n\n Returns\n -------\n str\n \"\"\"\n ID = self.switchIDs[switchIndex]\n typ = self.switchTypes[switchIndex]\n if typ == \"buttons\":\n return \"$('#%s').on('change', function() {\" % ID\n elif typ == 
\"dropdown\":\n return \"$('#%s').on('selectmenuchange', function() {\" % ID\n elif typ == \"slider\" or typ == \"numslider\":\n return \"$('#%s').on('slidechange', function() {\" % ID # only when slider stops\n #return \"$('#%s').on('slide', function() {\" % ID # continuous on\n # mouse move - but doesn't respond correctly to arrows, so seems\n # better to use 'slidechange'\n else:\n raise ValueError(\"Unknown switch type: %s\" % typ)\n\n def get_switch_valuejs(self, switchIndex):\n \"\"\"\n Returns the Javascript needed to get the value of a particular switch.\n\n Parameters\n ----------\n switchIndex : int\n The 0-based index of which switch to get value-extracting JS for.\n\n Returns\n -------\n str\n \"\"\"\n ID = self.switchIDs[switchIndex]\n typ = self.switchTypes[switchIndex]\n if typ == \"buttons\":\n return \"$(\\\"#%s > input[name='%s']:checked\\\").val()\" % (ID, ID)\n elif typ == \"dropdown\":\n return \"$('#%s').val()\" % ID\n elif typ == \"slider\" or typ == \"numslider\":\n #return \"%s_float_values.indexOf($('#%s').slider('option', 'value'))\" % (ID,ID)\n return \"findNearest_%s(true,true,$('#%s').slider('option', 'value'))\" % (ID, ID)\n else:\n raise ValueError(\"Unknown switch type: %s\" % typ)\n\n def display(self):\n \"\"\"\n Display this switchboard within an iPython notebook.\n\n Calling this function requires that you are in an\n iPython environment, and really only makes sense\n within a notebook.\n\n Returns\n -------\n None\n \"\"\"\n if not in_ipython_notebook():\n raise ValueError('Only run `display` from inside an IPython Notebook.')\n\n #if self.widget is None:\n # self.widget = _widgets.HTMLMath(value=\"?\",\n # placeholder='Switch HTML',\n # description='Switch HTML',\n # disabled=False)\n out = self.render(\"html\")\n content = \"<script>\\n\" + \\\n \"require(['jquery','jquery-UI'],function($,ui) {\" + \\\n out['js'] + \" });</script>\" + out['html']\n #self.widget.value = content\n display_ipynb(content) # self.widget)\n\n def view(self, switches=\"all\", idsuffix=\"auto\"):\n \"\"\"\n Return a view of this Switchboard.\n\n Parameters\n ----------\n switches : list, optional\n The names of the switches to include in this view. The special\n value \"all\" includes all of the switches in the view.\n Alternatively, this can be an array of boolean values, one\n for each switch.\n\n idsuffix : str, optional\n A suffix to append to the DOM ID of this switchboard when\n rendering the view. If \"auto\", a random suffix is used.\n\n Returns\n -------\n SwitchboardView\n \"\"\"\n if switches == \"all\":\n show = [True] * len(self.switchNames)\n elif all([isinstance(b, bool) for b in switches]):\n assert(len(switches) == len(self.switchNames))\n show = switches\n else:\n show = [False] * len(self.switchNames)\n for nm in switches:\n show[self.switchNames.index(nm)] = True\n\n return SwitchboardView(self, idsuffix, show)\n\n def __getattr__(self, attr):\n if attr in self:\n return self[attr]\n return getattr(self.__dict__, attr)\n\n\nclass SwitchboardView(object):\n \"\"\"\n A duplicate or \"view\" of an existing switchboard which logically\n represents the *same* set of switches. 
Thus, when switches are\n moved on the duplicate board, switches will move on the original\n (and vice versa).\n \"\"\"\n\n def __init__(self, switchboard, idsuffix=\"auto\", show=\"all\"):\n \"\"\"\n Create a new SwitchboardView\n\n Parameters\n ----------\n switchboard : Switchboard\n The base switch board.\n\n idsuffix : str, optional\n A suffix to append to the DOM ID of this switchboard\n when rendering the view. If \"auto\", a random suffix\n is used.\n\n show : list (optional)\n A list of booleans indicating which switches should be rendered.\n The special values \"all\" and \"none\" show all or none of the\n switches, respectively.\n \"\"\"\n if idsuffix == \"auto\":\n self.idsuffix = \"v\" + randomID()\n else:\n self.idsuffix = idsuffix\n\n if show == \"all\":\n self.show = [True] * len(switchboard.switchNames)\n elif show == \"none\":\n self.show = [False] * len(switchboard.switchNames)\n else:\n assert(len(show) == len(switchboard.switchNames))\n self.show = show\n\n self.switchboard = switchboard\n\n def render(self, typ=\"html\"):\n \"\"\"\n Render this Switchboard into the requested format.\n\n The returned string(s) are intended to be used to embedded a\n visualization of this object within a larger document.\n\n Parameters\n ----------\n typ : {\"html\"}\n The format to render as. Currently only HTML is supported.\n\n Returns\n -------\n dict\n A dictionary of strings whose keys indicate which portion of\n the embeddable output the value is. Keys will vary for different\n `typ`. For `\"html\"`, keys are `\"html\"` and `\"js\"` for HTML and\n and Javascript code, respectively.\n \"\"\"\n return self.switchboard._render_base(typ, self.idsuffix, self.show)\n\n def display(self):\n \"\"\"\n Display this switchboard within an iPython notebook.\n\n Calling this function requires that you are in an\n iPython environment, and really only makes sense\n within a notebook.\n\n Returns\n -------\n None\n \"\"\"\n if not in_ipython_notebook():\n raise ValueError('Only run `display` from inside an IPython Notebook.')\n\n out = self.render(\"html\")\n content = \"<script>\\n\" + \\\n \"require(['jquery','jquery-UI'],function($,ui) {\" + \\\n out['js'] + \" });</script>\" + out['html']\n display_ipynb(content)\n\n\nclass SwitchValue(object):\n \"\"\"\n Encapsulates a \"switched value\", which is essentially a value (i.e. some\n quantity, usually one used as an argument to visualization functions) that\n is controlled by the switches of a single Switchboard.\n\n The paradigm is one of a Switchboard being a collection of switches along\n with a dictionary of SwitchValues, whereby each SwitchValue is a mapping\n of switch positions to values. For efficiency, a SwitchValue need only map\n a \"subspace\" of the switch positions, that is, the position-space spanned\n by only a subset of the switches. 
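\n\n    As an illustrative sketch (not an excerpt of this module), a value that\n    depends only on the second switch of a board whose position labels are\n    [['low', 'high'], ['1Hz', '2Hz', '5Hz']] is backed by a length-3 object array::\n\n        import numpy as np\n        position_labels = [['low', 'high'], ['1Hz', '2Hz', '5Hz']]\n        dependencies = (1,)  # depends on switch 1 (the second switch) only\n        shape = [len(position_labels[i]) for i in dependencies]  # -> [3]\n        base = np.empty(shape, dtype=object)  # real code fills this with NotApplicable\n\n    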
Which switch-positions are mapped is\n given by the \"dependencies\" of a SwitchValue.\n\n SwitchValue behaves much like a numpy array of values in terms of\n element access.\n \"\"\"\n\n def __init__(self, parent_switchboard, name, dependencies):\n \"\"\"\n Creates a new SwitchValue.\n\n Parameters\n ----------\n parent_switchboard : Switchboard\n The switch board this value is associated with.\n\n name : str\n The name of this value, which is also the key or member\n name used to access this value from its parent `Switchboard`.\n\n dependencies : iterable\n The 0-based indices identifying which switches this value\n depends upon, and correspondingly, which switch positions\n the different axes of the new `SwitchValue` correspond to.\n \"\"\"\n self.ws = parent_switchboard.ws # workspace\n self.parent = parent_switchboard\n self.name = name\n self.dependencies = dependencies\n\n shape = [len(self.parent.positionLabels[i]) for i in dependencies]\n self.base = _np.empty(shape, dtype=_np.object)\n index_all = (slice(None, None),) * len(shape)\n self.base[index_all] = NotApplicable(self.ws)\n\n #Access to underlying ndarray\n def __getitem__(self, key):\n return self.base.__getitem__(key)\n\n def __getslice__(self, i, j):\n return self.__getitem__(slice(i, j)) # Called for A[:]\n\n def __setitem__(self, key, val):\n return self.base.__setitem__(key, val)\n\n def __getattr__(self, attr):\n #use __dict__ so no chance for recursive __getattr__\n return getattr(self.__dict__['base'], attr)\n\n def __len__(self): return len(self.base)\n #Future - arithmetic ops should return a new SwitchValue\n #def __add__(self,x): return self.base + x\n #def __sub__(self,x): return self.base - x\n #def __mul__(self,x): return self.base * x\n #def __truediv__(self, x): return self.base / x\n\n\nclass WorkspaceOutput(object):\n \"\"\"\n Base class for all forms of data-visualization within a `Workspace` context.\n\n WorkspaceOutput sets a common interface for performing data visualization\n using a Workspace. In particular, `render` is used to create embeddable\n output in various formats, and `display` is used to show the object within\n an iPython notebook.\n \"\"\"\n default_render_options = {\n #General\n 'output_dir': False,\n 'precision': None,\n\n 'output_name': False,\n 'switched_item_mode': 'inline', # or 'separate files'\n 'switched_item_id_overrides': {},\n\n #HTML specific\n 'global_requirejs': False,\n 'use_loadable_items': False,\n 'click_to_display': False,\n 'render_math': True,\n 'resizable': True,\n 'autosize': 'none',\n 'link_to': None,\n 'valign': 'top',\n\n #Latex specific\n 'latex_cmd': \"pdflatex\",\n 'latex_flags': [\"-interaction=nonstopmode\", \"-halt-on-error\", \"-shell-escape\"],\n 'page_size': (6.5, 8.0),\n 'render_includes': True,\n 'leave_includes_src': False,\n }\n\n def __init__(self, ws):\n \"\"\"\n Create a new WorkspaceOutput object. Usually not called directly.\n\n Parameters\n ----------\n ws : Workspace\n The workspace containing the new object.\n \"\"\"\n self.ws = ws\n self.ID = randomID() # maybe allow overriding this in the FUTURE\n self.options = WorkspaceOutput.default_render_options.copy()\n\n def set_render_options(self, **kwargs):\n \"\"\"\n Sets rendering options, which affect how render() behaves.\n\n The reason render options are set via this function rather\n than passed directly as arguments to the render(...) call\n is twofold. 
First, it allows for global 'default' options\n to be set before creating `WorkspaceOutput`-derived objects;\n Secondly, it allows the user to set render options right after\n an object is constructed, separately from the rendering process\n (which is sometimes desirable).\n\n Parameters\n ----------\n output_dir : str or False\n The name of the output directory under which all output files\n should be created. The names of these files just the IDs of the\n items being rendered.\n\n precision : int or dict, optional\n The amount of precision to display. A dictionary with keys\n \"polar\", \"sci\", and \"normal\" can separately specify the\n precision for complex angles, numbers in scientific notation, and\n everything else, respectively. If an integer is given, it this\n same value is taken for all precision types. If None, then\n `{'normal': 6, 'polar': 3, 'sci': 0}` is used.\n\n\n\n switched_item_mode : {'inline','separate files'}, optional\n Whether switched items should be rendered inline within the 'html'\n and 'js' blocks of the return value of :func:`render`, or whether\n each switched item (corresponding to a single \"switch position\")\n should be rendered in a separate file and loaded on-demand only\n when it is needed.\n\n switched_item_id_overrides : dict, optional\n A dictionary of *index*:*id* pairs, where *index* is a 0-based index\n into the list of switched items (plots or tables), and *id* is a\n string ID. Since the ID is used as the filename when saving files,\n overriding the ID is useful when writing a single plot or table to\n a specific filename.\n\n global_requirejs : bool, optional\n Whether the table is going to be embedded in an environment\n with a globally defined RequireJS library. If True, then\n rendered output will make use of RequireJS.\n\n click_to_display : bool, optional\n If True, table plots are not initially created but must\n be clicked to prompt creation. This is False by default,\n and can be useful to set to True for tables with\n especially complex plots whose creation would slow down\n page loading significantly.\n\n resizable : bool, optional\n Whether or not to place table inside a JQueryUI\n resizable widget (only applies when `typ == \"html\"`).\n\n autosize : {'none', 'initial', 'continual'}, optional\n Whether tables and plots should be resized either\n initially, i.e. just upon first rendering (`\"initial\"`) or whenever\n the browser window is resized (`\"continual\"`). This option only\n applies for html rendering.\n\n link_to : tuple of {\"tex\", \"pdf\", \"pkl\"} or None, optional\n If not None, a list of one or more items from the given set\n indicating whether or not to include links to Latex, PDF, and\n Python pickle files, respectively. Note that setting this\n render option does not automatically *create/render* additional\n formats of this output object (you need to make multiple `render`\n calls for that) - it just creates the *links* to these files when\n rendering as \"html\".\n\n valign : {\"top\",\"bottom\"}\n Whether the switched items should be vertically aligned by their\n tops or bottoms (when they're different heights).\n\n\n\n latex_cmd : str, optional\n The system command or executable used to compile LaTeX documents.\n Usually `\"pdflatex\"`.\n\n latex_flags : list, optional\n A list of (string-valued) flags to pass to `latex_cmd` when\n compiling LaTeX documents. 
Defaults to\n `[\"-interaction=nonstopmode\", \"-halt-on-error\", \"-shell-escape\"]`\n\n page_size : tuple\n The usable page size for LaTeX documents, as (*width*,*height*)\n where *width* and *height* are in inches. Note that this does not\n include margins. Defaults to `(6.5,8.0)`.\n\n render_includes : bool, optional\n When rendering as \"latex\", whether included files should also be\n rendered (either by compiling latex to PDF or saving plots as PDFs).\n\n leave_includes_src : bool, optional\n When LaTeX compilation is done, should the source \"*.tex\" files be\n removed? If `False`, then they *are* removed.\n\n Returns\n -------\n None\n \"\"\"\n for key, val in kwargs.items():\n if key in self.options:\n self.options[key] = val\n else:\n raise ValueError(\"Invalid render option: %s\\nValid options are:\\n\" % key\n + '\\n'.join(self.options.keys()))\n\n def __getstate__(self):\n state_dict = self.__dict__.copy()\n del state_dict['ws']\n return state_dict\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n if 'ws' not in self.__dict__:\n self.__dict__['ws'] = None\n\n # Note: hashing not needed because these objects are not *inputs* to\n # other WorspaceOutput objects or computation functions - these objects\n # are cached using call_key.\n\n def render(self, typ=\"html\"):\n \"\"\"\n Renders this object into the specifed format, specifically for\n embedding it within a larger document.\n\n Parameters\n ----------\n typ : str\n The format to render as. Currently `\"html\"` is widely supported\n and `\"latex\"` is supported for tables.\n\n Returns\n -------\n dict\n A dictionary of strings whose keys indicate which portion of\n the embeddable output the value is. Keys will vary for different\n `typ`. For `\"html\"`, keys are `\"html\"` and `\"js\"` for HTML and\n and Javascript code, respectively.\n \"\"\"\n raise NotImplementedError(\"Derived classes must implement their own render()\")\n\n def display(self):\n \"\"\"\n Display this object within an iPython notebook.\n \"\"\"\n if not in_ipython_notebook():\n raise ValueError('Only run `display` from inside an IPython Notebook.')\n\n self.set_render_options(global_requirejs=True,\n output_dir=None) # b/c jupyter uses require.js\n out = self.render(\"html\")\n content = \"<script>\\n\" + \\\n \"require(['jquery','jquery-UI','plotly'],function($,ui,Plotly) {\" + \\\n out['js'] + \" });</script>\" + out['html']\n\n display_ipynb(content)\n\n def saveas(self, filename, index=None, verbosity=0):\n \"\"\"\n Saves this workspace output object to a file.\n\n The type of file that is saved is determined automatically by the\n extension of `filename`. Recognized extensions are `pdf` (PDF),\n `tex` (LaTeX), `pkl` (Python pickle) and `html` (HTML). Since this\n object may contain different instances of its data based on switch\n positions, when their are multiple instances the user must specify\n the `index` argument to disambiguate.\n\n Parameters\n ----------\n filename : str\n The destination filename. Its extension determines what type\n of file is saved.\n\n index : int, optional\n An absolute index into the list of different switched \"versions\"\n of this object's data. 
In most cases, the object being saved\n doesn't depend on any switch boards and has only a single \"version\",\n in which caes this can be left as the default.\n\n verbosity : int, optional\n Controls the level of detail printed to stdout.\n\n Returns\n -------\n None\n \"\"\"\n raise NotImplementedError()\n\n def _ccompute(self, fn, *args, **kwargs):\n \"\"\" Cached-computation using self.ws's smart cache \"\"\"\n return self.ws.smartCache.cached_compute(fn, args, kwargs)[1]\n\n def _create_onready_handler(self, content, ID):\n global_requirejs = self.options.get('global_requirejs', False)\n use_loadable_items = self.options.get('use_loadable_items', False)\n ret = \"\"\n\n if global_requirejs:\n ret += \"require(['jquery','jquery-UI','plotly','autorender'],function($,ui,Plotly,renderMathInElement) {\\n\"\n\n ret += ' $(document).ready(function() {\\n'\n if use_loadable_items:\n ret += \" $('#%s').closest('.loadable').on('load_loadable_item', function(){\\n\" % ID\n\n ret += content\n\n if use_loadable_items:\n ret += \" });\" # end load_loadable_item handler\n\n ret += '}); //end on-ready or on-load handler\\n'\n\n if global_requirejs:\n ret += '}); //end require block\\n'\n\n return ret\n\n def _render_html(self, ID, div_htmls, div_jss, div_ids, switchpos_map,\n switchboards, switchIndices, div_css_classes=None,\n link_to=None, link_to_files_dir=None, embed_figures=True):\n \"\"\"\n Helper rendering function, which takes care of the (complex)\n common logic which take a series of HTML div blocks corresponding\n to the results of a Workspace.switchedCompute(...) call and\n builds the HTML and JS necessary for toggling the visibility of\n these divs in response to changes in switch position(s).\n\n Parameters\n ----------\n ID: str\n The identifier to use when constructing DOM ids.\n\n div_htmls : list\n The html content for each switched block (typically a elements are\n \"<div>...</div>\" blocks themselves). This is the content that\n is switched between.\n\n div_jss : list\n Javascript content to accompany each switched block.\n\n div_ids : list\n A list giving the DOM ids for the div blocks given by `div_html`.\n\n switchpos_map : dict\n A dictionary mapping switch positions to div-index. Keys are switch\n tuples of per-switchboard positions (i.e. a tuple of tuples), giving\n the positions of each switch specified in `switchIndices`. Values\n are integer indices into `html_divs`.\n\n switchboards : list\n A list of relevant SwitchBoard objects.\n\n switchIndices : list\n A list of tuples, one per Switchboard object, giving the relevant\n switch indices (integers) within that Switchboard.\n\n div_css_classes : list, optional\n A list of (string) CSS classes to add to the div elements created\n by this function.\n\n link_to : list, optional\n If not None, a list of one or more items from the set\n {\"tex\", \"pdf\", \"pkl\"} indicating whether or not to\n include links to Latex, PDF, and Python pickle files,\n respectively.\n\n link_to_files_dir : str, optional\n The directory to place linked-to files in. Only used when\n `link_to` is not None.\n\n embed_figures: bool, optional\n If True (default), figures will be embedded directly into\n the report HTML. Otherwise, figures will be written to\n `link_to_files_dir` and dynamically loaded into the report\n with AJAX requests.\n\n Returns\n -------\n dict\n A dictionary of strings whose keys indicate which portion of\n the embeddable output the value is. 
Keys are `\"html\"` and `\"js\"`.\n \"\"\"\n\n # within_report = self.options.get('within_report', False)\n\n #Build list of CSS classes for the created divs\n classes = ['single_switched_value']\n if div_css_classes is not None:\n classes.extend(div_css_classes)\n cls = ' '.join(classes)\n\n #build HTML as container div containing one or more plot divs\n # Note: 'display: none' doesn't always work in firefox... (polar plots in ptic)\n # style='display: none' or 'visibility: hidden'\n html = \"<div id='%s' class='pygsti-wsoutput-group'>\\n\" % ID\n\n div_contents = []\n if div_jss is None: div_jss = [\"\"] * len(div_htmls)\n for divHTML, divJS in zip(div_htmls, div_jss):\n scriptJS = \"<script>\\n%s\\n</script>\\n\" % divJS if divJS else \"\"\n div_contents.append((\"{script}{html}\".format(\n script=scriptJS, html=divHTML)))\n\n if embed_figures:\n #Inline div contents\n html += \"\\n\".join([\"<div class='%s' id='%s'>\\n%s\\n</div>\\n\" %\n (cls, divID, divContent) for divID, divContent\n in zip(div_ids, div_contents)])\n else:\n html += \"\\n\".join([\"<div class='%s' id='%s'></div>\\n\" %\n (cls, divID) for divID in div_ids])\n\n #build a list of filenames based on the divIDs\n div_filenames = [(divID + \".html\") for divID in div_ids]\n\n #Create separate files with div contents\n for divContent, divFilenm in zip(div_contents, div_filenames):\n with open(_os.path.join(str(link_to_files_dir), divFilenm), 'w') as f:\n f.write(divContent)\n html += \"\\n</div>\\n\" # ends pygsti-wsoutput-group div\n\n #build javascript to map switch positions to div_ids\n js = \"var switchmap_%s = new Array();\\n\" % ID\n for switchPositions, iDiv in switchpos_map.items():\n #switchPositions is a tuple of tuples of position indices, one tuple per switchboard\n div_id = div_ids[iDiv]\n flatPositions = []\n for singleBoardSwitchPositions in switchPositions:\n flatPositions.extend(singleBoardSwitchPositions)\n js += \"switchmap_%s[ [%s] ] = '%s';\\n\" % \\\n (ID, \",\".join(map(str, flatPositions)), div_id)\n\n js += \"window.switchmap_%s = switchmap_%s;\\n\" % (ID, ID) # ensure a *global* variable\n js += \"\\n\"\n\n cnd = \" && \".join([\"$('#switchbd%s_%d').hasClass('initializedSwitch')\"\n % (sb.ID, switchIndex)\n for sb, switchInds in zip(switchboards, switchIndices)\n for switchIndex in switchInds])\n if len(cnd) == 0: cnd = \"true\"\n\n #define fn to \"connect\" output object to switchboard, i.e.\n # register event handlers for relevant switches so output object updates\n js += \"function connect_%s_to_switches(){\\n\" % ID\n js += \" if(%s) {\\n\" % cnd # \"if switches are ready\"\n # loop below adds event bindings to the body of this if-block\n\n #build a handler function to get all of the relevant switch positions,\n # build a (flattened) position array, and perform the lookup. 
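\n        # (Note on the structures used here: switchpos_map keys are tuples of\n        # per-switchboard position tuples, e.g. ((0, 1), (2,)), and the loop above\n        # flattened each key into a plain index list such as [0, 1, 2], so the\n        # generated switchmap_<ID> Javascript array can be indexed directly with\n        # the concatenated current switch positions to obtain the DOM id of the\n        # div that should be shown.)\n        # 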
Note that\n # this function does the same thing regardless of *which* switch was\n # changed, and so is called by all relevant switch change handlers.\n onchange_name = \"%s_onchange\" % ID\n handler_js = \"function %s() {\\n\" % onchange_name\n handler_js += \" var tabdiv = $( '#%s' ).closest('.tabcontent');\\n\" % ID\n handler_js += \" if( tabdiv.length > 0 && !tabdiv.hasClass('active') ) return;\\n\" # short-circuit\n handler_js += \" var curSwitchPos = new Array();\\n\"\n for sb, switchInds in zip(switchboards, switchIndices):\n for switchIndex in switchInds:\n handler_js += \" curSwitchPos.push(%s);\\n\" % sb.get_switch_valuejs(switchIndex)\n handler_js += \" var idToShow = switchmap_%s[ curSwitchPos ];\\n\" % ID\n handler_js += \" $( '#%s' ).children().hide();\\n\" % ID\n handler_js += \" divToShow = $( '#' + idToShow );\\n\"\n\n #Javascript to switch to a new div\n if embed_figures:\n handler_js += \" divToShow.show();\\n\"\n handler_js += \" divToShow.parentsUntil('#%s').show();\\n\" % ID\n handler_js += \" caption = divToShow.closest('figure').children('figcaption:first');\\n\"\n handler_js += \" caption.css('width', Math.round(divToShow.width()*0.9) + 'px');\\n\"\n else:\n handler_js += \" if( divToShow.children().length == 0 ) {\\n\"\n handler_js += \" $(`#${idToShow}`).load(`figures/${idToShow}.html`, function() {\\n\"\n handler_js += \" divToShow = $( '#' + idToShow );\\n\"\n handler_js += \" divToShow.show();\\n\"\n handler_js += \" divToShow.parentsUntil('#%s').show();\\n\" % ID\n if link_to and ('tex' in link_to):\n handler_js += \" divToShow.append('<a class=\\\"dlLink\\\" href=\\\"figures/'\"\n handler_js += \" + idToShow + '.tex\\\" target=\\\"_blank\\\">▼TEX</a>');\\n\"\n if link_to and ('pdf' in link_to):\n handler_js += \" divToShow.append('<a class=\\\"dlLink\\\" href=\\\"figures/'\"\n handler_js += \" + idToShow + '.pdf\\\" target=\\\"_blank\\\">▼PDF</a>');\\n\"\n if link_to and ('pkl' in link_to):\n handler_js += \" divToShow.append('<a class=\\\"dlLink\\\" href=\\\"figures/'\"\n handler_js += \" + idToShow + '.pkl\\\" target=\\\"_blank\\\">▼PKL</a>');\\n\"\n handler_js += \" caption = divToShow.closest('figure').children('figcaption:first');\\n\"\n handler_js += \" caption.css('width', Math.round(divToShow.width()*0.9) + 'px');\\n\"\n handler_js += \" });\\n\" # end load-complete handler\n handler_js += \" }\\n\"\n handler_js += \" else {\\n\"\n handler_js += \" divToShow.show();\\n\"\n handler_js += \" divToShow.parentsUntil('#%s').show();\\n\" % ID\n handler_js += \" caption = divToShow.closest('figure').children('figcaption:first');\\n\"\n handler_js += \" caption.css('width', Math.round(divToShow.width()*0.9) + 'px');\\n\"\n handler_js += \" }\\n\"\n handler_js += \"}\\n\" # end <ID>_onchange function\n\n #build change event listener javascript\n for sb, switchInds in zip(switchboards, switchIndices):\n # switchInds is a tuple containing the \"used\" switch indices of sb\n for switchIndex in switchInds:\n # part of if-block ensuring switches are ready (i.e. 
created)\n js += \" \" + sb.get_switch_change_handlerjs(switchIndex) + \\\n \"%s(); });\\n\" % onchange_name\n\n #bind onchange call to custom 'tabchange' event that we trigger when tab changes\n js += \" $( '#%s' ).closest('.tabcontent').on('tabchange', function(){\\n\" % ID\n js += \"%s(); });\\n\" % onchange_name\n js += \" %s();\\n\" % onchange_name # call onchange function *once* at end to update visibility\n\n # end if-block\n js += \" console.log('Switches initialized: %s handlers set');\\n\" % ID\n js += \" $( '#%s' ).show()\\n\" % ID # visibility updates are done: show parent container\n js += \" }\\n\" # ends if-block\n js += \" else {\\n\" # switches aren't ready - so wait\n js += \" setTimeout(connect_%s_to_switches, 500);\\n\" % ID\n js += \" console.log('%s switches NOT initialized: Waiting...');\\n\" % ID\n js += \" }\\n\"\n js += \"};\\n\" # end of connect function\n\n #on-ready handler starts trying to connect to switches\n # - note this is already in a 'load_loadable_item' handler, so no need for that here\n js += \"$(document).ready(function() {\\n\"\n js += \" connect_%s_to_switches();\\n\" % ID\n\n if link_to:\n # Add download links for all divs at once since they're all ready\n rel_figure_dir = _os.path.basename(str(link_to_files_dir))\n if 'tex' in link_to:\n for div_id in div_ids:\n js += \" $('#%s').append('<a class=\\\"dlLink\\\" href=\\\"%s/\" % (div_id, rel_figure_dir)\n js += \"%s.tex\\\" target=\\\"_blank\\\">▼TEX</a>');\\n\" % div_id\n if 'pdf' in link_to:\n for div_id in div_ids:\n js += \" $('#%s').append('<a class=\\\"dlLink\\\" href=\\\"%s/\" % (div_id, rel_figure_dir)\n js += \"%s.pdf\\\" target=\\\"_blank\\\">▼PDF</a>');\\n\" % div_id\n if 'pkl' in link_to:\n for div_id in div_ids:\n js += \" $('#%s').append('<a class=\\\"dlLink\\\" href=\\\"%s/\" % (div_id, rel_figure_dir)\n js += \"%s.pkl\\\" target=\\\"_blank\\\">▼PKL</a>');\\n\" % div_id\n\n js += \"});\\n\\n\" # end on-ready handler\n js += handler_js\n\n return {'html': html, 'js': js}\n\n\nclass NotApplicable(WorkspaceOutput):\n \"\"\"\n Class signifying that an given set of arguments is not applicable\n to a function being evaluated.\n \"\"\"\n\n def __init__(self, ws):\n \"\"\"\n Create a new NotApplicable object.\n \"\"\"\n super(NotApplicable, self).__init__(ws)\n\n def render(self, typ=\"html\", ID=None):\n \"\"\"\n Renders this object into the specifed format, specifically for\n embedding it within a larger document.\n\n Parameters\n ----------\n typ : str\n The format to render as. Allowed options are `\"html\"`,\n `\"latex\"`, and `\"python\"`.\n\n ID : str, optional\n An DOM ID used in place of the objects internal ID.\n\n Returns\n -------\n dict\n A dictionary of strings whose keys indicate which portion of\n the embeddable output the value is. Keys will vary for different\n `typ`. 
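As an example of the\n            branches below, the \"python\" format simply returns the plain string\n            \"Not Applicable\", and \"latex\" returns {'latex': 'Not applicable'}.\n            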
For `\"html\"`, keys are `\"html\"` and `\"js\"` for HTML and\n and Javascript code, respectively.\n \"\"\"\n if ID is None: ID = self.ID\n\n if typ == \"html\":\n return {'html': \"<div id='%s' class='notapplicable'>[NO DATA or N/A]</div>\" % ID, 'js': \"\"}\n\n elif typ == \"latex\":\n return {'latex': \"Not applicable\"}\n\n elif typ == \"python\":\n return \"Not Applicable\"\n else:\n raise ValueError(\"NotApplicable render type not supported: %s\" % typ)\n\n\nclass WorkspaceTable(WorkspaceOutput):\n \"\"\"\n Encapsulates a table within a `Workspace` context.\n\n A base class which provides the logic required to take a\n single table-generating function and make it into a legitimate\n `WorkspaceOutput` object for using within workspaces.\n \"\"\"\n\n def __init__(self, ws, fn, *args):\n \"\"\"\n Create a new WorkspaceTable. Usually not called directly.\n\n Parameters\n ----------\n ws : Workspace\n The workspace containing the new object.\n\n fn : function\n A table-creating function.\n\n args : various\n The arguments to `fn`.\n \"\"\"\n super(WorkspaceTable, self).__init__(ws)\n self.tablefn = fn\n self.initargs = args\n self.tables, self.switchboards, self.sbSwitchIndices, self.switchpos_map = \\\n self.ws.switchedCompute(self.tablefn, *self.initargs)\n\n def render(self, typ):\n \"\"\"\n Renders this table into the specifed format, specifically for\n embedding it within a larger document.\n\n Parameters\n ----------\n typ : str\n The format to render as. Currently `\"html\"`, `\"latex\"`\n and `\"python\"` are supported.\n\n Returns\n -------\n dict\n A dictionary of strings giving the different portions of the\n embeddable output. For `\"html\"`, keys are `\"html\"` and `\"js\"`.\n For `\"latex\"`, there is a single key `\"latex\"`.\n \"\"\"\n resizable = self.options.get('resizable', True)\n autosize = self.options.get('autosize', 'none')\n precision = self.options.get('precision', None)\n switched_item_mode = self.options.get('switched_item_mode', 'inline')\n overrideIDs = self.options.get('switched_item_id_overrides', {})\n output_dir = self.options.get('output_dir', None)\n\n if precision is None:\n precDict = {'normal': 6, 'polar': 3, 'sci': 0}\n elif _compat.isint(precision):\n precDict = {'normal': precision, 'polar': precision, 'sci': precision}\n else:\n assert('normal' in precision), \"Must at least specify 'normal' precision\"\n p = precision['normal']\n precDict = {'normal': p,\n 'polar': precision.get('polar', p),\n 'sci': precision.get('sci', p)}\n\n ID = self.ID\n tableID = \"table_\" + ID\n\n if typ == \"html\":\n\n divHTML = []\n divIDs = []\n divJS = []\n\n for i, table in enumerate(self.tables):\n tableDivID = tableID + \"_%d\" % i\n if i in overrideIDs: tableDivID = overrideIDs[i]\n\n if isinstance(table, NotApplicable):\n table_dict = table.render(\"html\", tableDivID)\n else:\n table_dict = table.render(\"html\", tableID=tableDivID + \"_tbl\",\n tableclass=\"dataTable\",\n precision=precDict['normal'],\n polarprecision=precDict['polar'],\n sciprecision=precDict['sci'],\n resizable=resizable, autosize=(autosize == \"continual\"),\n click_to_display=self.options['click_to_display'],\n link_to=self.options['link_to'],\n output_dir=output_dir)\n\n if switched_item_mode == 'separate files':\n divJS.append(self._form_table_js(tableDivID, table_dict['html'], table_dict['js'], None))\n else:\n #otherwise just add plot handers (table_dict['js']) to divJS for later\n divJS.append(table_dict['js'])\n\n divHTML.append(table_dict['html'])\n divIDs.append(tableDivID)\n\n 
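# Note: the two branches below differ only in how the switched tables\n            # collected above are delivered.  With 'inline', each table's HTML is\n            # embedded directly in the returned 'html' block and all plot/switchboard\n            # JS is folded into a single on-ready handler.  With 'separate files',\n            # each table is written to its own file (_render_html is called with\n            # embed_figures=False) and loaded on demand when its switch combination\n            # is first selected.\n            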
if switched_item_mode == 'inline':\n base = self._render_html(tableID, divHTML, None, divIDs, self.switchpos_map,\n self.switchboards, self.sbSwitchIndices, None,\n self.options.get('link_to', None), output_dir) # no JS yet...\n js = self._form_table_js(tableID, base['html'], '\\n'.join(divJS), base['js'])\n # creates JS for everything: plot creation, switchboard init, autosize\n elif switched_item_mode == 'separate files':\n base = self._render_html(tableID, divHTML, divJS, divIDs, self.switchpos_map,\n self.switchboards, self.sbSwitchIndices, None,\n self.options.get('link_to', None), output_dir, embed_figures=False)\n js = self._form_table_js(tableID, None, None, base['js'])\n else:\n raise ValueError(\"Invalid `switched_item_mode` render option: %s\" %\n switched_item_mode)\n\n return {'html': base['html'], 'js': js}\n\n elif typ == \"latex\":\n\n render_includes = self.options.get('render_includes', True)\n leave_src = self.options.get('leave_includes_src', False)\n W, H = self.options.get('page_size', (6.5, 8.0))\n printer = _objs.VerbosityPrinter(1) # TEMP - add verbosity arg?\n\n #Note: in both cases output_dir needs to be the *relative* path\n # between the current directory and the output directory if\n # \\includegraphics statements are to work. If this isn't needed\n # (e.g. if just the standalone files are needed) then output_dir\n # can be an absolute path as well.\n\n # table rendering returned in ret dict\n if switched_item_mode == 'inline':\n # Assume current directory is where generated latex\n # code will reside and output_dir is where figs go.\n tablefig_output_dir = output_dir # (can be None, in\n #which case an error will be raised if table has figs)\n render_dir = None # no need to chdir for table render\n\n #render each switched \"item\" as a separate standalone file\n elif switched_item_mode == 'separate files':\n # Assume current directory is where \\includegraphics{...}\n # latex will go, and that separate table TEX files *and*\n # figures go in `output_dir`. The table latex is given an\n # output_dir of '.' 
because figure files will be in the same\n # directory.\n assert(output_dir), \"Cannot render a table as 'latex' with \" + \\\n \"switched items as separate files without a valid \" + \\\n \"'output_dir' render option\"\n tablefig_output_dir = '.'\n render_dir = output_dir\n\n else:\n raise ValueError(\"Invalid `switched_item_mode` render option: %s\" %\n switched_item_mode)\n\n if render_dir is not None and not _os.path.exists(render_dir):\n _os.mkdir(render_dir)\n\n cwd = _os.getcwd()\n latex_list = []\n for i, table in enumerate(self.tables):\n tableDivID = tableID + \"_%d\" % i\n if i in overrideIDs: tableDivID = overrideIDs[i]\n if isinstance(table, NotApplicable): continue\n\n if render_dir: _os.chdir(render_dir)\n table_dict = table.render(\"latex\",\n precision=precDict['normal'],\n polarprecision=precDict['polar'],\n sciprecision=precDict['sci'],\n output_dir=tablefig_output_dir,\n render_includes=render_includes)\n if render_dir: _os.chdir(cwd)\n\n if switched_item_mode == 'inline':\n latex_list.append(table_dict['latex'])\n\n elif switched_item_mode == 'separate files':\n if render_includes or leave_src:\n d = {'toLatex': table_dict['latex']}\n _merge.merge_latex_template(d, \"standalone.tex\",\n _os.path.join(str(output_dir), \"%s.tex\" % tableDivID))\n\n if render_includes:\n assert('latex_cmd' in self.options and self.options['latex_cmd']), \\\n \"Cannot render latex include files without a valid 'latex_cmd' render option\"\n\n try:\n _os.chdir(render_dir)\n latex_cmd = self.options['latex_cmd']\n latex_call = [latex_cmd] + self.options.get('latex_flags', []) \\\n + [\"%s.tex\" % tableDivID]\n stdout, stderr, returncode = _merge.process_call(latex_call)\n _merge.evaluate_call(latex_call, stdout, stderr, returncode, printer)\n if not _os.path.isfile(\"%s.pdf\" % tableDivID):\n raise Exception(\"File %s.pdf was not created by %s\"\n % (tableDivID, latex_cmd))\n if not leave_src: _os.remove(\"%s.tex\" % tableDivID)\n _os.remove(\"%s.log\" % tableDivID)\n _os.remove(\"%s.aux\" % tableDivID)\n except _subprocess.CalledProcessError as e:\n printer.error(\"%s returned code %d \" % (latex_cmd, e.returncode)\n + \"trying to render standalone %s.tex. 
\" % tableDivID\n + \"Check %s.log to see details.\" % tableDivID)\n finally:\n _os.chdir(cwd)\n\n latex_list.append(\"\\\\includegraphics[width=%.2fin,height=%.2fin,keepaspectratio]{%s}\" %\n (W, H, _os.path.join(str(output_dir), \"%s.pdf\" % tableDivID)))\n elif leave_src:\n latex_list.append(\"\\\\input{%s}\" % _os.path.join(str(output_dir), \"%s.tex\" % tableDivID))\n else:\n latex_list.append(\"%% Didn't generated anything for tableID=%s\" % tableDivID)\n else:\n raise ValueError(\"Invalid `switched_item_mode` render option: %s\" %\n switched_item_mode) # pragma: no cover\n\n return {'latex': \"\\n\".join(latex_list)}\n\n elif typ == \"python\":\n\n if switched_item_mode == 'separate files':\n assert(output_dir), \"Cannot render tables as 'python' in separate\" \\\n + \" files without a valid 'output_dir' render option\"\n\n tables_python = _collections.OrderedDict()\n for i, table in enumerate(self.tables):\n if isinstance(table, NotApplicable): continue\n tableDivID = tableID + \"_%d\" % i\n if i in overrideIDs: tableDivID = overrideIDs[i]\n\n if switched_item_mode == \"inline\":\n table_dict = table.render(\"python\", output_dir=None)\n tables_python[tableDivID] = table_dict['python']\n elif switched_item_mode == \"separate files\":\n outputFilename = _os.path.join(str(output_dir), \"%s.pkl\" % tableDivID)\n table_dict = table.render(\"python\", output_dir=output_dir)\n #( setting output_dir generates separate files for plots in table )\n table_dict['python'].to_pickle(outputFilename) # a DataFrame\n tables_python[tableDivID] = \"df_%s = pd.read_pickle('%s')\" \\\n % (tableDivID, outputFilename)\n else:\n raise ValueError(\"Invalid `switched_item_mode` render option: %s\" %\n switched_item_mode)\n\n return {'python': tables_python}\n\n else:\n assert(len(self.tables) == 1), \\\n \"Can only render %s format for a non-switched table\" % typ\n return {typ: self.tables[0].render(typ)}\n\n def saveas(self, filename, index=None, verbosity=0):\n \"\"\"\n Saves this workspace table object to a file.\n\n The type of file that is saved is determined automatically by the\n extension of `filename`. Recognized extensions are `pdf` (PDF),\n `tex` (LaTeX), `pkl` (Python pickle) and `html` (HTML). Since this\n object may contain different instances of its data based on switch\n positions, when their are multiple instances the user must specify\n the `index` argument to disambiguate.\n\n Parameters\n ----------\n filename : str\n The destination filename. Its extension determines what type\n of file is saved.\n\n index : int, optional\n An absolute index into the list of different switched \"versions\"\n of this object's data. 
In most cases, the object being saved\n doesn't depend on any switch boards and has only a single \"version\",\n in which caes this can be left as the default.\n\n verbosity : int, optional\n Controls the level of detail printed to stdout.\n\n Returns\n -------\n None\n \"\"\"\n N = len(self.tables)\n\n if filename.endswith(\".html\"):\n if index is None:\n if N == 1:\n index = 0\n else:\n raise ValueError(\"Must supply `index` argument for a non-trivially-switched WorkspaceTable\")\n\n saved_switchposmap = self.switchpos_map\n saved_switchboards = self.switchboards\n saved_switchinds = self.sbSwitchIndices\n\n #Temporarily pretend we don't depend on any switchboards and\n # by default display the user-specified index\n self.switchboards = []\n self.sbSwitchIndices = []\n self.switchpos_map = {(): index}\n\n qtys = {'title': _os.path.splitext(_os.path.basename(str(filename)))[0],\n 'singleItem': self}\n _merge.merge_jinja_template(qtys, filename, templateName=\"standalone.html\",\n verbosity=verbosity)\n\n self.switchpos_map = saved_switchposmap\n self.switchboards = saved_switchboards\n self.sbSwitchIndices = saved_switchinds\n\n elif filename.endswith(\".pkl\"):\n if index is None and N == 1: index = 0\n overrides = {i: \"index%d\" % i for i in range(N)}\n self.set_render_options(switched_item_mode=\"inline\",\n switched_item_id_overrides=overrides)\n render_out = self.render(\"python\")\n\n if index is not None: # just pickle a single element\n to_pickle = render_out['python']['index%d' % index]\n else: # pickle dictionary of all indices\n to_pickle = render_out['python']\n\n with open(str(filename), 'wb') as f:\n _pickle.dump(to_pickle, f)\n\n else:\n if index is None:\n if N == 1:\n index = 0\n else:\n raise ValueError(\"Must supply `index` argument for a non-trivially-switched WorkspaceTable\")\n\n output_dir = _os.path.dirname(str(filename))\n filebase, ext = _os.path.splitext(_os.path.basename(str(filename)))\n\n tempDir = _os.path.join(str(output_dir), \"%s_temp\" % filebase)\n if not _os.path.exists(tempDir): _os.mkdir(tempDir)\n\n self.set_render_options(switched_item_mode=\"separate files\",\n switched_item_id_overrides={index: filebase},\n output_dir=tempDir)\n\n if ext == \".tex\":\n self.set_render_options(render_includes=False,\n leave_includes_src=True)\n elif ext == \".pdf\":\n self.set_render_options(render_includes=True,\n leave_includes_src=False)\n else:\n raise ValueError(\"Unknown file type for %s\" % filename)\n\n self.render(\"latex\") # renders everything in temp dir\n _os.rename(_os.path.join(str(tempDir), \"%s%s\" % (filebase, ext)),\n _os.path.join(str(output_dir), \"%s%s\" % (filebase, ext)))\n\n #remove all other files\n _shutil.rmtree(tempDir)\n\n def _form_table_js(self, tableID, table_html, table_plot_handlers,\n switchboard_init_js):\n\n resizable = self.options.get('resizable', True)\n autosize = self.options.get('autosize', 'none')\n create_table_plots = bool(table_plot_handlers is not None)\n queue_math_render = bool(table_html and '$' in table_html\n and self.options.get('render_math', True))\n add_autosize_handler = bool(switchboard_init_js is not None)\n #only add ws-table-wide autosize handler when initializing the table's switchboard (once\n # per workspace table)\n\n content = \"\"\n\n # put plot handlers *above* switchboard init JS\n if table_plot_handlers: content += table_plot_handlers\n if switchboard_init_js: content += switchboard_init_js\n\n #Table initialization javascript: this will either be within the math-rendering (queued) 
function\n # (if '$' in ret['html']) or else at the *end* of the ready handler (if no math needed rendering).\n init_table_js = ''\n if create_table_plots and resizable: # make a resizable widget on *entire* plot\n # (will only act on first call, but wait until first plots are created)\n init_table_js += ' make_wstable_resizable(\"{tableID}\");\\n'.format(tableID=tableID)\n if add_autosize_handler and autosize == \"continual\":\n init_table_js += ' make_wsobj_autosize(\"{tableID}\");\\n'.format(tableID=tableID)\n if create_table_plots:\n init_table_js += ' trigger_wstable_plot_creation(\"{tableID}\",{initautosize});\\n'.format(\n tableID=tableID, initautosize=str(autosize in (\"initial\", \"continual\")).lower())\n\n if queue_math_render:\n # then there is math text that needs rendering,\n # so queue this, *then* trigger plot creation\n content += (' plotman.enqueue(function() {{ \\n'\n ' renderMathInElement(document.getElementById(\"{tableID}\"), {{ delimiters: [\\n'\n ' {{left: \"$$\", right: \"$$\", display: true}},\\n'\n ' {{left: \"$\", right: \"$\", display: false}},\\n'\n ' ] }} );\\n').format(tableID=tableID)\n content += init_table_js\n content += ' }}, \"Rendering math in {tableID}\" );\\n'.format(tableID=tableID) # end enqueue\n else:\n #Note: this MUST be below plot handler init, when it triggers plot creation\n content += init_table_js\n\n return self._create_onready_handler(content, tableID)\n\n\nclass WorkspacePlot(WorkspaceOutput):\n \"\"\"\n Encapsulates a plot within a `Workspace` context.\n\n A base class which provides the logic required to take a\n single plot.ly figure-generating function and make it into a\n legitimate `WorkspaceOutput` object for using within workspaces.\n \"\"\"\n\n def __init__(self, ws, fn, *args):\n \"\"\"\n Create a new WorkspaceTable. Usually not called directly.\n\n Parameters\n ----------\n ws : Workspace\n The workspace containing the new object.\n\n fn : function\n A table-creating function.\n\n args : various\n The arguments to `fn`.\n \"\"\"\n super(WorkspacePlot, self).__init__(ws)\n '''\n # LSaldyt: removed plotfn for easier pickling? It doesn't seem to be used anywhere\n self.plotfn = fn\n self.initargs = args\n self.figs, self.switchboards, self.sbSwitchIndices, self.switchpos_map = \\\n self.ws.switchedCompute(self.plotfn, *self.initargs)\n '''\n self.initargs = args\n self.figs, self.switchboards, self.sbSwitchIndices, self.switchpos_map = \\\n self.ws.switchedCompute(fn, *self.initargs)\n\n def render(self, typ=\"html\", ID=None):\n \"\"\"\n Renders this plot into the specifed format, specifically for\n embedding it within a larger document.\n\n Parameters\n ----------\n typ : str\n The format to render as. Currently `\"html\"`, `\"latex\"`\n and `\"python\"` are supported.\n\n ID : str, optional\n A base ID to use when rendering. If None, the object's\n persistent ID is used, which usually what you want.\n\n Returns\n -------\n dict\n A dictionary of strings giving the HTML and Javascript portions\n of the embeddable output. 
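An illustrative usage sketch (mirroring\n            what display() does, without the require() wrapper that display() adds)::\n\n                out = wplot.render('html')  # 'wplot': some existing WorkspacePlot (assumed)\n                page = '<script>' + out['js'] + '</script>' + out['html']\n\n            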
Keys are `\"html\"` and `\"js\"`.\n \"\"\"\n resizable = self.options.get('resizable', True)\n valign = self.options.get('valign', 'top')\n overrideIDs = self.options.get('switched_item_id_overrides', {})\n switched_item_mode = self.options.get('switched_item_mode', 'inline')\n output_dir = self.options.get('output_dir', None)\n\n if valign == 'top':\n abswrap_cls = 'abswrap'\n relwrap_cls = 'relwrap'\n elif valign == 'bottom':\n abswrap_cls = 'bot_abswrap'\n relwrap_cls = 'bot_relwrap'\n else:\n raise ValueError(\"Invalid 'valign' value: %s\" % valign)\n\n if ID is None: ID = self.ID\n plotID = \"plot_\" + ID\n\n if typ == \"html\":\n\n #def getPlotlyDivID(html):\n # #could make this more robust using lxml or something later...\n # iStart = html.index('div id=\"')\n # iEnd = html.index('\"', iStart+8)\n # return html[iStart+8:iEnd]\n\n ##pick \"master\" plot, whose resizing dictates the resizing of other plots,\n ## as the largest-height plot.\n #iMaster = None; maxH = 0;\n #for i, fig in enumerate(self.figs):\n # if isinstance(fig, NotApplicable):\n # continue\n # NOTE: master=None below, but it's unclear whether this will later be needed.\n\n # \"handlers only\" mode is when plot is embedded in something\n # larger (e.g. a table) that takes responsibility for putting\n # the JS returned into an on-ready handler and triggering the\n # initialization and creation of the plots.\n handlersOnly = bool(resizable == \"handlers only\")\n\n divHTML = []\n divIDs = []\n divJS = []\n\n for i, fig in enumerate(self.figs):\n plotDivID = plotID + \"_%d\" % i\n if i in overrideIDs: plotDivID = overrideIDs[i]\n\n if isinstance(fig, NotApplicable):\n fig_dict = fig.render(typ, plotDivID)\n else:\n #use auto-sizing (fluid layout)\n fig.plotlyfig.update_layout(template=DEFAULT_PLOTLY_TEMPLATE)\n fig_dict = _plotly_ex.plot_ex(\n fig.plotlyfig, show_link=False, resizable=resizable,\n lock_aspect_ratio=True, master=True, # bool(i==iMaster)\n click_to_display=self.options['click_to_display'],\n link_to=self.options['link_to'], link_to_id=plotDivID,\n rel_figure_dir=_os.path.basename(\n str(output_dir)) if not (output_dir in (None, False)) else None)\n\n if switched_item_mode == 'separate files':\n assert(handlersOnly is False) # doesn't make sense to put only handlers in a separate file\n divJS.append(self._form_plot_js(plotDivID, fig_dict['js'], None))\n else:\n divJS.append(fig_dict['js'])\n\n divIDs.append(plotDivID)\n divHTML.append(\"<div class='%s'>%s</div>\" % (abswrap_cls, fig_dict['html']))\n\n if switched_item_mode == 'inline':\n base = self._render_html(plotID, divHTML, None, divIDs, self.switchpos_map,\n self.switchboards, self.sbSwitchIndices, [relwrap_cls])\n # Don't link_to b/c plots will all have download buttons\n if handlersOnly:\n js = '\\n'.join(divJS) + base['js'] # insert plot handlers above switchboard init JS\n else:\n js = self._form_plot_js(plotID, '\\n'.join(divJS), base['js'])\n\n elif switched_item_mode == 'separate files':\n base = self._render_html(plotID, divHTML, divJS, divIDs, self.switchpos_map,\n self.switchboards, self.sbSwitchIndices, [relwrap_cls],\n None, self.options['output_dir'], embed_figures=False)\n js = self._form_plot_js(plotID, None, base['js'])\n else:\n raise ValueError(\"Invalid `switched_item_mode` render option: %s\" %\n switched_item_mode)\n\n return {'html': base['html'], 'js': js}\n\n elif typ == \"latex\":\n assert('output_dir' in self.options and self.options['output_dir']), \\\n \"Cannot render a plot as 'latex' without a valid \" +\\\n 
\"'output_dir' render option (regardless of switched_item_mode)\"\n\n if switched_item_mode not in ('inline', 'separate files'):\n raise ValueError(\"Invalid `switched_item_mode` render option: %s\" %\n switched_item_mode) # for uniformity with other cases,\n # even though it's not used.\n\n from .mpl_colormaps import plotly_to_matplotlib as _plotly_to_matplotlib\n\n output_dir = self.options['output_dir']\n maxW, maxH = self.options.get('page_size', (6.5, 8.0))\n includes = []\n for i, fig in enumerate(self.figs):\n if isinstance(fig, NotApplicable): continue\n plotDivID = plotID + \"_%d\" % i\n if i in overrideIDs: plotDivID = overrideIDs[i]\n\n if self.options.get('render_includes', True):\n filename = _os.path.join(str(output_dir), plotDivID + \".pdf\")\n _plotly_to_matplotlib(fig, filename)\n\n W, H = maxW, maxH\n if 'mpl_fig_size' in fig.metadata: # added by plotly_to_matplotlib call above\n figW, figH = fig.metadata['mpl_fig_size'] # gives the \"normal size\" of the figure\n W = min(W, figW)\n W = min(H, figH)\n del fig.metadata['mpl_fig_size']\n\n includes.append(\"\\\\includegraphics[width=%.2fin,height=%.2fin,keepaspectratio]{%s}\" %\n (W, H, filename))\n else:\n includes.append(\"%% Didn't render plotID=%s\" % plotDivID)\n return {'latex': '\\n'.join(includes)}\n\n elif typ == \"python\":\n\n if switched_item_mode == 'separate files':\n assert(output_dir), \"Cannot render plots as 'python' in separate\" \\\n + \" files without a valid 'output_dir' render option\"\n\n plots_python = _collections.OrderedDict()\n for i, fig in enumerate(self.figs):\n plotDivID = plotID + \"_%d\" % i\n if i in overrideIDs: plotDivID = overrideIDs[i]\n if isinstance(fig, NotApplicable): continue\n\n if fig.pythonvalue is not None:\n data = {'value': fig.pythonvalue}\n if \"pythonErrorBar\" in fig.metadata:\n data['errorbar'] = fig.metadata['pythonErrorBar']\n else:\n data = {'value': \"Opaque Figure\"}\n\n if switched_item_mode == \"inline\":\n plots_python[plotDivID] = data\n elif switched_item_mode == \"separate files\":\n outputFilename = _os.path.join(str(output_dir), \"%s.pkl\" % plotDivID)\n with open(outputFilename, \"wb\") as fPkl:\n _pickle.dump(data, fPkl)\n plots_python[plotDivID] = \"data_%s = pickle.load(open('%s','rb'))\" \\\n % (plotDivID, outputFilename)\n else:\n raise ValueError(\"Invalid `switched_item_mode` render option: %s\" %\n switched_item_mode)\n\n return {'python': plots_python}\n\n else:\n raise NotImplementedError(\"Invalid rendering format: %s\" % typ)\n\n def saveas(self, filename, index=None, verbosity=0):\n \"\"\"\n Saves this workspace plot object to a file.\n\n The type of file that is saved is determined automatically by the\n extension of `filename`. Recognized extensions are `pdf` (PDF),\n `pkl` (Python pickle) and `html` (HTML). Since this object may\n contain different instances of its data based on switch positions,\n when their are multiple instances the user must specify the `index`\n argument to disambiguate.\n\n Parameters\n ----------\n filename : str\n The destination filename. Its extension determines what type\n of file is saved.\n\n index : int, optional\n An absolute index into the list of different switched \"versions\"\n of this object's data. 
In most cases, the object being saved\n doesn't depend on any switch boards and has only a single \"version\",\n in which caes this can be left as the default.\n\n verbosity : int, optional\n Controls the level of detail printed to stdout.\n\n Returns\n -------\n None\n \"\"\"\n N = len(self.figs)\n\n if filename.endswith(\".html\"):\n #Note: Same as WorkspaceTable except for N\n if index is None:\n if N == 1:\n index = 0\n else:\n raise ValueError(\"Must supply `index` argument for a non-trivially-switched WorkspacePlot\")\n\n saved_switchposmap = self.switchpos_map\n saved_switchboards = self.switchboards\n saved_switchinds = self.sbSwitchIndices\n\n #Temporarily pretend we don't depend on any switchboards and\n # by default display the user-specified index\n self.switchboards = []\n self.sbSwitchIndices = []\n self.switchpos_map = {(): index}\n\n qtys = {'title': _os.path.splitext(_os.path.basename(str(filename)))[0],\n 'singleItem': self}\n _merge.merge_jinja_template(qtys, filename, templateName=\"standalone.html\",\n verbosity=verbosity)\n\n self.switchpos_map = saved_switchposmap\n self.switchboards = saved_switchboards\n self.sbSwitchIndices = saved_switchinds\n\n elif filename.endswith(\".pkl\"):\n #Note: Same as WorkspaceTable except for N\n if index is None and N == 1: index = 0\n overrides = {i: \"index%d\" % i for i in range(N)}\n self.set_render_options(switched_item_mode=\"inline\",\n switched_item_id_overrides=overrides)\n render_out = self.render(\"python\")\n\n if index is not None: # just pickle a single element\n to_pickle = render_out['python']['index%d' % index]\n else: # pickle dictionary of all indices\n to_pickle = render_out['python']\n\n with open(filename, 'wb') as f:\n _pickle.dump(to_pickle, f)\n\n elif filename.endswith(\".tex\"):\n raise ValueError(\"Cannot save a WorkspacePlot as LaTeX - try PDF.\")\n\n elif filename.endswith(\".pdf\"):\n from .mpl_colormaps import plotly_to_matplotlib as _plotly_to_matplotlib\n\n if index is None:\n if N == 1:\n index = 0\n else:\n raise ValueError(\"Must supply `index` argument for a non-trivially-switched WorkspacePlot\")\n _plotly_to_matplotlib(self.figs[index], filename)\n\n else:\n raise ValueError(\"Unknown file type for %s\" % filename)\n\n def _form_plot_js(self, plotID, plot_handlers, switchboard_init_js):\n\n resizable = self.options.get('resizable', True)\n autosize = self.options.get('autosize', 'none')\n create_plots = bool(plot_handlers is not None)\n add_autosize_handler = bool(switchboard_init_js is not None)\n #only add ws-plot-wide autosize handler when initializing the plot's switchboard (once\n # per workspace table)\n\n content = \"\"\n\n #put plot handlers above switchboard init JS\n if plot_handlers: content += plot_handlers\n if switchboard_init_js: content += switchboard_init_js\n\n if resizable: # make a resizable widget\n content += 'make_wsplot_resizable(\"{plotID}\");\\n'.format(plotID=plotID)\n if add_autosize_handler and autosize == \"continual\": # add window resize handler\n content += 'make_wsobj_autosize(\"{plotID}\");\\n'.format(plotID=plotID)\n if create_plots:\n #trigger init & create of plots\n content += 'trigger_wsplot_plot_creation(\"{plotID}\",{initautosize});\\n'.format(\n plotID=plotID, initautosize=str(autosize in (\"initial\", \"continual\")).lower())\n\n return self._create_onready_handler(content, plotID)\n\n\nclass WorkspaceText(WorkspaceOutput):\n \"\"\"\n Encapsulates a block of text within a `Workspace` context.\n\n A base class which provides the logic required to take 
a\n single text-generating function and make it into a legitimate\n `WorkspaceOutput` object for using within workspaces.\n \"\"\"\n\n def __init__(self, ws, fn, *args):\n \"\"\"\n Create a new WorkspaceText object. Usually not called directly.\n\n Parameters\n ----------\n ws : Workspace\n The workspace containing the new object.\n\n fn : function\n A text-creating function.\n\n args : various\n The arguments to `fn`.\n \"\"\"\n super(WorkspaceText, self).__init__(ws)\n self.textfn = fn\n self.initargs = args\n self.texts, self.switchboards, self.sbSwitchIndices, self.switchpos_map = \\\n self.ws.switchedCompute(self.textfn, *self.initargs)\n\n def render(self, typ):\n \"\"\"\n Renders this text block into the specifed format, specifically for\n embedding it within a larger document.\n\n Parameters\n ----------\n typ : str\n The format to render as. Currently `\"html\"`, `\"latex\"`\n and `\"python\"` are supported.\n\n Returns\n -------\n dict\n A dictionary of strings giving the different portions of the\n embeddable output. For `\"html\"`, keys are `\"html\"` and `\"js\"`.\n For `\"latex\"`, there is a single key `\"latex\"`.\n \"\"\"\n\n switched_item_mode = self.options.get('switched_item_mode', 'inline')\n overrideIDs = self.options.get('switched_item_id_overrides', {})\n output_dir = self.options.get('output_dir', None)\n\n ID = self.ID\n textID = \"text_\" + ID\n\n if typ == \"html\":\n\n divHTML = []\n divIDs = []\n divJS = []\n\n for i, text in enumerate(self.texts):\n textDivID = textID + \"_%d\" % i\n if i in overrideIDs: textDivID = overrideIDs[i]\n\n if isinstance(text, NotApplicable):\n text_dict = text.render(\"html\", textDivID)\n else:\n text_dict = text.render(\"html\", textDivID)\n\n if switched_item_mode == 'separate files':\n divJS.append(self._form_text_js(textDivID, text_dict['html'], None))\n #else: divJS is unused\n\n divHTML.append(text_dict['html'])\n divIDs.append(textDivID)\n\n if switched_item_mode == 'inline':\n base = self._render_html(textID, divHTML, None, divIDs, self.switchpos_map,\n self.switchboards, self.sbSwitchIndices, None,\n self.options.get('link_to', None), output_dir) # no JS yet...\n js = self._form_text_js(textID, base['html'], base['js'])\n # creates JS for everything: plot creation, switchboard init, autosize\n\n elif switched_item_mode == 'separate files':\n base = self._render_html(textID, divHTML, divJS, divIDs, self.switchpos_map,\n self.switchboards, self.sbSwitchIndices, None,\n self.options.get('link_to', None), output_dir, embed_figures=False)\n js = self._form_text_js(textID, None, base['js'])\n else:\n raise ValueError(\"Invalid `switched_item_mode` render option: %s\" %\n switched_item_mode)\n\n return {'html': base['html'], 'js': js}\n\n elif typ == \"latex\":\n\n leave_src = self.options.get('leave_includes_src', False)\n render_includes = self.options.get('render_includes', True)\n W, H = self.options.get('page_size', (6.5, 8.0))\n printer = _objs.VerbosityPrinter(1) # TEMP - add verbosity arg?\n\n #Note: in both cases output_dir needs to be the *relative* path\n # between the current directory and the output directory if\n # \\includegraphics statements are to work. If this isn't needed\n # (e.g. 
if just the standalone files are needed) then output_dir\n # can be an absolute path as well.\n\n cwd = _os.getcwd()\n latex_list = []\n for i, text in enumerate(self.texts):\n textDivID = textID + \"_%d\" % i\n if i in overrideIDs: textDivID = overrideIDs[i]\n if isinstance(text, NotApplicable): continue\n\n text_dict = text.render(\"latex\")\n\n if switched_item_mode == 'inline':\n latex_list.append(text_dict['latex'])\n\n elif switched_item_mode == 'separate files':\n if render_includes or leave_src:\n d = {'toLatex': text_dict['latex']}\n _merge.merge_latex_template(d, \"standalone.tex\",\n _os.path.join(str(output_dir), \"%s.tex\" % textDivID))\n\n if render_includes:\n render_dir = output_dir\n assert('latex_cmd' in self.options and self.options['latex_cmd']), \\\n \"Cannot render latex include files without a valid 'latex_cmd' render option\"\n\n try:\n _os.chdir(render_dir)\n latex_cmd = self.options['latex_cmd']\n latex_call = [latex_cmd] + self.options.get('latex_flags', []) \\\n + [\"%s.tex\" % textDivID]\n stdout, stderr, returncode = _merge.process_call(latex_call)\n _merge.evaluate_call(latex_call, stdout, stderr, returncode, printer)\n if not _os.path.isfile(\"%s.pdf\" % textDivID):\n raise Exception(\"File %s.pdf was not created by %s\"\n % (textDivID, latex_cmd))\n if not leave_src: _os.remove(\"%s.tex\" % textDivID)\n _os.remove(\"%s.log\" % textDivID)\n _os.remove(\"%s.aux\" % textDivID)\n except _subprocess.CalledProcessError as e:\n printer.error(\"%s returned code %d \" % (latex_cmd, e.returncode)\n + \"trying to render standalone %s.tex. \" % textDivID\n + \"Check %s.log to see details.\" % textDivID)\n finally:\n _os.chdir(cwd)\n\n latex_list.append(\"\\\\includegraphics[width=%.2fin,height=%.2fin,keepaspectratio]{%s}\" %\n (W, H, _os.path.join(str(output_dir), \"%s.pdf\" % textDivID)))\n elif leave_src:\n latex_list.append(\"\\\\input{%s}\" % _os.path.join(str(output_dir), \"%s.tex\" % textDivID))\n else:\n latex_list.append(\"%% Didn't generated anything for textID=%s\" % textDivID)\n else:\n raise ValueError(\"Invalid `switched_item_mode` render option: %s\" %\n switched_item_mode)\n\n return {'latex': \"\\n\".join(latex_list)}\n\n elif typ == \"python\":\n\n if switched_item_mode == 'separate files':\n assert(output_dir), \"Cannot render texts as 'python' in separate\" \\\n + \" files without a valid 'output_dir' render option\"\n\n texts_python = _collections.OrderedDict()\n for i, text in enumerate(self.texts):\n if isinstance(text, NotApplicable): continue\n textDivID = textID + \"_%d\" % i\n if i in overrideIDs: textDivID = overrideIDs[i]\n\n text_dict = text.render(\"python\")\n\n if switched_item_mode == \"inline\":\n texts_python[textDivID] = text_dict['python']\n elif switched_item_mode == \"separate files\":\n outputFilename = _os.path.join(str(output_dir), \"%s.pkl\" % textDivID)\n with open(outputFilename, 'wb') as f:\n _pickle.dump(text_dict['python'], f)\n texts_python[textDivID] = \"text_%s = pickle.load(open('%s','rb'))\" \\\n % (textDivID, outputFilename)\n else:\n raise ValueError(\"Invalid `switched_item_mode` render option: %s\" %\n switched_item_mode)\n\n return {'python': texts_python}\n\n else:\n assert(len(self.texts) == 1), \\\n \"Can only render %s format for a non-switched text block\" % typ\n return {typ: self.texts[0].render(typ)}\n\n def saveas(self, filename, index=None, verbosity=0):\n \"\"\"\n Saves this workspace text block object to a file.\n\n The type of file that is saved is determined automatically by the\n extension of 
`filename`. Recognized extensions are `pdf` (PDF),\n `tex` (LaTeX), `pkl` (Python pickle) and `html` (HTML). Since this\n object may contain different instances of its data based on switch\n positions, when their are multiple instances the user must specify\n the `index` argument to disambiguate.\n\n Parameters\n ----------\n filename : str\n The destination filename. Its extension determines what type\n of file is saved.\n\n index : int, optional\n An absolute index into the list of different switched \"versions\"\n of this object's data. In most cases, the object being saved\n doesn't depend on any switch boards and has only a single \"version\",\n in which caes this can be left as the default.\n\n verbosity : int, optional\n Controls the level of detail printed to stdout.\n\n Returns\n -------\n None\n \"\"\"\n N = len(self.texts)\n\n if filename.endswith(\".html\"):\n if index is None:\n if N == 1:\n index = 0\n else:\n raise ValueError(\"Must supply `index` argument for a non-trivially-switched WorkspaceText\")\n\n saved_switchposmap = self.switchpos_map\n saved_switchboards = self.switchboards\n saved_switchinds = self.sbSwitchIndices\n\n #Temporarily pretend we don't depend on any switchboards and\n # by default display the user-specified index\n self.switchboards = []\n self.sbSwitchIndices = []\n self.switchpos_map = {(): index}\n\n qtys = {'title': _os.path.splitext(_os.path.basename(str(filename)))[0],\n 'singleItem': self}\n _merge.merge_jinja_template(qtys, filename, templateName=\"standalone.html\",\n verbosity=verbosity)\n\n self.switchpos_map = saved_switchposmap\n self.switchboards = saved_switchboards\n self.sbSwitchIndices = saved_switchinds\n\n elif filename.endswith(\".pkl\"):\n if index is None and N == 1: index = 0\n overrides = {i: \"index%d\" % i for i in range(N)}\n self.set_render_options(switched_item_mode=\"inline\",\n switched_item_id_overrides=overrides)\n render_out = self.render(\"python\")\n\n if index is not None: # just pickle a single element\n to_pickle = render_out['python']['index%d' % index]\n else: # pickle dictionary of all indices\n to_pickle = render_out['python']\n\n with open(filename, 'wb') as f:\n _pickle.dump(to_pickle, f)\n\n else:\n if index is None:\n if N == 1:\n index = 0\n else:\n raise ValueError(\"Must supply `index` argument for a non-trivially-switched WorkspaceText\")\n\n output_dir = _os.path.dirname(filename)\n filebase, ext = _os.path.splitext(_os.path.basename(filename))\n\n tempDir = _os.path.join(str(output_dir), \"%s_temp\" % filebase)\n if not _os.path.exists(tempDir): _os.mkdir(tempDir)\n\n self.set_render_options(switched_item_mode=\"separate files\",\n switched_item_id_overrides={index: filebase},\n output_dir=tempDir)\n\n if ext == \".tex\":\n self.set_render_options(render_includes=False,\n leave_includes_src=True)\n elif ext == \".pdf\":\n self.set_render_options(render_includes=True,\n leave_includes_src=False)\n else:\n raise ValueError(\"Unknown file type for %s\" % filename)\n\n self.render(\"latex\") # renders everything in temp dir\n _os.rename(_os.path.join(str(tempDir), \"%s%s\" % (filebase, ext)),\n _os.path.join(str(output_dir), \"%s%s\" % (filebase, ext)))\n\n #remove all other files\n _shutil.rmtree(tempDir)\n\n def _form_text_js(self, textID, text_html, switchboard_init_js):\n\n content = \"\"\n if switchboard_init_js: content += switchboard_init_js\n\n queue_math_render = bool(text_html and '$' in text_html\n and self.options.get('render_math', True))\n\n if text_html is not None:\n init_text_js = (\n 
'el = $(\"#{textid}\");\\n'\n 'if(el.hasClass(\"pygsti-wsoutput-group\")) {{\\n'\n ' el.children(\"div.single_switched_value\").each( function(i,el){{\\n'\n ' CollapsibleLists.applyTo( $(el).find(\"ul\").first()[0] );\\n'\n ' }});\\n'\n '}} else if(el.hasClass(\"single_switched_value\")){{\\n'\n ' CollapsibleLists.applyTo(el[0]);\\n'\n '}}\\n'\n 'caption = el.closest(\"figure\").children(\"figcaption:first\");\\n'\n 'caption.css(\"width\", Math.round(el.width()*0.9) + \"px\");\\n'\n ).format(textid=textID)\n else:\n init_text_js = \"\" # no per-div init needed\n\n if queue_math_render:\n # then there is math text that needs rendering,\n # so queue this, *then* trigger plot creation\n content += (' plotman.enqueue(function() {{ \\n'\n ' renderMathInElement(document.getElementById(\"{textID}\"), {{ delimiters: [\\n'\n ' {{left: \"$$\", right: \"$$\", display: true}},\\n'\n ' {{left: \"$\", right: \"$\", display: false}},\\n'\n ' ] }} );\\n').format(textID=textID)\n content += init_text_js\n content += ' }}, \"Rendering math in {textID}\" );\\n'.format(textID=textID) # end enqueue\n else:\n content += init_text_js\n\n return self._create_onready_handler(content, textID)\n",
"import unittest\nimport itertools\nimport time\nimport sys\nimport pickle\nimport numpy as np\nfrom mpinoseutils import *\n\nimport pygsti\nfrom pygsti.modelpacks.legacy import std1Q_XYI as std\nfrom pygsti.objects import profiler\n\ng_maxLengths = [1,2,4,8]\ng_numSubTrees = 3\n\ndef assertGatesetsInSync(mdl, comm):\n if comm is not None:\n bc = mdl if comm.Get_rank() == 0 else None\n mdl_cmp = comm.bcast(bc, root=0)\n assert(mdl.frobeniusdist(mdl_cmp) < 1e-6)\n\n\ndef runAnalysis(obj, ds, prepStrs, effectStrs, gsTarget, lsgstStringsToUse,\n useFreqWeightedChiSq=False,\n minProbClipForWeighting=1e-4, fidPairList=None,\n comm=None, distributeMethod=\"circuits\"):\n\n #Run LGST to get starting model\n assertGatesetsInSync(gsTarget, comm)\n mdl_lgst = pygsti.do_lgst(ds, prepStrs, effectStrs, gsTarget,\n svdTruncateTo=gsTarget.dim, verbosity=3)\n\n assertGatesetsInSync(mdl_lgst, comm)\n mdl_lgst_go = pygsti.gaugeopt_to_target(mdl_lgst,gsTarget)\n\n assertGatesetsInSync(mdl_lgst_go, comm)\n\n #Run full iterative LSGST\n tStart = time.time()\n if obj == \"chi2\":\n all_gs_lsgst = pygsti.do_iterative_mc2gst(\n ds, mdl_lgst_go, lsgstStringsToUse,\n minProbClipForWeighting=minProbClipForWeighting,\n probClipInterval=(-1e5,1e5),\n verbosity=1, memLimit=3*(1024)**3, returnAll=True,\n useFreqWeightedChiSq=useFreqWeightedChiSq, comm=comm,\n distributeMethod=distributeMethod)\n elif obj == \"logl\":\n all_gs_lsgst = pygsti.do_iterative_mlgst(\n ds, mdl_lgst_go, lsgstStringsToUse,\n minProbClip=minProbClipForWeighting,\n probClipInterval=(-1e5,1e5),\n verbosity=1, memLimit=3*(1024)**3, returnAll=True,\n useFreqWeightedChiSq=useFreqWeightedChiSq, comm=comm,\n distributeMethod=distributeMethod)\n\n tEnd = time.time()\n print(\"Time = \",(tEnd-tStart)/3600.0,\"hours\")\n\n return all_gs_lsgst\n\n\ndef runOneQubit(obj, ds, lsgstStrings, comm=None, distributeMethod=\"circuits\"):\n #specs = pygsti.construction.build_spam_specs(\n # std.fiducials, prep_labels=std.target_model().get_prep_labels(),\n # effect_labels=std.target_model().get_effect_labels())\n\n return runAnalysis(obj, ds, std.fiducials, std.fiducials, std.target_model(),\n lsgstStrings, comm=comm,\n distributeMethod=distributeMethod)\n\n\ndef create_fake_dataset(comm):\n fidPairList = None\n maxLengths = [1,2,4,8,16]\n nSamples = 1000\n #specs = pygsti.construction.build_spam_specs(\n # std.fiducials, prep_labels=std.target_model().get_prep_labels(),\n # effect_labels=std.target_model().get_effect_labels())\n #rhoStrs, EStrs = pygsti.construction.get_spam_strs(specs)\n\n rhoStrs = EStrs = std.fiducials\n lgstStrings = pygsti.construction.list_lgst_circuits(\n rhoStrs, EStrs, list(std.target_model().operations.keys()))\n lsgstStrings = pygsti.construction.make_lsgst_lists(\n list(std.target_model().operations.keys()), rhoStrs, EStrs,\n std.germs, maxLengths, fidPairList )\n\n lsgstStringsToUse = lsgstStrings\n allRequiredStrs = pygsti.remove_duplicates(lgstStrings + lsgstStrings[-1])\n\n if comm is None or comm.Get_rank() == 0:\n mdl_dataGen = std.target_model().depolarize(op_noise=0.1)\n dsFake = pygsti.construction.generate_fake_data(\n mdl_dataGen, allRequiredStrs, nSamples, sampleError=\"multinomial\",\n seed=1234)\n dsFake = comm.bcast(dsFake, root=0)\n else:\n dsFake = comm.bcast(None, root=0)\n\n #for mdl in dsFake:\n # if abs(dsFake[mdl]['0']-dsFake_cmp[mdl]['0']) > 0.5:\n # print(\"DS DIFF: \",mdl, dsFake[mdl]['0'], \"vs\", dsFake_cmp[mdl]['0'] )\n return dsFake, lsgstStrings\n\n\n@mpitest(4)\ndef test_MPI_products(comm):\n 
assert(comm.Get_size() == 4)\n #Create some model\n mdl = std.target_model()\n\n #Remove spam elements so product calculations have element indices <=> product indices\n del mdl.preps['rho0']\n del mdl.povms['Mdefault']\n\n mdl.kick(0.1,seed=1234)\n\n #Get some operation sequences\n maxLengths = [1,2,4,8]\n gstrs = pygsti.construction.make_lsgst_experiment_list(\n list(std.target_model().operations.keys()), std.fiducials, std.fiducials, std.germs, maxLengths)\n tree,lookup,outcome_lookup = mdl.bulk_evaltree(gstrs)\n split_tree = tree.copy()\n split_lookup = split_tree.split(lookup,numSubTrees=g_numSubTrees)\n\n # Check wrtFilter functionality in dproduct\n some_wrtFilter = [0,2,3,5,10]\n for s in gstrs[0:20]:\n result = mdl._fwdsim().dproduct(s, wrtFilter=some_wrtFilter)\n chk_result = mdl.dproduct(s) #no filtering\n for ii,i in enumerate(some_wrtFilter):\n assert(np.linalg.norm(chk_result[i]-result[ii]) < 1e-6)\n taken_chk_result = chk_result.take( some_wrtFilter, axis=0 )\n assert(np.linalg.norm(taken_chk_result-result) < 1e-6)\n\n\n #Check bulk products\n\n #bulk_product - no parallelization unless tree is split\n serial = mdl.bulk_product(tree, bScale=False)\n parallel = mdl.bulk_product(tree, bScale=False, comm=comm)\n assert(np.linalg.norm(serial-parallel) < 1e-6)\n\n serial_scl, sscale = mdl.bulk_product(tree, bScale=True)\n parallel, pscale = mdl.bulk_product(tree, bScale=True, comm=comm)\n assert(np.linalg.norm(serial_scl*sscale[:,None,None] -\n parallel*pscale[:,None,None]) < 1e-6)\n\n # will use a split tree to parallelize\n parallel = mdl.bulk_product(split_tree, bScale=False, comm=comm)\n for i,opstr in enumerate(gstrs):\n assert(np.linalg.norm(serial[lookup[i]]-parallel[split_lookup[i]]) < 1e-6)\n\n parallel, pscale = mdl.bulk_product(split_tree, bScale=True, comm=comm)\n for i,opstr in enumerate(gstrs):\n assert(np.linalg.norm(serial_scl[lookup[i]]*sscale[lookup[i],None,None] -\n parallel[split_lookup[i]]*pscale[split_lookup[i],None,None]) < 1e-6)\n\n\n #bulk_dproduct - no split tree => parallel by col\n serial = mdl.bulk_dproduct(tree, bScale=False)\n parallel = mdl.bulk_dproduct(tree, bScale=False, comm=comm)\n assert(np.linalg.norm(serial-parallel) < 1e-6)\n\n serial_scl, sscale = mdl.bulk_dproduct(tree, bScale=True)\n parallel, pscale = mdl.bulk_dproduct(tree, bScale=True, comm=comm)\n assert(np.linalg.norm(serial_scl*sscale[:,None,None,None] -\n parallel*pscale[:,None,None,None]) < 1e-6)\n\n # will just ignore a split tree for now (just parallel by col)\n parallel = mdl.bulk_dproduct(split_tree, bScale=False, comm=comm)\n for i,opstr in enumerate(gstrs):\n assert(np.linalg.norm(serial[lookup[i]] - parallel[split_lookup[i]]) < 1e-6)\n\n parallel, pscale = mdl.bulk_dproduct(split_tree, bScale=True, comm=comm)\n for i,opstr in enumerate(gstrs):\n assert(np.linalg.norm(serial_scl[lookup[i]]*sscale[lookup[i],None,None,None] -\n parallel[split_lookup[i]]*pscale[split_lookup[i],None,None,None]) < 1e-6)\n\n\n\n #bulk_hproduct - no split tree => parallel by col\n serial = mdl.bulk_hproduct(tree, bScale=False)\n parallel = mdl.bulk_hproduct(tree, bScale=False, comm=comm)\n assert(np.linalg.norm(serial-parallel) < 1e-6)\n\n serial_scl, sscale = mdl.bulk_hproduct(tree, bScale=True)\n parallel, pscale = mdl.bulk_hproduct(tree, bScale=True, comm=comm)\n assert(np.linalg.norm(serial_scl*sscale[:,None,None,None,None] -\n parallel*pscale[:,None,None,None,None]) < 1e-6)\n\n # will just ignore a split tree for now (just parallel by col)\n parallel = mdl.bulk_hproduct(split_tree, 
bScale=False, comm=comm)\n for i,opstr in enumerate(gstrs):\n assert(np.linalg.norm(serial[lookup[i]] - parallel[split_lookup[i]]) < 1e-6)\n\n parallel, pscale = mdl.bulk_hproduct(split_tree, bScale=True, comm=comm)\n for i,opstr in enumerate(gstrs):\n assert(np.linalg.norm(serial_scl[lookup[i]]*sscale[lookup[i],None,None,None,None] -\n parallel[split_lookup[i]]*pscale[split_lookup[i],None,None,None,None]) < 1e-6)\n\n\n\n#OLD: pr functions deprecated\n#@mpitest(4)\n#def test_MPI_pr(comm):\n#\n# #Create some model\n# mdl = std.target_model()\n# mdl.kick(0.1,seed=1234)\n#\n# #Get some operation sequences\n# maxLengths = g_maxLengths\n# gstrs = pygsti.construction.make_lsgst_experiment_list(\n# list(std.target_model().operations.keys()), std.fiducials, std.fiducials, std.germs, maxLengths)\n# tree,lookup,outcome_lookup = mdl.bulk_evaltree(gstrs)\n# split_tree = tree.copy()\n# lookup = split_tree.split(lookup,numSubTrees=g_numSubTrees)\n#\n# #Check single-spam-label bulk probabilities\n#\n# # non-split tree => automatically adjusts wrtBlockSize to accomodate\n# # the number of processors\n# serial = mdl.bulk_pr('0', tree, clipTo=(-1e6,1e6))\n# parallel = mdl.bulk_pr('0', tree, clipTo=(-1e6,1e6), comm=comm)\n# assert(np.linalg.norm(serial-parallel) < 1e-6)\n#\n# serial = mdl.bulk_dpr('0', tree, clipTo=(-1e6,1e6))\n# parallel = mdl.bulk_dpr('0', tree, clipTo=(-1e6,1e6), comm=comm)\n# assert(np.linalg.norm(serial-parallel) < 1e-6)\n#\n# serial, sp = mdl.bulk_dpr('0', tree, returnPr=True, clipTo=(-1e6,1e6))\n# parallel, pp = mdl.bulk_dpr('0', tree, returnPr=True, clipTo=(-1e6,1e6), comm=comm)\n# assert(np.linalg.norm(serial-parallel) < 1e-6)\n# assert(np.linalg.norm(sp-pp) < 1e-6)\n#\n# serial, sdp, sp = mdl.bulk_hpr('0', tree, returnPr=True, returnDeriv=True,\n# clipTo=(-1e6,1e6))\n# parallel, pdp, pp = mdl.bulk_hpr('0', tree, returnPr=True,\n# returnDeriv=True, clipTo=(-1e6,1e6), comm=comm)\n# assert(np.linalg.norm(serial-parallel) < 1e-6)\n# assert(np.linalg.norm(sdp-pdp) < 1e-6)\n# assert(np.linalg.norm(sp-pp) < 1e-6)\n#\n#\n# # split tree => distribures on sub-trees prior to adjusting\n# # wrtBlockSize to accomodate remaining processors\n# serial = mdl.bulk_pr('0', tree, clipTo=(-1e6,1e6))\n# parallel = mdl.bulk_pr('0', split_tree, clipTo=(-1e6,1e6), comm=comm)\n# parallel = split_tree.permute_computation_to_original(parallel)\n# assert(np.linalg.norm(serial-parallel) < 1e-6)\n#\n# serial = mdl.bulk_dpr('0', tree, clipTo=(-1e6,1e6))\n# parallel = mdl.bulk_dpr('0', split_tree, clipTo=(-1e6,1e6), comm=comm)\n# parallel = split_tree.permute_computation_to_original(parallel)\n# assert(np.linalg.norm(serial-parallel) < 1e-6)\n#\n# serial, sp = mdl.bulk_dpr('0', tree, returnPr=True, clipTo=(-1e6,1e6))\n# parallel, pp = mdl.bulk_dpr('0', split_tree, returnPr=True, clipTo=(-1e6,1e6), comm=comm)\n# parallel = split_tree.permute_computation_to_original(parallel)\n# pp = split_tree.permute_computation_to_original(pp)\n# assert(np.linalg.norm(serial-parallel) < 1e-6)\n# assert(np.linalg.norm(sp-pp) < 1e-6)\n#\n# serial, sdp, sp = mdl.bulk_hpr('0', tree, returnPr=True, returnDeriv=True,\n# clipTo=(-1e6,1e6))\n# parallel, pdp, pp = mdl.bulk_hpr('0', split_tree, returnPr=True,\n# returnDeriv=True, clipTo=(-1e6,1e6), comm=comm)\n# parallel = split_tree.permute_computation_to_original(parallel)\n# pdp = split_tree.permute_computation_to_original(pdp)\n# pp = split_tree.permute_computation_to_original(pp)\n# assert(np.linalg.norm(serial-parallel) < 1e-6)\n# assert(np.linalg.norm(sdp-pdp) < 1e-6)\n# 
assert(np.linalg.norm(sp-pp) < 1e-6)\n\n\n\n@mpitest(4)\ndef test_MPI_probs(comm):\n\n #Create some model\n mdl = std.target_model()\n mdl.kick(0.1,seed=1234)\n\n #Get some operation sequences\n maxLengths = g_maxLengths\n gstrs = pygsti.construction.make_lsgst_experiment_list(\n list(std.target_model().operations.keys()), std.fiducials, std.fiducials, std.germs, maxLengths)\n #tree,lookup,outcome_lookup = mdl.bulk_evaltree(gstrs)\n #split_tree = tree.copy()\n #lookup = split_tree.split(lookup, numSubTrees=g_numSubTrees)\n\n #Check all-spam-label bulk probabilities\n def compare_prob_dicts(a,b,indices=None):\n for opstr in gstrs:\n for outcome in a[opstr].keys():\n if indices is None:\n assert(np.linalg.norm(a[opstr][outcome] -b[opstr][outcome]) < 1e-6)\n else:\n for i in indices:\n assert(np.linalg.norm(a[opstr][outcome][i] -b[opstr][outcome][i]) < 1e-6)\n\n # non-split tree => automatically adjusts wrtBlockSize to accomodate\n # the number of processors\n serial = mdl.bulk_probs(gstrs, clipTo=(-1e6,1e6))\n parallel = mdl.bulk_probs(gstrs, clipTo=(-1e6,1e6), comm=comm)\n compare_prob_dicts(serial,parallel)\n\n serial = mdl.bulk_dprobs(gstrs, clipTo=(-1e6,1e6))\n parallel = mdl.bulk_dprobs(gstrs, clipTo=(-1e6,1e6), comm=comm)\n compare_prob_dicts(serial,parallel)\n\n serial = mdl.bulk_dprobs(gstrs, returnPr=True, clipTo=(-1e6,1e6))\n parallel = mdl.bulk_dprobs(gstrs, returnPr=True, clipTo=(-1e6,1e6), comm=comm)\n compare_prob_dicts(serial,parallel,(0,1))\n\n serial = mdl.bulk_hprobs(gstrs, returnPr=True, returnDeriv=True,\n clipTo=(-1e6,1e6))\n parallel = mdl.bulk_hprobs(gstrs, returnPr=True,\n returnDeriv=True, clipTo=(-1e6,1e6), comm=comm)\n compare_prob_dicts(serial,parallel,(0,1,2))\n\n ##OLD: cannot tell bulk_probs to use a split tree anymore (just give list)\n ## split tree => distribures on sub-trees prior to adjusting\n ## wrtBlockSize to accomodate remaining processors\n #serial = mdl.bulk_probs(tree, clipTo=(-1e6,1e6))\n #parallel = mdl.bulk_probs(split_tree, clipTo=(-1e6,1e6), comm=comm)\n #for sl in serial:\n # p = split_tree.permute_computation_to_original(parallel[sl])\n # assert(np.linalg.norm(serial[sl]-p) < 1e-6)\n #\n #serial = mdl.bulk_dprobs(tree, clipTo=(-1e6,1e6))\n #parallel = mdl.bulk_dprobs(split_tree, clipTo=(-1e6,1e6), comm=comm)\n #for sl in serial:\n # p = split_tree.permute_computation_to_original(parallel[sl])\n # assert(np.linalg.norm(serial[sl]-p) < 1e-6)\n #\n #serial = mdl.bulk_dprobs(tree, returnPr=True, clipTo=(-1e6,1e6))\n #parallel = mdl.bulk_dprobs(split_tree, returnPr=True, clipTo=(-1e6,1e6), comm=comm)\n #for sl in serial:\n # p0 = split_tree.permute_computation_to_original(parallel[sl][0])\n # p1 = split_tree.permute_computation_to_original(parallel[sl][1])\n # assert(np.linalg.norm(serial[sl][0]-p0) < 1e-6)\n # assert(np.linalg.norm(serial[sl][1]-p1) < 1e-6)\n #\n #serial = mdl.bulk_hprobs(tree, returnPr=True, returnDeriv=True,\n # clipTo=(-1e6,1e6))\n #parallel = mdl.bulk_hprobs(split_tree, returnPr=True,\n # returnDeriv=True, clipTo=(-1e6,1e6), comm=comm)\n #for sl in serial:\n # p0 = split_tree.permute_computation_to_original(parallel[sl][0])\n # p1 = split_tree.permute_computation_to_original(parallel[sl][1])\n # p2 = split_tree.permute_computation_to_original(parallel[sl][2])\n # assert(np.linalg.norm(serial[sl][0]-p0) < 1e-6)\n # assert(np.linalg.norm(serial[sl][1]-p1) < 1e-6)\n # assert(np.linalg.norm(serial[sl][2]-p2) < 1e-6)\n\n\n\n@mpitest(4)\ndef test_MPI_fills(comm):\n\n #Create some model\n mdl = std.target_model()\n 
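# (annotation added for clarity; not in the original source) kick() is believed to randomly perturb the model parameters (strength 0.1, fixed seed) so the fill routines are exercised on a non-ideal model.\n 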
mdl.kick(0.1,seed=1234)\n\n #Get some operation sequences\n maxLengths = g_maxLengths\n gstrs = pygsti.construction.make_lsgst_experiment_list(\n list(std.target_model().operations.keys()), std.fiducials, std.fiducials, std.germs, maxLengths)\n tree,lookup,outcome_lookup = mdl.bulk_evaltree(gstrs)\n split_tree = tree.copy()\n split_lookup = split_tree.split(lookup,numSubTrees=g_numSubTrees)\n\n\n #Check fill probabilities\n nEls = tree.num_final_elements()\n nCircuits = len(gstrs)\n nDerivCols = mdl.num_params()\n\n #Get serial results\n vhp_serial = np.empty( (nEls,nDerivCols,nDerivCols),'d')\n vdp_serial = np.empty( (nEls,nDerivCols), 'd' )\n vp_serial = np.empty( nEls, 'd' )\n\n vhp_serial2 = np.empty( (nEls,nDerivCols,nDerivCols),'d')\n vdp_serial2 = np.empty( (nEls,nDerivCols), 'd' )\n vp_serial2 = np.empty( nEls, 'd' )\n\n mdl.bulk_fill_probs(vp_serial, tree,\n (-1e6,1e6), comm=None)\n\n mdl.bulk_fill_dprobs(vdp_serial, tree,\n vp_serial2, (-1e6,1e6), comm=None,\n wrtBlockSize=None)\n assert(np.linalg.norm(vp_serial2-vp_serial) < 1e-6)\n\n mdl.bulk_fill_hprobs(vhp_serial, tree,\n vp_serial2, vdp_serial2, (-1e6,1e6), comm=None,\n wrtBlockSize1=None, wrtBlockSize2=None)\n assert(np.linalg.norm(vp_serial2-vp_serial) < 1e-6)\n assert(np.linalg.norm(vdp_serial2-vdp_serial) < 1e-6)\n\n\n #Check serial results with a split tree, just to be sure\n mdl.bulk_fill_probs(vp_serial2, split_tree,\n (-1e6,1e6), comm=None)\n for i,opstr in enumerate(gstrs):\n assert(np.linalg.norm(vp_serial[ lookup[i] ] -\n vp_serial2[ split_lookup[i] ]) < 1e-6)\n\n mdl.bulk_fill_dprobs(vdp_serial2, split_tree,\n vp_serial2, (-1e6,1e6), comm=None,\n wrtBlockSize=None)\n for i,opstr in enumerate(gstrs):\n assert(np.linalg.norm(vp_serial[ lookup[i] ] -\n vp_serial2[ split_lookup[i] ]) < 1e-6)\n assert(np.linalg.norm(vdp_serial[ lookup[i] ] -\n vdp_serial2[ split_lookup[i] ]) < 1e-6)\n\n mdl.bulk_fill_hprobs(vhp_serial2, split_tree,\n vp_serial2, vdp_serial2, (-1e6,1e6), comm=None,\n wrtBlockSize1=None, wrtBlockSize2=None)\n for i,opstr in enumerate(gstrs):\n assert(np.linalg.norm(vp_serial[ lookup[i] ] -\n vp_serial2[ split_lookup[i] ]) < 1e-6)\n assert(np.linalg.norm(vdp_serial[ lookup[i] ] -\n vdp_serial2[ split_lookup[i] ]) < 1e-6)\n assert(np.linalg.norm(vhp_serial[ lookup[i] ] -\n vhp_serial2[ split_lookup[i] ]) < 1e-6)\n\n #Get parallel results - with and without split tree\n vhp_parallel = np.empty( (nEls,nDerivCols,nDerivCols),'d')\n vdp_parallel = np.empty( (nEls,nDerivCols), 'd' )\n vp_parallel = np.empty( nEls, 'd' )\n\n for tstTree,tstLookup in zip([tree, split_tree],[lookup,split_lookup]):\n\n mdl.bulk_fill_probs(vp_parallel, tstTree,\n (-1e6,1e6), comm=comm)\n for i,opstr in enumerate(gstrs):\n assert(np.linalg.norm(vp_parallel[ tstLookup[i] ] -\n vp_serial[ lookup[i] ]) < 1e-6)\n\n for blkSize in [None, 4]:\n mdl.bulk_fill_dprobs(vdp_parallel, tstTree,\n vp_parallel, (-1e6,1e6), comm=comm,\n wrtBlockSize=blkSize)\n for i,opstr in enumerate(gstrs):\n assert(np.linalg.norm(vp_parallel[ tstLookup[i] ] -\n vp_serial[ lookup[i] ]) < 1e-6)\n assert(np.linalg.norm(vdp_parallel[ tstLookup[i] ] -\n vdp_serial[ lookup[i] ]) < 1e-6)\n\n for blkSize2 in [None, 2, 4]:\n mdl.bulk_fill_hprobs(vhp_parallel, tstTree,\n vp_parallel, vdp_parallel, (-1e6,1e6), comm=comm,\n wrtBlockSize1=blkSize, wrtBlockSize2=blkSize2)\n for i,opstr in enumerate(gstrs):\n assert(np.linalg.norm(vp_parallel[ tstLookup[i] ] -\n vp_serial[ lookup[i] ]) < 1e-6)\n assert(np.linalg.norm(vdp_parallel[ tstLookup[i] ] -\n vdp_serial[ lookup[i] ]) < 
1e-6)\n assert(np.linalg.norm(vhp_parallel[ tstLookup[i] ] -\n vhp_serial[ lookup[i] ]) < 1e-6)\n\n #Test Serial vs Parallel use of wrtFilter\n some_wrtFilter = [0,1,2,3,4,5,6,7] #must be contiguous now - not arbitraray\n some_wrtFilter2 = [6,7,8,9,10,11,12] #must be contiguous now - not arbitraray\n vhp_parallelF = np.empty( (nEls,nDerivCols,len(some_wrtFilter)),'d')\n vhp_parallelF2 = np.empty( (nEls,len(some_wrtFilter),len(some_wrtFilter2)),'d')\n vdp_parallelF = np.empty( (nEls,len(some_wrtFilter)), 'd' )\n\n for tstTree,tstLookup in zip([tree, split_tree],[lookup, split_lookup]):\n\n mdl._fwdsim().bulk_fill_dprobs(vdp_parallelF, tstTree,\n None, (-1e6,1e6), comm=comm,\n wrtFilter=some_wrtFilter, wrtBlockSize=None)\n for k,opstr in enumerate(gstrs):\n for ii,i in enumerate(some_wrtFilter):\n assert(np.linalg.norm(vdp_serial[lookup[k],i]-vdp_parallelF[tstLookup[k],ii]) < 1e-6)\n taken_result = vdp_serial.take( some_wrtFilter, axis=1 )\n for k,opstr in enumerate(gstrs):\n assert(np.linalg.norm(taken_result[lookup[k]]-vdp_parallelF[tstLookup[k]]) < 1e-6)\n\n mdl._fwdsim().bulk_fill_hprobs(vhp_parallelF, tstTree,\n None, None,None, (-1e6,1e6), comm=comm,\n wrtFilter2=some_wrtFilter, wrtBlockSize2=None)\n for k,opstr in enumerate(gstrs):\n for ii,i in enumerate(some_wrtFilter):\n assert(np.linalg.norm(vhp_serial[lookup[k],:,i]-vhp_parallelF[tstLookup[k],:,ii]) < 1e-6)\n taken_result = vhp_serial.take( some_wrtFilter, axis=2 )\n for k,opstr in enumerate(gstrs):\n assert(np.linalg.norm(taken_result[lookup[k]]-vhp_parallelF[tstLookup[k]]) < 1e-6)\n\n mdl._fwdsim().bulk_fill_hprobs(vhp_parallelF2, tstTree,\n None, None,None, (-1e6,1e6), comm=comm,\n wrtFilter1=some_wrtFilter, wrtFilter2=some_wrtFilter2)\n for k,opstr in enumerate(gstrs):\n for ii,i in enumerate(some_wrtFilter):\n for jj,j in enumerate(some_wrtFilter2):\n assert(np.linalg.norm(vhp_serial[lookup[k],i,j]-vhp_parallelF2[tstLookup[k],ii,jj]) < 1e-6)\n taken_result = vhp_serial.take( some_wrtFilter, axis=1 ).take( some_wrtFilter2, axis=2)\n for k,opstr in enumerate(gstrs):\n assert(np.linalg.norm(taken_result[lookup[k]]-vhp_parallelF2[tstLookup[k]]) < 1e-6)\n\n@mpitest(4)\ndef test_MPI_compute_cache(comm):\n #try to run hard-to-reach cases where there are lots of processors compared to\n # the number of elements being computed:\n from pygsti.modelpacks.legacy import std1Q_XY #nice b/c only 2 gates\n\n #Create some model\n mdl = std.target_model()\n mdl.kick(0.1,seed=1234)\n\n #Get some operation sequences\n gstrs = pygsti.construction.circuit_list([('Gx',), ('Gy')])\n tree,lookup,outcome_lookup = mdl.bulk_evaltree(gstrs)\n\n #Check fill probabilities\n nEls = tree.num_final_elements()\n nCircuits = len(gstrs)\n nDerivCols = mdl.num_params()\n print(\"NUMS = \",nEls,nCircuits,nDerivCols)\n\n #Get serial results\n vhp_serial = np.empty( (nEls,nDerivCols,nDerivCols),'d')\n\n d = mdl.dim\n slc1 = slice(0,2)\n slc2 = slice(0,2)\n scache = np.empty(nEls,'d')\n pcache = np.empty((nEls,d,d),'d')\n dcache1 = np.empty((nEls,2,d,d),'d')\n dcache2 = np.empty((nEls,2,d,d),'d')\n hcache = mdl._fwdsim()._compute_hproduct_cache(tree, pcache, dcache1, dcache2, scache,\n comm, wrtSlice1=slc1, wrtSlice2=slc2)\n\n #without comm\n hcache_chk = mdl._fwdsim()._compute_hproduct_cache(tree, pcache, dcache1, dcache2, scache,\n comm=None, wrtSlice1=slc1, wrtSlice2=slc2)\n assert(np.linalg.norm(hcache-hcache_chk) < 1e-6)\n\n\n\n@mpitest(4)\ndef test_MPI_by_block(comm):\n\n #Create some model\n if comm is None or comm.Get_rank() == 0:\n mdl = 
std.target_model()\n mdl.kick(0.1,seed=1234)\n mdl = comm.bcast(mdl, root=0)\n else:\n mdl = comm.bcast(None, root=0)\n\n #Get some operation sequences\n maxLengths = g_maxLengths\n gstrs = pygsti.construction.make_lsgst_experiment_list(\n list(std.target_model().operations.keys()), std.fiducials, std.fiducials, std.germs, maxLengths)\n tree,lookup,outcome_lookkup = mdl.bulk_evaltree(gstrs)\n #split_tree = tree.copy()\n #split_lookup = split_tree.split(lookup,numSubTrees=g_numSubTrees)\n\n\n #Check that \"by column\" matches standard \"at once\" methods:\n nEls = tree.num_final_elements()\n nCircuits = len(gstrs)\n nDerivCols = mdl.num_params()\n\n #Get serial results\n vhp_serial = np.empty( (nEls,nDerivCols,nDerivCols),'d')\n vdp_serial = np.empty( (nEls,nDerivCols), 'd' )\n vp_serial = np.empty( nEls, 'd' )\n\n mdl.bulk_fill_hprobs(vhp_serial, tree,\n vp_serial, vdp_serial, (-1e6,1e6), comm=None)\n dprobs12_serial = vdp_serial[:,:,None] * vdp_serial[:,None,:]\n\n\n for tstTree,tstLookup in zip([tree],[lookup]): # currently no split trees allowed (ValueError), split_tree]:\n hcols = []\n d12cols = []\n slicesList = [ (slice(0,nDerivCols),slice(i,i+1)) for i in range(nDerivCols) ]\n for s1,s2, hprobs, dprobs12 in mdl.bulk_hprobs_by_block(\n tstTree, slicesList, True, comm):\n hcols.append(hprobs)\n d12cols.append(dprobs12)\n\n all_hcols = np.concatenate( hcols, axis=2 )\n all_d12cols = np.concatenate( d12cols, axis=2 )\n\n\n #print \"SHAPES:\"\n #print \"hcols[0] = \",hcols[0].shape\n #print \"all_hcols = \",all_hcols.shape\n #print \"all_d12cols = \",all_d12cols.shape\n #print \"vhp_serial = \",vhp_serial.shape\n #print \"dprobs12_serial = \",dprobs12_serial.shape\n\n #for i in range(all_hcols.shape[3]):\n # print \"Diff(%d) = \" % i, np.linalg.norm(all_hcols[0,:,8:,i]-vhp_serial[0,:,8:,i])\n # if np.linalg.norm(all_hcols[0,:,8:,i]-vhp_serial[0,:,8:,i]) > 1e-6:\n # for j in range(all_hcols.shape[3]):\n # print \"Diff(%d,%d) = \" % (i,j), np.linalg.norm(all_hcols[0,:,8:,i]-vhp_serial[0,:,8:,j])\n # assert(np.linalg.norm(all_hcols[0,:,8:,i]-vhp_serial[0,:,8:,i]) < 1e-6)\n\n for i,opstr in enumerate(gstrs):\n assert(np.linalg.norm(all_hcols[tstLookup[i]]-vhp_serial[lookup[i]]) < 1e-6)\n\n #for i in range(all_d12cols.shape[3]):\n # print \"Diff(%d) = \" % i, np.linalg.norm(all_d12cols[0,:,8:,i]-dprobs12_serial[0,:,8:,i])\n # if np.linalg.norm(all_d12cols[0,:,8:,i]-dprobs12_serial[0,:,8:,i]) > 1e-6:\n # for j in range(all_d12cols.shape[3]):\n # print \"Diff(%d,%d) = \" % (i,j), np.linalg.norm(all_d12cols[0,:,8:,i]-dprobs12_serial[0,:,8:,j])\n\n for i,opstr in enumerate(gstrs):\n assert(np.linalg.norm(all_d12cols[tstLookup[i]]-dprobs12_serial[lookup[i]]) < 1e-6)\n\n\n hcols = []\n d12cols = []\n slicesList = [ (slice(2,12),slice(i,i+1)) for i in range(1,10) ]\n for s1,s2, hprobs, dprobs12 in mdl.bulk_hprobs_by_block(\n tstTree, slicesList, True, comm):\n hcols.append(hprobs)\n d12cols.append(dprobs12)\n\n all_hcols = np.concatenate( hcols, axis=2 )\n all_d12cols = np.concatenate( d12cols, axis=2 )\n for i,opstr in enumerate(gstrs):\n assert(np.linalg.norm(all_hcols[tstLookup[i]]-vhp_serial[lookup[i],2:12,1:10]) < 1e-6)\n assert(np.linalg.norm(all_d12cols[tstLookup[i]]-dprobs12_serial[lookup[i],2:12,1:10]) < 1e-6)\n\n\n hprobs_by_block = np.zeros(vhp_serial.shape,'d')\n dprobs12_by_block = np.zeros(dprobs12_serial.shape,'d')\n blocks1 = pygsti.tools.mpitools.slice_up_range(nDerivCols, 3)\n blocks2 = pygsti.tools.mpitools.slice_up_range(nDerivCols, 5)\n slicesList = 
list(itertools.product(blocks1,blocks2))\n for s1,s2, hprobs_blk, dprobs12_blk in mdl.bulk_hprobs_by_block(\n tstTree, slicesList, True, comm):\n hprobs_by_block[:,s1,s2] = hprobs_blk\n dprobs12_by_block[:,s1,s2] = dprobs12_blk\n\n for i,opstr in enumerate(gstrs):\n assert(np.linalg.norm(hprobs_by_block[tstLookup[i]]-vhp_serial[lookup[i]]) < 1e-6)\n assert(np.linalg.norm(dprobs12_by_block[tstLookup[i]]-dprobs12_serial[lookup[i]]) < 1e-6)\n\n\n\n#SCRATCH\n#if np.linalg.norm(chk_ret[0]-dGs) >= 1e-6:\n# #if bScale:\n# # print \"SCALED\"\n# # print chk_ret[-1]\n#\n# rank = comm.Get_rank()\n# if rank == 0:\n# print \"DEBUG: parallel mismatch\"\n# print \"len(all_results) = \",len(all_results)\n# print \"diff = \",np.linalg.norm(chk_ret[0]-dGs)\n# for row in range(dGs.shape[0]):\n# rowA = my_results[0][row,:].flatten()\n# rowB = all_results[rank][0][row,:].flatten()\n# rowC = dGs[row,:].flatten()\n# chk_C = chk_ret[0][row,:].flatten()\n#\n# def sp(ar):\n# for i,x in enumerate(ar):\n# if abs(x) > 1e-4:\n# print i,\":\", x\n# def spc(ar1,ar2):\n# for i,x in enumerate(ar1):\n# if (abs(x) > 1e-4 or abs(ar2[i]) > 1e-4): # and abs(x-ar2[i]) > 1e-6:\n# print i,\":\", x, ar2[i], \"(\", (x-ar2[i]), \")\", \"[\",x/ar2[i],\"]\"\n#\n# assert( _np.linalg.norm(rowA-rowB) < 1e-6)\n# assert( _np.linalg.norm(rowC[0:len(rowA)]-rowA) < 1e-6)\n# #if _np.linalg.norm(rowA) > 1e-6:\n# if _np.linalg.norm(rowC - chk_C) > 1e-6:\n# print \"SCALE for row%d = %g\" % (row,rest_of_result[-1][row])\n# print \"CHKSCALE for row%d = %g\" % (row,chk_ret[-1][row])\n# print \"row%d diff = \" % row, _np.linalg.norm(rowC - chk_C)\n# print \"row%d (rank%d)A = \" % (row,rank)\n# sp(rowA)\n# print \"row%d (all vs check) = \" % row\n# spc(rowC, chk_C)\n#\n# assert(False)\n# assert(False)\n\n\n\n\n\n@mpitest(4)\ndef test_MPI_gatestrings_chi2(comm):\n #Create dataset for serial and parallel runs\n ds,lsgstStrings = create_fake_dataset(comm)\n\n #Individual processors\n my1ProcResults = runOneQubit(\"chi2\",ds,lsgstStrings)\n\n #Using all processors\n myManyProcResults = runOneQubit(\"chi2\",ds,lsgstStrings,comm,\"circuits\")\n\n for i,(gs1,gs2) in enumerate(zip(my1ProcResults,myManyProcResults)):\n assertGatesetsInSync(gs1, comm)\n assertGatesetsInSync(gs2, comm)\n\n gs2_go = pygsti.gaugeopt_to_target(gs2, gs1, {'gates': 1.0, 'spam': 1.0})\n print(\"Frobenius distance %d (rank %d) = \" % (i,comm.Get_rank()), gs1.frobeniusdist(gs2_go))\n if gs1.frobeniusdist(gs2_go) >= 1e-5:\n print(\"DIFF (%d) = \" % comm.Get_rank(), gs1.strdiff(gs2_go))\n assert(gs1.frobeniusdist(gs2_go) < 1e-5)\n return\n\n\n@mpitest(4)\ndef test_MPI_gaugeopt(comm):\n #Gauge Opt to Target\n mdl_other = std.target_model().depolarize(op_noise=0.01, spam_noise=0.01)\n mdl_other['Gx'].rotate( (0,0,0.01) )\n mdl_other['Gy'].rotate( (0,0,0.01) )\n mdl_gopt = pygsti.gaugeopt_to_target(mdl_other, std.target_model(), verbosity=10, comm=comm)\n\n #use a method that isn't parallelized with non-None comm (warning is given)\n mdl_gopt_slow = pygsti.gaugeopt_to_target(mdl_other, std.target_model(), verbosity=10, method=\"BFGS\", comm=comm)\n\n\n@mpitest(4)\ndef test_MPI_gatestrings_logl(comm):\n #Create dataset for serial and parallel runs\n ds,lsgstStrings = create_fake_dataset(comm)\n\n #Individual processors\n my1ProcResults = runOneQubit(\"logl\",ds,lsgstStrings)\n\n #Using all processors\n myManyProcResults = runOneQubit(\"logl\",ds,lsgstStrings,comm,\"circuits\")\n\n for i,(gs1,gs2) in enumerate(zip(my1ProcResults,myManyProcResults)):\n assertGatesetsInSync(gs1, comm)\n 
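# (annotation added for clarity; not in the original source) both the single-process (gs1) and multi-process (gs2) estimates are first checked to be identical across MPI ranks, then gauge-matched and compared below.\n 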
assertGatesetsInSync(gs2, comm)\n\n gs2_go = pygsti.gaugeopt_to_target(gs2, gs1, {'gates': 1.0, 'spam': 1.0})\n print(\"Frobenius distance %d (rank %d) = \" % (i,comm.Get_rank()), gs1.frobeniusdist(gs2_go))\n if gs1.frobeniusdist(gs2_go) >= 1e-5:\n print(\"DIFF (%d) = \" % comm.Get_rank(), gs1.strdiff(gs2_go))\n assert(gs1.frobeniusdist(gs2_go) < 1e-5)\n return\n\n@mpitest(4)\ndef test_MPI_mlgst_forcefn(comm):\n fiducials = std.fiducials\n target_model = std.target_model()\n lgstStrings = pygsti.construction.list_lgst_circuits(fiducials, fiducials,\n list(target_model.operations.keys()))\n #Create dataset on root proc\n if comm is None or comm.Get_rank() == 0:\n datagen_gateset = target_model.depolarize(op_noise=0.01, spam_noise=0.01)\n ds = pygsti.construction.generate_fake_data(datagen_gateset, lgstStrings,\n nSamples=10000, sampleError='binomial', seed=100)\n ds = comm.bcast(ds, root=0)\n else:\n ds = comm.bcast(None, root=0)\n\n\n mdl_lgst = pygsti.do_lgst(ds, fiducials, fiducials, target_model, svdTruncateTo=4, verbosity=0)\n mdl_lgst_go = pygsti.gaugeopt_to_target(mdl_lgst,target_model, {'spam':1.0, 'gates': 1.0})\n\n forcingfn_grad = np.ones((1,mdl_lgst_go.num_params()), 'd')\n mdl_lsgst_chk_opts3 = pygsti.algorithms.core._do_mlgst_base(\n ds, mdl_lgst_go, lgstStrings, verbosity=3,\n minProbClip=1e-4, probClipInterval=(-1e2,1e2),\n forcefn_grad=forcingfn_grad, comm=comm)\n\n\n\n@mpitest(4)\ndef test_MPI_derivcols(comm):\n #Create dataset for serial and parallel runs\n ds,lsgstStrings = create_fake_dataset(comm)\n\n #Individual processors\n my1ProcResults = runOneQubit(\"chi2\",ds,lsgstStrings)\n\n #Using all processors\n myManyProcResults = runOneQubit(\"chi2\",ds,lsgstStrings,comm,\"deriv\")\n\n for i,(gs1,gs2) in enumerate(zip(my1ProcResults,myManyProcResults)):\n assertGatesetsInSync(gs1, comm)\n assertGatesetsInSync(gs2, comm)\n\n gs2_go = pygsti.gaugeopt_to_target(gs2, gs1, {'gates': 1.0, 'spam': 1.0})\n print(\"Frobenius distance %d (rank %d) = \" % (i,comm.Get_rank()), gs1.frobeniusdist(gs2_go))\n if gs1.frobeniusdist(gs2_go) >= 1e-5:\n print(\"DIFF (%d) = \" % comm.Get_rank(), gs1.strdiff(gs2_go))\n assert(gs1.frobeniusdist(gs2_go) < 1e-5)\n return\n\n@mpitest(4)\ndef test_run1Q_end2end(comm):\n from pygsti.modelpacks.legacy import std1Q_XYI\n target_model = std1Q_XYI.target_model()\n fiducials = std1Q_XYI.fiducials\n germs = std1Q_XYI.germs\n maxLengths = [1,2,4]\n\n mdl_datagen = target_model.depolarize(op_noise=0.1, spam_noise=0.001)\n listOfExperiments = pygsti.construction.make_lsgst_experiment_list(\n list(target_model.operations.keys()), fiducials, fiducials, germs, maxLengths)\n ds = pygsti.construction.generate_fake_data(mdl_datagen, listOfExperiments,\n nSamples=1000,\n sampleError=\"binomial\",\n seed=1234, comm=comm)\n if comm.Get_rank() == 0:\n pickle.dump(ds, open(\"mpi_dataset.pkl\",\"wb\"))\n comm.barrier() #to make sure dataset file is written\n\n #test with pkl file - should only read in on rank0 then broadcast\n results = pygsti.do_long_sequence_gst(\"mpi_dataset.pkl\", target_model, fiducials, fiducials,\n germs, [1], comm=comm)\n\n #test with dataset object\n results = pygsti.do_long_sequence_gst(ds, target_model, fiducials, fiducials,\n germs, maxLengths, comm=comm)\n\n #Use dummy duplicate of results to trigger MPI data-comparison processing:\n pygsti.report.create_standard_report({\"one\": results, \"two\": results}, \"mpi_test_report\",\n \"MPI test report\", confidenceLevel=95,\n verbosity=2, comm=comm)\n\n\n@mpitest(4)\ndef 
test_MPI_germsel(comm):\n if comm is None or comm.Get_rank() == 0:\n gatesetNeighborhood = pygsti.alg.randomize_model_list(\n [std.target_model()], randomizationStrength=1e-3,\n numCopies=3, seed=2018)\n comm.bcast(gatesetNeighborhood, root=0)\n else:\n gatesetNeighborhood = comm.bcast(None, root=0)\n\n max_length = 6\n gates = std.target_model().operations.keys()\n superGermSet = pygsti.construction.list_all_circuits_without_powers_and_cycles(gates, max_length)\n\n #germs = pygsti.alg.build_up_breadth(gatesetNeighborhood, superGermSet,\n # randomize=False, seed=2018, scoreFunc='all',\n # threshold=1e6, verbosity=1, opPenalty=1.0,\n # memLimit=3*(1024**3), comm=comm)\n\n germs_lowmem = pygsti.alg.build_up_breadth(gatesetNeighborhood, superGermSet,\n randomize=False, seed=2018, scoreFunc='all',\n threshold=1e6, verbosity=1, opPenalty=1.0,\n memLimit=3*(1024**2), comm=comm) # force \"single-Jac\" mode\n\n@mpitest(4)\ndef test_MPI_profiler(comm):\n mem = profiler._get_root_mem_usage(comm)\n mem = profiler._get_max_mem_usage(comm)\n\n start_time = time.time()\n p = profiler.Profiler(comm, default_print_memcheck=True)\n p.add_time(\"My Name\", start_time, prefix=1)\n p.add_count(\"My Count\", inc=1, prefix=1)\n p.add_count(\"My Count\", inc=2, prefix=1)\n p.mem_check(\"My Memcheck\", prefix=1)\n p.mem_check(\"My Memcheck\", prefix=1)\n p.print_mem(\"My Memcheck just to print\")\n p.print_mem(\"My Memcheck just to print\", show_minmax=True)\n p.print_msg(\"My Message\")\n p.print_msg(\"My Message\", all_ranks=True)\n\n s = p.format_times(sortBy=\"name\")\n s = p.format_times(sortBy=\"time\")\n #with self.assertRaises(ValueError):\n # p.format_times(sortBy=\"foobar\")\n\n s = p.format_counts(sortBy=\"name\")\n s = p.format_counts(sortBy=\"count\")\n #with self.assertRaises(ValueError):\n # p.format_counts(sortBy=\"foobar\")\n\n s = p.format_memory(sortBy=\"name\")\n s = p.format_memory(sortBy=\"usage\")\n #with self.assertRaises(ValueError):\n # p.format_memory(sortBy=\"foobar\")\n #with self.assertRaises(NotImplementedError):\n # p.format_memory(sortBy=\"timestamp\")\n\n\n@mpitest(4)\ndef test_MPI_tools(comm):\n from pygsti.tools import mpitools as mpit\n\n indices = list(range(10))\n nprocs = comm.Get_size()\n rank = comm.Get_rank()\n\n # ------------------ distribute_indices_base --------------------------------\n\n #case of procs < nIndices\n loc_indices, owners = mpit.distribute_indices_base(indices, nprocs, rank, allow_split_comm=True)\n if nprocs == 4: #should always be the case\n if rank == 0: assert(loc_indices == [0,1,2])\n if rank == 1: assert(loc_indices == [3,4,5])\n if rank == 2: assert(loc_indices == [6,7])\n if rank == 3: assert(loc_indices == [8,9])\n assert(owners == {0: 0, 1: 0, 2: 0,\n 3: 1, 4: 1, 5: 1,\n 6: 2, 7: 2,\n 8: 3, 9: 3}) # index : owner-rank\n\n #case of nIndices > procs, allow_split_comm = True, no extras\n indices = list(range(2))\n loc_indices, owners = mpit.distribute_indices_base(indices, nprocs, rank, allow_split_comm=True)\n if nprocs == 4: #should always be the case\n if rank == 0: assert(loc_indices == [0])\n if rank == 1: assert(loc_indices == [0])\n if rank == 2: assert(loc_indices == [1])\n if rank == 3: assert(loc_indices == [1])\n assert(owners == {0: 0, 1: 2}) # only gives *first* owner\n\n #case of nIndices > procs, allow_split_comm = True, 1 extra proc\n indices = list(range(3))\n loc_indices, owners = mpit.distribute_indices_base(indices, nprocs, rank, allow_split_comm=True)\n if nprocs == 4: #should always be the case\n if rank == 0: 
assert(loc_indices == [0])\n if rank == 1: assert(loc_indices == [0])\n if rank == 2: assert(loc_indices == [1])\n if rank == 3: assert(loc_indices == [2])\n assert(owners == {0: 0, 1: 2, 2: 3}) # only gives *first* owner\n\n #case of nIndices > procs, allow_split_comm = False\n indices = list(range(3))\n loc_indices, owners = mpit.distribute_indices_base(indices, nprocs, rank, allow_split_comm=False)\n if nprocs == 4: #should always be the case\n if rank == 0: assert(loc_indices == [0])\n if rank == 1: assert(loc_indices == [1])\n if rank == 2: assert(loc_indices == [2])\n if rank == 3: assert(loc_indices == []) #only one proc per index\n assert(owners == {0: 0, 1: 1, 2: 2}) # only gives *first* owner\n\n #Boundary case of no indices\n loc_indices, owners = mpit.distribute_indices_base([], nprocs, rank, allow_split_comm=False)\n assert(loc_indices == [])\n assert(owners == {})\n\n\n # ------------------ slice_up_slice --------------------------------\n slices = mpit.slice_up_slice( slice(0,4), num_slices=2)\n assert(slices[0] == slice(0,2))\n assert(slices[1] == slice(2,4))\n slices = mpit.slice_up_slice( slice(None,None), num_slices=2)\n assert(slices[0] == slice(0,0))\n assert(slices[1] == slice(0,0))\n\n\n # ------------------ distribute & gather slices--------------------------------\n master = np.arange(100)\n\n def test(slc, allow_split_comm=True, maxbuf=None):\n slices, loc_slice, owners, loc_comm = mpit.distribute_slice(slc,comm,allow_split_comm)\n my_array = np.zeros(100,'d')\n my_array[loc_slice] = master[loc_slice] # ~ computation (just copy from \"master\")\n mpit.gather_slices(slices, owners, my_array,\n arToFillInds=[], axes=0, comm=comm,\n max_buffer_size=maxbuf)\n assert(np.linalg.norm(my_array[slc] - master[slc]) < 1e-6)\n\n my_array2 = np.zeros(100,'d')\n my_array2[loc_slice] = master[loc_slice] # ~ computation (just copy from \"master\")\n mpit.gather_slices_by_owner([loc_slice], my_array2, arToFillInds=[],\n axes=0, comm=comm, max_buffer_size=maxbuf)\n assert(np.linalg.norm(my_array2[slc] - master[slc]) < 1e-6)\n\n indices = [ pygsti.tools.slicetools.as_array(s) for s in slices ]\n loc_indices = pygsti.tools.slicetools.as_array(loc_slice)\n my_array3 = np.zeros(100,'d')\n my_array3[loc_indices] = master[loc_indices] # ~ computation (just copy from \"master\")\n mpit.gather_indices(indices, owners, my_array3, arToFillInds=[], axes=0,\n comm=comm, max_buffer_size=maxbuf)\n assert(np.linalg.norm(my_array3[slc] - master[slc]) < 1e-6)\n\n test(slice(0,8)) #more indices than processors\n test(slice(0,8),False) #more indices than processors w/out split comm\n test(slice(0,3)) #fewer indices than processors\n test(slice(0,3),False) #fewer indices than processors w/out split comm\n test(slice(0,10),maxbuf=12) #with max-buffer\n test(slice(0,10),maxbuf=0) #with max-buffer that cannot be attained - should WARN\n\n master2D = np.arange(100).reshape((10,10))\n\n def test2D(slc1,slc2, allow_split_comm=True, maxbuf=None):\n slices1, loc_slice1, owners1, loc_comm1 = mpit.distribute_slice(slc1,comm,allow_split_comm)\n slices2, loc_slice2, owners2, loc_comm2 = mpit.distribute_slice(slc2,loc_comm1,allow_split_comm)\n\n my_array = np.zeros((10,10),'d')\n my_array[loc_slice1,loc_slice2] = master2D[loc_slice1,loc_slice2].copy() # ~ computation (just copy from \"master\")\n\n #Can't do this until distribute_slice gets upgraded to work with multiple dims...\n # mpit.gather_slices(slices, owners, my_array,\n # arToFillInds=[], axes=0, comm=comm,\n # max_buffer_size=maxbuf)\n # 
assert(np.linalg.norm(my_array[slc] - master2D[slc]) < 1e-6)\n\n my_array2 = np.zeros((10,10),'d')\n my_array2[loc_slice1,loc_slice2] = master2D[loc_slice1,loc_slice2].copy() # ~ computation (just copy from \"master\")\n mpit.gather_slices_by_owner([(loc_slice1,loc_slice2)], my_array2, arToFillInds=[],\n axes=(0,1), comm=comm, max_buffer_size=maxbuf)\n #print(\"Rank %d: locslc1 = %s, locslc2 = %s, loc_comm1_size=%d\" % (rank, str(loc_slice1),str(loc_slice2),\n # loc_comm1.Get_size() if loc_comm1 else -1))\n assert(np.linalg.norm(my_array2[slc1,slc2] - master2D[slc1,slc2]) < 1e-6)\n\n\n test2D(slice(0,8),slice(0,4)) #more indices than processors\n test2D(slice(0,3),slice(0,3)) #fewer indices than processors\n test2D(slice(0,3),slice(0,3),False) #fewer indices than processors w/split comm\n test2D(slice(0,10), slice(0,5), maxbuf=20) #with max-buffer\n\n #trivial case with comm = None => nothing to do\n mpit.gather_slices(None, None, master, arToFillInds=[], axes=0, comm=None)\n mpit.gather_slices_by_owner(slice(0,100), master, arToFillInds=[], axes=0, comm=None)\n\n # ------------------ parallel apply --------------------------------\n\n #Doesn't work in python3 b/c comm.split hands in distribute_indices...\n #def f(x):\n # return x + \"!\"\n #results = mpit.parallel_apply( f,[\"Hi\",\"there\"], comm)\n #assert(results == [\"Hi!\",\"there!\"])\n\n def f(i):\n return i + 10\n results = mpit.parallel_apply( f,[1,2], comm)\n assert(results == [11,12])\n\n # convenience method to avoid importing mpi4py at the top level\n c = mpit.get_comm()\n\n\n@mpitest(4)\ndef test_MPI_printer(comm):\n #Test output of each rank to separate file:\n pygsti.obj.VerbosityPrinter._commPath = \"./\"\n pygsti.obj.VerbosityPrinter._commFileName = \"mpi_test_output\"\n printer = pygsti.obj.VerbosityPrinter(verbosity=1, comm=comm)\n printer.log(\"HELLO!\")\n pygsti.obj.VerbosityPrinter._commPath = \"./\"\n pygsti.obj.VerbosityPrinter._commFileName = \"mpi_test_output\"\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n",
"\"\"\" Helper Functions for generating plots \"\"\"\n#***************************************************************************************************\n# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).\n# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights\n# in this software.\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.\n#***************************************************************************************************\n\nimport numpy as _np\nimport warnings as _warnings\n\nfrom .. import tools as _tools\nfrom .. import objects as _objs\n\nfrom ..objects.smartcache import smart_cached\n\n\ndef total_count_matrix(gsplaq, dataset):\n \"\"\"\n Computes the total count matrix for a base circuit.\n\n Parameters\n ----------\n gsplaq : CircuitPlaquette\n Obtained via :method:`CircuitStructure.get_plaquette`, this object\n specifies which matrix indices should be computed and which operation sequences\n they correspond to.\n\n dataset : DataSet\n The data used to specify the counts\n\n Returns\n -------\n numpy array of shape (M,N)\n total count values (sum of count values for each SPAM label)\n corresponding to operation sequences where circuit is sandwiched\n between the specified set of N prep-fiducial and M effect-fiducial\n operation sequences.\n \"\"\"\n ret = _np.nan * _np.ones(gsplaq.num_simplified_elements, 'd')\n for i, j, opstr, elIndices, outcomes in gsplaq.iter_simplified():\n ret[elIndices] = dataset[opstr].total\n # OR should it sum only over outcomes, i.e.\n # = sum([dataset[opstr][ol] for ol in outcomes])\n return ret\n\n\ndef count_matrices(gsplaq, dataset):\n \"\"\"\n Computes spamLabel's count matrix for a base circuit.\n\n Parameters\n ----------\n gsplaq : CircuitPlaquette\n Obtained via :method:`CircuitStructure.get_plaquette`, this object\n specifies which matrix indices should be computed and which operation sequences\n they correspond to.\n\n dataset : DataSet\n The data used to specify the counts\n\n spamlabels : list of strings\n The spam labels to extract counts for, e.g. ['plus']\n\n Returns\n -------\n numpy array of shape ( len(spamlabels), len(effectStrs), len(prepStrs) )\n count values corresponding to spamLabel and operation sequences\n where circuit is sandwiched between the each prep-fiducial and\n effect-fiducial pair.\n \"\"\"\n ret = _np.nan * _np.ones(gsplaq.num_simplified_elements, 'd')\n for i, j, opstr, elIndices, outcomes in gsplaq.iter_simplified():\n datarow = dataset[opstr]\n ret[elIndices] = [datarow[ol] for ol in outcomes]\n return ret\n\n\ndef frequency_matrices(gsplaq, dataset):\n \"\"\"\n Computes spamLabel's frequency matrix for a base circuit.\n\n Parameters\n ----------\n gsplaq : CircuitPlaquette\n Obtained via :method:`CircuitStructure.get_plaquette`, this object\n specifies which matrix indices should be computed and which operation sequences\n they correspond to.\n\n dataset : DataSet\n The data used to specify the frequencies\n\n spamlabels : list of strings\n The spam labels to extract frequencies for, e.g. 
['plus']\n\n\n Returns\n -------\n numpy array of shape ( len(spamlabels), len(effectStrs), len(prepStrs) )\n frequency values corresponding to spamLabel and operation sequences\n where circuit is sandwiched between the each prep-fiducial,\n effect-fiducial pair.\n \"\"\"\n return count_matrices(gsplaq, dataset) \\\n / total_count_matrix(gsplaq, dataset)\n\n\ndef probability_matrices(gsplaq, model,\n probs_precomp_dict=None):\n \"\"\"\n Computes spamLabel's probability matrix for a base circuit.\n\n Parameters\n ----------\n gsplaq : CircuitPlaquette\n Obtained via :method:`CircuitStructure.get_plaquette`, this object\n specifies which matrix indices should be computed and which operation sequences\n they correspond to.\n\n model : Model\n The model used to specify the probabilities\n\n spamlabels : list of strings\n The spam labels to extract probabilities for, e.g. ['plus']\n\n probs_precomp_dict : dict, optional\n A dictionary of precomputed probabilities. Keys are operation sequences\n and values are prob-dictionaries (as returned from Model.probs)\n corresponding to each operation sequence.\n\n Returns\n -------\n numpy array of shape ( len(spamlabels), len(effectStrs), len(prepStrs) )\n probability values corresponding to spamLabel and operation sequences\n where circuit is sandwiched between the each prep-fiducial,\n effect-fiducial pair.\n \"\"\"\n ret = _np.nan * _np.ones(gsplaq.num_simplified_elements, 'd')\n if probs_precomp_dict is None:\n if model is not None:\n for i, j, opstr, elIndices, outcomes in gsplaq.iter_simplified():\n probs = model.probs(opstr)\n ret[elIndices] = [probs[ol] for ol in outcomes]\n else:\n for i, j, opstr, elIndices, _ in gsplaq.iter_simplified():\n ret[elIndices] = probs_precomp_dict[opstr] # precomp is already in element-array form\n return ret\n\n\n@smart_cached\ndef chi2_matrix(gsplaq, dataset, model, minProbClipForWeighting=1e-4,\n probs_precomp_dict=None):\n \"\"\"\n Computes the chi^2 matrix for a base circuit.\n\n Parameters\n ----------\n gsplaq : CircuitPlaquette\n Obtained via :method:`CircuitStructure.get_plaquette`, this object\n specifies which matrix indices should be computed and which operation sequences\n they correspond to.\n\n dataset : DataSet\n The data used to specify frequencies and counts\n\n model : Model\n The model used to specify the probabilities and SPAM labels\n\n minProbClipForWeighting : float, optional\n defines the clipping interval for the statistical weight (see chi2fn).\n\n probs_precomp_dict : dict, optional\n A dictionary of precomputed probabilities. 
Keys are operation sequences\n and values are prob-dictionaries (as returned from Model.probs)\n corresponding to each operation sequence.\n\n Returns\n -------\n numpy array of shape ( len(effectStrs), len(prepStrs) )\n chi^2 values corresponding to operation sequences where\n circuit is sandwiched between the each prep-fiducial,\n effect-fiducial pair.\n \"\"\"\n gsplaq_ds = gsplaq.expand_aliases(dataset, circuit_simplifier=model)\n cnts = total_count_matrix(gsplaq_ds, dataset)\n probs = probability_matrices(gsplaq, model,\n probs_precomp_dict)\n freqs = frequency_matrices(gsplaq_ds, dataset)\n\n ret = _np.nan * _np.ones((gsplaq.rows, gsplaq.cols), 'd')\n for (i, j, opstr, elIndices, _), (_, _, _, elIndices_ds, _) in zip(\n gsplaq.iter_simplified(), gsplaq_ds.iter_simplified()):\n chiSqs = _tools.chi2fn(cnts[elIndices_ds], probs[elIndices],\n freqs[elIndices_ds], minProbClipForWeighting)\n ret[i, j] = sum(chiSqs) # sum all elements for each (i,j) pair\n return ret\n\n\n@smart_cached\ndef logl_matrix(gsplaq, dataset, model, minProbClip=1e-6,\n probs_precomp_dict=None):\n \"\"\"\n Computes the log-likelihood matrix of 2*( log(L)_upperbound - log(L) )\n values for a base circuit.\n\n Parameters\n ----------\n gsplaq : CircuitPlaquette\n Obtained via :method:`CircuitStructure.get_plaquette`, this object\n specifies which matrix indices should be computed and which operation sequences\n they correspond to.\n\n dataset : DataSet\n The data used to specify frequencies and counts\n\n model : Model\n The model used to specify the probabilities and SPAM labels\n\n minProbClip : float, optional\n defines the minimum probability \"patch-point\" of the log-likelihood function.\n\n probs_precomp_dict : dict, optional\n A dictionary of precomputed probabilities. Keys are operation sequences\n and values are prob-dictionaries (as returned from Model.probs)\n corresponding to each operation sequence.\n\n\n Returns\n -------\n numpy array of shape ( len(effectStrs), len(prepStrs) )\n logl values corresponding to operation sequences where\n circuit is sandwiched between the each prep-fiducial,\n effect-fiducial pair.\n \"\"\"\n gsplaq_ds = gsplaq.expand_aliases(dataset, circuit_simplifier=model)\n\n cnts = total_count_matrix(gsplaq_ds, dataset)\n probs = probability_matrices(gsplaq, model,\n probs_precomp_dict)\n freqs = frequency_matrices(gsplaq_ds, dataset)\n\n ret = _np.nan * _np.ones((gsplaq.rows, gsplaq.cols), 'd')\n for (i, j, opstr, elIndices, _), (_, _, _, elIndices_ds, _) in zip(\n gsplaq.iter_simplified(), gsplaq_ds.iter_simplified()):\n logLs = _tools.two_delta_loglfn(cnts[elIndices_ds], probs[elIndices],\n freqs[elIndices_ds], minProbClip)\n ret[i, j] = sum(logLs) # sum all elements for each (i,j) pair\n return ret\n\n\n@smart_cached\ndef tvd_matrix(gsplaq, dataset, model, probs_precomp_dict=None):\n \"\"\"\n Computes the total-variational distance matrix of `0.5 * |p-f|`\n values for a base circuit.\n\n Parameters\n ----------\n gsplaq : CircuitPlaquette\n Obtained via :method:`CircuitStructure.get_plaquette`, this object\n specifies which matrix indices should be computed and which operation sequences\n they correspond to.\n\n dataset : DataSet\n The data used to specify frequencies and counts\n\n model : Model\n The model used to specify the probabilities and SPAM labels\n\n probs_precomp_dict : dict, optional\n A dictionary of precomputed probabilities. 
Keys are operation sequences\n and values are prob-dictionaries (as returned from Model.probs)\n corresponding to each operation sequence.\n\n\n Returns\n -------\n numpy array of shape ( len(effectStrs), len(prepStrs) )\n logl values corresponding to operation sequences where\n circuit is sandwiched between the each prep-fiducial,\n effect-fiducial pair.\n \"\"\"\n gsplaq_ds = gsplaq.expand_aliases(dataset, circuit_simplifier=model)\n\n probs = probability_matrices(gsplaq, model,\n probs_precomp_dict)\n freqs = frequency_matrices(gsplaq_ds, dataset)\n\n ret = _np.nan * _np.ones((gsplaq.rows, gsplaq.cols), 'd')\n for (i, j, opstr, elIndices, _), (_, _, _, elIndices_ds, _) in zip(\n gsplaq.iter_simplified(), gsplaq_ds.iter_simplified()):\n TVDs = 0.5 * _np.abs(probs[elIndices] - freqs[elIndices_ds])\n ret[i, j] = sum(TVDs) # sum all elements for each (i,j) pair\n return ret\n\n\ndef small_eigval_err_rate(sigma, directGSTmodels):\n \"\"\"\n Compute per-gate error rate.\n\n The per-gate error rate, extrapolated from the smallest eigvalue\n of the Direct GST estimate of the given operation sequence sigma.\n\n Parameters\n ----------\n sigma : Circuit or tuple of operation labels\n The gate sequence that is used to estimate the error rate\n\n dataset : DataSet\n The dataset used obtain operation sequence frequencies\n\n directGSTmodels : dictionary of Models\n A dictionary with keys = operation sequences and\n values = Models.\n\n Returns\n -------\n float\n the approximate per-gate error rate.\n \"\"\"\n if sigma is None: return _np.nan # in plot processing, \"None\" circuits = no plot output = nan values\n mdl_direct = directGSTmodels[sigma]\n minEigval = min(abs(_np.linalg.eigvals(mdl_direct.operations[\"GsigmaLbl\"])))\n # (approximate) per-gate error rate; max averts divide by zero error\n return 1.0 - minEigval**(1.0 / max(len(sigma), 1))\n\n\ndef _eformat(f, prec):\n \"\"\"\n Formatting routine for writing compact representations of\n numbers in plot boxes\n \"\"\"\n if _np.isnan(f): return \"\" # show NAN as blanks\n if prec == 'compact' or prec == 'compacthp':\n if f < 0:\n ef = _eformat(-f, prec)\n return \"-\" + ef if (ef != \"0\") else \"0\"\n\n if prec == 'compacthp':\n if f <= 0.5e-9: # can't fit in 3 digits; 1e-9 = \"1m9\" is the smallest 3-digit (not counting minus signs)\n return \"0\"\n if f < 0.005: # then need scientific notation since 3-digit float would be 0.00...\n s = \"%.0e\" % f\n try:\n mantissa, exp = s.split('e')\n exp = int(exp); assert(exp < 0)\n if exp < -9: return \"0\" # should have been caugth above, but just in case\n return \"%sm%d\" % (mantissa, -exp)\n except:\n return \"?\"\n if f < 1:\n z = \"%.2f\" % f # print first two decimal places\n if z.startswith(\"0.\"): return z[1:] # fails for '1.00'; then thunk down to next f<10 case\n if f < 10:\n return \"%.1f\" % f # print whole number and tenths\n\n if f < 100:\n return \"%.0f\" % f # print nearest whole number if only 1 or 2 digits\n\n #if f >= 100, minimal scientific notation, such as \"4e7\", not \"4e+07\"\n s = \"%.0e\" % f\n try:\n mantissa, exp = s.split('e')\n exp = int(exp)\n if exp >= 100: return \"B\" # if number is too big to print\n if exp >= 10: return \"*%d\" % exp\n return \"%se%d\" % (mantissa, exp)\n except:\n return str(s)[0:3]\n\n elif type(prec) == int:\n if prec >= 0:\n return \"%.*f\" % (prec, f)\n else:\n return \"%.*g\" % (-prec, f)\n else:\n return \"%g\" % f # fallback to general format\n\n\ndef _num_non_nan(array):\n ixs = _np.where(_np.isnan(_np.array(array).flatten()) == 
False)[0] # noqa: E712\n return int(len(ixs))\n\n\ndef _all_same(items):\n return all(x == items[0] for x in items)\n\n\ndef _compute_num_boxes_dof(subMxs, sumUp, element_dof):\n \"\"\"\n A helper function to compute the number of boxes, and corresponding\n number of degrees of freedom, for the GST chi2/logl boxplots.\n\n \"\"\"\n if sumUp:\n s = _np.shape(subMxs)\n # Reshape the subMxs into a \"flattened\" form (as opposed to a\n # two-dimensional one)\n reshape_subMxs = _np.array(_np.reshape(subMxs, (s[0] * s[1], s[2], s[3])))\n\n #Get all the boxes where the entries are not all NaN\n non_all_NaN = reshape_subMxs[_np.where(_np.array([_np.isnan(k).all() for k in reshape_subMxs]) == False)] # noqa: E712,E501\n s = _np.shape(non_all_NaN)\n dof_each_box = [_num_non_nan(k) * element_dof for k in non_all_NaN]\n\n # Don't assert this anymore -- just use average below\n if not _all_same(dof_each_box):\n _warnings.warn('Number of degrees of freedom different for different boxes!')\n\n # The number of boxes is equal to the number of rows in non_all_NaN\n n_boxes = s[0]\n\n if n_boxes > 0:\n # Each box is a chi2_(sum) random variable\n dof_per_box = _np.average(dof_each_box)\n else:\n dof_per_box = None # unknown, since there are no boxes\n else:\n # Each box is a chi2_m random variable currently dictated by the number of\n # dataset degrees of freedom.\n dof_per_box = element_dof\n\n # Gets all the non-NaN boxes, flattens the resulting\n # array, and does the sum.\n n_boxes = _np.sum(~_np.isnan(subMxs).flatten())\n\n return n_boxes, dof_per_box\n\n\ndef _computeProbabilities(gss, model, dataset, probClipInterval=(-1e6, 1e6),\n check=False, opLabelAliases=None,\n comm=None, smartc=None, wildcard=None):\n \"\"\"\n Returns a dictionary of probabilities for each gate sequence in\n CircuitStructure `gss`.\n \"\"\"\n def smart(fn, *args, **kwargs):\n if smartc:\n return smartc.cached_compute(fn, args, kwargs)[1]\n else:\n if '_filledarrays' in kwargs: del kwargs['_filledarrays']\n return fn(*args, **kwargs)\n\n circuitList = gss.allstrs\n\n #compute probabilities\n #OLD: evt,lookup,_ = smart(model.bulk_evaltree, circuitList, dataset=dataset)\n evt, _, _, lookup, outcomes_lookup = smart(model.bulk_evaltree_from_resources,\n circuitList, comm, dataset=dataset)\n\n # _np.empty(evt.num_final_elements(), 'd') - .zeros b/c of caching\n bulk_probs = _np.zeros(evt.num_final_elements(), 'd')\n smart(model.bulk_fill_probs, bulk_probs, evt, probClipInterval, check, comm, _filledarrays=(0,))\n # bulk_probs indexed by [element_index]\n\n if wildcard:\n freqs = _np.empty(evt.num_final_elements(), 'd')\n #ds_circuit_list = _tools.find_replace_tuple_list(circuitList, opLabelAliases)\n ds_circuit_list = _tools.apply_aliases_to_circuit_list(circuitList, opLabelAliases)\n for (i, opStr) in enumerate(ds_circuit_list):\n cnts = dataset[opStr].counts; total = sum(cnts.values())\n freqs[lookup[i]] = [cnts.get(x, 0) / total for x in outcomes_lookup[i]]\n\n probs_in = bulk_probs.copy()\n wildcard.update_probs(probs_in, bulk_probs, freqs, circuitList, lookup)\n\n probs_dict = \\\n {circuitList[i]: bulk_probs.take(_tools.as_array(lookup[i]))\n for i in range(len(circuitList))}\n return probs_dict\n\n\n#@smart_cached\ndef _computeSubMxs(gss, model, subMxCreationFn, dataset=None, subMxCreationFn_extra_arg=None):\n if model is not None: gss.simplify_plaquettes(model, dataset)\n subMxs = [[subMxCreationFn(gss.get_plaquette(x, y), x, y, subMxCreationFn_extra_arg)\n for x in gss.used_xvals()] for y in gss.used_yvals()]\n #Note: 
subMxs[y-index][x-index] is proper usage\n return subMxs\n\n\n@smart_cached\ndef direct_chi2_matrix(gsplaq, gss, dataset, directModel,\n minProbClipForWeighting=1e-4):\n \"\"\"\n Computes the Direct-X chi^2 matrix for a base circuit sigma.\n\n Similar to chi2_matrix, except the probabilities used to compute\n chi^2 values come from using the \"composite gate\" of directModels[sigma],\n a Model assumed to contain some estimate of sigma stored under the\n operation label \"GsigmaLbl\".\n\n Parameters\n ----------\n gsplaq : CircuitPlaquette\n Obtained via :method:`CircuitStructure.get_plaquette`, this object\n specifies which matrix indices should be computed and which operation sequences\n (for accessing the dataset) they correspond to.\n\n gss : CircuitStructure\n The operation sequence structure object containing `gsplaq`. The structure is\n neede to create a special plaquette for computing probabilities from the\n direct model containing a \"GsigmaLbl\" gate.\n\n dataset : DataSet\n The data used to specify frequencies and counts\n\n directModel : Model\n Model which contains an estimate of sigma stored\n under the operation label \"GsigmaLbl\".\n\n minProbClipForWeighting : float, optional\n defines the clipping interval for the statistical weight (see chi2fn).\n\n\n Returns\n -------\n numpy array of shape ( len(effectStrs), len(prepStrs) )\n Direct-X chi^2 values corresponding to operation sequences where\n circuit is sandwiched between the each (effectStr,prepStr) pair.\n \"\"\"\n if len(gsplaq.get_all_strs()) > 0: # skip cases with no strings\n plaq_ds = gsplaq.expand_aliases(dataset, circuit_simplifier=directModel)\n plaq_pr = gss.create_plaquette(_objs.Circuit((\"GsigmaLbl\",)))\n plaq_pr.simplify_circuits(directModel)\n\n cnts = total_count_matrix(plaq_ds, dataset)\n probs = probability_matrices(plaq_pr, directModel) # no probs_precomp_dict\n freqs = frequency_matrices(plaq_ds, dataset)\n\n ret = _np.empty((plaq_ds.rows, plaq_ds.cols), 'd')\n for (i, j, opstr, elIndices, _), (_, _, _, elIndices_ds, _) in zip(\n plaq_pr.iter_simplified(), plaq_ds.iter_simplified()):\n chiSqs = _tools.chi2fn(cnts[elIndices_ds], probs[elIndices],\n freqs[elIndices_ds], minProbClipForWeighting)\n ret[i, j] = sum(chiSqs) # sum all elements for each (i,j) pair\n\n return ret\n else:\n return _np.nan * _np.ones((gsplaq.rows, gsplaq.cols), 'd')\n\n\n@smart_cached\ndef direct_logl_matrix(gsplaq, gss, dataset, directModel,\n minProbClip=1e-6):\n \"\"\"\n Computes the Direct-X log-likelihood matrix, containing the values\n of 2*( log(L)_upperbound - log(L) ) for a base circuit sigma.\n\n Similar to logl_matrix, except the probabilities used to compute\n LogL values come from using the \"composite gate\" of directModels[sigma],\n a Model assumed to contain some estimate of sigma stored under the\n operation label \"GsigmaLbl\".\n\n Parameters\n ----------\n gsplaq : CircuitPlaquette\n Obtained via :method:`CircuitStructure.get_plaquette`, this object\n specifies which matrix indices should be computed and which operation sequences\n (for accessing the dataset) they correspond to.\n\n gss : CircuitStructure\n The operation sequence structure object containing `gsplaq`. 
The structure is\n neede to create a special plaquette for computing probabilities from the\n direct model containing a \"GsigmaLbl\" gate.\n\n dataset : DataSet\n The data used to specify frequencies and counts\n\n directModel : Model\n Model which contains an estimate of sigma stored\n under the operation label \"GsigmaLbl\".\n\n minProbClip : float, optional\n defines the minimum probability clipping.\n\n Returns\n -------\n numpy array of shape ( len(effectStrs), len(prepStrs) )\n Direct-X logL values corresponding to operation sequences where\n circuit is sandwiched between the each (effectStr,prepStr) pair.\n \"\"\"\n if len(gsplaq.get_all_strs()) > 0: # skip cases with no strings\n plaq_ds = gsplaq.expand_aliases(dataset, circuit_simplifier=directModel)\n plaq_pr = gss.create_plaquette(_objs.Circuit((\"GsigmaLbl\",)))\n plaq_pr.simplify_circuits(directModel)\n\n cnts = total_count_matrix(plaq_ds, dataset)\n probs = probability_matrices(plaq_pr, directModel) # no probs_precomp_dict\n freqs = frequency_matrices(plaq_ds, dataset)\n\n ret = _np.empty((plaq_ds.rows, plaq_ds.cols), 'd')\n for (i, j, opstr, elIndices, _), (_, _, _, elIndices_ds, _) in zip(\n plaq_pr.iter_simplified(), plaq_ds.iter_simplified()):\n logLs = _tools.two_delta_loglfn(cnts[elIndices_ds], probs[elIndices],\n freqs[elIndices_ds], minProbClip)\n ret[i, j] = sum(logLs) # sum all elements for each (i,j) pair\n return ret\n else:\n return _np.nan * _np.ones((gsplaq.rows, gsplaq.cols), 'd')\n\n\n@smart_cached\ndef dscompare_llr_matrices(gsplaq, dscomparator):\n \"\"\"\n Computes matrix of 2*log-likelihood-ratios comparing the\n datasets of `dscomparator`.\n\n Parameters\n ----------\n gsplaq : CircuitPlaquette\n Obtained via :method:`CircuitStructure.get_plaquette`, this object\n specifies which matrix indices should be computed and which operation sequences\n they correspond to.\n\n dscomparator : DataComparator\n The object specifying the data to be compared.\n\n Returns\n -------\n numpy array of shape ( len(effectStrs), len(prepStrs) )\n log-likelihood-ratio values corresponding to the operation sequences\n where a base circuit is sandwiched between the each prep-fiducial and\n effect-fiducial pair.\n \"\"\"\n ret = _np.nan * _np.ones((gsplaq.rows, gsplaq.cols), 'd')\n for i, j, opstr in gsplaq:\n ret[i, j] = dscomparator.llrs[opstr]\n return ret\n\n\n@smart_cached\ndef drift_neglog10pvalue_matrices(gsplaq, drifttuple):\n \"\"\"\n Computes matrix of -log10(pvalues) for testing the stable-circuit (\"no drift\") null hypothesis\n in each cirucit, using the \"max power in spectra\" test.\n\n Parameters\n ----------\n gsplaq : CircuitPlaquette\n Obtained via :method:`CircuitStructure.get_plaquette`, this object\n specifies which matrix indices should be computed and which operation sequences\n they correspond to.\n\n drifttuple : 2-tuple\n The first element of the tuple is a StabilityAnalyzer. The second element is a\n tuple that specifies the hypothesis test(s) from which to extract the p-values.\n This can be None, and then the default is used.\n\n Returns\n -------\n numpy array of shape ( len(effectStrs), len(prepStrs) )\n -log10(pvalues) for testing the \"no drift\" null hypothesis, using the \"max power in\n spectra\" test, on the relevant sequences. 
This operation sequences correspond to the\n operation sequences where a base circuit is sandwiched between the each prep-fiducial\n and effect-fiducial pair.\n\n \"\"\"\n ret = _np.nan * _np.ones((gsplaq.rows, gsplaq.cols), 'd')\n stabilityanalyzer = drifttuple[0]\n dictlabel = drifttuple[1]\n assert(dictlabel == ('circuit',)), \"Currently can only create these matrices for this single type of test!\"\n for i, j, opstr in gsplaq:\n try:\n pval = stabilityanalyzer.get_pvalue(dictlabel={'circuit': opstr}, cutoff=1e-16)\n ret[i, j] = -1 * _np.log10(pval)\n except:\n pass\n return ret\n\n\n@smart_cached\ndef drift_maxtvd_matrices(gsplaq, drifttuple):\n \"\"\"\n Computes matrix of max-tvds for quantifying the size of any detected drift.\n\n Parameters\n ----------\n gsplaq : CircuitPlaquette\n Obtained via :method:`CircuitStructure.get_plaquette`, this object\n specifies which matrix indices should be computed and which operation sequences\n they correspond to.\n\n drifttuple : 2-tuple\n The first element of the tuple is a StabilityAnalyzer. The second element is a\n tuple that specifies the estimatorkey, and the third element is an estimator\n name, that specifies the estimates to use (both can be None, and then the\n default is used).\n\n Returns\n -------\n numpy array of shape ( len(effectStrs), len(prepStrs) )\n The max tvd for quantifying deviations from the data mean. This\n operation sequences correspond to the operation sequences where a base circuit\n is sandwiched between the each prep-fiducial and effect-fiducial pair.\n\n \"\"\"\n ret = _np.nan * _np.ones((gsplaq.rows, gsplaq.cols), 'd')\n stabilityanalyzer = drifttuple[0]\n estimatekey = drifttuple[1]\n estimator = drifttuple[2]\n for i, j, opstr in gsplaq:\n try:\n ret[i, j] = stabilityanalyzer.get_max_tvd_bound(opstr, dskey=None,\n estimatekey=estimatekey, estimator=estimator)\n except:\n pass\n return ret\n\n\n# future: delete this if we decide not to add this option back in.\n# @smart_cached\n# def drift_maxpower_matrices(gsplaq, driftresults):\n# \"\"\"\n# Computes matrix of max powers in the time-series power spectra. 
This\n# value is a reasonable proxy for how \"drifty\" the sequence appears\n# to be.\n\n# Parameters\n# ----------\n# gsplaq : CircuitPlaquette\n# Obtained via :method:`CircuitStructure.get_plaquette`, this object\n# specifies which matrix indices should be computed and which operation sequences\n# they correspond to.\n\n# driftresults : BasicDriftResults\n# The drift analysis results.\n\n# Returns\n# -------\n# numpy array of shape ( len(effectStrs), len(prepStrs) )\n# Matrix of max powers in the time-series power spectra forthe operation sequences where a\n# base circuit is sandwiched between the each prep-fiducial and effect-fiducial pair.\n\n# \"\"\"\n# ret = _np.nan * _np.ones((gsplaq.rows, gsplaq.cols), 'd')\n# for i, j, opstr in gsplaq:\n# try:\n# ret[i, j] = driftresults.get_maxpower(sequence=opstr)\n# except:\n# pass\n# return ret\n\n\ndef ratedNsigma(dataset, model, gss, objective, Np=None, wildcard=None, returnAll=False,\n comm=None, smartc=None, minProbClip=1e-4): # TODO: pipe down minprobclip, radius, probclipinterval?\n \"\"\"\n Computes the number of standard deviations of model violation, comparing\n the data in `dataset` with the `model` model at the \"points\" (sequences)\n specified by `gss`.\n\n Parameters\n ----------\n dataset : DataSet\n The data set.\n\n model : Model\n The model (model).\n\n gss : CircuitStructure\n A operation sequence structure whose `.allstrs` member contains a list of\n `Circuits` specifiying the sequences used to compare the data and\n model. Its `.aliases` member optionally specifies operation label aliases\n to be used when querying `dataset`.\n\n objective : {\"logl\", \"chi2\"}\n Which objective function is used to compute the model violation.\n\n Np : int, optional\n The number of free parameters in the model. If None, then\n `model.num_nongauge_params()` is used.\n\n wildcard : WildcardBudget\n A wildcard budget to apply to the objective function (`objective`),\n which increases the goodness of fit by adjusting (by an amount measured\n in TVD) the probabilities produced by `model` before comparing with\n the frequencies in `dataset`. Currently, this functionality is only\n supported for `objective == \"logl\"`.\n\n returnAll : bool, optional\n Returns additional information such as the raw and expected model\n violation (see below).\n\n comm : mpi4py.MPI.Comm, optional\n When not None, an MPI communicator for distributing the computation\n across multiple processors.\n\n smartc : SmartCache, optional\n A cache object to cache & use previously cached values inside this\n function.\n\n minProbClip : float, optional\n The minimum probability treated normally in the evaluation of the log-likelihood.\n A penalty function replaces the true log-likelihood for probabilities that lie\n below this threshold so that the log-likelihood never becomes undefined (which improves\n optimizer performance).\n\n Returns\n -------\n Nsig : float\n The number of sigma of model violaition\n\n rating : int\n A 1-5 rating (e.g. \"number of stars\") used to indicate the rough\n abililty of the model to fit the data (better fit = higher rating).\n\n modelViolation : float\n The raw value of the objective function. Only returned when\n `returnAll==True`.\n\n expectedViolation : float\n The expected value of the objective function. Only returned when\n `returnAll==True`.\n\n Ns, Np : int\n The number of dataset and model parameters, respectively. 
Only\n returned when `returnAll==True`.\n\n \"\"\"\n gstrs = gss.allstrs\n if objective == \"chi2\":\n assert(wildcard is None), \"Can only use wildcard budget with 'logl' objective!\"\n fitQty = _tools.chi2(model, dataset, gstrs,\n minProbClipForWeighting=minProbClip,\n opLabelAliases=gss.aliases,\n comm=comm, smartc=smartc)\n elif objective == \"logl\":\n logL_upperbound = _tools.logl_max(model, dataset, gstrs, opLabelAliases=gss.aliases,\n smartc=smartc)\n logl = _tools.logl(model, dataset, gstrs, opLabelAliases=gss.aliases,\n minProbClip=minProbClip, comm=comm, smartc=smartc,\n wildcard=wildcard)\n fitQty = 2 * (logL_upperbound - logl) # twoDeltaLogL\n\n if(logL_upperbound < logl):\n if _np.isclose(logL_upperbound, logl):\n logl = logL_upperbound; fitQty = 0.0\n else:\n raise ValueError(\"LogL upper bound = %g but logl = %g!!\" % (logL_upperbound, logl))\n\n ds_gstrs = _tools.apply_aliases_to_circuit_list(gstrs, gss.aliases)\n\n if hasattr(model, 'num_nongauge_params'):\n Np = model.num_nongauge_params()\n else:\n Np = model.num_params()\n Ns = dataset.get_degrees_of_freedom(ds_gstrs) # number of independent parameters in dataset\n k = max(Ns - Np, 1) # expected chi^2 or 2*(logL_ub-logl) mean\n Nsig = (fitQty - k) / _np.sqrt(2 * k)\n if Ns <= Np: _warnings.warn(\"Max-model params (%d) <= model params (%d)! Using k == 1.\" % (Ns, Np))\n #pv = 1.0 - _stats.chi2.cdf(chi2,k) # reject GST model if p-value < threshold (~0.05?)\n\n if Nsig <= 2: rating = 5\n elif Nsig <= 20: rating = 4\n elif Nsig <= 100: rating = 3\n elif Nsig <= 500: rating = 2\n else: rating = 1\n\n if returnAll:\n return Nsig, rating, fitQty, k, Ns, Np\n else:\n return Nsig, rating\n",
"import numpy as np\nimport pickle\n\nfrom ..util import BaseCase\n\nfrom pygsti.objects import FullGaugeGroupElement, Basis, ExplicitOpModel, TPPOVM, UnconstrainedPOVM\nimport pygsti.construction as pc\nimport pygsti.objects.spamvec as sv\n\n\nclass SpamvecUtilTester(BaseCase):\n def test_convert_to_vector_raises_on_bad_input(self):\n bad_vecs = [\n 'akdjsfaksdf',\n [[], [1, 2]],\n [[[]], [[1, 2]]]\n ]\n for bad_vec in bad_vecs:\n with self.assertRaises(ValueError):\n sv.SPAMVec.convert_to_vector(bad_vec)\n with self.assertRaises(ValueError):\n sv.SPAMVec.convert_to_vector(0.0) # something with no len()\n\n def test_base_spamvec(self):\n raw = sv.SPAMVec(4, \"densitymx\", \"prep\")\n\n T = FullGaugeGroupElement(\n np.array([[0, 1, 0, 0],\n [1, 0, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]], 'd'))\n\n with self.assertRaises(NotImplementedError):\n raw.todense()\n with self.assertRaises(NotImplementedError):\n raw.transform(T, \"prep\")\n with self.assertRaises(NotImplementedError):\n raw.depolarize(0.01)\n\n\nclass SpamvecBase(object):\n def setUp(self):\n self.vec = self.build_vec()\n ExplicitOpModel._strict = False\n\n def test_num_params(self):\n self.assertEqual(self.vec.num_params(), self.n_params)\n\n def test_copy(self):\n vec_copy = self.vec.copy()\n self.assertArraysAlmostEqual(vec_copy, self.vec)\n self.assertEqual(type(vec_copy), type(self.vec))\n\n def test_get_dimension(self):\n self.assertEqual(self.vec.get_dimension(), 4)\n\n def test_set_value_raises_on_bad_size(self):\n with self.assertRaises(ValueError):\n self.vec.set_value(np.zeros((1, 1), 'd')) # bad size\n\n def test_vector_conversion(self):\n v = self.vec.to_vector()\n self.vec.from_vector(v)\n deriv = self.vec.deriv_wrt_params()\n # TODO assert correctness\n\n def test_element_accessors(self):\n a = self.vec[:]\n b = self.vec[0]\n #with self.assertRaises(ValueError):\n # self.vec.shape = (2,2) #something that would affect the shape??\n\n self.vec_as_str = str(self.vec)\n a1 = self.vec[:] # invoke getslice method\n # TODO assert correctness\n\n def test_pickle(self):\n pklstr = pickle.dumps(self.vec)\n vec_pickle = pickle.loads(pklstr)\n self.assertArraysAlmostEqual(vec_pickle, self.vec)\n self.assertEqual(type(vec_pickle), type(self.vec))\n\n def test_arithmetic(self):\n result = self.vec + self.vec\n self.assertEqual(type(result), np.ndarray)\n result = self.vec + (-self.vec)\n self.assertEqual(type(result), np.ndarray)\n result = self.vec - self.vec\n self.assertEqual(type(result), np.ndarray)\n result = self.vec - abs(self.vec)\n self.assertEqual(type(result), np.ndarray)\n result = 2 * self.vec\n self.assertEqual(type(result), np.ndarray)\n result = self.vec * 2\n self.assertEqual(type(result), np.ndarray)\n result = 2 / self.vec\n self.assertEqual(type(result), np.ndarray)\n result = self.vec / 2\n self.assertEqual(type(result), np.ndarray)\n result = self.vec // 2\n self.assertEqual(type(result), np.ndarray)\n result = self.vec**2\n self.assertEqual(type(result), np.ndarray)\n result = self.vec.transpose()\n self.assertEqual(type(result), np.ndarray)\n\n V = np.ones((4, 1), 'd')\n\n result = self.vec + V\n self.assertEqual(type(result), np.ndarray)\n result = self.vec - V\n self.assertEqual(type(result), np.ndarray)\n result = V + self.vec\n self.assertEqual(type(result), np.ndarray)\n result = V - self.vec\n self.assertEqual(type(result), np.ndarray)\n\n def test_hessian(self):\n self.assertFalse(self.vec.has_nonzero_hessian())\n\n def test_frobeniusdist2(self):\n self.vec.frobeniusdist2(self.vec, \"prep\")\n 
self.vec.frobeniusdist2(self.vec, \"effect\")\n # TODO assert correctness\n\n def test_frobeniusdist2_raises_on_bad_type(self):\n with self.assertRaises(ValueError):\n self.vec.frobeniusdist2(self.vec, \"foobar\")\n\n\nclass MutableSpamvecBase(SpamvecBase):\n def test_set_value(self):\n v = np.asarray(self.vec)\n self.vec.set_value(v)\n # TODO assert correctness\n\n def test_transform(self):\n S = FullGaugeGroupElement(np.identity(4, 'd'))\n self.vec.transform(S, 'prep')\n self.vec.transform(S, 'effect')\n # TODO assert correctness\n\n def test_transform_raises_on_bad_type(self):\n S = FullGaugeGroupElement(np.identity(4, 'd'))\n with self.assertRaises(ValueError):\n self.vec.transform(S, 'foobar')\n\n def test_depolarize(self):\n self.vec.depolarize(0.9)\n self.vec.depolarize([0.9, 0.8, 0.7])\n # TODO assert correctness\n\n\nclass ImmutableSpamvecBase(SpamvecBase):\n def test_raises_on_set_value(self):\n v = np.asarray(self.vec)\n with self.assertRaises(ValueError):\n self.vec.set_value(v)\n\n def test_raises_on_transform(self):\n S = FullGaugeGroupElement(np.identity(4, 'd'))\n with self.assertRaises(ValueError):\n self.vec.transform(S, 'prep')\n\n def test_raises_on_depolarize(self):\n with self.assertRaises(ValueError):\n self.vec.depolarize(0.9)\n\n\nclass FullSpamvecTester(MutableSpamvecBase, BaseCase):\n n_params = 4\n\n @staticmethod\n def build_vec():\n return sv.FullSPAMVec([1.0 / np.sqrt(2), 0, 0, 1.0 / np.sqrt(2)])\n\n def test_raises_on_bad_dimension_2(self):\n with self.assertRaises(ValueError):\n sv.FullSPAMVec([[1.0 / np.sqrt(2), 0, 0, 1.0 / np.sqrt(2)], [0, 0, 0, 0]])\n\n def test_convert(self):\n basis = Basis.cast(\"pp\", 4)\n conv = sv.convert(self.vec, \"full\", basis)\n # TODO assert correctness\n\n def test_raises_on_invalid_conversion_type(self):\n basis = Basis.cast(\"pp\", 4)\n with self.assertRaises(ValueError):\n sv.convert(self.vec, \"foobar\", basis)\n\n\nclass TPSpamvecTester(MutableSpamvecBase, BaseCase):\n n_params = 3\n\n @staticmethod\n def build_vec():\n return sv.TPSPAMVec([1.0 / np.sqrt(2), 0, 0, 1.0 / np.sqrt(2)])\n\n def test_raises_on_bad_initial_element(self):\n with self.assertRaises(ValueError):\n sv.TPSPAMVec([1.0, 0, 0, 0])\n # incorrect initial element for TP!\n with self.assertRaises(ValueError):\n self.vec.set_value([1.0, 0, 0, 0])\n # incorrect initial element for TP!\n\n def test_convert(self):\n basis = Basis.cast(\"pp\", 4)\n conv = sv.convert(self.vec, \"TP\", basis)\n # TODO assert correctness\n\n\nclass CPTPSpamvecTester(MutableSpamvecBase, BaseCase):\n n_params = 4\n\n @staticmethod\n def build_vec():\n v_tp = np.zeros((4, 1), 'd')\n v_tp[0] = 1.0 / np.sqrt(2)\n v_tp[3] = 1.0 / np.sqrt(2) - 0.05\n return sv.CPTPSPAMVec(v_tp, \"pp\")\n\n def test_hessian(self):\n self.skipTest(\"Hessian computation isn't implemented for CPTPSPAMVec; remove this skip when it becomes a priority\")\n self.vec.hessian_wrt_params()\n self.vec.hessian_wrt_params([0])\n self.vec.hessian_wrt_params([0], [0])\n # TODO assert correctness\n\n\nclass StaticSpamvecTester(ImmutableSpamvecBase, BaseCase):\n n_params = 0\n v_tp = [1.0 / np.sqrt(2), 0, 0, 1.0 / np.sqrt(2)]\n\n @staticmethod\n def build_vec():\n return sv.StaticSPAMVec(StaticSpamvecTester.v_tp)\n\n def test_convert(self):\n basis = Basis.cast(\"pp\", 4)\n conv = sv.convert(self.vec, \"static\", basis)\n # TODO assert correctness\n\n def test_optimize(self):\n s = sv.FullSPAMVec(StaticSpamvecTester.v_tp)\n sv.optimize_spamvec(self.vec, s)\n # TODO assert correctness\n\n\nclass 
POVMSpamvecBase(ImmutableSpamvecBase):\n def test_vector_conversion(self):\n with self.assertRaises(ValueError):\n self.vec.to_vector()\n\n\nclass ComplementSpamvecTester(POVMSpamvecBase, BaseCase):\n n_params = 4\n\n @staticmethod\n def build_vec():\n v = np.ones((4, 1), 'd')\n v_id = np.zeros((4, 1), 'd')\n v_id[0] = 1.0 / np.sqrt(2)\n tppovm = TPPOVM([('0', sv.FullSPAMVec(v, typ=\"effect\")),\n ('1', sv.FullSPAMVec(v_id - v, typ=\"effect\"))])\n return tppovm['1'] # complement POVM\n\n def test_vector_conversion(self):\n with self.assertRaises(ValueError):\n self.vec.to_vector()\n\n\nclass TensorProdSpamvecBase(ImmutableSpamvecBase):\n def test_arithmetic(self):\n with self.assertRaises(TypeError):\n self.vec + self.vec\n\n def test_copy(self):\n vec_copy = self.vec.copy()\n self.assertArraysAlmostEqual(vec_copy.todense(), self.vec.todense())\n self.assertEqual(type(vec_copy), type(self.vec))\n\n def test_element_accessors(self):\n with self.assertRaises(TypeError):\n self.vec[:]\n\n def test_pickle(self):\n pklstr = pickle.dumps(self.vec)\n vec_pickle = pickle.loads(pklstr)\n self.assertArraysAlmostEqual(vec_pickle.todense(), self.vec.todense())\n self.assertEqual(type(vec_pickle), type(self.vec))\n\n\nclass TensorProdPrepSpamvecTester(TensorProdSpamvecBase, BaseCase):\n n_params = 4\n\n @staticmethod\n def build_vec():\n v = np.ones((2, 1), 'd')\n return sv.TensorProdSPAMVec(\"prep\", [sv.FullSPAMVec(v),\n sv.FullSPAMVec(v)])\n\n\nclass TensorProdEffectSpamvecTester(TensorProdSpamvecBase, POVMSpamvecBase, BaseCase):\n n_params = 4\n\n @staticmethod\n def build_vec():\n v = np.ones((4, 1), 'd')\n povm = UnconstrainedPOVM([('0', sv.FullSPAMVec(v,typ=\"effect\"))])\n return sv.TensorProdSPAMVec(\"effect\", [povm], ['0'])\n"
] | [
[
"numpy.set_printoptions",
"numpy.linalg.norm",
"numpy.array_equal"
],
[
"numpy.array",
"numpy.empty"
],
[
"numpy.arange",
"numpy.linalg.norm",
"numpy.concatenate",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.linalg.eigvals",
"numpy.sqrt",
"numpy.abs",
"numpy.isnan",
"numpy.reshape",
"numpy.ones",
"numpy.log10",
"numpy.shape",
"numpy.array",
"numpy.average",
"numpy.empty",
"numpy.isclose"
],
[
"numpy.sqrt",
"numpy.asarray",
"numpy.ones",
"numpy.identity",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
qwsae10/pygemini | [
"adc6b2401ac9fc8b7cb1fc8870322f730a3383a3",
"adc6b2401ac9fc8b7cb1fc8870322f730a3383a3",
"adc6b2401ac9fc8b7cb1fc8870322f730a3383a3"
] | [
"src/gemini3d/compare/input.py",
"src/gemini3d/grid/cartesian.py",
"src/gemini3d/raw/read.py"
] | [
"from __future__ import annotations\nfrom pathlib import Path\nimport logging\n\nimport numpy as np\n\nfrom .. import read\n\nfrom .plot import plotdiff\nfrom .utils import err_pct, load_tol\nfrom .precip import compare_precip\nfrom .efield import compare_Efield\n\n\ndef compare_input(\n new_dir: Path,\n refdir: Path,\n *,\n tol: dict[str, float] = None,\n file_format: str = None,\n plot: bool = True,\n) -> int:\n\n names = {\"ns\", \"Ts\", \"vs1\"}\n\n ref_params = read.config(refdir)\n ref_indir = refdir / ref_params[\"indat_file\"].parts[-2]\n ref = read.data(ref_indir / ref_params[\"indat_file\"].name, var=names)\n\n new_params = read.config(new_dir)\n if len(new_params[\"time\"]) <= 1:\n raise ValueError(\n f\"{new_dir} simulation did not run long enough, must run for more than one time step\"\n )\n new_indir = new_dir / new_params[\"indat_file\"].parts[-2]\n new = read.data(new_indir / new_params[\"indat_file\"].name, var=names)\n\n if tol is None:\n tol = load_tol()\n\n errs = 0\n # %% initial conditions\n\n for k in names:\n b = ref[k]\n a = new[k]\n\n n = k[0].upper()\n\n assert a.shape == b.shape, f\"{k}: ref shape {b.shape} != data shape {a.shape}\"\n\n if not np.allclose(a, b, rtol=0.1 * tol[f\"rtol{n}\"], atol=0.1 * tol[f\"atol{n}\"]):\n errs += 1\n logging.error(f\"{k} {err_pct(a, b):.1f} %\")\n\n if plot:\n if k == \"ns\":\n # just plot electron density\n a = a[-1]\n b = b[-1]\n plotdiff(a, b, ref_params[\"time\"][0], new_dir, refdir)\n\n if \"precdir\" in new_params:\n prec_errs = compare_precip(\n ref_params[\"time\"],\n new_indir / new_params[\"precdir\"].name,\n ref_indir / ref_params[\"precdir\"].name,\n tol=tol,\n plot=plot,\n file_format=file_format,\n )\n errs += prec_errs\n\n if \"E0dir\" in new_params:\n efield_errs = compare_Efield(\n ref_params[\"time\"],\n new_indir / new_params[\"E0dir\"].name,\n ref_indir / ref_params[\"E0dir\"].name,\n tol=tol,\n plot=plot,\n file_format=file_format,\n )\n errs += efield_errs\n\n return errs\n",
"\"\"\"\ncartesian grid\n\"\"\"\n\nfrom __future__ import annotations\nimport logging\nimport typing as T\n\nimport numpy as np\n\nfrom .. import read\nfrom ..coord import geog2geomag, geomag2geog\nfrom .uniform import altitude_grid, grid1d\n\n\ndef cart3d(p: dict[str, T.Any]) -> dict[str, T.Any]:\n \"\"\"make cartesian grid\n\n Parameters\n -----------\n\n p: dict\n simulation parameters\n\n Returns\n -------\n\n xg: dict\n simulation grid\n \"\"\"\n\n # %%create altitude grid\n # original Matlab params\n # p.alt_min = 80e3;\n # p.alt_max = 1000e3;\n # p.alt_scale = [10e3, 8e3, 500e3, 150e3];\n\n if {\"alt_min\", \"alt_max\", \"alt_scale\", \"Bincl\"} <= p.keys():\n # https://docs.python.org/3/library/stdtypes.html#frozenset.issubset\n z = altitude_grid(p[\"alt_min\"], p[\"alt_max\"], p[\"Bincl\"], p[\"alt_scale\"])\n elif \"eq_dir\" in p and p[\"eq_dir\"].is_file():\n logging.info(f\"reusing grid from {p['eq_dir']}\")\n xeq = read.grid(p[\"eq_dir\"])\n z = xeq[\"x1\"]\n del xeq\n elif {\"alt_min\", \"alt_max\", \"lzp\"} <= p.keys():\n logging.info(\"make uniform altitude grid\")\n z = np.linspace(p[\"alt_min\"], p[\"alt_max\"], p[\"lzp\"])\n dz = z[1] - z[0]\n z = np.concatenate((z[0] - 2 * dz, z[0] - dz, z, z[-1] + dz, z[-1] + 2 * dz))\n else:\n raise ValueError(\"must specify altitude grid parameters or grid file to reuse\")\n\n # %% TRANSVERSE GRID (BASED ON SIZE OF CURRENT REGION SPECIFIED ABOVE)\n # EAST\n if \"x2parms\" in p:\n x = grid1d(p[\"xdist\"], p[\"lxp\"], p[\"x2parms\"])\n else:\n x = grid1d(p[\"xdist\"], p[\"lxp\"])\n\n # NORTH\n if \"x3parms\" in p:\n y = grid1d(p[\"ydist\"], p[\"lyp\"], p[\"x3parms\"])\n else:\n y = grid1d(p[\"ydist\"], p[\"lyp\"])\n\n # %% COMPUTE CELL WALL LOCATIONS\n lx2 = x.size\n xi = np.empty(lx2 + 1)\n xi[1:-1] = 1 / 2 * (x[1:] + x[:-1])\n xi[0] = x[0] - 1 / 2 * (x[1] - x[0])\n xi[-1] = x[-1] + 1 / 2 * (x[-1] - x[-2])\n\n lx3 = y.size\n yi = np.empty(lx3 + 1)\n yi[1:-1] = 1 / 2 * (y[1:] + y[:-1])\n yi[0] = y[0] - 1 / 2 * (y[1] - y[0])\n yi[-1] = y[-1] + 1 / 2 * (y[-1] - y[-2])\n\n lx1 = z.size\n zi = np.empty(lx1 + 1)\n zi[1:-1] = 1 / 2 * (z[1:] + z[:-1])\n zi[0] = z[0] - 1 / 2 * (z[1] - z[0])\n zi[-1] = z[-1] + 1 / 2 * (z[-1] - z[-2])\n\n # %% GRAVITATIONAL FIELD COMPONENTS IN DIPOLE SYSTEM\n Re = 6370e3\n G = 6.67428e-11\n Me = 5.9722e24\n r = z + Re\n g = G * Me / r ** 2\n gz = np.broadcast_to(-g[:, None, None], (g.size, lx2, lx3))\n assert gz.shape == (lx1, lx2, lx3)\n\n # DISTANCE EW AND NS (FROM ENU (or UEN in our case - cyclic permuted) COORD. 
SYSTEM)\n # #NEED TO BE CONVERTED TO DIPOLE SPHERICAL AND THEN\n # GLAT/GLONG - BASICALLY HERE WE ARE MAPPING THE CARTESIAN GRID ONTO THE\n # SURFACE OF A SPHERE THEN CONVERTING TO GEOGRAPHIC.\n # get the magnetic coordinates of the grid center, based on user input\n thetactr, phictr = geog2geomag(p[\"glat\"], p[\"glon\"])\n\n # %% Center of earth distance\n r = Re + z\n r = np.broadcast_to(r[:, None, None], (r.size, lx2, lx3))\n assert r.shape == (lx1, lx2, lx3)\n\n # %% Northward angular distance\n gamma2 = y / Re\n # must retain the sign of x3\n theta = thetactr - gamma2\n # minus because distance north is against theta's direction\n theta = np.broadcast_to(theta[None, None, :], (lx1, lx2, theta.size))\n assert theta.shape == (lx1, lx2, lx3)\n\n # %% Eastward angular distance\n # gamma1=x/Re; %must retain the sign of x2\n gamma1 = x / Re / np.sin(thetactr)\n # must retain the sign of x2, just use theta of center of grid\n phi = phictr + gamma1\n phi = np.broadcast_to(phi[None, :, None], (lx1, phi.size, lx3))\n assert phi.shape == (lx1, lx2, lx3)\n\n # %% COMPUTE THE GEOGRAPHIC COORDINATES OF EACH GRID POINT\n glatgrid, glongrid = geomag2geog(theta, phi)\n\n # %% COMPUTE ECEF CARTESIAN IN CASE THEY ARE NEEDED\n xECEF = r * np.sin(theta) * np.cos(phi)\n yECEF = r * np.sin(theta) * np.sin(phi)\n zECEF = r * np.cos(theta)\n\n # %% COMPUTE SPHERICAL ECEF UNIT VECTORS - CARTESIAN-ECEF COMPONENTS\n er = np.empty((lx1, lx2, lx3, 3))\n etheta = np.empty_like(er)\n ephi = np.empty_like(er)\n\n er[:, :, :, 0] = np.sin(theta) * np.cos(phi)\n # xECEF-component of er\n er[:, :, :, 1] = np.sin(theta) * np.sin(phi)\n # yECEF\n er[:, :, :, 2] = np.cos(theta)\n # zECEF\n etheta[:, :, :, 0] = np.cos(theta) * np.cos(phi)\n etheta[:, :, :, 1] = np.cos(theta) * np.sin(phi)\n etheta[:, :, :, 2] = -np.sin(theta)\n ephi[:, :, :, 0] = -np.sin(phi)\n ephi[:, :, :, 1] = np.cos(phi)\n ephi[:, :, :, 2] = 0\n\n # %% UEN UNIT VECTORS IN ECEF COMPONENTS\n e1 = er\n # up is the same direction as from ctr of earth\n e2 = ephi\n # e2 is same as ephi\n e3 = -etheta\n # etheta is positive south, e3 is pos. 
north\n\n # %% STORE RESULTS IN GRID DATA STRUCTURE\n xg = {\n \"x1\": z,\n \"x2\": x,\n \"x3\": y,\n \"x1i\": zi,\n \"x2i\": xi,\n \"x3i\": yi,\n }\n\n lx = (xg[\"x1\"].size, xg[\"x2\"].size, xg[\"x3\"].size)\n xg[\"lx\"] = np.array(lx)\n\n xg[\"dx1f\"] = np.append(xg[\"x1\"][1:] - xg[\"x1\"][:-1], xg[\"x1\"][-1] - xg[\"x1\"][-2])\n # FWD DIFF\n xg[\"dx1b\"] = np.insert(xg[\"x1\"][1:] - xg[\"x1\"][:-1], 0, xg[\"x1\"][1] - xg[\"x1\"][0])\n # BACK DIFF\n xg[\"dx1h\"] = xg[\"x1i\"][1:-1] - xg[\"x1i\"][:-2]\n # MIDPOINT DIFFS\n\n xg[\"dx2f\"] = np.append(xg[\"x2\"][1:] - xg[\"x2\"][:-1], xg[\"x2\"][-1] - xg[\"x2\"][-2])\n # FWD DIFF\n xg[\"dx2b\"] = np.insert(xg[\"x2\"][1:] - xg[\"x2\"][:-1], 0, xg[\"x2\"][1] - xg[\"x2\"][0])\n # BACK DIFF\n xg[\"dx2h\"] = xg[\"x2i\"][1:-1] - xg[\"x2i\"][:-2]\n # MIDPOINT DIFFS\n\n xg[\"dx3f\"] = np.append(xg[\"x3\"][1:] - xg[\"x3\"][:-1], xg[\"x3\"][-1] - xg[\"x3\"][-2])\n # FWD DIFF\n xg[\"dx3b\"] = np.insert(xg[\"x3\"][1:] - xg[\"x3\"][:-1], 0, xg[\"x3\"][1] - xg[\"x3\"][0])\n # BACK DIFF\n xg[\"dx3h\"] = xg[\"x3i\"][1:-1] - xg[\"x3i\"][:-2]\n # MIDPOINT DIFFS\n\n xg[\"h1\"] = np.ones(lx)\n xg[\"h2\"] = np.ones(lx)\n xg[\"h3\"] = np.ones(lx)\n xg[\"h1x1i\"] = np.ones((lx[0] + 1, lx[1], lx[2]))\n xg[\"h2x1i\"] = np.ones((lx[0] + 1, lx[1], lx[2]))\n xg[\"h3x1i\"] = np.ones((lx[0] + 1, lx[1], lx[2]))\n xg[\"h1x2i\"] = np.ones((lx[0], lx[1] + 1, lx[2]))\n xg[\"h2x2i\"] = np.ones((lx[0], lx[1] + 1, lx[2]))\n xg[\"h3x2i\"] = np.ones((lx[0], lx[1] + 1, lx[2]))\n xg[\"h1x3i\"] = np.ones((lx[0], lx[1], lx[2] + 1))\n xg[\"h2x3i\"] = np.ones((lx[0], lx[1], lx[2] + 1))\n xg[\"h3x3i\"] = np.ones((lx[0], lx[1], lx[2] + 1))\n\n # %% Cartesian, ECEF representation of curvilinar coordinates\n xg[\"e1\"] = e1\n xg[\"e2\"] = e2\n xg[\"e3\"] = e3\n\n # %% ECEF spherical coordinates\n xg[\"r\"] = r\n xg[\"theta\"] = theta\n xg[\"phi\"] = phi\n # xg.rx1i=[]; xg.thetax1i=[];\n # xg.rx2i=[]; xg.thetax2i=[];\n\n # %% These are cartesian representations of the ECEF, spherical unit vectors\n xg[\"er\"] = er\n xg[\"etheta\"] = etheta\n xg[\"ephi\"] = ephi\n\n xg[\"I\"] = np.broadcast_to(p[\"Bincl\"], (lx2, lx3))\n\n # %% Cartesian ECEF coordinates\n xg[\"x\"] = xECEF\n xg[\"z\"] = zECEF\n xg[\"y\"] = yECEF\n xg[\"alt\"] = xg[\"r\"] - Re\n # since we need a 3D array use xg.r here...\n\n xg[\"gx1\"] = gz\n xg[\"gx2\"] = np.zeros(lx)\n xg[\"gx3\"] = np.zeros(lx)\n\n xg[\"Bmag\"] = np.broadcast_to(-50000e-9, xg[\"lx\"])\n # minus for northern hemisphere...\n\n xg[\"glat\"] = glatgrid\n xg[\"glon\"] = glongrid\n\n # xg['xp']=x; xg['zp']=z;\n\n # xg['inull']=[];\n xg[\"nullpts\"] = np.zeros(lx)\n\n # %% TRIM DATA STRUCTURE TO BE THE SIZE FORTRAN EXPECTS\n # note: xgf is xg == True\n xgf = xg\n\n # indices corresponding to non-ghost cells for 1 dimension\n i1 = slice(2, lx[0] - 2)\n i2 = slice(2, lx[1] - 2)\n i3 = slice(2, lx[2] - 2)\n\n # any dx variable will not need to first element (backward diff of two ghost cells)\n idx1 = slice(1, lx[0])\n idx2 = slice(1, lx[1])\n idx3 = slice(1, lx[2])\n\n # x1-interface variables need only non-ghost cell values (left interface) plus one\n ix1i = slice(2, lx[0] - 1)\n ix2i = slice(2, lx[1] - 1)\n ix3i = slice(2, lx[2] - 1)\n\n # remove ghost cells\n # now that indices have been define we can go ahead and make this change\n xgf[\"lx\"] = xgf[\"lx\"] - 4\n\n xgf[\"dx1b\"] = xgf[\"dx1b\"][idx1]\n xgf[\"dx2b\"] = xgf[\"dx2b\"][idx2]\n xgf[\"dx3b\"] = xgf[\"dx3b\"][idx3]\n\n xgf[\"x1i\"] = xgf[\"x1i\"][ix1i]\n xgf[\"x2i\"] = xgf[\"x2i\"][ix2i]\n 
xgf[\"x3i\"] = xgf[\"x3i\"][ix3i]\n\n xgf[\"dx1h\"] = xgf[\"dx1h\"][i1]\n xgf[\"dx2h\"] = xgf[\"dx2h\"][i2]\n xgf[\"dx3h\"] = xgf[\"dx3h\"][i3]\n\n xgf[\"h1x1i\"] = xgf[\"h1x1i\"][ix1i, i2, i3]\n xgf[\"h2x1i\"] = xgf[\"h2x1i\"][ix1i, i2, i3]\n xgf[\"h3x1i\"] = xgf[\"h3x1i\"][ix1i, i2, i3]\n\n xgf[\"h1x2i\"] = xgf[\"h1x2i\"][i1, ix2i, i3]\n xgf[\"h2x2i\"] = xgf[\"h2x2i\"][i1, ix2i, i3]\n xgf[\"h3x2i\"] = xgf[\"h3x2i\"][i1, ix2i, i3]\n\n xgf[\"h1x3i\"] = xgf[\"h1x3i\"][i1, i2, ix3i]\n xgf[\"h2x3i\"] = xgf[\"h2x3i\"][i1, i2, ix3i]\n xgf[\"h3x3i\"] = xgf[\"h3x3i\"][i1, i2, ix3i]\n\n xgf[\"gx1\"] = xgf[\"gx1\"][i1, i2, i3]\n xgf[\"gx2\"] = xgf[\"gx2\"][i1, i2, i3]\n xgf[\"gx3\"] = xgf[\"gx3\"][i1, i2, i3]\n\n xgf[\"glat\"] = xgf[\"glat\"][i1, i2, i3]\n xgf[\"glon\"] = xgf[\"glon\"][i1, i2, i3]\n xgf[\"alt\"] = xgf[\"alt\"][i1, i2, i3]\n\n xgf[\"Bmag\"] = xgf[\"Bmag\"][i1, i2, i3]\n\n xgf[\"I\"] = xgf[\"I\"][i2, i3]\n\n xgf[\"nullpts\"] = xgf[\"nullpts\"][i1, i2, i3]\n\n xgf[\"e1\"] = xgf[\"e1\"][i1, i2, i3, :]\n xgf[\"e2\"] = xgf[\"e2\"][i1, i2, i3, :]\n xgf[\"e3\"] = xgf[\"e3\"][i1, i2, i3, :]\n\n xgf[\"er\"] = xgf[\"er\"][i1, i2, i3, :]\n xgf[\"etheta\"] = xgf[\"etheta\"][i1, i2, i3, :]\n xgf[\"ephi\"] = xgf[\"ephi\"][i1, i2, i3, :]\n\n xgf[\"r\"] = xgf[\"r\"][i1, i2, i3]\n xgf[\"theta\"] = xgf[\"theta\"][i1, i2, i3]\n xgf[\"phi\"] = xgf[\"phi\"][i1, i2, i3]\n\n xgf[\"x\"] = xgf[\"x\"][i1, i2, i3]\n xgf[\"y\"] = xgf[\"y\"][i1, i2, i3]\n xgf[\"z\"] = xgf[\"z\"][i1, i2, i3]\n\n xgf[\"glonctr\"] = p[\"glon\"]\n xgf[\"glatctr\"] = p[\"glat\"]\n\n return xgf\n",
"\"\"\"\nraw binary file I/O.\nRaw files are deprecated and do not contain most features of Gemini\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom pathlib import Path\nimport typing as T\nimport logging\nimport struct\nfrom datetime import datetime, timedelta\n\nimport numpy as np\nimport xarray\n\nfrom .. import find\nfrom .. import WAVELEN, LSP\n\n\ndef simsize(path: Path) -> tuple[int, ...]:\n \"\"\"\n get simulation size\n\n Parameters\n ----------\n fn: pathlib.Path\n filepath to simsize.dat\n\n Returns\n -------\n size: tuple of int, int, int\n 3 integers telling simulation grid size\n \"\"\"\n\n path = find.simsize(path, suffix=\".dat\")\n\n fsize = path.stat().st_size\n if fsize == 12:\n lx = struct.unpack(\"III\", path.open(\"rb\").read(12))\n elif fsize == 8:\n lx = struct.unpack(\"II\", path.open(\"rb\").read(8))\n else:\n raise ValueError(f\"{path} is not expected 8 bytes (2-D) or 12 bytes (3-D) long\")\n\n return lx\n\n\ndef grid(file: Path, shape: bool = False) -> dict[str, T.Any]:\n \"\"\"\n get simulation grid\n\n Parameters\n ----------\n fn: pathlib.Path\n filepath to simgrid\n\n Returns\n -------\n grid: dict\n grid parameters\n \"\"\"\n\n if shape:\n raise NotImplementedError(\"grid shape for raw would be straightforward.\")\n\n lx = simsize(file)\n\n if not file.is_file():\n file = find.grid(file, suffix=\".dat\")\n\n if len(lx) == 2:\n return grid2(file, lx)\n elif len(lx) == 3:\n return grid3(file, lx)\n else:\n raise ValueError(\"lx must be 2-D or 3-D\")\n\n\ndef grid2(fn: Path, lx: tuple[int, ...] | list[int]) -> dict[str, T.Any]:\n \"\"\"for Efield\"\"\"\n\n ft = np.float64\n\n if not fn.is_file():\n raise FileNotFoundError(fn)\n\n with fn.open(\"rb\") as f:\n xg = {\"lx\": lx, \"mlon\": np.fromfile(f, ft, lx[0]), \"mlat\": np.fromfile(f, ft, lx[1])}\n\n return xg\n\n\ndef grid3(fn: Path, lx: tuple[int, ...] 
| list[int]) -> dict[str, T.Any]:\n \"\"\"\n load 3D grid\n \"\"\"\n\n if not fn.is_file():\n raise FileNotFoundError(fn)\n\n lgridghost = (lx[0] + 4) * (lx[1] + 4) * (lx[2] + 4)\n gridsizeghost = [lx[0] + 4, lx[1] + 4, lx[2] + 4]\n\n ft = np.float64\n\n xg = {\"lx\": lx}\n\n read = np.fromfile\n\n with fn.open(\"rb\") as f:\n for i in (1, 2, 3):\n xg[f\"x{i}\"] = read(f, ft, lx[i - 1] + 4)\n xg[f\"x{i}i\"] = read(f, ft, lx[i - 1] + 1)\n xg[f\"dx{i}b\"] = read(f, ft, lx[i - 1] + 3)\n xg[f\"dx{i}h\"] = read(f, ft, lx[i - 1])\n for i in (1, 2, 3):\n xg[f\"h{i}\"] = read(f, ft, lgridghost).reshape(gridsizeghost)\n L = [lx[0] + 1, lx[1], lx[2]]\n for i in (1, 2, 3):\n xg[f\"h{i}x1i\"] = read(f, ft, np.prod(L)).reshape(L)\n L = [lx[0], lx[1] + 1, lx[2]]\n for i in (1, 2, 3):\n xg[f\"h{i}x2i\"] = read(f, ft, np.prod(L)).reshape(L)\n L = [lx[0], lx[1], lx[2] + 1]\n for i in (1, 2, 3):\n xg[f\"h{i}x3i\"] = read(f, ft, np.prod(L)).reshape(L)\n for i in (1, 2, 3):\n xg[f\"gx{i}\"] = read(f, ft, np.prod(lx)).reshape(lx)\n for k in (\"alt\", \"glat\", \"glon\", \"Bmag\"):\n xg[k] = read(f, ft, np.prod(lx)).reshape(lx)\n xg[\"Bincl\"] = read(f, ft, lx[1] * lx[2]).reshape(lx[1:])\n xg[\"nullpts\"] = read(f, ft, np.prod(lx)).reshape(lx)\n if f.tell() == fn.stat().st_size: # not EOF\n return xg\n\n L = [lx[0], lx[1], lx[2], 3]\n for i in (1, 2, 3):\n xg[f\"e{i}\"] = read(f, ft, np.prod(L)).reshape(L)\n for k in (\"er\", \"etheta\", \"ephi\"):\n xg[k] = read(f, ft, np.prod(L)).reshape(L)\n for k in (\"r\", \"theta\", \"phi\"):\n xg[k] = read(f, ft, np.prod(lx)).reshape(lx)\n if f.tell() == fn.stat().st_size: # not EOF\n return xg\n\n for k in (\"x\", \"y\", \"z\"):\n xg[k] = read(f, ft, np.prod(lx)).reshape(lx)\n\n return xg\n\n\ndef Efield(file: Path) -> xarray.Dataset:\n \"\"\"\n load Efield_inputs files that contain input electric field in V/m\n \"\"\"\n\n ft = np.float64\n\n lx = simsize(file.parent)\n\n assert lx[0] > 0, \"must have strictly positive number of longitude cells\"\n assert lx[1] > 0, \"must have strictly positive number of latitude cells\"\n\n m = grid2(file.parent / \"simgrid.dat\", lx)\n\n if ((m[\"mlat\"] < -90) | (m[\"mlat\"] > 90)).any():\n raise ValueError(f\"impossible latitude, was file read correctly? 
{file}\")\n\n dat = xarray.Dataset(coords={\"mlon\": m[\"mlon\"], \"mlat\": m[\"mlat\"]})\n\n with file.open(\"rb\") as f:\n \"\"\"\n NOTE:\n this is mistakenly a float from Matlab\n to keep compatibility with old files, we left it as real64.\n New work should be using HDF5 instead of raw in any case.\n \"\"\"\n dat[\"flagdirich\"] = int(np.fromfile(f, ft, 1))\n for p in (\"Exit\", \"Eyit\", \"Vminx1it\", \"Vmaxx1it\"):\n dat[p] = ((\"x2\", \"x3\"), read2D(f, lx))\n for p in (\"Vminx2ist\", \"Vmaxx2ist\"):\n dat[p] = ((\"x2\",), np.fromfile(f, ft, lx[1]))\n for p in (\"Vminx3ist\", \"Vmaxx3ist\"):\n dat[p] = ((\"x3\",), np.fromfile(f, ft, lx[0]))\n filesize = file.stat().st_size\n if f.tell() != filesize:\n logging.error(f\"{file} size {filesize} != file read position {f.tell()}\")\n\n return dat\n\n\ndef frame3d_curv(file: Path, xg: dict[str, T.Any] = None) -> xarray.Dataset:\n \"\"\"\n curvilinear\n\n Parameters\n ----------\n\n file: pathlib.Path\n filename to read\n \"\"\"\n\n if not file.is_file():\n raise FileNotFoundError(file)\n\n lx = simsize(file.parent)\n\n try:\n if not xg:\n xg = grid(file.parent)\n\n dat = xarray.Dataset(\n coords={\"x1\": xg[\"x1\"][2:-2], \"x2\": xg[\"x2\"][2:-2], \"x3\": xg[\"x3\"][2:-2]}\n )\n except FileNotFoundError:\n # perhaps converting raw data, and didn't have the huge grid file\n logging.error(\"simgrid.dat missing, returning data without grid information\")\n dat = xarray.Dataset(coords={\"x1\": range(lx[0]), \"x2\": range(lx[1]), \"x3\": range(lx[2])})\n\n with file.open(\"rb\") as f:\n dat = dat.assign_coords({\"time\": time(f)})\n\n ns = read4D(f, LSP, lx)\n dat[\"ne\"] = ((\"x1\", \"x2\", \"x3\"), ns[:, :, :, LSP - 1])\n\n vs1 = read4D(f, LSP, lx)\n dat[\"v1\"] = (\n (\"x1\", \"x2\", \"x3\"),\n (ns[:, :, :, :6] * vs1[:, :, :, :6]).sum(axis=3) / dat[\"ne\"],\n )\n\n Ts = read4D(f, LSP, lx)\n dat[\"Ti\"] = (\n (\"x1\", \"x2\", \"x3\"),\n (ns[:, :, :, :6] * Ts[:, :, :, :6]).sum(axis=3) / dat[\"ne\"],\n )\n dat[\"Te\"] = ((\"x1\", \"x2\", \"x3\"), Ts[:, :, :, LSP - 1].squeeze())\n\n for p in (\"J1\", \"J2\", \"J3\", \"v2\", \"v3\"):\n dat[p] = ((\"x1\", \"x2\", \"x3\"), read3D(f, lx))\n\n dat[\"Phitop\"] = ((\"x2\", \"x3\"), read2D(f, lx))\n\n return dat\n\n\ndef frame3d_curvavg(file: Path, xg: dict[str, T.Any] = None) -> xarray.Dataset:\n \"\"\"\n\n Parameters\n ----------\n file: pathlib.Path\n filename of this timestep of simulation output\n \"\"\"\n\n if not file.is_file():\n raise FileNotFoundError(file)\n\n lx = simsize(file.parent)\n\n try:\n if not xg:\n xg = grid(file.parent)\n\n dat = xarray.Dataset(\n coords={\"x1\": xg[\"x1\"][2:-2], \"x2\": xg[\"x2\"][2:-2], \"x3\": xg[\"x3\"][2:-2]}\n )\n except FileNotFoundError:\n # perhaps converting raw data, and didn't have the huge grid file\n logging.error(\"simgrid.dat missing, returning data without grid information\")\n dat = xarray.Dataset(coords={\"x1\": range(lx[0]), \"x2\": range(lx[1]), \"x3\": range(lx[2])})\n\n with file.open(\"rb\") as f:\n dat = dat.assign_coords({\"time\": time(f)})\n\n for p in (\"ne\", \"v1\", \"Ti\", \"Te\", \"J1\", \"J2\", \"J3\", \"v2\", \"v3\"):\n dat[p] = ((\"x1\", \"x2\", \"x3\"), read3D(f, lx))\n\n dat[\"Phitop\"] = ((\"x2\", \"x3\"), read2D(f, lx))\n\n return dat\n\n\ndef frame3d_curvne(file: Path, xg: dict[str, T.Any] = None) -> xarray.Dataset:\n\n if not file.is_file():\n raise FileNotFoundError(file)\n\n lx = simsize(file.parent)\n\n try:\n if not xg:\n xg = grid(file.parent)\n\n dat = xarray.Dataset(\n coords={\"x1\": xg[\"x1\"][2:-2], \"x2\": 
xg[\"x2\"][2:-2], \"x3\": xg[\"x3\"][2:-2]}\n )\n except FileNotFoundError:\n # perhaps converting raw data, and didn't have the huge grid file\n logging.error(\"simgrid.dat missing, returning data without grid information\")\n dat = xarray.Dataset(coords={\"x1\": range(lx[0]), \"x2\": range(lx[1]), \"x3\": range(lx[2])})\n\n with file.open(\"rb\") as f:\n dat = dat.assign_coords({\"time\": time(f)})\n\n dat[\"ne\"] = ((\"x1\", \"x2\", \"x3\"), read3D(f, lx))\n\n return dat\n\n\ndef read4D(f: T.BinaryIO, lsp: int, lx: tuple[int, ...] | list[int]) -> np.ndarray:\n \"\"\"\n read 4D array from raw file\n \"\"\"\n\n if not len(lx) == 3:\n raise ValueError(f\"lx must have 3 elements, you have lx={lx}\")\n\n return np.fromfile(f, np.float64, np.prod(lx) * lsp).reshape((*lx, lsp), order=\"F\")\n\n\ndef read3D(f: T.BinaryIO, lx: tuple[int, ...] | list[int]) -> np.ndarray:\n \"\"\"\n read 3D array from raw file\n \"\"\"\n\n if not len(lx) == 3:\n raise ValueError(f\"lx must have 3 elements, you have lx={lx}\")\n\n return np.fromfile(f, np.float64, np.prod(lx)).reshape(*lx, order=\"F\")\n\n\ndef read2D(f: T.BinaryIO, lx: tuple[int, ...] | list[int]) -> np.ndarray:\n \"\"\"\n read 2D array from raw file\n \"\"\"\n\n if not len(lx) == 3:\n raise ValueError(f\"lx must have 3 elements, you have lx={lx}\")\n\n return np.fromfile(f, np.float64, np.prod(lx[1:])).reshape(*lx[1:], order=\"F\")\n\n\ndef glow_aurmap(file: Path, xg: dict[str, T.Any] = None) -> xarray.Dataset:\n \"\"\"\n read the auroral output from GLOW\n \"\"\"\n\n lx = simsize(file.parent)\n if not xg:\n xg = grid(file.parent)\n\n dat = xarray.Dataset(coords={\"wavelength\": WAVELEN, \"x2\": xg[\"x2\"][2:-2], \"x3\": xg[\"x3\"][2:-2]})\n\n if not len(lx) == 3:\n raise ValueError(f\"lx must have 3 elements, you have lx={lx}\")\n\n with file.open(\"rb\") as f:\n raw = np.fromfile(f, np.float64, np.prod(lx[1:]) * len(WAVELEN)).reshape(\n np.prod(lx[1:]) * len(WAVELEN), order=\"F\"\n )\n\n dat[\"rayleighs\"] = ((\"wavelength\", \"x2\", \"x3\"), raw)\n\n return dat\n\n\ndef time(f: T.BinaryIO) -> datetime:\n\n t = np.fromfile(f, np.float64, 4)\n\n return datetime(int(t[0]), int(t[1]), int(t[2])) + timedelta(hours=t[3])\n"
] | [
[
"numpy.allclose"
],
[
"numpy.linspace",
"numpy.empty_like",
"numpy.cos",
"numpy.ones",
"numpy.sin",
"numpy.concatenate",
"numpy.append",
"numpy.broadcast_to",
"numpy.insert",
"numpy.array",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.fromfile",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rsoren/limetr | [
"d1145e6811bc492e5641fd04108d4d342f904284"
] | [
"tests/izmat_lsvd.py"
] | [
"# check utils lsvd\n\n\ndef izmat_lsvd():\n import numpy as np\n from limetr.special_mat import izmat\n\n ok = True\n tol = 1e-10\n # problem 1, tall matrix\n # -------------------------------------------------------------------------\n n, k = 6, 3\n z = np.random.randn(n, k)\n tr_u, tr_s, tr_vt = np.linalg.svd(z, full_matrices=False)\n my_u = np.zeros(tr_u.size)\n my_s = np.zeros(tr_s.size)\n izmat.lsvd(z, my_u, my_s)\n\n err = np.linalg.norm(my_u.reshape(k, n).T - tr_u)\n ok = ok and err < tol\n\n if not ok:\n print('err in lsvd tall matrix')\n print('err:', err)\n\n # problem 2, fat matrix\n # -------------------------------------------------------------------------\n n, k = 3, 6\n z = np.random.randn(n, k)\n tr_u, tr_s, tr_vt = np.linalg.svd(z, full_matrices=False)\n my_u = np.zeros(tr_u.size)\n my_s = np.zeros(tr_s.size)\n izmat.lsvd(z, my_u, my_s)\n\n err = np.linalg.norm(my_u.reshape(n, n).T - tr_u)\n ok = ok and err < tol\n\n if not ok:\n print('err in lsvd fat matrix')\n print('err:', err)\n\n return ok\n"
] | [
[
"numpy.linalg.svd",
"numpy.random.randn",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
serge2016/deepvariant | [
"07dc4bedd0d124fff6386dcfa5fd88c6b200362c",
"07dc4bedd0d124fff6386dcfa5fd88c6b200362c"
] | [
"deepvariant/vcf_stats.py",
"deepvariant/variant_caller_test.py"
] | [
"# Copyright 2019 Google LLC.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\nr\"\"\"Library to produce variant statistics from a VCF file.\"\"\"\n\nimport collections\nimport itertools\nimport math\nimport numpy as np\n\nfrom third_party.nucleus.util import variant_utils\nfrom third_party.nucleus.util import variantcall_utils\nfrom deepvariant import vcf_stats_vis\n\n_VARIANT_STATS_COLUMNS = [\n 'reference_name', 'position', 'reference_bases', 'alternate_bases',\n 'variant_type', 'is_variant', 'is_transition', 'is_transversion', 'depth',\n 'genotype_quality', 'genotype', 'vaf', 'qual'\n]\n\nVariantStats = collections.namedtuple('VariantStats', _VARIANT_STATS_COLUMNS)\n\nBIALLELIC_SNP = 'Biallelic_SNP'\nBIALLELIC_INSERTION = 'Biallelic_Insertion'\nBIALLELIC_DELETION = 'Biallelic_Deletion'\nBIALLELIC_MNP = 'Biallelic_MNP'\nMULTIALLELIC_SNP = 'Multiallelic_SNP'\nMULTIALLELIC_INSERTION = 'Multiallelic_Insertion'\nMULTIALLELIC_DELETION = 'Multiallelic_Deletion'\nMULTIALLELIC_COMPLEX = 'Multiallelic_Complex'\nREFCALL = 'RefCall'\n\n\ndef _get_variant_type(variant):\n \"\"\"Returns the type of variant as a string.\"\"\"\n if variant_utils.is_variant_call(variant):\n biallelic = variant_utils.is_biallelic(variant)\n snp = variant_utils.is_snp(variant)\n insertion = variant_utils.variant_is_insertion(variant)\n deletion = variant_utils.variant_is_deletion(variant)\n\n if biallelic:\n if snp:\n return BIALLELIC_SNP\n elif insertion:\n return BIALLELIC_INSERTION\n elif deletion:\n return BIALLELIC_DELETION\n else:\n return BIALLELIC_MNP\n else:\n if snp:\n return MULTIALLELIC_SNP\n elif insertion:\n return MULTIALLELIC_INSERTION\n elif deletion:\n return MULTIALLELIC_DELETION\n else:\n return MULTIALLELIC_COMPLEX\n else:\n return REFCALL\n\n\ndef _tstv(variant, vtype):\n \"\"\"Returns a pair of bools indicating Transition, Transversion status.\"\"\"\n if vtype == BIALLELIC_SNP:\n is_transition = variant_utils.is_transition(variant.reference_bases,\n variant.alternate_bases[0])\n is_transversion = not is_transition\n else:\n is_transition = is_transversion = False\n\n 
return is_transition, is_transversion\n\n\ndef _get_vaf(variant, vcf_reader):\n \"\"\"Gets the VAF (variant allele frequency).\"\"\"\n vafs = variantcall_utils.get_format(\n variant_utils.only_call(variant), 'VAF', vcf_reader)\n return sum(vafs)\n\n\ndef _get_variant_stats(variant, vaf_available=False, vcf_reader=None):\n \"\"\"Returns a VariantStats object corresponding to the input variant.\"\"\"\n vtype = _get_variant_type(variant)\n is_transition, is_transversion = _tstv(variant, vtype)\n vaf = None\n if vaf_available:\n vaf = _get_vaf(variant, vcf_reader)\n\n return VariantStats(\n reference_name=variant.reference_name,\n position=(variant.start + 1),\n reference_bases=variant.reference_bases,\n alternate_bases=list(variant.alternate_bases),\n variant_type=vtype,\n is_transition=is_transition,\n is_transversion=is_transversion,\n is_variant=variant_utils.is_variant_call(variant),\n depth=variantcall_utils.get_format(\n variant_utils.only_call(variant), 'DP'),\n genotype_quality=variantcall_utils.get_gq(\n variant_utils.only_call(variant)),\n genotype=str(\n sorted(variantcall_utils.get_gt(variant_utils.only_call(variant)))),\n vaf=vaf,\n qual=variant.quality)\n\n\ndef _single_variant_stats(variants, vaf_available=False, vcf_reader=None):\n return [\n _get_variant_stats(v, vaf_available=vaf_available, vcf_reader=vcf_reader)\n for v in variants\n ]\n\n\ndef _format_histogram_for_vega(counts, bins):\n \"\"\"Format histogram counts and bins for vega.\n\n Args:\n counts: list of bin counts from np.histogram\n bins: list of bins from np.histogram\n\n Returns:\n A list of objects with s (bin start), e (bin end), and c (bin count) for\n each bin in the histogram.\n \"\"\"\n # Avoid floats becoming 0.6000000000000001 to save space in output json\n rounded_bins = [round(x, 10) for x in bins]\n # pylint: disable=g-complex-comprehension\n vega_formatted_hist = [{\n 's': rounded_bins[idx],\n 'e': rounded_bins[idx + 1],\n 'c': count\n } for idx, count in enumerate(counts)]\n # pylint: enable=g-complex-comprehension\n return vega_formatted_hist\n\n\ndef _fraction_histogram(values, number_of_bins=10):\n counts, bins = np.histogram(values, bins=number_of_bins, range=(0, 1))\n return _format_histogram_for_vega(counts, bins)\n\n\ndef _vaf_histograms_by_genotype(single_stats, number_of_bins=10):\n \"\"\"Computes histograms of allele frequency for each genotype.\n\n Args:\n single_stats: list of VariantStats objects.\n number_of_bins: integer, number of bins in allele frequency histogram.\n\n Returns:\n A dictionary keyed by genotype where each value is a list of bins.\n \"\"\"\n\n # Group by genotype\n sorted_by_genotype = sorted(single_stats, key=lambda x: x.genotype)\n grouped_by_genotype = itertools.groupby(sorted_by_genotype,\n lambda x: x.genotype)\n # Fill in empty placeholders for genotypes to populate all five charts\n stats_by_genotype = {}\n required_genotypes = ['[0, 0]', '[0, 1]', '[1, 1]', '[-1, -1]', '[1, 2]']\n for genotype in required_genotypes:\n # Create a few placeholder bins\n stats_by_genotype[genotype] = _fraction_histogram([], 2)\n # Count vafs from variants (replacing placeholders)\n for genotype, group in grouped_by_genotype:\n # Get VAF for each variant where it is defined\n vafs = [x.vaf for x in group if x.vaf is not None]\n stats_by_genotype[genotype] = _fraction_histogram(vafs, number_of_bins)\n\n return stats_by_genotype\n\n\ndef _count_base_changes_and_indel_sizes(single_stats):\n \"\"\"Count each base change, such as A->G or C->T, and count the number of indels of each 
size.\n\n Args:\n single_stats: list of VariantStats objects.\n\n Returns:\n base_changes: {(ref, alt): count, ...}\n indel_sizes: {size: count, ...}\n \"\"\"\n base_changes = collections.defaultdict(int)\n indel_sizes = collections.defaultdict(int)\n for v in single_stats:\n ref = v.reference_bases\n alts = v.alternate_bases\n # RefCalls are ignored\n if v.is_variant:\n # Multiallelic variants ignored here because they have different indel\n # sizes and/or base changes\n if v.variant_type == BIALLELIC_SNP:\n # SNV: get base change\n base_changes[(ref, alts[0])] += 1\n elif v.variant_type in [BIALLELIC_INSERTION, BIALLELIC_DELETION]:\n # indel: get size\n # + = insertion\n # - = deletion\n size = len(alts[0]) - len(ref)\n indel_sizes[size] += 1\n\n base_changes_for_json = []\n for key in base_changes:\n ref, alt = key\n base_changes_for_json.append([ref, alt, base_changes[key]])\n\n indel_sizes_for_json = []\n for key in indel_sizes:\n indel_sizes_for_json.append([int(key), indel_sizes[key]])\n\n return base_changes_for_json, indel_sizes_for_json\n\n\ndef _round_down(num):\n return int(math.floor(num))\n\n\ndef _round_up(num):\n return int(math.ceil(num))\n\n\ndef _compute_qual_histogram(single_var_stats):\n \"\"\"Compute a histogram over variant quality (QUAL column in VCF).\n\n Args:\n single_var_stats: list of VariantStats objects.\n\n Returns:\n histogram of variant quality scores.\n \"\"\"\n quals = [round(v.qual, 4) for v in single_var_stats]\n\n if quals:\n bin_range = (_round_down(min(quals)), _round_up(max(quals) + 1))\n counts, bins = np.histogram(\n quals, range=bin_range, bins=bin_range[1] - bin_range[0])\n hist = _format_histogram_for_vega(counts, bins)\n return [x for x in hist if x['c'] > 0]\n else:\n return []\n\n\ndef _get_integer_counts(nums):\n \"\"\"Turn a list of integers into a list of counts of those integers.\n\n Args:\n nums: a list of numbers (e.g. [1,2,2,4])\n\n Returns:\n a list of [num, count] (e.g. 
[[1,1],[2,2],[4,1]]) for all integers with\n non-zero counts\n \"\"\"\n bin_counts = np.bincount(nums)\n non_zero_counts = [[i, x] for i, x in enumerate(bin_counts) if x > 0]\n return non_zero_counts\n\n\ndef _compute_gq_histogram(single_var_stats):\n \"\"\"Compute a histogram over genotype quality (GQ sub-column under FORMAT in VCF).\n\n Args:\n single_var_stats: list of VariantStats objects.\n\n Returns:\n histogram of genotype quality scores.\n \"\"\"\n quals = [\n v.genotype_quality\n for v in single_var_stats\n if not isinstance(v.genotype_quality, list)\n ]\n return _get_integer_counts(quals)\n\n\ndef _compute_depth_histogram(single_var_stats):\n \"\"\"Compute a histogram on the depth, with larger bins as depth increases.\"\"\"\n depths = [v.depth for v in single_var_stats if not isinstance(v.depth, list)]\n return _get_integer_counts(depths)\n\n\ndef _count_variant_types(single_stats):\n count_all_variant_types = collections.defaultdict(int)\n for v in single_stats:\n count_all_variant_types[v.variant_type] += 1\n\n return count_all_variant_types\n\n\ndef _count_titv(single_stats):\n titv_counts = {'Transition': 0, 'Transversion': 0}\n titv_counts['Transition'] = sum([v.is_transition for v in single_stats])\n titv_counts['Transversion'] = sum([v.is_transversion for v in single_stats])\n return titv_counts\n\n\ndef _compute_variant_stats_for_charts(variants, vcf_reader=None):\n \"\"\"Computes variant statistics of each variant.\n\n Args:\n variants: iterable(Variant).\n vcf_reader: VcfReader.\n\n Returns:\n A dict with summarized data prepared for charts.\n \"\"\"\n vaf_available = False\n if vcf_reader:\n vcf_columns = [col.id for col in vcf_reader.header.formats]\n vaf_available = 'VAF' in vcf_columns\n\n single_var_stats = _single_variant_stats(\n variants, vaf_available=vaf_available, vcf_reader=vcf_reader)\n\n titv_counts = _count_titv(single_var_stats)\n variant_type_counts = _count_variant_types(single_var_stats)\n\n base_changes, indel_sizes = _count_base_changes_and_indel_sizes(\n single_var_stats)\n\n histograms = _vaf_histograms_by_genotype(single_var_stats, number_of_bins=50)\n\n qual_histogram = _compute_qual_histogram(single_var_stats)\n gq_hist = _compute_gq_histogram(single_var_stats)\n depth_histogram = _compute_depth_histogram(single_var_stats)\n\n vis_data = {\n 'vaf_histograms_by_genotype': histograms,\n 'indel_sizes': indel_sizes,\n 'base_changes': base_changes,\n 'qual_histogram': qual_histogram,\n 'gq_histogram': gq_hist,\n 'variant_type_counts': variant_type_counts,\n 'depth_histogram': depth_histogram,\n 'titv_counts': titv_counts\n }\n\n return vis_data\n\n\ndef create_vcf_report(variants, output_basename, sample_name, vcf_reader=None):\n \"\"\"Calculate VCF stats and create a visual report.\"\"\"\n vis_data = _compute_variant_stats_for_charts(variants, vcf_reader=vcf_reader)\n\n vcf_stats_vis.create_visual_report(output_basename, vis_data, sample_name)\n",
"# Copyright 2017 Google LLC.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Tests for deepvariant .variant_caller.\"\"\"\n\n\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport mock\nimport numpy as np\nimport numpy.testing as npt\nimport six\n\nfrom third_party.nucleus.util import variant_utils\nfrom third_party.nucleus.util import variantcall_utils\nfrom deepvariant import testdata\nfrom deepvariant import variant_caller\nfrom deepvariant.protos import deepvariant_pb2\n\n\ndef setUpModule():\n testdata.init()\n\n\ndef _reference_model_options(p_error, max_gq, gq_resolution=1):\n return deepvariant_pb2.VariantCallerOptions(\n sample_name='UNKNOWN',\n p_error=p_error,\n max_gq=max_gq,\n gq_resolution=gq_resolution,\n ploidy=2)\n\n\nclass PlaceholderVariantCaller(variant_caller.VariantCaller):\n \"\"\"A placeholder VariantCaller.\n\n This class provides a get_candidates implementation and so allows\n the base class to be instantiated and its methods tested.\n \"\"\"\n\n def __init__(self,\n p_error,\n max_gq,\n gq_resolution=1,\n use_cache_table=False,\n max_cache_coverage=100):\n super(PlaceholderVariantCaller, self).__init__(\n options=_reference_model_options(p_error, max_gq, gq_resolution),\n use_cache_table=use_cache_table,\n max_cache_coverage=max_cache_coverage)\n\n def get_candidates(self, allele_counters, sample_name):\n return None\n\n\nclass VariantCallerTests(parameterized.TestCase):\n\n def fake_allele_counter(self, start_pos, counts):\n allele_counter = mock.Mock()\n # pylint: disable=g-complex-comprehension\n allele_counter.summary_counts.return_value = [\n deepvariant_pb2.AlleleCountSummary(\n ref_supporting_read_count=n_ref,\n total_read_count=n_ref + n_alt,\n ref_base=ref,\n reference_name='chr1',\n position=start_pos + i)\n for i, (n_alt, n_ref, ref) in enumerate(counts)\n ]\n # pylint: enable=g-complex-comprehension\n allele_counter.counts.return_value = counts\n return allele_counter\n\n # R code to produce the testdata expectation table.\n # expected <- function(n_ref, n_alt, perr, max_gq = 100) 
{\n # p_ref <- dbinom(n_alt, n_ref, perr)\n # p_het <- dbinom(n_alt, n_ref, 0.5)\n # p_alt <- dbinom(n_ref - n_alt, n_ref, perr)\n # raw <- c(p_ref, p_het, p_alt)\n # norm <- raw / sum(raw)\n # gq = min(floor(-10 * log10(1 - norm[1])), max_gq)\n # likelihoods = paste(sprintf(\"%.6f\", log10(norm)), collapse=\", \")\n # likelihoods = paste(\"[\", likelihoods, \"]\", sep=\"\")\n # result = paste(n_ref, n_alt, perr, 100, 1, likelihoods, gq, sep=\", \")\n # cat(paste(\"[\", result, \"],\\n\", sep=\"\"))\n # }\n #\n # for (n in c(10, 20)) {\n # for (k in seq(0, n)) {\n # expected(n, k, 0.01)\n # }\n # }\n #\n # for (perr in c(0.1, 0.01, 0.001, 0.0001)) {\n # expected(10, 0, perr)\n # expected(10, 1, perr)\n # }\n #\n # for (n_ref in c(10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 1000, 10000)) {\n # expected(n_ref, 0, 0.01)\n # }\n @parameterized.parameters(\n # No coverage case.\n [0, 0, 0.01, 100, [-0.477121, -0.477121, -0.477121], 1],\n # Test systematically values of n and k.\n [10, 0, 0.01, 100, [-0.000469, -2.967121, -19.956821], 29],\n [10, 1, 0.01, 100, [-0.044109, -1.015126, -16.009190], 10],\n [10, 2, 0.01, 100, [-1.063830, -0.039211, -13.037641], 0],\n [10, 3, 0.01, 100, [-3.020668, -0.000414, -11.003209], 0],\n [10, 4, 0.01, 100, [-5.015893, -0.000004, -9.007163], 0],\n [10, 5, 0.01, 100, [-7.011524, -0.000000, -7.011524], 0],\n [10, 6, 0.01, 100, [-9.007163, -0.000004, -5.015893], 0],\n [10, 7, 0.01, 100, [-11.003209, -0.000414, -3.020668], 0],\n [10, 8, 0.01, 100, [-13.037641, -0.039211, -1.063830], 0],\n [10, 9, 0.01, 100, [-16.009190, -1.015126, -0.044109], 0],\n [10, 10, 0.01, 100, [-19.956821, -2.967121, -0.000469], 0],\n [20, 0, 0.01, 100, [-0.000001, -5.933304, -39.912704], 59],\n [20, 1, 0.01, 100, [-0.000050, -3.937719, -35.921484], 39],\n [20, 2, 0.01, 100, [-0.004935, -1.946968, -31.935098], 19],\n [20, 3, 0.01, 100, [-0.328657, -0.275056, -28.267550], 2],\n [20, 4, 0.01, 100, [-2.053097, -0.003860, -26.000720], 0],\n [20, 5, 0.01, 100, [-4.044911, -0.000039, -24.001263], 0],\n [20, 6, 0.01, 100, [-6.040508, -0.000000, -22.005589], 0],\n [20, 7, 0.01, 100, [-8.036143, -0.000000, -20.009954], 0],\n [20, 8, 0.01, 100, [-10.031778, -0.000000, -18.014319], 0],\n [20, 9, 0.01, 100, [-12.027413, -0.000000, -16.018683], 0],\n [20, 10, 0.01, 100, [-14.023048, -0.000000, -14.023048], 0],\n [20, 11, 0.01, 100, [-16.018683, -0.000000, -12.027413], 0],\n [20, 12, 0.01, 100, [-18.014319, -0.000000, -10.031778], 0],\n [20, 13, 0.01, 100, [-20.009954, -0.000000, -8.036143], 0],\n [20, 14, 0.01, 100, [-22.005589, -0.000000, -6.040508], 0],\n [20, 15, 0.01, 100, [-24.001263, -0.000039, -4.044911], 0],\n [20, 16, 0.01, 100, [-26.000720, -0.003860, -2.053097], 0],\n [20, 17, 0.01, 100, [-28.267550, -0.275056, -0.328657], 0],\n [20, 18, 0.01, 100, [-31.935098, -1.946968, -0.004935], 0],\n [20, 19, 0.01, 100, [-35.921484, -3.937719, -0.000050], 0],\n [20, 20, 0.01, 100, [-39.912704, -5.933304, -0.000001], 0],\n # Testing different values of p_error.\n [10, 0, 0.1, 100, [-0.001215, -2.553940, -9.543640], 25],\n [10, 1, 0.1, 100, [-0.010811, -1.609294, -7.644752], 16],\n [10, 0, 0.01, 100, [-0.000469, -2.967121, -19.956821], 29],\n [10, 1, 0.01, 100, [-0.044109, -1.015126, -16.009190], 10],\n [10, 0, 0.001, 100, [-0.000428, -3.006383, -29.996083], 30],\n [10, 1, 0.001, 100, [-0.297847, -0.304236, -24.294371], 3],\n [10, 0, 1e-04, 100, [-0.000424, -3.010290, -39.999990], 30],\n [10, 1, 1e-04, 100, [-1.032394, -0.042303, -33.032046], 0],\n # Test scaling of calculation with more coverage, hitting 
max_gq.\n [10, 0, 0.01, 100, [-0.000469, -2.967121, -19.956821], 29],\n [20, 0, 0.01, 100, [-0.000001, -5.933304, -39.912704], 59],\n [30, 0, 0.01, 100, [-0.000000, -8.899956, -59.869056], 88],\n [40, 0, 0.01, 100, [-0.000000, -11.866608, -79.825408], 100],\n [50, 0, 0.01, 100, [-0.000000, -14.833260, -99.781760], 100],\n [60, 0, 0.01, 100, [0.000000, -17.799911, -119.738112], 100],\n [70, 0, 0.01, 100, [0.000000, -20.766563, -139.694464], 100],\n [80, 0, 0.01, 100, [0.000000, -23.733215, -159.650816], 100],\n [90, 0, 0.01, 100, [0.000000, -26.699867, -179.607168], 100],\n [100, 0, 0.01, 100, [0.000000, -29.666519, -199.563519], 100],\n )\n def test_ref_calc(self, total_n, alt_n, p_error, max_gq, expected_likelihoods,\n expected_gq):\n caller = PlaceholderVariantCaller(p_error, max_gq)\n gq, likelihoods = caller.reference_confidence(total_n - alt_n, total_n)\n npt.assert_allclose(expected_likelihoods, likelihoods, atol=1e-6)\n self.assertEqual(expected_gq, gq)\n\n @parameterized.parameters(\n # Values below max_allowed_reads are returned without modification.\n [0, 10, 100, (0, 10)],\n [5, 10, 100, (5, 10)],\n [10, 10, 100, (10, 10)],\n [10, 100, 100, (10, 100)],\n [100, 100, 100, (100, 100)],\n\n # Checks that the rescaling works when n_total_reads > max_allowed.\n [0, 200, 100, (0, 100)],\n [0, 200, 100, (0, 100)],\n [0, 1000, 100, (0, 100)],\n [0, 10000, 100, (0, 100)],\n [1, 200, 100, (1, 100)],\n [1, 1000, 100, (1, 100)],\n [1, 10000, 100, (1, 100)],\n [1, 100000, 100, (1, 100)],\n [2, 200, 100, (1, 100)],\n [3, 200, 100, (2, 100)],\n [4, 200, 100, (2, 100)],\n [10, 200, 100, (5, 100)],\n [50, 200, 100, (25, 100)],\n [100, 200, 100, (50, 100)],\n [200, 200, 100, (100, 100)],\n # I saw a bug at runtime, and the testcase makes sure we scale values of\n # n_ref_reads close to n_total_reads appropriately.\n [99, 100, 100, (99, 100)],\n )\n def test_rescale_read_counts(self, n_ref, n_total, max_allowed_reads,\n expected):\n actual = variant_caller._rescale_read_counts_if_necessary(\n n_ref, n_total, max_allowed_reads)\n self.assertEqual(actual, expected)\n\n # pylint: disable=g-complex-comprehension\n @parameterized.parameters((n_ref, n_alt_fraction)\n for n_ref in [1000, 10000, 100000, 1000000]\n for n_alt_fraction in [0.0, 0.01, 0.02])\n # pylint: enable=g-complex-comprehension\n def test_handles_large_reference_counts(self, n_ref, n_alt_fraction):\n \"\"\"Tests that we don't blow up when the coverage gets really high.\"\"\"\n caller = PlaceholderVariantCaller(0.01, 100)\n n_alt = int(n_alt_fraction * n_ref)\n gq, likelihoods = caller._calc_reference_confidence(n_ref, n_ref + n_alt)\n self.assertTrue(\n np.isfinite(likelihoods).all(),\n 'Non-finite likelihoods {}'.format(likelihoods))\n self.assertEqual(100, gq)\n\n @parameterized.parameters((base, include_med_dp)\n for base in variant_caller.CANONICAL_DNA_BASES\n for include_med_dp in [True, False])\n def test_gvcf_basic(self, ref, include_med_dp):\n options = _reference_model_options(0.01, 100)\n caller = PlaceholderVariantCaller(0.01, 100)\n allele_counter = self.fake_allele_counter(100, [(0, 0, ref)])\n gvcfs = list(\n caller.make_gvcfs(allele_counter.summary_counts(), include_med_dp))\n self.assertLen(gvcfs, 1)\n self.assertGVCF(\n gvcfs[0],\n ref=ref,\n gq=1.0,\n start=100,\n end=101,\n min_dp=0,\n chrom='chr1',\n gls=[-0.47712125472] * 3,\n sample_name=options.sample_name,\n med_dp=0 if include_med_dp else None)\n\n @parameterized.parameters('N', 'R', 'W', 'B')\n def test_gvcf_basic_skips_iupac_ref_base(self, ref):\n caller = 
PlaceholderVariantCaller(0.01, 100)\n allele_counter = self.fake_allele_counter(100, [(0, 0, ref)])\n self.assertEmpty(list(caller.make_gvcfs(allele_counter.summary_counts())))\n\n @parameterized.parameters('X', '>', '!')\n def test_gvcf_basic_raises_with_bad_ref_base(self, ref):\n caller = PlaceholderVariantCaller(0.01, 100)\n allele_counter = self.fake_allele_counter(100, [(0, 0, ref)])\n with six.assertRaisesRegex(self, ValueError,\n 'Invalid reference base={}'.format(ref)):\n list(caller.make_gvcfs(allele_counter.summary_counts()))\n\n def assertGVCF(self,\n gvcf,\n ref,\n gq,\n start,\n end,\n min_dp,\n chrom='chr1',\n gls=None,\n sample_name=None,\n gts=None,\n med_dp=None):\n if chrom:\n self.assertEqual(gvcf.reference_name, chrom)\n call = variant_utils.only_call(gvcf)\n self.assertNotEmpty(gvcf.reference_name)\n self.assertEqual(gvcf.reference_bases, ref)\n self.assertEqual(gvcf.alternate_bases, ['<*>'])\n self.assertEqual(gvcf.start, start)\n self.assertEqual(gvcf.end, end if end else start + 1)\n self.assertEqual(variantcall_utils.get_gq(call), gq)\n self.assertNotEmpty(call.genotype_likelihood)\n self.assertIn('MIN_DP', call.info)\n self.assertLen(call.info['MIN_DP'].values, 1)\n self.assertEqual(variantcall_utils.get_min_dp(call), min_dp)\n if med_dp is not None:\n self.assertIn('MED_DP', call.info)\n self.assertLen(call.info['MED_DP'].values, 1)\n self.assertEqual(variantcall_utils.get_med_dp(call), med_dp)\n else:\n self.assertNotIn('MED_DP', call.info)\n if gls is not None:\n npt.assert_allclose(list(gvcf.calls[0].genotype_likelihood), gls)\n if sample_name:\n self.assertEqual(gvcf.calls[0].call_set_name, sample_name)\n if gts is not None:\n self.assertEqual(list(gvcf.calls[0].genotype), gts)\n\n @parameterized.parameters(\n # Check some basics.\n ([(0, 0, 'A')], [dict(start=1, end=2, ref='A', gq=1, min_dp=0)]),\n # Two equal records are merged, and the reference base is the first one.\n ([(0, 0, 'A'),\n (0, 0, 'C')], [dict(start=1, end=3, ref='A', gq=1, min_dp=0)]),\n ([(0, 0, 'C'),\n (0, 0, 'A')], [dict(start=1, end=3, ref='C', gq=1, min_dp=0)]),\n # Three equal records are merged into a single block.\n ([(0, 0, 'A'), (0, 0, 'C'),\n (0, 0, 'T')], [dict(start=1, end=4, ref='A', gq=1, min_dp=0)]),\n # We don't merge together different GQ value blocks:\n ([(0, 0, 'A'), (0, 100, 'C')], [\n dict(start=1, end=2, ref='A', gq=1, min_dp=0),\n dict(start=2, end=3, ref='C', gq=100, min_dp=100),\n ]),\n ([(0, 100, 'A'), (0, 0, 'C')], [\n dict(start=1, end=2, ref='A', gq=100, min_dp=100),\n dict(start=2, end=3, ref='C', gq=1, min_dp=0),\n ]),\n ([(0, 0, 'A'), (0, 20, 'C'), (0, 100, 'T')], [\n dict(start=1, end=2, ref='A', gq=1, min_dp=0),\n dict(start=2, end=3, ref='C', gq=59, min_dp=20),\n dict(start=3, end=4, ref='T', gq=100, min_dp=100),\n ]),\n )\n def test_make_gvcfs(self, counts, expecteds):\n allele_counts = self.fake_allele_counter(1, counts).summary_counts()\n caller = PlaceholderVariantCaller(0.01, 100)\n gvcfs = list(caller.make_gvcfs(allele_counts))\n\n self.assertLen(gvcfs, len(expecteds))\n for actual, expected in zip(gvcfs, expecteds):\n self.assertGVCF(actual, **expected)\n\n @parameterized.parameters(\n dict(\n gq_resolution=1,\n expecteds=[\n dict(start=1, end=2, ref='A', gq=53, med_dp=18, min_dp=18),\n dict(start=2, end=3, ref='C', gq=56, med_dp=19, min_dp=19),\n dict(start=3, end=4, ref='A', gq=0, med_dp=35, min_dp=35),\n dict(start=4, end=5, ref='T', gq=0, med_dp=20, min_dp=20),\n dict(start=5, end=6, ref='A', gq=0, med_dp=16, min_dp=16),\n dict(start=6, end=7, 
ref='A', gq=72, med_dp=31, min_dp=31),\n dict(start=7, end=8, ref='C', gq=83, med_dp=35, min_dp=35),\n dict(start=8, end=9, ref='T', gq=59, med_dp=20, min_dp=20),\n dict(start=9, end=10, ref='G', gq=56, med_dp=19, min_dp=19),\n ]),\n # Binning by 3 does not cause any records to be merged.\n dict(\n gq_resolution=3,\n expecteds=[\n dict(start=1, end=2, ref='A', gq=53, med_dp=18, min_dp=18),\n dict(start=2, end=3, ref='C', gq=56, med_dp=19, min_dp=19),\n dict(start=3, end=4, ref='A', gq=0, med_dp=35, min_dp=35),\n dict(start=4, end=5, ref='T', gq=0, med_dp=20, min_dp=20),\n dict(start=5, end=6, ref='A', gq=0, med_dp=16, min_dp=16),\n dict(start=6, end=7, ref='A', gq=72, med_dp=31, min_dp=31),\n dict(start=7, end=8, ref='C', gq=83, med_dp=35, min_dp=35),\n dict(start=8, end=9, ref='T', gq=59, med_dp=20, min_dp=20),\n dict(start=9, end=10, ref='G', gq=56, med_dp=19, min_dp=19),\n ]),\n # Binning by 4 causes the first merge, of the first two records.\n dict(\n gq_resolution=4,\n expecteds=[\n dict(start=1, end=3, ref='A', gq=53, med_dp=18, min_dp=18),\n dict(start=3, end=4, ref='A', gq=0, med_dp=35, min_dp=35),\n dict(start=4, end=5, ref='T', gq=0, med_dp=20, min_dp=20),\n dict(start=5, end=6, ref='A', gq=0, med_dp=16, min_dp=16),\n dict(start=6, end=7, ref='A', gq=72, med_dp=31, min_dp=31),\n dict(start=7, end=8, ref='C', gq=83, med_dp=35, min_dp=35),\n dict(start=8, end=9, ref='T', gq=59, med_dp=20, min_dp=20),\n dict(start=9, end=10, ref='G', gq=56, med_dp=19, min_dp=19),\n ]),\n dict(\n gq_resolution=10,\n expecteds=[\n dict(start=1, end=3, ref='A', gq=53, med_dp=18, min_dp=18),\n dict(start=3, end=4, ref='A', gq=0, med_dp=35, min_dp=35),\n dict(start=4, end=5, ref='T', gq=0, med_dp=20, min_dp=20),\n dict(start=5, end=6, ref='A', gq=0, med_dp=16, min_dp=16),\n dict(start=6, end=7, ref='A', gq=72, med_dp=31, min_dp=31),\n dict(start=7, end=8, ref='C', gq=83, med_dp=35, min_dp=35),\n dict(start=8, end=10, ref='T', gq=56, med_dp=19, min_dp=19),\n ]),\n dict(\n gq_resolution=45,\n expecteds=[\n dict(start=1, end=3, ref='A', gq=53, med_dp=18, min_dp=18),\n dict(start=3, end=4, ref='A', gq=0, med_dp=35, min_dp=35),\n dict(start=4, end=5, ref='T', gq=0, med_dp=20, min_dp=20),\n dict(start=5, end=6, ref='A', gq=0, med_dp=16, min_dp=16),\n # 25 comes from int(median([31, 35, 20, 19])).\n dict(start=6, end=10, ref='A', gq=56, med_dp=25, min_dp=19),\n ]),\n )\n def test_quantize_gvcfs(self, gq_resolution, expecteds):\n # Each count tuple is n_alt, n_ref, ref_base.\n # The third, fourth, and the fifth ones should never be merged, since\n # either het or hom_alt has bigger GL than hom_ref.\n counts = [(0, 18, 'A'), (0, 19, 'C'), (35, 0, 'A'), (10, 10, 'T'),\n (4, 12, 'A'), (1, 30, 'A'), (1, 34, 'C'), (0, 20, 'T'),\n (0, 19, 'G')]\n allele_counts = self.fake_allele_counter(1, counts).summary_counts()\n caller = PlaceholderVariantCaller(0.01, 100, gq_resolution)\n gvcfs = list(caller.make_gvcfs(allele_counts, include_med_dp=True))\n self.assertLen(gvcfs, len(expecteds))\n for actual, expected in zip(gvcfs, expecteds):\n self.assertGVCF(actual, **expected)\n\n @parameterized.parameters(True, False)\n def test_gvcfs_counts(self, include_gvcfs):\n # Only tests the 'gvcfs' creation part of calls_and_gvcfs. 
The `calls`\n # portion of this method needs to be tested in subclasses, which have\n # implemented the get_candidates method.\n counts = [(0, 0, 'A'), (10, 10, 'G'), (0, 0, 'G'), (0, 0, 'G'),\n (10, 10, 'T')]\n caller = PlaceholderVariantCaller(0.01, 100)\n allele_counter = self.fake_allele_counter(10, counts)\n allele_counter_dict = {'SAMPLE_ID': allele_counter}\n _, gvcfs = caller.calls_and_gvcfs(\n allele_counters=allele_counter_dict,\n target_sample='SAMPLE_ID',\n include_gvcfs=include_gvcfs)\n # We expect our gvcfs to occur at the 10 position and that 12 and 13 have\n # been merged into a 2 bp block, if enabled. Otherwise should be empty.\n if include_gvcfs:\n self.assertLen(gvcfs, 4)\n # Expected diploid genotype likelihoods when there's no coverage. The\n # chance of having each genotype is 1/3, in log10 space.\n flat_gls = np.log10([1.0 / 3] * 3)\n self.assertGVCF(\n gvcfs[0], ref='A', start=10, end=11, gq=1, min_dp=0, gls=flat_gls)\n self.assertGVCF(\n gvcfs[1],\n ref='G',\n start=11,\n end=12,\n gq=0,\n min_dp=20,\n gls=np.array([-14.0230482368, -7.993606e-15, -14.0230482368]),\n # The genotype should NOT be called here (\"./.\") as the likelihood\n # for het is greater than hom_ref.\n gts=[-1, -1])\n self.assertGVCF(\n gvcfs[2], ref='G', start=12, end=14, gq=1, min_dp=0, gls=flat_gls)\n else:\n self.assertEmpty(gvcfs)\n\n\n_CACHE_COVERAGE = 20 # Outside class so we can refer to it in @Parameters.\n\n\nclass VariantCallerCacheTests(parameterized.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(VariantCallerCacheTests, cls).setUpClass()\n cls.raw_caller = PlaceholderVariantCaller(0.1, 50, use_cache_table=False)\n cls.cache_caller = PlaceholderVariantCaller(\n 0.1, 50, use_cache_table=True, max_cache_coverage=_CACHE_COVERAGE)\n\n # pylint: disable=g-complex-comprehension\n @parameterized.parameters((n_alt, n_total)\n for n_total in range(_CACHE_COVERAGE + 1)\n for n_alt in range(n_total + 1))\n # pylint: enable=g-complex-comprehension\n def test_caching(self, n_alt, n_total):\n # Note that we only expect the gq and gls to be close if we are not\n # rescaling the counts, so we are only looping over values that should be\n # cached. In practice the cache is set to values sufficiently large that\n # these differences don't matter, but for this test we are limiting the\n # cache size to a small value in _CACHE_COVERAGE so we can test that the\n # cache lookups are correct.\n raw_gq, raw_gls = self.raw_caller.reference_confidence(n_alt, n_total)\n cache_gq, cache_gls = self.cache_caller.reference_confidence(n_alt, n_total)\n self.assertEqual(raw_gq, cache_gq)\n npt.assert_allclose(raw_gls, cache_gls)\n\n\nif __name__ == '__main__':\n absltest.main()\n"
] | [
[
"numpy.histogram",
"numpy.bincount"
],
[
"numpy.array",
"numpy.log10",
"numpy.isfinite",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
barronh/MONET | [
"acd72487c7aeff66d89f87fa663a9c96fa9b7bb0"
] | [
"monet/models/hysplit.py"
] | [
"from __future__ import division, print_function\n\nfrom builtins import object, zip\n\nimport pandas as pd\nimport xarray as xr\nfrom dask.diagnostics import ProgressBar\nfrom numpy import array\nfrom past.utils import old_div\n\n# This file is to deal with CAMx code - try to make it general for CAMx 4.7.1 --> 5.1\n\n\nProgressBar().register()\n\n\nclass HYSPLIT(object):\n def __init__(self):\n self.objtype = 'HYSPLIT'\n self.dset = None\n self.fname = None\n self.dates = None\n self.keys = None\n self.indexdates = None\n self.latitude = None\n self.longitude = None\n self.map = None\n\n def get_dates(self):\n print('Reading CAMx dates...')\n print(self.dset)\n tflag1 = array(self.dset['TFLAG'][:, 0], dtype='|S7')\n tflag2 = array(old_div(self.dset['TFLAG'][:, 1], 10000), dtype='|S6')\n date = pd.to_datetime([i + j.zfill(2) for i, j in zip(tflag1, tflag2)], format='%Y%j%H')\n indexdates = pd.Series(date).drop_duplicates(keep='last').index.values\n self.dset = self.dset.isel(time=indexdates)\n self.dset['time'] = date[indexdates]\n\n def open_camx(self, file):\n from glob import glob\n from numpy import sort\n dropset = ['layer', 'longitude_bounds', 'latitude_bounds',\n 'x', 'y', 'level', 'lambert_conformal_conic']\n nameset = {'COL': 'x', 'ROW': 'y', 'TSTEP': 'time', 'LAY': 'z'}\n if type(file) == str:\n fname = sort(array(glob(file)))\n else:\n fname = sort(array(file))\n if fname.shape[0] >= 1:\n if self.dset is None:\n self.dset = xr.open_mfdataset(\n fname.tolist(), concat_dim='TSTEP', engine='pnc').drop(dropset).rename(nameset).squeeze()\n self.load_conus_basemap(res='l')\n self.get_dates()\n else:\n dset = xr.open_mfdataset(fname.tolist(), concat_dim='TSTEP',\n engine='pnc').drop(dropset).rename(nameset).squeeze()\n self.dset = xr.merge([self.dset, dset])\n else:\n print('Files not found')\n self.keys = list(self.dset.keys())\n\n def check_z(self, varname):\n if pd.Series(self.dset[varname].dims).isin('z').max():\n return True\n else:\n return False\n\n def get_nox(self, lay=None):\n if self.check_z('NO'):\n if lay is not None:\n var = self.dset['NO'][:, 0, :, :].squeeze().copy()\n var += self.dset['NO2'][:, 0, :, :].squeeze().copy()\n else:\n var = self.dset['NO'][:, :, :, :].copy()\n var += self.dset['NO2'][:, :, :, :].copy()\n else:\n var = self.dset['NO'][:, :, :].copy()\n var += self.dset['NO2'][:, :, :].copy()\n return var\n\n def get_pm25(self, lay=None):\n keys = list(self.dset.keys())\n allvars = self.fine\n index = pd.Series(allvars).isin(keys)\n newkeys = allvars[index]\n if self.check_z(newkeys[0]):\n if lay is not None:\n var = self.dset[newkeys[0]][:, 0, :, :].squeeze()\n for i in newkeys[1:]:\n var += self.dset[i][:, 0, :, :].squeeze()\n else:\n var = self.dset[newkeys[0]][:, :, :, :].squeeze()\n for i in newkeys[1:]:\n var += self.dset[i][:, :, :, :].squeeze()\n else:\n var = self.dset[newkeys[0]][:, :, :].copy()\n for i in newkeys[1:]:\n var += self.dset[i][:, :, :].squeeze()\n return var\n\n def get_pm10(self, lay=None):\n keys = list(self.dset.keys())\n allvars = self.coarse\n index = pd.Series(allvars).isin(keys)\n newkeys = allvars[index]\n if self.check_z(newkeys[0]):\n if lay is not None:\n var = self.dset[newkeys[0]][:, 0, :, :].squeeze()\n for i in newkeys[1:]:\n var += self.dset[i][:, 0, :, :].squeeze()\n else:\n var = self.dset[newkeys[0]][:, :, :, :].squeeze()\n for i in newkeys[1:]:\n var += self.dset[i][:, :, :, :].squeeze()\n else:\n var = self.dset[newkeys[0]][:, :, :].copy()\n for i in newkeys[1:]:\n var += self.dset[i][:, :, :].squeeze()\n return 
var\n\n def get_var(self, param='O3', lay=None):\n p = param.upper()\n print(param)\n if p == 'PM25':\n var = self.get_pm25(lay=lay)\n elif p == 'PM10':\n var = self.get_pm10(lay=lay)\n elif p == 'NOX':\n var = self.get_nox(lay=lay)\n elif p == 'OC':\n var = self.get_oc(lay=lay)\n elif p == 'VOC':\n if lay is not None:\n var = self.dset['VOC'][:, 0, :, :].copy().squeeze()\n else:\n var = self.dset['VOC'][:, :, :, :].copy().squeeze()\n else:\n if self.check_z(param):\n if lay is None:\n var = self.dset[param][:, :, :, :].copy()\n else:\n var = self.dset[param][:, lay, :, :].copy().squeeze()\n else:\n var = self.dset[param]\n return var\n\n def load_conus_basemap(self, res='l'):\n from mpl_toolkits.basemap import Basemap\n if self.map is None:\n lat1 = self.dset.P_ALP\n lat2 = self.dset.P_BET\n lon1 = self.dset.P_GAM\n lon0 = self.dset.XCENT\n lat0 = self.dset.YCENT\n m = Basemap(projection='lcc', resolution=res, lat_1=lat1, lat_2=lat2, lat_0=lat0, lon_0=lon0,\n lon_1=lon1,\n llcrnrlat=self.dset.latitude[0, 0], urcrnrlat=self.dset.latitude[-1, -1],\n llcrnrlon=self.dset.longitude[0, 0],\n urcrnrlon=self.dset.longitude[-1, -1], rsphere=6371200.,\n area_thresh=50.)\n self.map = m\n else:\n m = self.map\n return self.map\n"
] | [
[
"numpy.array",
"pandas.Series"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
aprilpear/holdout-sgd | [
"fa81bce57fb98aef262536fb2d7a26567d3143f7"
] | [
"holdout_sgd/train_decentralized.py"
] | [
"import argparse \nimport numpy as np\nimport json\nimport torch\nfrom torchvision import datasets, transforms\n\nfrom _params import add_common_params, add_decentralized_params\nfrom _train_utils import test, plot_learning_curve\nfrom _node import Node\nfrom _byzantine_node import ByzantineNode\nfrom _data_utils import default_transform, MNISTSlice\nfrom _logic import *\nfrom _krum import krum, _distance\nfrom _average import get_average_gradients, get_std_gradients\nfrom _attack import setup_lp_norm_attack, byzantine_committee_vote\nfrom _trimmed_mean import trimmed_mean\n\n \ndef main():\n parser = argparse.ArgumentParser(description='PyTorch MNIST Decentralized Training')\n add_common_params(parser)\n add_decentralized_params(parser)\n args = parser.parse_args()\n \n use_multiprocess = not(args.no_multiprocess)\n \n trainset_full = datasets.MNIST('../data', train=True, download=True, transform=default_transform)\n \n torch.manual_seed(args.seed)\n use_cuda = torch.cuda.is_available()\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n \n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=default_transform),\n batch_size=args.test_batch_size,\n shuffle=True,\n **kwargs)\n\n train_labels = trainset_full.train_labels.numpy()\n train_data = trainset_full.train_data.numpy()\n train_label_indices = {}\n \n # distribute data across nodes\n print('setting up the simulation:: creating {} distributed nodes...'.format(args.nodes))\n \n for digit in range(10):\n train_label_indices[digit] = np.where(train_labels == digit)[0]\n\n n_byzantine = int(args.nodes * args.byzantine)\n expected_n_byzantine_committee = int(np.ceil(args.committee_size * args.expected_byzantine))\n expected_n_byzantine_participants = int(np.ceil(args.participants_size * args.expected_byzantine))\n \n nodes = []\n byzantine_idx = []\n for node_idx in range(args.nodes):\n node_indices = []\n for digit in range(10):\n node_indices.extend(np.random.choice(\n train_label_indices[digit], \n size=int(args.sample_size / 10))) # sample randomly from each label\n\n node_data = torch.from_numpy(train_data[node_indices])\n node_labels = torch.from_numpy(train_labels[node_indices])\n\n node_trainset = MNISTSlice(\n root='../data', \n data=node_data, \n labels=node_labels, \n train=True, \n transform=default_transform, \n download=True)\n\n if node_idx < n_byzantine: # node was chosen as byzantine node\n byzantine_idx.append(node_idx)\n node = ByzantineNode.create(mode=args.byzantine_mode)( \n node_idx,\n node_trainset,\n batch_size=args.batch_size,\n learning_rate=args.lr,\n momentum=args.momentum,\n log_interval=args.log_interval,\n mode=args.byzantine_mode)\n else:\n node = Node(\n node_idx,\n node_trainset,\n batch_size=args.batch_size,\n learning_rate=args.lr,\n momentum=args.momentum,\n log_interval=args.log_interval)\n \n nodes.append(node)\n \n nodes = np.array(nodes)\n honest_nodes = [n for n in nodes if n.id not in byzantine_idx]\n\n print('Created {} Byzantine nodes: {}'.format(len(byzantine_idx), byzantine_idx))\n print('Done.')\n\n # decentralized training\n print('starting decentralized training...')\n print(' ==> expecting {} byzantines in each committee, and {} byzantines in each participants group.'.format(\n expected_n_byzantine_committee, expected_n_byzantine_participants))\n\n consensus_w = honest_nodes[0].get_weights()\n align_all_nodes_to_consensus(nodes, consensus_w)\n\n learning_curve = []\n test_accuracy = []\n for i in range(args.epochs):\n 
print('epoch:: {} out of {}'.format(i + 1, args.epochs))\n\n while True:\n participant_ids = select_participants(\n n_nodes=args.nodes,\n n_participants=args.participants_size)\n\n committe_ids = select_committee(\n n_nodes=args.nodes,\n n_committee=args.committee_size,\n exclude=participant_ids)\n\n byzantine_participants_ids = set(participant_ids).intersection(set(byzantine_idx))\n print('{} byzantine participants selected...'.format(len(byzantine_participants_ids)))\n\n byzantine_committee_ids = set(committe_ids).intersection(set(byzantine_idx))\n print('{} byzantine committe selected...'.format(len(byzantine_committee_ids)))\n\n if (len(byzantine_participants_ids) < args.participants_size / 2) and (len(byzantine_committee_ids) < args.committee_size / 2):\n break\n\n participants = nodes[participant_ids]\n committee = nodes[committe_ids]\n \n print('training all nodes...')\n all_train_loss = run_all(participants, multiprocess=use_multiprocess)\n avg_train_loss = np.mean([loss for id_, loss in all_train_loss if id_ not in byzantine_idx])\n\n # setting up the Lp-norm attack (if there are byzantines)\n if args.byzantine_mode == 'lp-norm':\n\n honest_participants = [n for n in participants if n.id not in byzantine_idx]\n mu = get_average_gradients(honest_participants)\n std = get_std_gradients(honest_participants)\n gamma = setup_lp_norm_attack(participants, byzantine_idx, mu, std, consensus_w, f=expected_n_byzantine_participants)\n print('Chosen Lp-norm attack gamma: {}'.format(gamma))\n\n if args.aggregator == 'union-consensus':\n\n print('collecting weights from participants...')\n w_array = collect_participants_weights(participants)\n\n print('collecting votes from committee...')\n honest_committee = [n for n in committee if n.id not in byzantine_idx]\n byzantine_committee = [n for n in committee if n.id in byzantine_idx]\n\n votes = collect_committee_votes(honest_committee, w_array, f=expected_n_byzantine_participants, multiprocess=True)\n byzantine_vote = byzantine_committee_vote(participants, byzantine_idx, f=expected_n_byzantine_participants)\n [votes.update({n.id: byzantine_vote}) for n in byzantine_committee]\n print(\"Votes:\", dict([(k, participant_ids[v]) for k, v in votes.items()]))\n\n union_consensus, n_unique_recipients = reach_union_consensus(votes, f=expected_n_byzantine_committee)\n union_consensus_ids = participant_ids[union_consensus]\n\n print('reached union consensous of size {}, with {} unique recipients'.format(\n len(union_consensus),\n n_unique_recipients))\n byzantine_consensus_ids = set(union_consensus_ids).intersection(byzantine_participants_ids)\n print('Consensus: {}, #Byzantine nodes inside: {} --> {}'.format(\n union_consensus_ids, len(byzantine_consensus_ids), byzantine_consensus_ids))\n\n consensus_w = get_average_union_consensus(w_array, union_consensus)\n align_all_nodes_to_consensus(nodes, consensus_w)\n\n learning_curve.append({\n 'train_loss': avg_train_loss,\n 'union_size': len(union_consensus),\n 'n_unique_recipients': n_unique_recipients,\n 'n_byzantine_participants': len(byzantine_participants_ids),\n 'n_byzantine_committee': len(byzantine_committee_ids),\n 'n_byzantine_consensus': len(byzantine_consensus_ids),\n })\n\n elif args.aggregator == 'krum':\n\n print('collecting gradients from participants and running krum...')\n krum_node_idx, krum_scores = krum(participants, f=expected_n_byzantine_participants)\n selected_node = participants[krum_node_idx]\n\n is_byzantine_selected = int(selected_node.id in byzantine_participants_ids)\n 
print('Selected node by krum: {}, is byzantine: {}'.format(selected_node.id, is_byzantine_selected))\n print('Krum selected score: {}'.format(krum_scores[krum_node_idx]))\n\n consensus_w = selected_node.get_weights()\n align_all_nodes_to_consensus(nodes, consensus_w)\n\n learning_curve.append({\n 'train_loss': avg_train_loss,\n 'selected_node': selected_node.id,\n 'is_byzantine_selected': is_byzantine_selected,\n })\n\n elif args.aggregator == 'trimmed-mean':\n\n print('collecting gradients from participants and running trimmed mean...')\n trimmed_mean_grads = trimmed_mean(participants, f=expected_n_byzantine_participants)\n\n # simulate the step take by the trimmed mean gradient\n honest_participants = [n for n in participants if n.id not in byzantine_idx]\n proxy_node = honest_participants[0]\n proxy_node.set_weights(consensus_w)\n proxy_node.set_gradients(trimmed_mean_grads)\n proxy_node.take_step()\n\n consensus_w = proxy_node.get_weights()\n align_all_nodes_to_consensus(nodes, consensus_w)\n\n learning_curve.append({\n 'train_loss': avg_train_loss\n })\n\n else: # average\n\n print('collecting gradients from participants and running average...')\n average_grads = get_average_gradients(participants)\n\n # simulate the step take by the average gradient\n honest_participants = [n for n in participants if n.id not in byzantine_idx]\n proxy_node = honest_participants[0]\n proxy_node.set_weights(consensus_w)\n proxy_node.set_gradients(average_grads)\n proxy_node.take_step()\n\n consensus_w = proxy_node.get_weights()\n align_all_nodes_to_consensus(nodes, consensus_w)\n\n learning_curve.append({\n 'train_loss': avg_train_loss\n })\n\n if args.byzantine_mode == 'lp-norm':\n learning_curve[-1]['gamma'] = gamma\n\n if i % 1 == 0:\n accuracy, popular_misses = test(\n args, participants[0]._model, participants[0]._device, test_loader)\n test_accuracy.append({'accuracy': accuracy, 'popular_misses': popular_misses})\n\n\n with open('raw_learning_curve__{}.json'.format(args.aggregator), 'w') as f_raw:\n json.dump(\n {\n 'setting': vars(args),\n 'train': learning_curve,\n 'evaluation': test_accuracy\n },\n f_raw\n )\n \nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.manual_seed",
"torch.from_numpy",
"numpy.ceil",
"numpy.mean",
"torch.cuda.is_available",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Dangzilla/BiorbdOptim | [
"adb1898b86282c87b3fd186b13e00a8202ceb21c",
"adb1898b86282c87b3fd186b13e00a8202ceb21c"
] | [
"tests/test_torque_driven_with_contact_ocp.py",
"biorbd_optim/path_conditions.py"
] | [
"\"\"\"\nTest for file IO.\nIt tests the results of an optimal control problem with torque_driven_with_contact problem type regarding the proper functioning of :\n- the maximize/minimize_predicted_height_CoM objective\n- the contact_forces_inequality constraint\n- the non_slipping constraint\n\"\"\"\nimport importlib.util\nfrom pathlib import Path\n\nimport pytest\nimport numpy as np\n\nfrom biorbd_optim import Data, OdeSolver\nfrom .utils import TestUtils\n\nPROJECT_FOLDER = Path(__file__).parent / \"..\"\nspec = importlib.util.spec_from_file_location(\n \"maximize_predicted_height_CoM\",\n str(PROJECT_FOLDER) + \"/examples/torque_driven_with_contact/maximize_predicted_height_CoM.py\",\n)\nmaximize_predicted_height_CoM = importlib.util.module_from_spec(spec)\nspec.loader.exec_module(maximize_predicted_height_CoM)\n\nspec = importlib.util.spec_from_file_location(\n \"contact_forces_inequality_constraint\",\n str(PROJECT_FOLDER) + \"/examples/torque_driven_with_contact/contact_forces_inequality_constraint.py\",\n)\ncontact_forces_inequality_GREATER_THAN_constraint = importlib.util.module_from_spec(spec)\nspec.loader.exec_module(contact_forces_inequality_GREATER_THAN_constraint)\n\nspec = importlib.util.spec_from_file_location(\n \"contact_forces_inequality_constraint\",\n str(PROJECT_FOLDER) + \"/examples/torque_driven_with_contact/contact_forces_inequality_constraint.py\",\n)\ncontact_forces_inequality_LESSER_THAN_constraint = importlib.util.module_from_spec(spec)\nspec.loader.exec_module(contact_forces_inequality_LESSER_THAN_constraint)\n\nspec = importlib.util.spec_from_file_location(\n \"non_slipping_constraint\", str(PROJECT_FOLDER) + \"/examples/torque_driven_with_contact/non_slipping_constraint.py\",\n)\nnon_slipping_constraint = importlib.util.module_from_spec(spec)\nspec.loader.exec_module(non_slipping_constraint)\n\n\[email protected](\"ode_solver\", [OdeSolver.RK])\ndef test_maximize_predicted_height_CoM(ode_solver):\n ocp = maximize_predicted_height_CoM.prepare_ocp(\n model_path=str(PROJECT_FOLDER) + \"/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod\",\n phase_time=0.5,\n number_shooting_points=20,\n )\n sol = ocp.solve()\n\n # Check objective function value\n f = np.array(sol[\"f\"])\n np.testing.assert_equal(f.shape, (1, 1))\n np.testing.assert_almost_equal(f[0, 0], 0.7592028279017864)\n\n # Check constraints\n g = np.array(sol[\"g\"])\n np.testing.assert_equal(g.shape, (160, 1))\n np.testing.assert_almost_equal(g, np.zeros((160, 1)))\n\n # Check some of the results\n states, controls = Data.get_data(ocp, sol[\"x\"])\n q, qdot, tau = states[\"q\"], states[\"q_dot\"], controls[\"tau\"]\n\n # initial and final position\n np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.5, 0.5)))\n np.testing.assert_almost_equal(q[:, -1], np.array((0.1189651, -0.0904378, -0.7999996, 0.7999996)))\n # initial and final velocities\n np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))\n np.testing.assert_almost_equal(qdot[:, -1], np.array((1.2636414, -1.3010929, -3.6274687, 3.6274687)))\n # initial and final controls\n np.testing.assert_almost_equal(tau[:, 0], np.array((-22.1218282)))\n np.testing.assert_almost_equal(tau[:, -1], np.array(0.2653957))\n\n # save and load\n TestUtils.save_and_load(sol, ocp, False)\n\n\[email protected](\"ode_solver\", [OdeSolver.RK])\ndef test_contact_forces_inequality_GREATER_THAN_constraint(ode_solver):\n boundary = 50\n ocp = contact_forces_inequality_GREATER_THAN_constraint.prepare_ocp(\n 
model_path=str(PROJECT_FOLDER) + \"/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod\",\n phase_time=0.3,\n number_shooting_points=10,\n direction=\"GREATER_THAN\",\n boundary=boundary,\n )\n sol = ocp.solve()\n\n # Check objective function value\n f = np.array(sol[\"f\"])\n np.testing.assert_equal(f.shape, (1, 1))\n np.testing.assert_almost_equal(f[0, 0], 0.14525621569048172)\n\n # Check constraints\n g = np.array(sol[\"g\"])\n np.testing.assert_equal(g.shape, (100, 1))\n np.testing.assert_almost_equal(g[:80], np.zeros((80, 1)))\n np.testing.assert_array_less(-g[80:], -boundary)\n expected_pos_g = np.array(\n [\n [50.76491919],\n [51.42493119],\n [57.79007374],\n [64.29551934],\n [67.01905769],\n [68.3225625],\n [67.91793917],\n [65.26700138],\n [59.57311867],\n [50.18463134],\n [160.14834799],\n [141.15361769],\n [85.13345729],\n [56.33535022],\n [53.32684286],\n [52.21679255],\n [51.62923106],\n [51.25728666],\n [50.9871531],\n [50.21972377],\n ]\n )\n np.testing.assert_almost_equal(g[80:], expected_pos_g)\n\n # Check some of the results\n states, controls = Data.get_data(ocp, sol[\"x\"])\n q, qdot, tau = states[\"q\"], states[\"q_dot\"], controls[\"tau\"]\n\n # initial and final position\n np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.75, 0.75)))\n np.testing.assert_almost_equal(q[:, -1], np.array((-0.34054748, 0.1341555, -0.0005438, 0.0005438)))\n # initial and final velocities\n np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))\n np.testing.assert_almost_equal(qdot[:, -1], np.array((-2.01097559, 1.09352001e-03, 4.02195175, -4.02195175)))\n # initial and final controls\n np.testing.assert_almost_equal(tau[:, 0], np.array((-54.1684018)))\n np.testing.assert_almost_equal(tau[:, -1], np.array((-15.69338332)))\n\n # save and load\n TestUtils.save_and_load(sol, ocp, False)\n\n\[email protected](\"ode_solver\", [OdeSolver.RK])\ndef test_contact_forces_inequality_LESSER_THAN_constraint(ode_solver):\n boundary = 100\n ocp = contact_forces_inequality_LESSER_THAN_constraint.prepare_ocp(\n model_path=str(PROJECT_FOLDER) + \"/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod\",\n phase_time=0.3,\n number_shooting_points=10,\n direction=\"LESSER_THAN\",\n boundary=boundary,\n )\n sol = ocp.solve()\n\n # Check objective function value\n f = np.array(sol[\"f\"])\n np.testing.assert_equal(f.shape, (1, 1))\n np.testing.assert_almost_equal(f[0, 0], 0.14525619649247054)\n\n # Check constraints\n g = np.array(sol[\"g\"])\n np.testing.assert_equal(g.shape, (100, 1))\n np.testing.assert_almost_equal(g[:80], np.zeros((80, 1)))\n np.testing.assert_array_less(g[80:], boundary)\n expected_non_zero_g = np.array(\n [\n [63.27237842],\n [63.02339946],\n [62.13898369],\n [60.38380769],\n [57.31193141],\n [52.19952395],\n [43.9638679],\n [31.14938032],\n [12.45022537],\n [-6.35179034],\n [99.06328211],\n [98.87711942],\n [98.64440005],\n [98.34550037],\n [97.94667107],\n [97.38505013],\n [96.52820867],\n [95.03979128],\n [91.73734926],\n [77.48803304],\n ]\n )\n np.testing.assert_almost_equal(g[80:], expected_non_zero_g)\n\n # Check some of the results\n states, controls = Data.get_data(ocp, sol[\"x\"])\n q, qdot, tau = states[\"q\"], states[\"q_dot\"], controls[\"tau\"]\n\n # initial and final position\n np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.75, 0.75)))\n np.testing.assert_almost_equal(\n q[:, -1], np.array((-3.40655617e-01, 1.34155544e-01, -3.27530886e-04, 3.27530886e-04))\n )\n # initial and final velocities\n 
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))\n np.testing.assert_almost_equal(qdot[:, -1], np.array((-2.86650427, 9.38827988e-04, 5.73300901, -5.73300901)))\n # initial and final controls\n np.testing.assert_almost_equal(tau[:, 0], np.array((-32.78862874)))\n np.testing.assert_almost_equal(tau[:, -1], np.array((-25.23729156)))\n\n # save and load\n TestUtils.save_and_load(sol, ocp, False)\n\n\[email protected](\"ode_solver\", [OdeSolver.RK])\ndef test_non_slipping_constraint(ode_solver):\n ocp = non_slipping_constraint.prepare_ocp(\n model_path=str(PROJECT_FOLDER) + \"/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod\",\n phase_time=0.6,\n number_shooting_points=10,\n mu=0.005,\n )\n sol = ocp.solve()\n\n # Check objective function value\n f = np.array(sol[\"f\"])\n np.testing.assert_equal(f.shape, (1, 1))\n np.testing.assert_almost_equal(f[0, 0], 0.23984490846250128)\n\n # Check constraints\n g = np.array(sol[\"g\"])\n np.testing.assert_equal(g.shape, (120, 1))\n np.testing.assert_almost_equal(g[:80], np.zeros((80, 1)))\n np.testing.assert_array_less(-g[80:], 0)\n expected_pos_g = np.array(\n [\n [8.74337995e01],\n [8.74671258e01],\n [8.75687834e01],\n [8.77422814e01],\n [8.79913157e01],\n [8.83197844e01],\n [8.87318039e01],\n [8.92317298e01],\n [8.98241976e01],\n [9.05145013e01],\n [4.63475930e01],\n [4.63130361e01],\n [4.62075073e01],\n [4.60271956e01],\n [4.57680919e01],\n [4.54259742e01],\n [4.49963909e01],\n [4.44746357e01],\n [4.38556802e01],\n [4.31334141e01],\n [1.33775343e00],\n [6.04899894e-05],\n [1.33773204e00],\n [6.95785950e-05],\n [1.33768173e00],\n [8.11784641e-05],\n [1.33759829e00],\n [9.64764869e-05],\n [1.33747653e00],\n [1.17543301e-04],\n [1.33730923e00],\n [1.48352248e-04],\n [1.33708435e00],\n [1.97600363e-04],\n [1.33677502e00],\n [2.88636453e-04],\n [1.33628619e00],\n [5.12590377e-04],\n [1.33466928e00],\n [1.80987419e-03],\n ]\n )\n np.testing.assert_almost_equal(g[80:], expected_pos_g)\n\n # Check some of the results\n states, controls = Data.get_data(ocp, sol[\"x\"])\n q, qdot, tau = states[\"q\"], states[\"q_dot\"], controls[\"tau\"]\n\n # initial and final position\n np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.5, 0.5)))\n np.testing.assert_almost_equal(q[:, -1], np.array((-0.02364845, 0.01211471, -0.44685185, 0.44685185)))\n # initial and final velocities\n np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))\n np.testing.assert_almost_equal(qdot[:, -1], np.array((-0.08703131, 0.04170362, 0.1930144, -0.1930144)))\n # initial and final controls\n np.testing.assert_almost_equal(tau[:, 0], np.array((-14.33813755)))\n np.testing.assert_almost_equal(tau[:, -1], np.array((-13.21317493)))\n\n # save and load\n TestUtils.save_and_load(sol, ocp, False)\n",
"import numpy as np\n\nfrom .mapping import BidirectionalMapping, Mapping\nfrom .enums import InterpolationType\n\n\nclass PathCondition(np.ndarray):\n def __new__(cls, input_array, interpolation_type=InterpolationType.CONSTANT):\n # Check and reinterpret input\n input_array = np.asarray(input_array, dtype=float)\n if len(input_array.shape) == 0:\n input_array = input_array[np.newaxis, np.newaxis]\n if interpolation_type == InterpolationType.CONSTANT:\n if len(input_array.shape) == 1:\n input_array = input_array[:, np.newaxis]\n if input_array.shape[1] != 1:\n raise RuntimeError(\"Value for InterpolationType.CONSTANT must have exactly one column\")\n\n elif interpolation_type == InterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT:\n if len(input_array.shape) == 1:\n input_array = input_array[:, np.newaxis]\n if input_array.shape[1] != 1 and input_array.shape[1] != 3:\n raise RuntimeError(\"Value for InterpolationType.CONSTANT must have exactly one or three columns\")\n if input_array.shape[1] == 1:\n input_array = np.repeat(input_array, 3, axis=1)\n elif interpolation_type == InterpolationType.LINEAR:\n if input_array.shape[1] != 2:\n raise RuntimeError(\"Value for InterpolationType.LINEAR must have exactly two columns\")\n elif interpolation_type == InterpolationType.EACH_FRAME:\n if input_array.shape[1] < 2:\n raise RuntimeError(\"Value for InterpolationType.EACH_FRAME must exactly match the number of points\")\n else:\n raise RuntimeError(f\"InterpolationType is not implemented yet\")\n obj = np.asarray(input_array).view(cls)\n\n # Additional information\n obj.nb_shooting = None\n obj.type = interpolation_type\n\n return obj\n\n def __array_finalize__(self, obj):\n # see InfoArray.__array_finalize__ for comments\n if obj is None:\n return\n self.nb_shooting = getattr(obj, \"nb_shooting\", None)\n self.type = getattr(obj, \"type\", None)\n\n def __reduce__(self):\n pickled_state = super(PathCondition, self).__reduce__()\n new_state = pickled_state[2] + (self.nb_shooting, self.type)\n return (pickled_state[0], pickled_state[1], new_state)\n\n def __setstate__(self, state):\n self.nb_shooting = state[-2]\n self.type = state[-1]\n # Call the parent's __setstate__ with the other tuple elements.\n super(PathCondition, self).__setstate__(state[0:-2])\n\n def check_and_adjust_dimensions(self, nb_elements, nb_shooting, condition_type):\n if (\n self.type == InterpolationType.CONSTANT\n or self.type == InterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT\n or self.type == InterpolationType.LINEAR\n ):\n self.nb_shooting = nb_shooting\n elif self.type == InterpolationType.EACH_FRAME:\n self.nb_shooting = nb_shooting + 1\n else:\n if self.nb_shooting != nb_shooting:\n raise RuntimeError(\n f\"Invalid number of shooting ({self.nb_shooting}), the expected number is {nb_shooting}\"\n )\n\n if self.shape[0] != nb_elements:\n raise RuntimeError(\n f\"Invalid number of {condition_type} ({self.shape[0] }), the expected size is {nb_elements}\"\n )\n\n if self.type == InterpolationType.CONSTANT:\n if self.shape[1] != 1:\n raise RuntimeError(\n f\"Invalid number of {condition_type} for InterpolationType.CONSTANT (ncols = {self.shape[1]}), \"\n f\"the expected number of column is 1\"\n )\n elif self.type == InterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT:\n if self.shape[1] != 3:\n raise RuntimeError(\n f\"Invalid number of {condition_type} for InterpolationType.CONSTANT (ncols = {self.shape[1]}), \"\n f\"the expected number of column is 3\"\n )\n elif self.type == 
InterpolationType.LINEAR:\n if self.shape[1] != 2:\n raise RuntimeError(\n f\"Invalid number of {condition_type} for InterpolationType.LINEAR (ncols = {self.shape[1]}), \"\n f\"the expected number of column is 2\"\n )\n elif self.type == InterpolationType.EACH_FRAME:\n if self.shape[1] != self.nb_shooting:\n raise RuntimeError(\n f\"Invalid number of {condition_type} for InterpolationType.LINEAR (ncols = {self.shape[1]}), \"\n f\"the expected number of column is {self.nb_shooting}\"\n )\n else:\n raise RuntimeError(f\"InterpolationType is not implemented yet\")\n\n def evaluate_at(self, shooting_point):\n if self.nb_shooting is None:\n raise RuntimeError(f\"check_and_adjust_dimensions must be called at least once before evaluating at\")\n\n if self.type == InterpolationType.CONSTANT:\n return self[:, 0]\n elif self.type == InterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT:\n if shooting_point == 0:\n return self[:, 0]\n elif shooting_point == self.nb_shooting:\n return self[:, 2]\n else:\n return self[:, 1]\n elif self.type == InterpolationType.LINEAR:\n return self[:, 0] + (self[:, 1] - self[:, 0]) * shooting_point / self.nb_shooting\n elif self.type == InterpolationType.EACH_FRAME:\n return self[:, shooting_point]\n else:\n raise RuntimeError(f\"InterpolationType is not implemented yet\")\n\n\nclass Bounds:\n \"\"\"\n Organizes bounds of states(\"X\"), controls(\"U\") and \"V\".\n \"\"\"\n\n def __init__(\n self,\n min_bound=(),\n max_bound=(),\n interpolation_type=InterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT,\n **parameters,\n ):\n if isinstance(min_bound, PathCondition):\n self.min = min_bound\n else:\n self.min = PathCondition(min_bound, interpolation_type=interpolation_type, **parameters)\n\n if isinstance(max_bound, PathCondition):\n self.max = max_bound\n else:\n self.max = PathCondition(max_bound, interpolation_type=interpolation_type, **parameters)\n\n def check_and_adjust_dimensions(self, nb_elements, nb_shooting):\n \"\"\"\n Detects if bounds are not correct (wrong size of list: different than degrees of freedom).\n Detects if first or last nodes are not complete, in that case they have same bounds than intermediates nodes.\n :param nb_elements: Length of each list.\n \"\"\"\n self.min.check_and_adjust_dimensions(nb_elements, nb_shooting, \"Bound min\")\n self.max.check_and_adjust_dimensions(nb_elements, nb_shooting, \"Bound max\")\n\n def concatenate(self, other):\n self.min = PathCondition(np.concatenate((self.min, other.min)), interpolation_type=self.min.type)\n self.max = PathCondition(np.concatenate((self.max, other.max)), interpolation_type=self.max.type)\n\n\nclass QAndQDotBounds(Bounds):\n def __init__(self, biorbd_model, all_generalized_mapping=None, q_mapping=None, q_dot_mapping=None):\n if all_generalized_mapping is not None:\n if q_mapping is not None or q_dot_mapping is not None:\n raise RuntimeError(\"all_generalized_mapping and a specified mapping cannot be used along side\")\n q_mapping = all_generalized_mapping\n q_dot_mapping = all_generalized_mapping\n\n if not q_mapping:\n q_mapping = BidirectionalMapping(Mapping(range(biorbd_model.nbQ())), Mapping(range(biorbd_model.nbQ())))\n if not q_dot_mapping:\n q_dot_mapping = BidirectionalMapping(\n Mapping(range(biorbd_model.nbQdot())), Mapping(range(biorbd_model.nbQdot()))\n )\n\n QRanges = []\n QDotRanges = []\n for i in range(biorbd_model.nbSegment()):\n segment = biorbd_model.segment(i)\n QRanges += [q_range for q_range in segment.QRanges()]\n QDotRanges += [qdot_range for qdot_range in 
segment.QDotRanges()]\n\n x_min = [QRanges[i].min() for i in q_mapping.reduce.map_idx] + [\n QDotRanges[i].min() for i in q_dot_mapping.reduce.map_idx\n ]\n x_max = [QRanges[i].max() for i in q_mapping.reduce.map_idx] + [\n QDotRanges[i].max() for i in q_dot_mapping.reduce.map_idx\n ]\n\n super(QAndQDotBounds, self).__init__(min_bound=x_min, max_bound=x_max)\n\n\nclass InitialConditions:\n def __init__(self, initial_guess=(), interpolation_type=InterpolationType.CONSTANT, **parameters):\n if isinstance(initial_guess, PathCondition):\n self.init = initial_guess\n else:\n self.init = PathCondition(initial_guess, interpolation_type=interpolation_type, **parameters)\n\n def check_and_adjust_dimensions(self, nb_elements, nb_shooting):\n \"\"\"\n Detects if initial values are not given, in that case \"0\" is given for all degrees of freedom.\n Detects if initial values are not correct (wrong size of list: different than degrees of freedom).\n Detects if first or last nodes are not complete, in that case they have same values than intermediates nodes.\n \"\"\"\n self.init.check_and_adjust_dimensions(nb_elements, nb_shooting, \"InitialConditions\")\n\n def concatenate(self, other):\n self.init = PathCondition(np.concatenate((self.init, other.init)), interpolation_type=self.init.type,)\n"
] | [
[
"numpy.testing.assert_equal",
"numpy.testing.assert_array_less",
"numpy.testing.assert_almost_equal",
"numpy.array",
"numpy.zeros"
],
[
"numpy.asarray",
"numpy.repeat",
"numpy.concatenate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
johnson7788/TextBrewer | [
"fa7fa4d4a2a8debde5b148d448238f3b4fa1aa9a",
"fa7fa4d4a2a8debde5b148d448238f3b4fa1aa9a"
] | [
"examples/mnli_example/main.distill.py",
"huazhuang/main.trainer_predict_api.py"
] | [
"import logging\nimport time\nlogging.basicConfig(\n format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%Y/%m/%d %H:%M:%S',\n level=logging.INFO,\n )\nlogger = logging.getLogger(\"Main\")\n\nimport os,random\nimport numpy as np\nimport torch\nfrom utils_glue import output_modes, processors\nfrom pytorch_pretrained_bert.my_modeling import BertConfig\nfrom pytorch_pretrained_bert import BertTokenizer\nfrom optimization import BERTAdam\nimport config\nfrom utils import divide_parameters, load_and_cache_examples\nfrom modeling import BertForGLUESimple,BertForGLUESimpleAdaptor\n\nfrom textbrewer import DistillationConfig, TrainingConfig, GeneralDistiller\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler\nfrom tqdm import tqdm\nfrom utils_glue import compute_metrics\nfrom functools import partial\n\n\ndef args_check(args):\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir):\n logger.warning(\"Output directory () already exists and is not empty.\")\n if args.gradient_accumulation_steps < 1:\n raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\n args.gradient_accumulation_steps))\n if not args.do_train and not args.do_predict:\n raise ValueError(\"At least one of `do_train` or `do_predict` must be True.\")\n\n if args.local_rank == -1:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n n_gpu = torch.cuda.device_count() if torch.cuda.is_available() else 0\n else:\n device = torch.device(\"cuda\", args.local_rank)\n n_gpu = 1\n torch.distributed.init_process_group(backend='nccl')\n logger.info(\"device %s n_gpu %d distributed training %r\", device, n_gpu, bool(args.local_rank != -1))\n args.n_gpu = n_gpu\n args.device = device\n return device, n_gpu\n\ndef predict(model,eval_datasets,step,args):\n eval_task_names = (\"mnli\", \"mnli-mm\") if args.task_name == \"mnli\" else (args.task_name,)\n eval_output_dir = args.output_dir\n results = {}\n for eval_task,eval_dataset in zip(eval_task_names, eval_datasets):\n if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir)\n logger.info(\"Predicting...\")\n logger.info(\"***** Running predictions *****\")\n logger.info(\" task name = %s\", eval_task)\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.predict_batch_size)\n eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.predict_batch_size)\n model.eval()\n\n #起始时间\n start_time = time.time()\n pred_logits = []\n label_ids = []\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\", disable=None):\n input_ids, input_mask, segment_ids, labels = batch\n input_ids = input_ids.to(args.device)\n input_mask = input_mask.to(args.device)\n segment_ids = segment_ids.to(args.device)\n with torch.no_grad():\n logits = model(input_ids, input_mask, segment_ids)\n cpu_logits = logits.detach().cpu()\n for i in range(len(cpu_logits)):\n pred_logits.append(cpu_logits[i].numpy())\n label_ids.append(labels[i])\n\n pred_logits = np.array(pred_logits)\n label_ids = np.array(label_ids)\n\n if args.output_mode == \"classification\":\n preds = np.argmax(pred_logits, axis=1)\n else: # args.output_mode == \"regression\":\n preds = np.squeeze(pred_logits)\n result = compute_metrics(eval_task, preds, label_ids)\n 
logger.info(f\"task:,{eval_task}\")\n logger.info(f\"result: {result}\")\n results.update(result)\n\n cost_time = time.time() - start_time\n logger.info(f\"--- 评估{len(eval_dataset)}条数据的总耗时是 {cost_time} seconds, 每条耗时 {cost_time/len(eval_dataset)} seconds ---\")\n output_eval_file = os.path.join(eval_output_dir, \"eval_results-%s.txt\" % eval_task)\n with open(output_eval_file, \"a\") as writer:\n logger.info(\"***** Eval results {} task {} *****\".format(step, eval_task))\n writer.write(\"step: %d ****\\n \" % step)\n for key in sorted(results.keys()):\n logger.info(\"%s = %s\", key, str(results[key]))\n writer.write(\"%s = %s\\n\" % (key, str(results[key])))\n model.train()\n return results\n\ndef main():\n #parse arguments\n config.parse()\n args = config.args\n for k,v in vars(args).items():\n logger.info(f\"{k}:{v}\")\n #set seeds\n torch.manual_seed(args.random_seed)\n torch.cuda.manual_seed_all(args.random_seed)\n np.random.seed(args.random_seed)\n random.seed(args.random_seed)\n\n #arguments check\n device, n_gpu = args_check(args)\n os.makedirs(args.output_dir, exist_ok=True)\n forward_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)\n args.forward_batch_size = forward_batch_size\n\n #load bert config\n bert_config_T = BertConfig.from_json_file(args.bert_config_file_T)\n bert_config_S = BertConfig.from_json_file(args.bert_config_file_S)\n assert args.max_seq_length <= bert_config_T.max_position_embeddings\n assert args.max_seq_length <= bert_config_S.max_position_embeddings\n\n #Prepare GLUE task\n processor = processors[args.task_name]()\n args.output_mode = output_modes[args.task_name]\n label_list = processor.get_labels()\n num_labels = len(label_list)\n\n #read data\n train_dataset = None\n eval_datasets = None\n num_train_steps = None\n tokenizer = BertTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)\n # 加载数据集\n if args.do_train:\n train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)\n if args.aux_task_name:\n aux_train_dataset = load_and_cache_examples(args, args.aux_task_name, tokenizer, evaluate=False, is_aux=True)\n train_dataset = torch.utils.data.ConcatDataset([train_dataset, aux_train_dataset])\n num_train_steps = int(len(train_dataset)/args.train_batch_size) * args.num_train_epochs\n if args.do_predict:\n eval_datasets = []\n eval_task_names = (\"mnli\", \"mnli-mm\") if args.task_name == \"mnli\" else (args.task_name,)\n for eval_task in eval_task_names:\n eval_datasets.append(load_and_cache_examples(args, eval_task, tokenizer, evaluate=True))\n logger.info(\"数据集加载成功\")\n\n #加载模型,加载teacher和student模型\n model_T = BertForGLUESimple(bert_config_T, num_labels=num_labels,args=args)\n model_S = BertForGLUESimple(bert_config_S, num_labels=num_labels,args=args)\n #加载teacher模型参数\n if args.tuned_checkpoint_T is not None:\n state_dict_T = torch.load(args.tuned_checkpoint_T, map_location='cpu')\n model_T.load_state_dict(state_dict_T)\n model_T.eval()\n else:\n assert args.do_predict is True\n #Load student\n if args.load_model_type=='bert':\n assert args.init_checkpoint_S is not None\n state_dict_S = torch.load(args.init_checkpoint_S, map_location='cpu')\n if args.only_load_embedding:\n state_weight = {k[5:]:v for k,v in state_dict_S.items() if k.startswith('bert.embeddings')}\n missing_keys,_ = model_S.bert.load_state_dict(state_weight,strict=False)\n logger.info(f\"Missing keys {list(missing_keys)}\")\n else:\n state_weight = {k[5:]:v for k,v in state_dict_S.items() if k.startswith('bert.')}\n 
missing_keys,_ = model_S.bert.load_state_dict(state_weight,strict=False)\n assert len(missing_keys)==0\n logger.info(\"Model loaded\")\n elif args.load_model_type=='all':\n assert args.tuned_checkpoint_S is not None\n state_dict_S = torch.load(args.tuned_checkpoint_S,map_location='cpu')\n model_S.load_state_dict(state_dict_S)\n logger.info(\"Model loaded\")\n else:\n logger.info(\"Student模型没有可加载参数,随机初始化参数 randomly initialized.\")\n model_T.to(device)\n model_S.to(device)\n\n if args.local_rank != -1 or n_gpu > 1:\n if args.local_rank != -1:\n raise NotImplementedError\n elif n_gpu > 1:\n model_T = torch.nn.DataParallel(model_T) #,output_device=n_gpu-1)\n model_S = torch.nn.DataParallel(model_S) #,output_device=n_gpu-1)\n\n if args.do_train:\n #parameters\n params = list(model_S.named_parameters())\n all_trainable_params = divide_parameters(params, lr=args.learning_rate)\n logger.info(\"Length of all_trainable_params: %d\", len(all_trainable_params))\n #优化器配置\n optimizer = BERTAdam(all_trainable_params,lr=args.learning_rate,\n warmup=args.warmup_proportion,t_total=num_train_steps,schedule=args.schedule,\n s_opt1=args.s_opt1, s_opt2=args.s_opt2, s_opt3=args.s_opt3)\n\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Forward batch size = %d\", forward_batch_size)\n logger.info(\" Num backward steps = %d\", num_train_steps)\n\n ########### DISTILLATION ###########\n train_config = TrainingConfig(\n gradient_accumulation_steps = args.gradient_accumulation_steps,\n ckpt_frequency = args.ckpt_frequency,\n log_dir = args.output_dir,\n output_dir = args.output_dir,\n device = args.device)\n # 定义了一些固定的matches配置文件\n from matches import matches\n intermediate_matches = None\n if isinstance(args.matches,(list,tuple)):\n intermediate_matches = []\n for match in args.matches:\n intermediate_matches += matches[match]\n logger.info(f\"中间层match信息: {intermediate_matches}\")\n distill_config = DistillationConfig(\n temperature = args.temperature,\n intermediate_matches=intermediate_matches)\n\n logger.info(f\"训练配置: {train_config}\")\n logger.info(f\"蒸馏配置: {distill_config}\")\n adaptor_T = partial(BertForGLUESimpleAdaptor, no_logits=args.no_logits, no_mask = args.no_inputs_mask)\n adaptor_S = partial(BertForGLUESimpleAdaptor, no_logits=args.no_logits, no_mask = args.no_inputs_mask)\n # 支持中间状态匹配的通用蒸馏模型\n distiller = GeneralDistiller(train_config = train_config,\n distill_config = distill_config,\n model_T = model_T, model_S = model_S,\n adaptor_T = adaptor_T,\n adaptor_S = adaptor_S)\n\n if args.local_rank == -1:\n train_sampler = RandomSampler(train_dataset)\n else:\n raise NotImplementedError\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.forward_batch_size,drop_last=True)\n callback_func = partial(predict, eval_datasets=eval_datasets, args=args)\n with distiller:\n distiller.train(optimizer, scheduler=None, dataloader=train_dataloader,\n num_epochs=args.num_train_epochs, callback=callback_func)\n\n if not args.do_train and args.do_predict:\n res = predict(model_S,eval_datasets,step=0,args=args)\n print (res)\n\nif __name__ == \"__main__\":\n main()\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2020/12/23 4:56 下午\n# @File : main.trainer_predict_api.py\n# @Author: johnson\n# @Contact : github: johnson7788\n# @Desc :\nimport logging\n\nlogging.basicConfig(\n format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%Y/%m/%d %H:%M:%S',\n level=logging.INFO,\n)\nlogger = logging.getLogger(\"Main\")\n\nimport os, random, time\nimport numpy as np\nimport torch\nfrom pytorch_pretrained_bert.my_modeling import BertConfig\nfrom pytorch_pretrained_bert import BertTokenizer\nfrom modeling import BertSPCSimple, BertForGLUESimpleAdaptorTraining\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler\nfrom tqdm import tqdm\nfrom utils_glue import InputExample, convert_examples_to_features\nimport argparse\n\n\nfrom flask import Flask, request, jsonify, abort\n\n######################################################\n# 使用没有蒸馏的模型预测,改造成一个flask api,\n######################################################\n\napp = Flask(__name__)\n\n\ndef load_examples(contents, max_seq_length, tokenizer, label_list):\n \"\"\"\n :param contents: eg: [('苹果很好用', '苹果')]\n :param max_seq_length:\n :param tokenizer: 初始化后的tokenizer\n :param label_list:\n :return:\n \"\"\"\n examples = []\n for guid, content in enumerate(contents):\n sentence, aspect = content\n examples.append(\n InputExample(guid=guid, text_a=sentence, text_b=aspect))\n features = convert_examples_to_features(examples, label_list, max_seq_length, tokenizer,\n output_mode=\"classification\",\n cls_token_segment_id=0, pad_token_segment_id=0)\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids)\n return dataset\n\n\nclass TorchAsBertModel(object):\n def __init__(self, verbose=0):\n self.verbose = verbose\n self.label_list = [\"NEG\", \"NEU\", \"POS\"]\n self.num_labels = len(self.label_list)\n # 判断使用的设备\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.n_gpu = torch.cuda.device_count() if torch.cuda.is_available() else 0\n self.tokenizer, self.model = self.load_model()\n # 句子左右最大truncate序列长度\n self.left_max_seq_len = 15\n self.right_max_seq_len = 20\n self.aspect_max_seq_len = 30\n\n def load_model(self):\n parser = argparse.ArgumentParser()\n args = parser.parse_args()\n args.output_encoded_layers = True\n args.output_attention_layers = True\n args.output_att_score = True\n args.output_att_sum = True\n self.args = args\n # 解析配置文件, 教师模型和student模型的vocab是不变的\n self.vocab_file = \"bert_model/vocab.txt\"\n # 这里是使用的teacher的config和微调后的teacher模型, 也可以换成student的config和蒸馏后的student模型\n # student config: config/chinese_bert_config_L4t.json\n # distil student model: distil_model/gs8316.pkl\n self.bert_config_file_S = \"bert_model/config.json\"\n self.tuned_checkpoint_S = \"trained_teacher_model/gs3024.pkl\"\n self.max_seq_length = 70\n # 预测的batch_size大小\n self.predict_batch_size = 64\n # 加载student的配置文件, 校验最大序列长度小于我们的配置中的序列长度\n bert_config_S = BertConfig.from_json_file(self.bert_config_file_S)\n\n # 加载tokenizer\n tokenizer = BertTokenizer(vocab_file=self.vocab_file)\n\n # 加载模型\n model_S = BertSPCSimple(bert_config_S, num_labels=self.num_labels, args=self.args)\n state_dict_S = torch.load(self.tuned_checkpoint_S, 
map_location=self.device)\n model_S.load_state_dict(state_dict_S)\n if self.verbose:\n print(\"模型已加载\")\n\n return tokenizer, model_S\n\n def truncate(self, input_text, max_len, trun_post='post'):\n \"\"\"\n 实施截断数据\n :param input_text:\n :param max_len: eg: 15\n :param trun_post: 截取方向,向前还是向后截取,\n \"pre\":截取前面的, \"post\":截取后面的\n :return:\n \"\"\"\n if max_len is not None and len(input_text) > max_len:\n if trun_post == \"post\":\n return input_text[-max_len:]\n else:\n return input_text[:max_len]\n else:\n return input_text\n\n def clean(self, text_left, aspect, text_right):\n \"\"\"\n 截断数据\n :param text_left:\n :param aspect:\n :param text_right:\n :return:\n \"\"\"\n text_left = self.truncate(text_left, self.left_max_seq_len)\n aspect = self.truncate(aspect, self.aspect_max_seq_len)\n text_right = self.truncate(text_right, self.right_max_seq_len, trun_post=\"pre\")\n\n return text_left, aspect, text_right\n\n def predict_batch(self, data):\n \"\"\"\n batch_size数据处理\n :param data: 是一个要处理的数据列表\n :return:\n \"\"\"\n contents = []\n for one_data in data:\n content, aspect, aspect_start, aspect_end = one_data\n text_left = content[:aspect_start]\n text_right = content[aspect_end:]\n text_left, aspect, text_right = self.clean(text_left, aspect, text_right)\n new_content = text_left + aspect + text_right\n contents.append((new_content, aspect))\n\n eval_dataset = load_examples(contents, self.max_seq_length, self.tokenizer, self.label_list)\n if self.verbose:\n print(\"评估数据集已加载\")\n\n res = self.do_predict(self.model, eval_dataset)\n if self.verbose:\n print(f\"预测的结果是: {res}, {[self.label_list[id] for id in res]}\")\n\n # TODO 输入为一条数据,返回也只返回一条结果即可以了\n return res\n def predict_batch_without_turncate(self, data):\n \"\"\"\n batch_size数据处理\n :param data: 是一个要处理的数据列表[(content,aspect),...,]\n :return:\n \"\"\"\n eval_dataset = load_examples(data, self.max_seq_length, self.tokenizer, self.label_list)\n if self.verbose:\n print(\"评估数据集已加载\")\n\n res = self.do_predict(self.model, eval_dataset)\n if self.verbose:\n print(f\"预测的结果是: {res}, {[self.label_list[id] for id in res]}\")\n\n #把id变成标签\n result = [self.label_list[r] for r in res]\n return result\n\n def do_predict(self, model, eval_dataset):\n # 任务名字\n results = []\n if self.verbose:\n print(\"***** 开始预测 *****\")\n print(\" 样本数 = %d\", len(eval_dataset))\n print(\" Batch size = %d\", self.predict_batch_size)\n # 评估样本\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=self.predict_batch_size)\n model.eval()\n model.to(self.device)\n # 起始时间\n start_time = time.time()\n # 存储预测值\n pred_logits = []\n for batch in tqdm(eval_dataloader, desc=\"评估中\", disable=True):\n input_ids, input_mask, segment_ids = batch\n input_ids = input_ids.to(self.device)\n input_mask = input_mask.to(self.device)\n segment_ids = segment_ids.to(self.device)\n with torch.no_grad():\n logits = model(input_ids, input_mask, segment_ids)\n cpu_logits = logits.detach().cpu()\n for i in range(len(cpu_logits)):\n pred_logits.append(cpu_logits[i].numpy())\n pred_logits = np.array(pred_logits)\n\n # 找到最大的概率label\n preds = np.argmax(pred_logits, axis=1)\n if self.verbose:\n print(f\"preds: {preds}\")\n results.extend(preds.tolist())\n\n cost_time = time.time() - start_time\n if self.verbose:\n print(\n f\"--- 评估{len(eval_dataset)}条数据的总耗时是 {cost_time} seconds, 每条耗时 {cost_time / len(eval_dataset)} seconds ---\")\n return results\n\n\[email protected](\"/api\", methods=['POST'])\ndef api():\n \"\"\"\n Args:\n test_data: 
需要预测的数据,是一个文字列表, [(content,aspect),...,]\n Returns:\n \"\"\"\n jsonres = request.get_json()\n test_data = jsonres.get('data', None)\n model = TorchAsBertModel()\n results = model.predict_batch_without_turncate(test_data)\n return jsonify(results)\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000, debug=True, threaded=True)\n"
] | [
[
"torch.utils.data.DistributedSampler",
"torch.distributed.init_process_group",
"numpy.random.seed",
"torch.load",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"numpy.squeeze",
"torch.utils.data.DataLoader",
"torch.utils.data.RandomSampler",
"torch.utils.data.ConcatDataset",
"numpy.argmax",
"torch.no_grad",
"torch.nn.DataParallel",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.device",
"torch.cuda.device_count",
"numpy.array"
],
[
"torch.load",
"torch.utils.data.TensorDataset",
"torch.utils.data.SequentialSampler",
"torch.utils.data.DataLoader",
"torch.tensor",
"numpy.argmax",
"torch.no_grad",
"torch.cuda.is_available",
"torch.cuda.device_count",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mf-zhang/flownet2-pytorch | [
"aba5bb57e9b9c145be909f61cd9212a59fdfa737"
] | [
"zmf_main_py37_2resume.py"
] | [
"#!/usr/bin/env python\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom tensorboardX import SummaryWriter\n\nimport argparse, os, sys, subprocess\nimport setproctitle, colorama\nimport numpy as np\nfrom tqdm import tqdm\nfrom glob import glob\nfrom os.path import *\n\nimport models, losses, datasets\nfrom utils import flow_utils, tools\n\n# fp32 copy of parameters for update\nglobal param_copy\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--start_epoch', type=int, default=1)\n parser.add_argument('--total_epochs', type=int, default=10000)\n parser.add_argument('--batch_size', '-b', type=int, default=8, help=\"Batch size\")\n parser.add_argument('--train_n_batches', type=int, default = -1, help='Number of min-batches per epoch. If < 0, it will be determined by training_dataloader')\n parser.add_argument('--crop_size', type=int, nargs='+', default = [256, 256], help=\"Spatial dimension to crop training samples for training\")\n parser.add_argument('--gradient_clip', type=float, default=None)\n parser.add_argument('--schedule_lr_frequency', type=int, default=0, help='in number of iterations (0 for no schedule)')\n parser.add_argument('--schedule_lr_fraction', type=float, default=10)\n parser.add_argument(\"--rgb_max\", type=float, default = 255.)\n\n parser.add_argument('--number_workers', '-nw', '--num_workers', type=int, default=8)\n parser.add_argument('--number_gpus', '-ng', type=int, default=-1, help='number of GPUs to use')\n parser.add_argument('--no_cuda', action='store_true')\n\n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('--name', default='run', type=str, help='a name to append to the save directory')\n parser.add_argument('--save', '-s', default='./work', type=str, help='directory for saving')\n\n parser.add_argument('--validation_frequency', type=int, default=5, help='validate every n epochs')\n parser.add_argument('--validation_n_batches', type=int, default=-1)\n parser.add_argument('--render_validation', action='store_true', help='run inference (save flows to file) and every validation_frequency epoch')\n\n parser.add_argument('--inference', action='store_true')\n parser.add_argument('--inference_size', type=int, nargs='+', default = [-1,-1], help='spatial size divisible by 64. 
default (-1,-1) - largest possible valid size would be used')\n parser.add_argument('--inference_batch_size', type=int, default=1)\n parser.add_argument('--inference_n_batches', type=int, default=-1)\n parser.add_argument('--save_flow', action='store_true', help='save predicted flows to file')\n\n parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')\n parser.add_argument('--resume2', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')\n parser.add_argument('--log_frequency', '--summ_iter', type=int, default=1, help=\"Log every n batches\")\n\n parser.add_argument('--skip_training', action='store_true')\n parser.add_argument('--skip_validation', action='store_true')\n\n parser.add_argument('--fp16', action='store_true', help='Run model in pseudo-fp16 mode (fp16 storage fp32 math).')\n parser.add_argument('--fp16_scale', type=float, default=1024., help='Loss scaling, positive power of 2 values can improve fp16 convergence.')\n\n tools.add_arguments_for_module(parser, models, argument_for_class='model', default='FlowNet2')\n\n tools.add_arguments_for_module(parser, losses, argument_for_class='loss', default='L1Loss')\n\n tools.add_arguments_for_module(parser, torch.optim, argument_for_class='optimizer', default='Adam', skip_params=['params'])\n \n tools.add_arguments_for_module(parser, datasets, argument_for_class='training_dataset', default='MpiSintelFinal', \n skip_params=['is_cropped'],\n parameter_defaults={'root': './MPI-Sintel/flow/training'})\n \n tools.add_arguments_for_module(parser, datasets, argument_for_class='validation_dataset', default='MpiSintelClean', \n skip_params=['is_cropped'],\n parameter_defaults={'root': './MPI-Sintel/flow/training',\n 'replicates': 1})\n \n tools.add_arguments_for_module(parser, datasets, argument_for_class='inference_dataset', default='MpiSintelClean', \n skip_params=['is_cropped'],\n parameter_defaults={'root': './MPI-Sintel/flow/training',\n 'replicates': 1})\n\n main_dir = os.path.dirname(os.path.realpath(__file__))\n os.chdir(main_dir)\n\n # Parse the official arguments\n with tools.TimerBlock(\"Parsing Arguments\") as block:\n args = parser.parse_args()\n if args.number_gpus < 0 : args.number_gpus = torch.cuda.device_count()\n\n # Get argument defaults (hastag #thisisahack)\n parser.add_argument('--IGNORE', action='store_true')\n defaults = vars(parser.parse_args(['--IGNORE']))\n\n # Print all arguments, color the non-defaults\n for argument, value in sorted(vars(args).items()):\n reset = colorama.Style.RESET_ALL\n color = reset if value == defaults[argument] else colorama.Fore.MAGENTA\n block.log('{}{}: {}{}'.format(color, argument, value, reset))\n\n args.model_class = tools.module_to_dict(models)[args.model]\n args.optimizer_class = tools.module_to_dict(torch.optim)[args.optimizer]\n args.loss_class = tools.module_to_dict(losses)[args.loss]\n\n args.training_dataset_class = tools.module_to_dict(datasets)[args.training_dataset]\n args.validation_dataset_class = tools.module_to_dict(datasets)[args.validation_dataset]\n args.inference_dataset_class = tools.module_to_dict(datasets)[args.inference_dataset]\n\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n args.current_hash = subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"]).rstrip()\n args.log_file = join(args.save, 'args.txt')\n\n # dict to collect activation gradients (for training debug purpose)\n args.grads = {}\n\n if args.inference:\n args.skip_validation = True\n 
args.skip_training = True\n args.total_epochs = 1\n args.inference_dir = \"{}/inference\".format(args.save)\n\n print('Source Code')\n print((' Current Git Hash: {}\\n'.format(args.current_hash)))\n\n # Change the title for `top` and `pkill` commands\n setproctitle.setproctitle(args.save)\n\n # Dynamically load the dataset class with parameters passed in via \"--argument_[param]=[value]\" arguments\n with tools.TimerBlock(\"Initializing Datasets\") as block:\n args.effective_batch_size = args.batch_size * args.number_gpus\n args.effective_inference_batch_size = args.inference_batch_size * args.number_gpus\n args.effective_number_workers = args.number_workers * args.number_gpus\n gpuargs = {'num_workers': args.effective_number_workers, \n 'pin_memory': True, \n 'drop_last' : True} if args.cuda else {}\n inf_gpuargs = gpuargs.copy()\n inf_gpuargs['num_workers'] = args.number_workers\n\n if exists(args.training_dataset_root):\n train_dataset = args.training_dataset_class(args, True, **tools.kwargs_from_args(args, 'training_dataset'))\n block.log('Training Dataset: {}'.format(args.training_dataset))\n block.log('Training Input: {}'.format(' '.join([str([d for d in x.size()]) for x in train_dataset[0][0]])))\n block.log('Training Targets: {}'.format(' '.join([str([d for d in x.size()]) for x in train_dataset[0][1]])))\n train_loader = DataLoader(train_dataset, batch_size=args.effective_batch_size, shuffle=True, **gpuargs)\n\n if exists(args.validation_dataset_root):\n validation_dataset = args.validation_dataset_class(args, True, **tools.kwargs_from_args(args, 'validation_dataset'))\n block.log('Validation Dataset: {}'.format(args.validation_dataset))\n block.log('Validation Input: {}'.format(' '.join([str([d for d in x.size()]) for x in validation_dataset[0][0]])))\n block.log('Validation Targets: {}'.format(' '.join([str([d for d in x.size()]) for x in validation_dataset[0][1]])))\n validation_loader = DataLoader(validation_dataset, batch_size=args.effective_batch_size, shuffle=False, **gpuargs)\n\n if exists(args.inference_dataset_root):\n inference_dataset = args.inference_dataset_class(args, False, **tools.kwargs_from_args(args, 'inference_dataset'))\n block.log('Inference Dataset: {}'.format(args.inference_dataset))\n block.log('Inference Input: {}'.format(' '.join([str([d for d in x.size()]) for x in inference_dataset[0][0]])))\n block.log('Inference Targets: {}'.format(' '.join([str([d for d in x.size()]) for x in inference_dataset[0][1]])))\n inference_loader = DataLoader(inference_dataset, batch_size=args.effective_inference_batch_size, shuffle=False, **inf_gpuargs)\n\n # Dynamically load model and loss class with parameters passed in via \"--model_[param]=[value]\" or \"--loss_[param]=[value]\" arguments\n with tools.TimerBlock(\"Building {} model\".format(args.model)) as block:\n class ModelAndLoss(nn.Module):\n def __init__(self, args):\n super(ModelAndLoss, self).__init__()\n kwargs = tools.kwargs_from_args(args, 'model')\n self.model = args.model_class(args, **kwargs)\n kwargs = tools.kwargs_from_args(args, 'loss')\n self.loss = args.loss_class(args, **kwargs)\n \n def forward(self, data, target, inference=False ):\n output = self.model(data)\n\n loss_values = self.loss(output, target)\n\n if not inference :\n return loss_values\n else :\n return loss_values, output\n\n model_and_loss = ModelAndLoss(args)\n\n block.log('Effective Batch Size: {}'.format(args.effective_batch_size))\n block.log('Number of parameters: {}'.format(sum([p.data.nelement() if p.requires_grad else 0 for p 
in model_and_loss.parameters()])))\n\n # assing to cuda or wrap with dataparallel, model and loss \n if args.cuda and (args.number_gpus > 0) and args.fp16:\n block.log('Parallelizing')\n model_and_loss = nn.parallel.DataParallel(model_and_loss, device_ids=list(range(args.number_gpus)))\n\n block.log('Initializing CUDA')\n model_and_loss = model_and_loss.cuda().half()\n torch.cuda.manual_seed(args.seed) \n param_copy = [param.clone().type(torch.cuda.FloatTensor).detach() for param in model_and_loss.parameters()]\n\n elif args.cuda and args.number_gpus > 0:\n block.log('Initializing CUDA')\n model_and_loss = model_and_loss.cuda()\n block.log('Parallelizing')\n model_and_loss = nn.parallel.DataParallel(model_and_loss, device_ids=list(range(args.number_gpus)))\n torch.cuda.manual_seed(args.seed) \n\n else:\n block.log('CUDA not being used')\n torch.manual_seed(args.seed)\n\n fix_param_tensor_number = 0\n # Load weights if needed, otherwise randomly initialize\n if args.resume and os.path.isfile(args.resume) and os.path.isfile(args.resume2):\n block.log(\"Loading checkpoint '{}'\".format(args.resume))\n block.log(\"Loading checkpoint2 '{}'\".format(args.resume2))\n checkpoint = torch.load(args.resume)\n checkpoint2 = torch.load(args.resume2)\n if not args.inference:\n args.start_epoch = checkpoint['epoch']\n if args.start_epoch == 0:\n args.start_epoch = 1\n best_err = checkpoint['best_EPE']\n model_and_loss.module.model.load_state_dict(checkpoint2['state_dict'], strict=False) # input css\n model_and_loss.module.model.load_state_dict(checkpoint['state_dict'], strict=False) # input cs\n\n print(\"\\033[1;33m ATTENTION! \\033[0m\")\n print(\"fix param tensor number: \"+str(len(checkpoint['state_dict'])))\n fix_param_tensor_number = len(checkpoint['state_dict'])\n print(\"ft param tensor number: \"+str(len(checkpoint2['state_dict'])-len(checkpoint['state_dict'])))\n ft_param_tensor_number = len(checkpoint2['state_dict'])-len(checkpoint['state_dict'])\n block.log(\"Loaded checkpoint '{}' (at epoch {})\".format(args.resume, checkpoint['epoch']))\n block.log(\"Loaded checkpoint '{}' (at epoch {})\".format(args.resume2, checkpoint2['epoch']))\n\n elif args.resume and args.inference:\n block.log(\"No checkpoint found at '{}'\".format(args.resume))\n quit()\n\n else:\n block.log(\"Random initialization\")\n\n # zmf\n count_grad_num = 0\n for i,param in enumerate(model_and_loss.parameters()):\n if (i < fix_param_tensor_number): # 90 for cs\n if (param.requires_grad == False):\n print(\"\\033[1;33m ATTENTION! \\033[0m\")\n print(\"wrong1\")\n else:\n param.requires_grad = False\n elif param.requires_grad == True:\n count_grad_num += 1\n else:\n print(\"\\033[1;33m ATTENTION! \\033[0m\")\n print(\"wrong2\")\n if (count_grad_num == ft_param_tensor_number):\n print(\"\\033[1;33m ATTENTION! \\033[0m\")\n print(\"left need grad tensor num: \"+str(count_grad_num))\n else:\n print(\"\\033[1;33m ATTENTION! 
\\033[0m\")\n print(\"wrong3\")\n # fmz\n\n block.log(\"Initializing save directory: {}\".format(args.save))\n if not os.path.exists(args.save):\n os.makedirs(args.save)\n\n train_logger = SummaryWriter(log_dir = os.path.join(args.save, 'train'), comment = 'training')\n validation_logger = SummaryWriter(log_dir = os.path.join(args.save, 'validation'), comment = 'validation')\n\n # print(\"Model's state_dict:\")\n # for param_tensor in model_and_loss.module.model.state_dict():\n # print(param_tensor)\n # print(model_and_loss.module.model.state_dict()[param_tensor])\n \n # Dynamically load the optimizer with parameters passed in via \"--optimizer_[param]=[value]\" arguments \n with tools.TimerBlock(\"Initializing {} Optimizer\".format(args.optimizer)) as block:\n kwargs = tools.kwargs_from_args(args, 'optimizer')\n if args.fp16:\n optimizer = args.optimizer_class([p for p in param_copy if p.requires_grad], **kwargs)\n else:\n optimizer = args.optimizer_class([p for p in model_and_loss.parameters() if p.requires_grad], **kwargs)\n for param, default in list(kwargs.items()):\n block.log(\"{} = {} ({})\".format(param, default, type(default)))\n\n # Log all arguments to file\n for argument, value in sorted(vars(args).items()):\n block.log2file(args.log_file, '{}: {}'.format(argument, value))\n\n # Reusable function for training and validataion\n def train(args, epoch, start_iteration, data_loader, model, optimizer, logger, is_validate=False, offset=0):\n statistics = []\n total_loss = 0\n\n if is_validate:\n model.eval()\n title = 'Validating Epoch {}'.format(epoch)\n args.validation_n_batches = np.inf if args.validation_n_batches < 0 else args.validation_n_batches\n progress = tqdm(tools.IteratorTimer(data_loader), ncols=100, total=np.minimum(len(data_loader), args.validation_n_batches), leave=True, position=offset, desc=title)\n else:\n model.train()\n title = 'Training Epoch {}'.format(epoch)\n args.train_n_batches = np.inf if args.train_n_batches < 0 else args.train_n_batches\n progress = tqdm(tools.IteratorTimer(data_loader), ncols=120, total=np.minimum(len(data_loader), args.train_n_batches), smoothing=.9, miniters=1, leave=True, position=offset, desc=title)\n\n last_log_time = progress._time()\n for batch_idx, (data, target) in enumerate(progress):\n\n data, target = [Variable(d) for d in data], [Variable(t) for t in target]\n if args.cuda and args.number_gpus == 1:\n data, target = [d.cuda(non_blocking=True) for d in data], [t.cuda(non_blocking=True) for t in target]\n\n optimizer.zero_grad() if not is_validate else None\n losses = model(data[0], target[0])\n losses = [torch.mean(loss_value) for loss_value in losses] \n loss_val = losses[0] # Collect first loss for weight update\n total_loss += loss_val.item()\n loss_values = [v.item() for v in losses]\n\n # gather loss_labels, direct return leads to recursion limit error as it looks for variables to gather'\n if type(model.module.loss.loss_labels) is tuple:\n loss_labels = list(model.module.loss.loss_labels[0])\n else:\n loss_labels = list(model.module.loss.loss_labels)\n\n assert not np.isnan(total_loss)\n\n if not is_validate and args.fp16:\n loss_val.backward()\n if args.gradient_clip:\n torch.nn.utils.clip_grad_norm(model.parameters(), args.gradient_clip)\n\n params = list(model.parameters())\n for i in range(len(params)):\n param_copy[i].grad = params[i].grad.clone().type_as(params[i]).detach()\n param_copy[i].grad.mul_(1./args.loss_scale)\n optimizer.step()\n for i in range(len(params)):\n 
params[i].data.copy_(param_copy[i].data)\n\n elif not is_validate:\n loss_val.backward()\n if args.gradient_clip:\n torch.nn.utils.clip_grad_norm(model.parameters(), args.gradient_clip)\n optimizer.step()\n\n # Update hyperparameters if needed\n global_iteration = start_iteration + batch_idx\n if not is_validate:\n tools.update_hyperparameter_schedule(args, epoch, global_iteration, optimizer)\n loss_labels.append('lr')\n loss_values.append(optimizer.param_groups[0]['lr'])\n\n loss_labels.append('load')\n loss_values.append(progress.iterable.last_duration)\n\n # Print out statistics\n statistics.append(loss_values)\n title = '{} Epoch {}'.format('Validating' if is_validate else 'Training', epoch)\n\n progress.set_description(title + ' ' + tools.format_dictionary_of_losses(loss_labels, statistics[-1]))\n\n # if ((((global_iteration + 1) % args.log_frequency) == 0 and not is_validate) or\n # (is_validate and batch_idx == args.validation_n_batches - 1)):\n\n\n logger.add_scalar('batch logs per second', len(statistics) / (progress._time() - last_log_time), global_iteration)\n last_log_time = progress._time()\n\n all_losses = np.array(statistics)\n\n for i, key in enumerate(loss_labels):\n logger.add_scalar('average batch ' + str(key), all_losses[:, i].mean(), global_iteration)\n logger.add_histogram(str(key), all_losses[:, i], global_iteration)\n\n # Reset Summary\n statistics = []\n\n if ( is_validate and ( batch_idx == args.validation_n_batches) ):\n break\n\n if ( (not is_validate) and (batch_idx == (args.train_n_batches)) ):\n break\n\n progress.close()\n\n return total_loss / float(batch_idx + 1), (batch_idx + 1)\n\n # Reusable function for inference\n def inference(args, epoch, data_loader, model, offset=0):\n\n model.eval()\n \n if args.save_flow or args.render_validation:\n flow_folder = \"{}/inference/{}.epoch-{}-flow-field\".format(args.save,args.name.replace('/', '.'),epoch)\n if not os.path.exists(flow_folder):\n os.makedirs(flow_folder)\n\n \n args.inference_n_batches = np.inf if args.inference_n_batches < 0 else args.inference_n_batches\n\n progress = tqdm(data_loader, ncols=100, total=np.minimum(len(data_loader), args.inference_n_batches), desc='Inferencing ', \n leave=True, position=offset)\n\n statistics = []\n total_loss = 0\n for batch_idx, (data, target) in enumerate(progress):\n if args.cuda:\n data, target = [d.cuda(non_blocking=True) for d in data], [t.cuda(non_blocking=True) for t in target]\n data, target = [Variable(d) for d in data], [Variable(t) for t in target]\n\n # when ground-truth flows are not available for inference_dataset, \n # the targets are set to all zeros. 
thus, losses are actually L1 or L2 norms of compute optical flows, \n # depending on the type of loss norm passed in\n with torch.no_grad():\n losses, output = model(data[0], target[0], inference=True)\n\n losses = [torch.mean(loss_value) for loss_value in losses] \n loss_val = losses[0] # Collect first loss for weight update\n total_loss += loss_val.item()\n loss_values = [v.item() for v in losses]\n\n # gather loss_labels, direct return leads to recursion limit error as it looks for variables to gather'\n loss_labels = list(model.module.loss.loss_labels)\n\n statistics.append(loss_values)\n # import IPython; IPython.embed()\n if args.save_flow or args.render_validation:\n for i in range(args.inference_batch_size):\n _pflow = output[i].data.cpu().numpy().transpose(1, 2, 0)\n flow_utils.writeFlow( join(flow_folder, '%06d.flo'%(batch_idx * args.inference_batch_size + i)), _pflow)\n\n progress.set_description('Inference Averages for Epoch {}: '.format(epoch) + tools.format_dictionary_of_losses(loss_labels, np.array(statistics).mean(axis=0)))\n progress.update(1)\n\n if batch_idx == (args.inference_n_batches - 1):\n break\n\n progress.close()\n\n return\n\n # Primary epoch loop\n best_err = 1e8\n progress = tqdm(list(range(args.start_epoch, args.total_epochs + 1)), miniters=1, ncols=100, desc='Overall Progress', leave=True, position=0)\n offset = 1\n last_epoch_time = progress._time()\n global_iteration = 0\n val_iter = 0\n\n for epoch in progress:\n if args.inference or (args.render_validation and ((epoch - 1) % args.validation_frequency) == 0):\n stats = inference(args=args, epoch=epoch - 1, data_loader=inference_loader, model=model_and_loss, offset=offset)\n offset += 1\n\n if not args.skip_validation and ((epoch - 1) % args.validation_frequency) == 0:\n validation_loss, val_it_perep = train(args=args, epoch=epoch - 1,start_iteration=val_iter, data_loader=validation_loader, model=model_and_loss, optimizer=optimizer, logger=validation_logger, is_validate=True, offset=offset)\n val_iter += val_it_perep\n offset += 1\n\n is_best = False\n if validation_loss < best_err:\n best_err = validation_loss\n is_best = True\n\n checkpoint_progress = tqdm(ncols=100, desc='Saving Checkpoint', position=offset)\n tools.save_checkpoint({ 'arch' : args.model,\n 'epoch': epoch,\n 'state_dict': model_and_loss.module.model.state_dict(),\n 'best_EPE': best_err}, \n is_best, args.save, args.model)\n checkpoint_progress.update(1)\n checkpoint_progress.close()\n offset += 1\n\n if not args.skip_training:\n train_loss, iterations = train(args=args, epoch=epoch, start_iteration=global_iteration, data_loader=train_loader, model=model_and_loss, optimizer=optimizer, logger=train_logger, offset=offset)\n global_iteration += iterations\n offset += 1\n\n # save checkpoint after every validation_frequency number of epochs\n if ((epoch - 1) % args.validation_frequency) == 0:\n checkpoint_progress = tqdm(ncols=100, desc='Saving Checkpoint', position=offset)\n tools.save_checkpoint({ 'arch' : args.model,\n 'epoch': epoch,\n 'state_dict': model_and_loss.module.model.state_dict(),\n 'best_EPE': train_loss}, \n False, args.save, args.model, filename = 'train-checkpoint.pth.tar')\n checkpoint_progress.update(1)\n checkpoint_progress.close()\n\n\n train_logger.add_scalar('seconds per epoch', progress._time() - last_epoch_time, epoch)\n last_epoch_time = progress._time()\n print(\"\\n\")\n"
] | [
[
"torch.mean",
"torch.cuda.manual_seed",
"torch.load",
"numpy.isnan",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.is_available",
"torch.cuda.device_count",
"numpy.array",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gchure/diauxic_evolution | [
"5917d3ae5f9a3a5db0f037e9f7b5768cd1ffe92f",
"5917d3ae5f9a3a5db0f037e9f7b5768cd1ffe92f"
] | [
"code/processing/2021-02-25_NCM3722_RNA_isolation/growth_processing.py",
"code/processing/plate_reader/2021-04-15_NCM3722_glucose_acetate_tecan_comparison/analysis.py"
] | [
"#%%\nimport numpy as np \nimport pandas as pd \nimport diaux.viz \nimport arviz as az\nimport cmdstanpy\nimport altair as alt\nfrom altair_saver import save\ncolors, _ = diaux.viz.altair_style()\nalt.data_transformers.disable_max_rows()\n\n# Define constants of the experiment \nDATE = '2021-02-25'\nSTRAIN = 'NCM3722'\nCONDITION = 'glucose'\nOTHER = 'RNA_isolation'\nDATA_PATH = '../../../data'\n\n# Load the raw data \ndata = pd.read_csv(f'{DATA_PATH}/{DATE}_{STRAIN}_{CONDITION}_{OTHER}/raw/{DATE}_{STRAIN}_{CONDITION}_growth.csv')\n\n# Do minor datacleaning\ndata = data[(data['od_600nm'] >= 0.04) & (data['od_600nm'] <= 0.4)]\n\n# Compute the elapsed time\ndata['clock_time'] = pd.to_datetime(data['clock_time'])\ndata.sort_values(by='clock_time', inplace=True) \ndata['elapsed_time_hr'] = data['clock_time'] - data['clock_time'].values[0]\ndata['rel_od_600nm'] = data['od_600nm'].values / data['od_600nm'].values[0]\ndata['elapsed_time_hr'] = (data['elapsed_time_hr'].astype('timedelta64[m]'))/60\ndata.drop(columns=['clock_time'], inplace=True)\n\ndata.head()\n\n# %%\n# Load the stan model \nsm = cmdstanpy.CmdStanModel(stan_file='../../stan/growth_rate_glm.stan')\n\n# Define the data dictionary\ndata_dict = {'N': len(data),\n 'absorbance': data['od_600nm'].values,\n 'time': data['elapsed_time_hr'].values}\n\n#%% Sample the model\nsamples = sm.sample(data=data_dict)\n\n# Translate samples to an arviz object for inspection\nsamples = az.from_cmdstanpy(posterior=samples)\nsamples_df = samples.posterior.to_dataframe().reset_index()\n\n#%%\n# Save the sampling statistics and the processed growth data\nsamples_df.to_csv(f'{DATA_PATH}/{DATE}_{STRAIN}_{CONDITION}_{OTHER}/processed/{DATE}_{STRAIN}_{CONDITION}_parameter_samples.csv', index=False)\ndata.to_csv(f'{DATA_PATH}/{DATE}_{STRAIN}_{CONDITION}_{OTHER}/processed/{DATE}_{STRAIN}_{CONDITION}_growth.csv', index=False)\n#%%\n# Compute all of the best fits for display, ignoring the homoscedastic error for now. \ntime_range = np.linspace(0, 2.5, 2)\nfit_df = pd.DataFrame()\nfor t in time_range:\n fit = samples_df['absorbance_0'] * np.exp(samples_df['lambda'] * t)\n lower, upper = np.percentile(fit, [2.5, 97.5])\n fit_df = fit_df.append({'elapsed_time_hr': t,\n 'od_600nm_lower': lower,\n 'od_600nm_upper': upper},\n ignore_index=True)\n# %%\n# Plot the growth data \ndata_points = alt.Chart(data, width=600, height=200).mark_point(size=100).encode(\n x=alt.X('elapsed_time_hr:Q', title='elapsed time [inv. hr]',\n scale=alt.Scale(domain=[0, 2.6])),\n y=alt.Y('od_600nm:Q', title='optical density [a.u.]',\n scale=alt.Scale(type='log', domain=[0.04, 0.4]))\n)\n\nfit = alt.Chart(fit_df).mark_area(opacity=0.5).encode(\n x=alt.X('elapsed_time_hr:Q', title='elapsed time [inv. hr]'),\n y=alt.Y('od_600nm_lower:Q', title='optical density [a.u.]',\n scale=alt.Scale(type='log', domain=[0.04, 0.4])),\n y2='od_600nm_upper:Q',\n )\ngrowth_curve = (fit + data_points)\n\n# Plot the parameter estimates\nlambda_posterior = alt.Chart(samples_df, height=250, width=275).mark_bar().encode(\n x=alt.X('lambda:Q', title='growth rate [inv. 
hr]', \n bin=alt.Bin(maxbins=100)),\n y=alt.Y('count()', title='number of samples')\n)\n\nabs0_posterior = alt.Chart(samples_df, height=250, width=275).mark_bar().encode(\n x=alt.X('absorbance_0:Q', title='initial absorbance [a.u.]', \n bin=alt.Bin(maxbins=100)),\n y=alt.Y('count()', title='number of samples')\n)\nposteriors = (lambda_posterior | abs0_posterior)\nlayout = growth_curve & posteriors\nsave(layout,f'./output/{DATE}_{STRAIN}_growth_statistics.png')\nsave(layout,f'./output/{DATE}_{STRAIN}_growth_statistics.pdf')\n\n# %%\n",
"#%%\nimport numpy as np\nimport pandas as pd \nimport altair as alt\nimport scipy.stats\nfrom altair_saver import save\nimport diaux.viz\ncolors, palette = diaux.viz.altair_style()\nalt.data_transformers.disable_max_rows()\n# %%\nDATA_PATH = '../../../../data/plate_reader/2021-04-15_NCM3722_glucose_acetate_tecan_comparison/processed/'\ndata = pd.read_csv(f'{DATA_PATH}/2021-04-15_NCM3722_glucose_acetate_tecan_comparison_tidy.csv')\n\n# Convert time to hours \ndata['time_hr'] = data['time_s'] / 3600\ndata.drop(columns='time_s', inplace=True)\n# Compute the sub\ndfs = []\nfor g, d in data.groupby(['instrument', 'time_hr']):\n avg_blank = d[d['strain']=='blank']['od_600nm'].mean()\n d['od_600nm_sub'] = d['od_600nm'].values - avg_blank\n dfs.append(d)\nsubbed = pd.concat(dfs, sort=False)\nsubbed = subbed[subbed['strain']!='blank']\nsubbed\n# %%\nraw = alt.Chart(subbed).mark_line().encode(\n x=alt.X('time_hr:Q', title='elapsed time [hr]'),\n y=alt.Y('od_600nm_sub:Q', title='optical density [a.u.]',\n scale=alt.Scale(type='log')),\n color=alt.Color('instrument:N', title='tecan instrument'),\n opacity=alt.Opacity('replicate:O', legend=None)\n).facet(column='media')\nraw\nsave(raw, 'output/2021-04-15_glucose_acetate_tecan_comparison_raw.pdf')\n# %%\n\n# Plot the averages\nout = []\nfor media in ['glucose', 'acetate']:\n _data = subbed[subbed['media']==media]\n avg = alt.Chart(_data).mark_line().encode(\n x=alt.X('time_hr:Q', title='elapsed time [hr]'),\n y=alt.Y('mean(od_600nm_sub):Q', title='optical density [a.u.]'),\n # scale=alt.Scale(type='log')),\n color=alt.Color('instrument:N', title='tecan instrument'))\n\n err = alt.Chart(_data).mark_errorband(extent='stdev').encode(\n x=alt.X('time_hr:Q', title='elapsed time [hr]'),\n y=alt.Y('od_600nm_sub:Q', title='optical density [a.u.]'),\n color=alt.Color('instrument:N', title='tecan instrument'))\n\n\n layer = (avg + err).properties(title=f'N-C- + {media}')\n out.append(layer)\n\nresult = out[0] | out[1]\nresult\nsave(result, 'output/2021-04-15_glucose_acetate_tecan_comparison_avg.pdf')\n\n\n\n# %%\n# Consider only the first five hogurs\ngluc_bounds = [1, 4]\nac_bounds = [3, 6]\n\n\nfits = []\nfor media, bounds in zip(['glucose', 'acetate'], [gluc_bounds, ac_bounds]):\n crop = subbed[(subbed['media']==media) & ((subbed['time_hr'] >= bounds[0]) &\n (subbed['time_hr'] <= bounds[1]))]\n\n # Compute the average\n crop = crop.groupby(['instrument', 'time_hr']).mean().reset_index()\n\n # for each instrument, to the regression\n params = {}\n time_range = np.linspace(bounds[0] - 0.5, bounds[1] + 0.5, 200)\n for g, d in crop.groupby(['instrument']):\n popt = scipy.stats.linregress(d['time_hr'], np.log(d['od_600nm_sub']))\n params[g] = {'media': media, 'slope': popt[0], 'inter':popt[1], 'err':popt[-1]}\n fit = np.exp(popt[1] + popt[0] * time_range)\n _df = pd.DataFrame([])\n _df['time_hr'] = time_range \n _df['od_600nm_sub'] = fit\n _df['instrument'] = g\n _df['media'] = media\n fits.append(_df)\n print(f'Estimate for Tecan {g}, {media} medium: λ ≈ {popt[0]:0.3f} ± {popt[-1]:0.3f} per hr') \n\nfits = pd.concat(fits, sort=False)\n\n\n# %%\nout = []\nfor media, bounds in zip(['glucose', 'acetate'], [gluc_bounds, ac_bounds]):\n crop = subbed[(subbed['media']==media) & ((subbed['time_hr'] >= bounds[0]) &\n (subbed['time_hr'] <= bounds[1]))]\n\n # Compute the average\n crop = crop.groupby(['instrument', 'time_hr']).mean().reset_index()\n _fits = fits[fits['media']==media]\n\n points = alt.Chart(crop).mark_point(size=50, opacity=0.5).encode(\n 
x=alt.X('time_hr:Q', title='elapsed time [hr]'),\n y=alt.Y('od_600nm_sub:Q', title='optical density [a.u.]',\n scale=alt.Scale(type='log')),\n color=alt.Color('instrument:N', title='tecan instrument')\n )\n\n fit = alt.Chart(_fits).mark_line().encode(\n x=alt.X('time_hr:Q', title='elapsed time [hr]'),\n y=alt.Y('od_600nm_sub:Q', title='optical density [a.u.]',\n scale=alt.Scale(type='log')),\n color=alt.Color('instrument:N', title='tecan instrument')\n )\n\n layer = (points + fit).resolve_scale(color='independent').properties(title='N-C- + {media}')\n out.append(layer)\nlayer = out[0] | out[1]\nlayer\nsave(layer, './output/2021-04-15_glucose_acetate_tecan_comparison_fits.pdf')\n# %%\nout = []\nfor media in ['glucose', 'acetate']:\n _data = subbed[subbed['media']==media]\n avg = alt.Chart(_data).mark_line(clip=True).encode(\n x=alt.X('time_hr:Q', title='elapsed time [hr]',\n scale=alt.Scale(domain=[0, 6])),\n y=alt.Y('mean(od_600nm_sub):Q', title='optical density [a.u.]'),\n # scale=alt.Scale(type='log')),\n color=alt.Color('instrument:N', title='tecan instrument'))\n\n err = alt.Chart(_data).mark_errorband(extent='stdev', clip=True).encode(\n x=alt.X('time_hr:Q', title='elapsed time [hr]'),\n y=alt.Y('od_600nm_sub:Q', title='optical density [a.u.]'),\n color=alt.Color('instrument:N', title='tecan instrument'))\n\n\n layer = (avg + err).properties(title=f'N-C- + {media}')\n out.append(layer)\n\nresult = out[0] | out[1]\nresult\n# save(result, 'output/2021-04-15_glucose_acetate_tecan_comparison_avg_crop.pdf')\n\n\n\n# %%\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime",
"numpy.linspace",
"numpy.percentile",
"pandas.DataFrame",
"numpy.exp"
],
[
"pandas.concat",
"pandas.read_csv",
"numpy.log",
"numpy.linspace",
"pandas.DataFrame",
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
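For reference, the exponential-growth fit performed in the plate-reader script above reduces to an ordinary least-squares regression of log(OD) against time over a hand-picked window, followed by exponentiation to overlay the fitted curve. Below is a minimal, self-contained sketch of that step; the synthetic data, random seed, and fit window are illustrative assumptions, not values taken from this row.

# Sketch of the growth-rate fit used in the script above (toy data, not the real measurements).
import numpy as np
import pandas as pd
import scipy.stats

rng = np.random.default_rng(0)
t = np.linspace(0, 6, 61)                                    # elapsed time [hr]
od = 0.04 * np.exp(0.62 * t) * rng.normal(1, 0.02, t.size)   # synthetic blank-subtracted OD
df = pd.DataFrame({'time_hr': t, 'od_600nm_sub': od})

bounds = [1, 4]                                              # fit window in hours (assumed)
crop = df[(df['time_hr'] >= bounds[0]) & (df['time_hr'] <= bounds[1])]
fit = scipy.stats.linregress(crop['time_hr'], np.log(crop['od_600nm_sub']))
print(f'λ ≈ {fit.slope:0.3f} ± {fit.stderr:0.3f} per hr')

time_range = np.linspace(bounds[0] - 0.5, bounds[1] + 0.5, 200)
od_fit = np.exp(fit.intercept + fit.slope * time_range)      # curve to overlay on the data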
siagholami/aws-documentation | [
"2d06ee9011f3192b2ff38c09f04e01f1ea9e0191"
] | [
"documents/aws-deep-learning-amis/examples/keras-mxnet/cifar10_resnet_multi_gpu.py"
] | [
"\"\"\"Trains a ResNet on the CIFAR10 dataset.\n\nResNet v1\n[a] Deep Residual Learning for Image Recognition\nhttps://arxiv.org/pdf/1512.03385.pdf\n\nResNet v2\n[b] Identity Mappings in Deep Residual Networks\nhttps://arxiv.org/pdf/1603.05027.pdf\n\"\"\"\n\nfrom __future__ import print_function\nimport keras\nfrom keras.layers import Dense, Conv2D, BatchNormalization, Activation\nfrom keras.layers import AveragePooling2D, Input, Flatten\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras.callbacks import ReduceLROnPlateau\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.regularizers import l2\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.datasets import cifar10\nimport numpy as np\nimport os\nfrom keras.utils import multi_gpu_model\n\n# Training parameters\nbatch_size = 32 # orig paper trained all networks with batch_size=128\nepochs = 200\ndata_augmentation = True\nnum_classes = 10\n\n# Subtracting pixel mean improves accuracy\nsubtract_pixel_mean = True\n\n# Model parameter\n# ----------------------------------------------------------------------------\n# | | 200-epoch | Orig Paper| 200-epoch | Orig Paper| sec/epoch\n# Model | n | ResNet v1 | ResNet v1 | ResNet v2 | ResNet v2 | GTX1080Ti\n# |v1(v2)| %Accuracy | %Accuracy | %Accuracy | %Accuracy | v1 (v2)\n# ----------------------------------------------------------------------------\n# ResNet20 | 3 (2)| 92.16 | 91.25 | ----- | ----- | 35 (---)\n# ResNet32 | 5(NA)| 92.46 | 92.49 | NA | NA | 50 ( NA)\n# ResNet44 | 7(NA)| 92.50 | 92.83 | NA | NA | 70 ( NA)\n# ResNet56 | 9 (6)| 92.71 | 93.03 | 93.01 | NA | 90 (100)\n# ResNet110 |18(12)| 92.65 | 93.39+-.16| 93.15 | 93.63 | 165(180)\n# ResNet164 |27(18)| ----- | 94.07 | ----- | 94.54 | ---(---)\n# ResNet1001| (111)| ----- | 92.39 | ----- | 95.08+-.14| ---(---)\n# ---------------------------------------------------------------------------\nn = 3\n\n# Model version\n# Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2)\nversion = 1\n\n# Computed depth from supplied model parameter n\nif version == 1:\n depth = n * 6 + 2\nelif version == 2:\n depth = n * 9 + 2\n\n# Model name, depth and version\nmodel_type = 'ResNet%dv%d' % (depth, version)\n\n# Load the CIFAR10 data.\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n# Input image dimensions.\ninput_shape = x_train.shape[1:]\n\n# Normalize data.\nx_train = x_train.astype('float32') / 255\nx_test = x_test.astype('float32') / 255\n\n# If subtract pixel mean is enabled\nif subtract_pixel_mean:\n x_train_mean = np.mean(x_train, axis=0)\n x_train -= x_train_mean\n x_test -= x_train_mean\n\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\nprint('y_train shape:', y_train.shape)\n\n# Convert class vectors to binary class matrices.\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\n\ndef lr_schedule(epoch):\n \"\"\"Learning Rate Schedule\n\n Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.\n Called automatically every epoch as part of callbacks during training.\n\n # Arguments\n epoch (int): The number of epochs\n\n # Returns\n lr (float32): learning rate\n \"\"\"\n lr = 1e-3\n if epoch > 180:\n lr *= 0.5e-3\n elif epoch > 160:\n lr *= 1e-3\n elif epoch > 120:\n lr *= 1e-2\n elif epoch > 80:\n lr *= 1e-1\n print('Learning rate: ', lr)\n 
return lr\n\n\ndef resnet_layer(inputs,\n num_filters=16,\n kernel_size=3,\n strides=1,\n activation='relu',\n batch_normalization=True,\n conv_first=True):\n \"\"\"2D Convolution-Batch Normalization-Activation stack builder\n\n # Arguments\n inputs (tensor): input tensor from input image or previous layer\n num_filters (int): Conv2D number of filters\n kernel_size (int): Conv2D square kernel dimensions\n strides (int): Conv2D square stride dimensions\n activation (string): activation name\n batch_normalization (bool): whether to include batch normalization\n conv_first (bool): conv-bn-activation (True) or\n activation-bn-conv (False)\n\n # Returns\n x (tensor): tensor as input to the next layer\n \"\"\"\n conv = Conv2D(num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4))\n\n x = inputs\n if conv_first:\n x = conv(x)\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n else:\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n x = conv(x)\n return x\n\n\ndef resnet_v1(input_shape, depth, num_classes=10):\n \"\"\"ResNet Version 1 Model builder [a]\n\n Stacks of 2 x (3 x 3) Conv2D-BN-ReLU\n Last ReLU is after the shortcut connection.\n At the beginning of each stage, the feature map size is halved (downsampled)\n by a convolutional layer with strides=2, while the number of filters is\n doubled. Within each stage, the layers have the same number filters and the\n same number of filters.\n Features maps sizes:\n stage 0: 32x32, 16\n stage 1: 16x16, 32\n stage 2: 8x8, 64\n The Number of parameters is approx the same as Table 6 of [a]:\n ResNet20 0.27M\n ResNet32 0.46M\n ResNet44 0.66M\n ResNet56 0.85M\n ResNet110 1.7M\n\n # Arguments\n input_shape (tensor): shape of input image tensor\n depth (int): number of core convolutional layers\n num_classes (int): number of classes (CIFAR10 has 10)\n\n # Returns\n model (Model): Keras model instance\n \"\"\"\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters,\n strides=strides)\n y = resnet_layer(inputs=y,\n num_filters=num_filters,\n activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model\n\n\ndef resnet_v2(input_shape, depth, num_classes=10):\n \"\"\"ResNet Version 2 Model builder [b]\n\n Stacks of (1 x 1)-(3 x 3)-(1 x 1) 
BN-ReLU-Conv2D or also known as\n bottleneck layer\n First shortcut connection per layer is 1 x 1 Conv2D.\n Second and onwards shortcut connection is identity.\n At the beginning of each stage, the feature map size is halved (downsampled)\n by a convolutional layer with strides=2, while the number of filter maps is\n doubled. Within each stage, the layers have the same number filters and the\n same filter map sizes.\n Features maps sizes:\n conv1 : 32x32, 16\n stage 0: 32x32, 64\n stage 1: 16x16, 128\n stage 2: 8x8, 256\n\n # Arguments\n input_shape (tensor): shape of input image tensor\n depth (int): number of core convolutional layers\n num_classes (int): number of classes (CIFAR10 has 10)\n\n # Returns\n model (Model): Keras model instance\n \"\"\"\n if (depth - 2) % 9 != 0:\n raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')\n # Start model definition.\n num_filters_in = 16\n num_res_blocks = int((depth - 2) / 9)\n\n inputs = Input(shape=input_shape)\n # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths\n x = resnet_layer(inputs=inputs,\n num_filters=num_filters_in,\n conv_first=True)\n\n # Instantiate the stack of residual units\n for stage in range(3):\n for res_block in range(num_res_blocks):\n activation = 'relu'\n batch_normalization = True\n strides = 1\n if stage == 0:\n num_filters_out = num_filters_in * 4\n if res_block == 0: # first layer and first stage\n activation = None\n batch_normalization = False\n else:\n num_filters_out = num_filters_in * 2\n if res_block == 0: # first layer but not first stage\n strides = 2 # downsample\n\n # bottleneck residual unit\n y = resnet_layer(inputs=x,\n num_filters=num_filters_in,\n kernel_size=1,\n strides=strides,\n activation=activation,\n batch_normalization=batch_normalization,\n conv_first=False)\n y = resnet_layer(inputs=y,\n num_filters=num_filters_in,\n conv_first=False)\n y = resnet_layer(inputs=y,\n num_filters=num_filters_out,\n kernel_size=1,\n conv_first=False)\n if res_block == 0:\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters_out,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n\n num_filters_in = num_filters_out\n\n # Add classifier on top.\n # v2 has BN-ReLU before Pooling\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model\n\n\nif version == 2:\n model = resnet_v2(input_shape=input_shape, depth=depth)\nelse:\n model = resnet_v1(input_shape=input_shape, depth=depth)\n\n# use 4 gpus to train the model\nmodel = multi_gpu_model(model, gpus=4)\nmodel.compile(loss='categorical_crossentropy',\n optimizer=Adam(lr=lr_schedule(0)),\n metrics=['accuracy'])\nmodel.summary()\nprint(model_type)\n\n# Prepare model model saving directory.\nsave_dir = os.path.join(os.getcwd(), 'saved_models')\nmodel_name = 'cifar10_%s_model.{epoch:03d}.h5' % model_type\nif not os.path.isdir(save_dir):\n os.makedirs(save_dir)\nfilepath = os.path.join(save_dir, model_name)\n\n# Prepare callbacks for model saving and for learning rate adjustment.\ncheckpoint = ModelCheckpoint(filepath=filepath,\n monitor='val_acc',\n verbose=1,\n save_best_only=True)\n\nlr_scheduler = LearningRateScheduler(lr_schedule)\n\nlr_reducer = 
ReduceLROnPlateau(factor=np.sqrt(0.1),\n cooldown=0,\n patience=5,\n min_lr=0.5e-6)\n\ncallbacks = [checkpoint, lr_reducer, lr_scheduler]\n\n# Run training, with or without data augmentation.\nif not data_augmentation:\n print('Not using data augmentation.')\n model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_test, y_test),\n shuffle=True,\n callbacks=callbacks)\nelse:\n print('Using real-time data augmentation.')\n # This will do preprocessing and realtime data augmentation:\n datagen = ImageDataGenerator(\n # set input mean to 0 over the dataset\n featurewise_center=False,\n # set each sample mean to 0\n samplewise_center=False,\n # divide inputs by std of dataset\n featurewise_std_normalization=False,\n # divide each input by its std\n samplewise_std_normalization=False,\n # apply ZCA whitening\n zca_whitening=False,\n # randomly rotate images in the range (deg 0 to 180)\n rotation_range=0,\n # randomly shift images horizontally\n width_shift_range=0.1,\n # randomly shift images vertically\n height_shift_range=0.1,\n # randomly flip images\n horizontal_flip=True,\n # randomly flip images\n vertical_flip=False)\n\n # Compute quantities required for featurewise normalization\n # (std, mean, and principal components if ZCA whitening is applied).\n datagen.fit(x_train)\n\n # Fit the model on the batches generated by datagen.flow().\n model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),\n validation_data=(x_test, y_test),\n epochs=epochs, verbose=1, workers=4,\n callbacks=callbacks)\n\n# Score trained model.\nscores = model.evaluate(x_test, y_test, verbose=1)\nprint('Test loss:', scores[0])\nprint('Test accuracy:', scores[1])\n"
] | [
[
"numpy.mean",
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
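As a quick sanity check of the depth bookkeeping in the CIFAR-10 ResNet script above (depth = 6n + 2 for v1, 9n + 2 for v2), the toy snippet below reproduces the values listed in the script's model table; the helper name is ours, not part of the original file.

# Depth formulas used by the script above: v1 -> 6n + 2, v2 -> 9n + 2.
def resnet_depth(n, version=1):
    return n * 6 + 2 if version == 1 else n * 9 + 2

for n in (3, 5, 7, 9, 18):
    print(f"n={n}: v1 depth={resnet_depth(n, 1)}, v2 depth={resnet_depth(n, 2)}")
# n=3  -> v1 depth 20  (ResNet20)
# n=9  -> v1 depth 56  (ResNet56)
# n=18 -> v1 depth 110 (ResNet110)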
ianmcook/strata-ny-2019 | [
"715d1db65ed64f5be700790fef938802375159e2"
] | [
"2_machine_learning/61_tf_confusion_matrix.py"
] | [
"# Copyright 2019 Cloudera, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# # Confusion matrix of TF model predictions vs. labels\n\n# This example computes and displays a confusion matrix\n# comparing a TensorFlow classification model's\n# predictions to the true labels, using data from the\n# test (evaluation) set.\n\n# Run the code in `60_tf_image_classify.py` \n# before running the code below.\n\n\n# ## Generate actual and predicted labels for the test set\n\ntest_pred = label_encoder.inverse_transform(\n model.predict_classes(test_x)\n)\n\ntest_actual = label_encoder.inverse_transform(\n test_y\n)\n\n\n# ## Compute the confusion matrix\n\n# TensorFlow has a function \n# [`tf.confusion_matrix`](https://www.tensorflow.org/api_docs/python/tf/confusion_matrix)\n# for creating a confusion matrix, but it's often easier\n# to use scikit-learn's \n# [`sklearn.metrics.confusion_matrix`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html)\n# function\n\n# Import the required module\nfrom sklearn import metrics\n\n# Compute the confusion matrix\nconfusion = metrics.confusion_matrix(\n y_true=test_actual,\n y_pred=test_pred,\n labels=['King', 'Queen', 'Rook', 'Bishop', 'Knight', 'Pawn']\n)\n\n\n# ## Display and interpret the confusion matrix\n\n# Print the confusion matrix\nprint(confusion)\n\n# - Rows represent the true labels\n# - Columns represent predicted labels.\n\n# The scikit-learn website provides\n# [sample code](http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html)\n# to visualize a confusion matrix in a plot. You could\n# use the function defined in that code to plot the \n# confusion matrix computed here.\n"
] | [
[
"sklearn.metrics.confusion_matrix"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
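The sklearn call in the script above generalizes to any label set; the tiny self-contained illustration below (with made-up chess-piece labels, a subset of those used in the row) shows that rows of the result are true classes and columns are predictions, in the order given by `labels`.

# Toy illustration of sklearn.metrics.confusion_matrix as used above.
from sklearn import metrics

labels = ['King', 'Queen', 'Rook']                   # subset of the classes used above
y_true = ['King', 'Queen', 'Rook', 'Rook', 'King']   # made-up ground truth
y_pred = ['King', 'Rook',  'Rook', 'Rook', 'Queen']  # made-up predictions
print(metrics.confusion_matrix(y_true=y_true, y_pred=y_pred, labels=labels))
# [[1 1 0]
#  [0 0 1]
#  [0 0 2]]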
aasseman/pytorchpipe | [
"9cb17271666061cb19fe24197ecd5e4c8d32c5da",
"9cb17271666061cb19fe24197ecd5e4c8d32c5da"
] | [
"ptp/components/models/vision/lenet5.py",
"ptp/workers/trainer.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) IBM Corporation 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__author__ = \"Tomasz Kornuta & Vincent Marois\"\n\n\nimport torch\n\nfrom ptp.components.models.model import Model\nfrom ptp.data_types.data_definition import DataDefinition\n\n\nclass LeNet5(Model):\n \"\"\"\n A classical LeNet-5 model for MNIST digits classification. \n \"\"\" \n def __init__(self, name, config):\n \"\"\"\n Initializes the ``LeNet5`` model, creates the required layers.\n\n :param name: Name of the model (taken from the configuration file).\n\n :param config: Parameters read from configuration file.\n :type config: ``ptp.configuration.ConfigInterface``\n\n \"\"\"\n super(LeNet5, self).__init__(name, LeNet5, config)\n\n # Get key mappings.\n self.key_inputs = self.stream_keys[\"inputs\"]\n self.key_predictions = self.stream_keys[\"predictions\"]\n\n # Retrieve prediction size from globals.\n self.prediction_size = self.globals[\"prediction_size\"]\n\n # Create the LeNet-5 layers.\n self.conv1 = torch.nn.Conv2d(1, 6, kernel_size=(5, 5))\n self.maxpool1 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)\n self.conv2 = torch.nn.Conv2d(6, 16, kernel_size=(5, 5))\n self.maxpool2 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)\n self.conv3 = torch.nn.Conv2d(16, 120, kernel_size=(5, 5))\n self.linear1 = torch.nn.Linear(120, 84)\n self.linear2 = torch.nn.Linear(84, self.prediction_size)\n\n def input_data_definitions(self):\n \"\"\" \n Function returns a dictionary with definitions of input data that are required by the component.\n\n :return: dictionary containing input data definitions (each of type :py:class:`ptp.utils.DataDefinition`).\n \"\"\"\n return {\n self.key_inputs: DataDefinition([-1, 1, 32, 32], [torch.Tensor], \"Batch of images [BATCH_SIZE x IMAGE_DEPTH x IMAGE_HEIGHT x IMAGE WIDTH]\"),\n }\n\n\n def output_data_definitions(self):\n \"\"\" \n Function returns a dictionary with definitions of output data produced the component.\n\n :return: dictionary containing output data definitions (each of type :py:class:`ptp.utils.DataDefinition`).\n \"\"\"\n return {\n self.key_predictions: DataDefinition([-1, self.prediction_size], [torch.Tensor], \"Batch of predictions, each represented as probability distribution over classes [BATCH_SIZE x PREDICTION_SIZE]\")\n }\n\n def forward(self, data_streams):\n \"\"\"\n Main forward pass of the ``LeNet5`` model.\n\n :param data_streams: DataStreams({'images',**}), where:\n\n - images: [batch_size, num_channels, width, height]\n\n :type data_streams: ``miprometheus.utils.DataStreams``\n\n :return: Predictions [batch_size, num_classes]\n\n \"\"\"\n # Add noise to weights\n #for _, param in self.named_parameters():\n # if param.requires_grad:\n # #print (name, param.data)\n # #noise = -torch.randn(param.data.shape)*0.3\n # noise = 0.3\n # param.data = param.data * (1 + noise)\n # #print (name, param.data)\n\n\n # Unpack DataStreams.\n img = data_streams[self.key_inputs]\n\n # Pass inputs 
through layers.\n x = self.conv1(img)\n x = torch.nn.functional.relu(x)\n x = self.maxpool1(x)\n x = self.conv2(x)\n x = torch.nn.functional.relu(x)\n x = self.maxpool2(x)\n x = self.conv3(x)\n x = torch.nn.functional.relu(x)\n x = x.view(-1, 120)\n x = self.linear1(x)\n x = torch.nn.functional.relu(x)\n x = self.linear2(x)\n\n # Log softmax.\n predictions = torch.nn.functional.log_softmax(x, dim=1)\n # Add predictions to datadict.\n data_streams.publish({self.key_predictions: predictions})\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) IBM Corporation 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__author__ = \"Vincent Marois, Tomasz Kornuta\"\n\nfrom os import path,makedirs\nimport yaml\nimport torch\nfrom time import sleep\nfrom datetime import datetime\n\nimport ptp.configuration.config_parsing as config_parse\nimport ptp.utils.logger as logging\n\nfrom ptp.workers.worker import Worker\n\nfrom ptp.application.task_manager import TaskManager\nfrom ptp.application.pipeline_manager import PipelineManager\n\nfrom ptp.utils.statistics_collector import StatisticsCollector\nfrom ptp.utils.statistics_aggregator import StatisticsAggregator\n\n\nclass Trainer(Worker):\n \"\"\"\n Base class for the trainers.\n\n Iterates over epochs on the dataset.\n\n All other types of trainers (e.g. ``OnlineTrainer`` & ``OfflineTrainer``) should subclass it.\n\n \"\"\"\n\n def __init__(self, name, class_type):\n \"\"\"\n Base constructor for all trainers:\n\n - Adds default trainer command line arguments\n\n :param name: Name of the worker\n :type name: str\n\n :param class_type: Class type of the component.\n\n \"\"\" \n # Call base constructor to set up app state, registry and add default arguments.\n super(Trainer, self).__init__(name, class_type)\n\n # Add arguments to the specific parser.\n # These arguments will be shared by all basic trainers.\n self.parser.add_argument(\n '--tensorboard',\n action='store',\n dest='tensorboard', choices=[0, 1, 2],\n type=int,\n help=\"If present, enable logging to TensorBoard. 
Available log levels:\\n\"\n \"0: Log the collected statistics.\\n\"\n \"1: Add the histograms of the model's biases & weights (Warning: Slow).\\n\"\n \"2: Add the histograms of the model's biases & weights gradients \"\n \"(Warning: Even slower).\")\n\n self.parser.add_argument(\n '--saveall',\n dest='save_intermediate',\n action='store_true',\n help='Setting to true results in saving intermediate models during training (DEFAULT: False)')\n\n self.parser.add_argument(\n '--training',\n dest='training_section_name',\n type=str,\n default=\"training\",\n help='Name of the section defining the training procedure (DEFAULT: training)')\n\n self.parser.add_argument(\n '--validation',\n dest='validation_section_name',\n type=str,\n default=\"validation\",\n help='Name of the section defining the validation procedure (DEFAULT: validation)')\n\n\n def setup_experiment(self):\n \"\"\"\n Sets up experiment of all trainers:\n\n - Calls base class setup_experiment to parse the command line arguments,\n\n - Loads the config file(s)\n\n - Set up the log directory path\n\n - Add a ``FileHandler`` to the logger\n\n - Set random seeds\n\n - Creates the pipeline consisting of many components\n\n - Creates training task manager\n\n - Handles curriculum learning if indicated\n\n - Creates validation task manager\n\n - Set optimizer\n\n - Performs testing of compatibility of both training and validation tasks and created pipeline.\n\n \"\"\"\n # Call base method to parse all command line arguments and add default sections.\n super(Trainer, self).setup_experiment()\n\n # \"Pass\" configuration parameters from the \"default_training\" section to training section indicated by the section_name.\n self.config.add_default_params({ self.app_state.args.training_section_name : self.config['default_training'].to_dict()} )\n self.config.del_default_params('default_training')\n \n # \"Pass\" configuration parameters from the \"default_validation\" section to validation section indicated by the section_name.\n self.config.add_default_params({ self.app_state.args.validation_section_name: self.config['default_validation'].to_dict()} )\n self.config.del_default_params('default_validation')\n\n\n # Check the presence of the CUDA-compatible devices.\n if self.app_state.args.use_gpu and (torch.cuda.device_count() == 0):\n self.logger.error(\"Cannot use GPU as there are no CUDA-compatible devices present in the system!\")\n exit(-1)\n\n # Check if config file was selected.\n if self.app_state.args.config == '':\n print('Please pass configuration file(s) as --c parameter')\n exit(-2)\n\n # Split and make them absolute.\n root_configs = self.app_state.args.config.replace(\" \", \"\").split(',')\n # If there are - expand them to absolute paths.\n abs_root_configs = [path.expanduser(config) for config in root_configs]\n \n # Get the list of configurations which need to be loaded.\n configs_to_load = config_parse.recurrent_config_parse(abs_root_configs, [], self.app_state.absolute_config_path)\n\n # Read the YAML files one by one - but in reverse order -> overwrite the first indicated config(s)\n config_parse.reverse_order_config_load(self.config, configs_to_load)\n\n # -> At this point, the Param Registry contains the configuration loaded (and overwritten) from several files.\n # Log the resulting training configuration.\n conf_str = 'Loaded (initial) configuration:\\n'\n conf_str += '='*80 + '\\n'\n conf_str += yaml.safe_dump(self.config.to_dict(), default_flow_style=False)\n conf_str += '='*80 + '\\n'\n print(conf_str)\n\n # Get 
training section.\n try:\n tsn = self.app_state.args.training_section_name\n self.config_training = self.config[tsn]\n # We must additionally check if it is None - weird behvaiour when using default value.\n if self.config_training is None:\n raise KeyError()\n except KeyError:\n print(\"Error: Couldn't retrieve the training section '{}' from the loaded configuration\".format(tsn))\n exit(-1)\n\n # Get training task type.\n try:\n training_task_type = self.config_training['task']['type']\n except KeyError:\n print(\"Error: Couldn't retrieve the task 'type' from the training section '{}' in the loaded configuration\".format(tsn))\n exit(-1)\n\n # Get validation section.\n try:\n vsn = self.app_state.args.validation_section_name\n self.config_validation = self.config[vsn]\n if self.config_validation is None:\n raise KeyError()\n except KeyError:\n print(\"Error: Couldn't retrieve the validation section '{}' from the loaded configuration\".format(vsn))\n exit(-1)\n\n # Get validation task type.\n try:\n _ = self.config_validation['task']['type']\n except KeyError:\n print(\"Error: Couldn't retrieve the task 'type' from the validation section '{}' in the loaded configuration\".format(vsn))\n exit(-1)\n\n # Get pipeline section.\n try:\n psn = self.app_state.args.pipeline_section_name\n self.config_pipeline = self.config[psn]\n if self.config_pipeline is None:\n raise KeyError()\n except KeyError:\n print(\"Error: Couldn't retrieve the pipeline section '{}' from the loaded configuration\".format(psn))\n exit(-1)\n\n # Get pipeline name.\n try:\n pipeline_name = self.config_pipeline['name']\n except KeyError:\n # Using name of the first configuration file from command line.\n basename = path.basename(root_configs[0])\n # Take config filename without extension.\n pipeline_name = path.splitext(basename)[0] \n # Set pipeline name, so processor can use it afterwards.\n self.config_pipeline.add_config_params({'name': pipeline_name})\n\n # Prepare the output path for logging\n while True: # Dirty fix: if log_dir already exists, wait for 1 second and try again\n try:\n time_str = '{0:%Y%m%d_%H%M%S}'.format(datetime.now())\n if self.app_state.args.exptag != '':\n time_str = time_str + \"_\" + self.app_state.args.exptag\n self.app_state.log_dir = path.expanduser(self.app_state.args.expdir) + '/' + training_task_type + '/' + pipeline_name + '/' + time_str + '/'\n # Lowercase dir.\n self.app_state.log_dir = self.app_state.log_dir.lower()\n makedirs(self.app_state.log_dir, exist_ok=False)\n except FileExistsError:\n sleep(1)\n else:\n break\n\n # Set log dir.\n self.app_state.log_file = self.app_state.log_dir + 'trainer.log'\n # Initialize logger in app state.\n self.app_state.logger = logging.initialize_logger(\"AppState\")\n # Add handlers for the logfile to worker logger.\n logging.add_file_handler_to_logger(self.logger)\n self.logger.info(\"Logger directory set to: {}\".format(self.app_state.log_dir))\n\n # Set cpu/gpu types.\n self.app_state.set_types()\n\n # Models dir.\n self.checkpoint_dir = self.app_state.log_dir + 'checkpoints/'\n makedirs(self.checkpoint_dir, exist_ok=False)\n\n # Set random seeds in the training section.\n self.set_random_seeds('training', self.config_training)\n\n # Total number of detected errors.\n errors =0\n\n ################# TRAINING PROBLEM ################# \n\n # Build training task manager.\n self.training = TaskManager('training', self.config_training) \n errors += self.training.build()\n \n # parse the curriculum learning section in the loaded configuration.\n if 
'curriculum_learning' in self.config_training:\n\n # Initialize curriculum learning - with values from loaded configuration.\n self.training.task.curriculum_learning_initialize(self.config_training['curriculum_learning'])\n\n # If the 'must_finish' key is not present in config then then it will be finished by default\n self.config_training['curriculum_learning'].add_default_params({'must_finish': True})\n\n self.must_finish_curriculum = self.config_training['curriculum_learning']['must_finish']\n self.logger.info(\"Curriculum Learning activated\")\n\n else:\n # If not using curriculum learning then it does not have to be finished.\n self.must_finish_curriculum = False\n self.curric_done = True\n\n ################# VALIDATION PROBLEM ################# \n \n # Build validation task manager.\n self.validation = TaskManager('validation', self.config_validation)\n errors += self.validation.build()\n\n ###################### PIPELINE ######################\n \n # Build the pipeline using the loaded configuration.\n self.pipeline = PipelineManager(pipeline_name, self.config_pipeline)\n errors += self.pipeline.build()\n\n # Check errors.\n if errors > 0:\n self.logger.error('Found {} errors, terminating execution'.format(errors))\n exit(-2)\n\n # Show pipeline.\n summary_str = self.pipeline.summarize_all_components_header()\n summary_str += self.training.task.summarize_io(\"training\")\n summary_str += self.validation.task.summarize_io(\"validation\")\n summary_str += self.pipeline.summarize_all_components()\n self.logger.info(summary_str)\n \n # Handshake definitions.\n self.logger.info(\"Handshaking training pipeline\")\n defs_training = self.training.task.output_data_definitions()\n errors += self.pipeline.handshake(defs_training)\n\n self.logger.info(\"Handshaking validation pipeline\")\n defs_valid = self.validation.task.output_data_definitions()\n errors += self.pipeline.handshake(defs_valid)\n\n # Check errors.\n if errors > 0:\n self.logger.error('Found {} errors, terminating execution'.format(errors))\n exit(-2)\n\n ################## MODEL LOAD/FREEZE #################\n\n # Load the pretrained models params from checkpoint.\n try: \n # Check command line arguments, then check load option in config.\n if self.app_state.args.load_checkpoint != \"\":\n pipeline_name = self.app_state.args.load_checkpoint\n msg = \"command line (--load)\"\n elif \"load\" in self.config_pipeline:\n pipeline_name = self.config_pipeline['load']\n msg = \"'pipeline' section of the configuration file\"\n else:\n pipeline_name = \"\"\n # Try to load the model.\n if pipeline_name != \"\":\n if path.isfile(pipeline_name):\n # Load parameters from checkpoint.\n self.pipeline.load(pipeline_name)\n else:\n raise Exception(\"Couldn't load the checkpoint {} indicated in the {}: file does not exist\".format(pipeline_name, msg))\n # If we succeeded, we do not want to load the models from the file anymore!\n else:\n # Try to load the models parameters - one by one, if set so in the configuration file.\n self.pipeline.load_models()\n\n except KeyError:\n self.logger.error(\"File {} indicated in the {} seems not to be a valid model checkpoint\".format(pipeline_name, msg))\n exit(-5)\n except Exception as e:\n self.logger.error(e)\n # Exit by following the logic: if user wanted to load the model but failed, then continuing the experiment makes no sense.\n exit(-6)\n\n # Finally, freeze the models (that the user wants to freeze).\n self.pipeline.freeze_models()\n\n # Log the model summaries.\n summary_str = 
self.pipeline.summarize_models_header()\n summary_str += self.pipeline.summarize_models()\n self.logger.info(summary_str)\n\n # Move the models in the pipeline to GPU.\n if self.app_state.args.use_gpu:\n self.pipeline.cuda() \n\n ################# OPTIMIZER ################# \n\n # Set the optimizer.\n optimizer_conf = dict(self.config_training['optimizer'])\n optimizer_type = optimizer_conf['type']\n del optimizer_conf['type']\n\n # Check if there are any models in the pipeline.\n if len(list(filter(lambda p: p.requires_grad, self.pipeline.parameters()))) == 0:\n self.logger.error('Cannot proceed with training, as there are no trainable models in the pipeline (or all models are frozen)')\n exit(-7)\n\n # Instantiate the optimizer and filter the model parameters based on if they require gradients.\n self.optimizer = getattr(torch.optim, optimizer_type)(\n filter(lambda p: p.requires_grad, self.pipeline.parameters()), **optimizer_conf)\n\n log_str = 'Optimizer:\\n' + '='*80 + \"\\n\"\n log_str += \" Type: \" + optimizer_type + \"\\n\"\n log_str += \" Params: {}\".format(optimizer_conf)\n\n self.logger.info(log_str)\n\n def add_statistics(self, stat_col):\n \"\"\"\n Calls base method and adds epoch statistics to ``StatisticsCollector``.\n\n :param stat_col: ``StatisticsCollector``.\n\n \"\"\"\n # Add loss and episode.\n super(Trainer, self).add_statistics(stat_col)\n\n # Add default statistics with formatting.\n stat_col.add_statistics('epoch', '{:02d}')\n\n\n def add_aggregators(self, stat_agg):\n \"\"\"\n Adds basic aggregators to to ``StatisticsAggregator`` and extends them with: epoch.\n\n :param stat_agg: ``StatisticsAggregator``.\n\n \"\"\"\n # Add basic aggregators.\n super(Trainer, self).add_aggregators(stat_agg)\n\n # add 'aggregators' for the epoch.\n stat_agg.add_aggregator('epoch', '{:02d}')\n\n\n def initialize_statistics_collection(self):\n \"\"\"\n - Initializes all ``StatisticsCollectors`` and ``StatisticsAggregators`` used by a given worker: \\\n\n - For training statistics (adds the statistics of the model & task),\n - For validation statistics (adds the statistics of the model & task).\n\n - Creates the output files (csv).\n\n \"\"\"\n # TRAINING.\n # Create statistics collector for training.\n self.training_stat_col = StatisticsCollector()\n self.add_statistics(self.training_stat_col)\n self.training.task.add_statistics(self.training_stat_col)\n self.pipeline.add_statistics(self.training_stat_col)\n # Create the csv file to store the training statistics.\n self.training_batch_stats_file = self.training_stat_col.initialize_csv_file(self.app_state.log_dir, 'training_statistics.csv')\n\n # Create statistics aggregator for training.\n self.training_stat_agg = StatisticsAggregator()\n self.add_aggregators(self.training_stat_agg)\n self.training.task.add_aggregators(self.training_stat_agg)\n self.pipeline.add_aggregators(self.training_stat_agg)\n # Create the csv file to store the training statistic aggregations.\n self.training_set_stats_file = self.training_stat_agg.initialize_csv_file(self.app_state.log_dir, 'training_set_agg_statistics.csv')\n\n # VALIDATION.\n # Create statistics collector for validation.\n self.validation_stat_col = StatisticsCollector()\n self.add_statistics(self.validation_stat_col)\n self.validation.task.add_statistics(self.validation_stat_col)\n self.pipeline.add_statistics(self.validation_stat_col)\n # Create the csv file to store the validation statistics.\n self.validation_batch_stats_file = 
self.validation_stat_col.initialize_csv_file(self.app_state.log_dir, 'validation_statistics.csv')\n\n # Create statistics aggregator for validation.\n self.validation_stat_agg = StatisticsAggregator()\n self.add_aggregators(self.validation_stat_agg)\n self.validation.task.add_aggregators(self.validation_stat_agg)\n self.pipeline.add_aggregators(self.validation_stat_agg)\n # Create the csv file to store the validation statistic aggregations.\n self.validation_set_stats_file = self.validation_stat_agg.initialize_csv_file(self.app_state.log_dir, 'validation_set_agg_statistics.csv')\n\n\n def finalize_statistics_collection(self):\n \"\"\"\n Finalizes the statistics collection by closing the csv files.\n\n \"\"\"\n # Close all files.\n self.training_batch_stats_file.close()\n self.training_set_stats_file.close()\n self.validation_batch_stats_file.close()\n self.validation_set_stats_file.close()\n\n\n def initialize_tensorboard(self):\n \"\"\"\n Initializes the TensorBoard writers, and log directories.\n\n \"\"\"\n # Create TensorBoard outputs - if TensorBoard is supposed to be used.\n if self.app_state.args.tensorboard is not None:\n from tensorboardX import SummaryWriter\n self.training_batch_writer = SummaryWriter(self.app_state.log_dir + '/training')\n self.training_stat_col.initialize_tensorboard(self.training_batch_writer)\n\n self.training_set_writer = SummaryWriter(self.app_state.log_dir + '/training_set_agg')\n self.training_stat_agg.initialize_tensorboard(self.training_set_writer)\n \n self.validation_batch_writer = SummaryWriter(self.app_state.log_dir + '/validation')\n self.validation_stat_col.initialize_tensorboard(self.validation_batch_writer)\n\n self.validation_set_writer = SummaryWriter(self.app_state.log_dir + '/validation_set_agg')\n self.validation_stat_agg.initialize_tensorboard(self.validation_set_writer)\n else:\n self.training_batch_writer = None\n self.training_set_writer = None\n self.validation_batch_writer = None\n self.validation_set_writer = None\n\n def finalize_tensorboard(self):\n \"\"\" \n Finalizes the operation of TensorBoard writers by closing them.\n \"\"\"\n # Close the TensorBoard writers.\n if self.training_batch_writer is not None:\n self.training_batch_writer.close()\n if self.training_set_writer is not None:\n self.training_set_writer.close()\n if self.validation_batch_writer is not None:\n self.validation_batch_writer.close()\n if self.validation_set_writer is not None:\n self.validation_set_writer.close()\n\n def validate_on_batch(self, valid_batch):\n \"\"\"\n Performs a validation of the model using the provided batch.\n\n Additionally logs results (to files, TensorBoard) and handles visualization.\n\n :param valid_batch: data batch generated by the task and used as input to the model.\n :type valid_batch: ``DataStreams``\n\n :return: Validation loss.\n\n \"\"\"\n # Turn on evaluation mode.\n self.pipeline.eval()\n # Empty the statistics collector.\n self.validation_stat_col.empty()\n\n # Compute the validation loss using the provided data batch.\n with torch.no_grad():\n # Forward pass.\n self.pipeline.forward(valid_batch)\n # Collect the statistics.\n self.collect_all_statistics(self.validation, self.pipeline, valid_batch, self.validation_stat_col)\n\n # Export collected statistics.\n self.export_all_statistics(self.validation_stat_col, '[Partial Validation]')\n\n def validate_on_set(self):\n \"\"\"\n Performs a validation of the model on the whole validation set, using the validation ``DataLoader``.\n\n Iterates over the entire validation set 
(through the `DataLoader``), aggregates the collected statistics \\\n and logs that to the console, csv and TensorBoard (if set).\n\n \"\"\"\n # Get number of samples.\n num_samples = len(self.validation)\n \n self.logger.info('Validating over the entire validation set ({} samples in {} episodes)'.format(\n num_samples, len(self.validation.dataloader)))\n\n # Turn on evaluation mode.\n self.pipeline.eval()\n\n # Reset the statistics.\n self.validation_stat_col.empty()\n\n # Remember global episode number.\n old_episode = self.app_state.episode\n\n with torch.no_grad():\n for ep, valid_batch in enumerate(self.validation.dataloader):\n\n self.app_state.episode = ep\n # Forward pass.\n self.pipeline.forward(valid_batch)\n # Collect the statistics.\n self.collect_all_statistics(self.validation, self.pipeline, valid_batch,\n self.validation_stat_col)\n\n # Revert to global episode number.\n self.app_state.episode = old_episode\n\n # Aggregate statistics for the whole set.\n self.aggregate_all_statistics(self.validation, self.pipeline,\n self.validation_stat_col, self.validation_stat_agg)\n\n # Export aggregated statistics.\n self.export_all_statistics(self.validation_stat_agg, '[Full Validation]')\n\n\nif __name__ == '__main__':\n print(\"The trainer.py file contains only an abstract base class. Please try to use the \\\nonline_trainer (mip-online-trainer) or offline_trainer (mip-offline-trainer) instead.\")\n"
] | [
[
"torch.nn.functional.log_softmax",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.functional.relu"
],
[
"torch.cuda.device_count",
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
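Outside the ptp framework, the convolutional stack of the LeNet5 model above can be shape-checked with a few lines of plain PyTorch. The sketch below (dummy batch size and local layer variables are ours) confirms that a 1x32x32 input reaches the 120-unit flatten stage expected by `linear1`.

# Standalone shape check of the LeNet-5 convolutional stack defined above.
import torch

conv1 = torch.nn.Conv2d(1, 6, kernel_size=(5, 5))
pool1 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)
conv2 = torch.nn.Conv2d(6, 16, kernel_size=(5, 5))
pool2 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)
conv3 = torch.nn.Conv2d(16, 120, kernel_size=(5, 5))

x = torch.randn(4, 1, 32, 32)                     # dummy batch of 4 images
x = pool1(torch.nn.functional.relu(conv1(x)))     # -> (4, 6, 14, 14)
x = pool2(torch.nn.functional.relu(conv2(x)))     # -> (4, 16, 5, 5)
x = torch.nn.functional.relu(conv3(x))            # -> (4, 120, 1, 1)
print(x.view(-1, 120).shape)                      # torch.Size([4, 120])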
CamHolman/spiketools | [
"56c37a50413a015cfa9c75725cbe7d4ef54968a5",
"56c37a50413a015cfa9c75725cbe7d4ef54968a5"
] | [
"spiketools/stats/generators.py",
"spiketools/plts/space.py"
] | [
"\"\"\"Generators for drawing data from statistical distributions.\"\"\"\n\nimport numpy as np\n\n###################################################################################################\n###################################################################################################\n\ndef poisson_train(frequency, duration, start_time=0):\n \"\"\"Generator function for a Homogeneous Poisson train.\n\n Parameters\n ----------\n frequency : float\n The mean spiking frequency.\n duration : float\n Maximum duration.\n start_time: float, optional\n Timestamp of the start time for the generated sequence.\n\n Yields\n ------\n float\n A relative spike time from t=start_time, in seconds (not ms).\n\n Examples\n --------\n Make a list of spikes at 20 Hz for 3 seconds:\n\n >>> poisson_generator = poisson_train(20, 3)\n >>> spikes = [spike for spike in poisson_generator]\n\n Sample spikes continuously from a generator:\n\n >>> spike_gen = poisson_train(20, duration=np.inf)\n >>> for ind in range(10):\n ... spike = next(spike_gen)\n \"\"\"\n\n isi = 1. / frequency\n\n cur_time = start_time\n while cur_time <= duration:\n\n cur_time += isi * np.random.exponential()\n\n if cur_time > duration:\n return\n\n yield cur_time\n",
"\"\"\"Plots for spatial measures and analyses.\"\"\"\n\nfrom copy import deepcopy\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.ndimage import gaussian_filter\n\nfrom spiketools.plts.utils import check_ax, savefig, set_plt_kwargs\n\n###################################################################################################\n###################################################################################################\n\n@savefig\n@set_plt_kwargs\ndef plot_positions(positions, spike_positions=None, x_bins=None, y_bins=None,\n ax=None, **plt_kwargs):\n \"\"\"Plot positions.\n\n Parameters\n ----------\n positions : 2d array\n Position data.\n spike_positions : 2d array, optional\n Positions values at which spikes occur.\n If provided, these are added to the plot as red dots.\n x_bins, y_bins : list of float\n Bin edges for each axis.\n If provided, these are used to draw grid lines on the plot.\n ax : Axes, optional\n Axis object upon which to plot.\n plt_kwargs\n Additional arguments to pass into the plot function.\n \"\"\"\n\n ax = check_ax(ax, figsize=plt_kwargs.pop('figsize', None))\n\n ax.plot(*positions, alpha=plt_kwargs.pop('alpha', 0.35), **plt_kwargs)\n\n if spike_positions is not None:\n ax.plot(spike_positions[0, :], spike_positions[1, :],\n '.', color='red', alpha=0.35, ms=6)\n\n if x_bins is not None:\n ax.set_xticks(x_bins, minor=False)\n if y_bins is not None:\n ax.set_yticks(y_bins, minor=False)\n\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.xaxis.set_ticks_position('none')\n ax.yaxis.set_ticks_position('none')\n\n if x_bins is not None or y_bins is not None:\n ax.grid()\n\n\n@savefig\n@set_plt_kwargs\ndef plot_heatmap(data, transpose=False, smooth=False, smoothing_kernel=1.5,\n ignore_zero=False, cbar=False, cmap=None, vmin=None, vmax=None,\n ax=None, **plt_kwargs):\n \"\"\"Plot a spatial heat map.\n\n Parameters\n ----------\n data : 2d array\n Measure to plot across a grided environment.\n transpose : bool, optional, default: False\n Whether to transpose the data before plotting.\n smooth : bool, optional, default: False\n Whether to smooth the data before plotting.\n smoothing_kernel : float, optional, default: 1.5\n Standard deviation of the gaussian kernel to apply for smoothing.\n ignore_zero : bool, optional, default: False\n If True, replaces 0's with NaN for plotting.\n cbar : bool, optional, default: False\n Whether to add a colorbar to the plot.\n cmap : str, optional\n Which colormap to use to plot.\n vmin, vmax : float, optional\n Min and max plot ranges.\n title : str, optional\n Title to add to the figure.\n ax : Axes, optional\n Axis object upon which to plot.\n plt_kwargs\n Additional arguments to pass into the plot function.\n \"\"\"\n\n ax = check_ax(ax, figsize=plt_kwargs.pop('figsize', None))\n\n if transpose:\n data = data.T\n\n if smooth:\n data = _smooth_data(data, smoothing_kernel)\n\n if ignore_zero:\n data = deepcopy(data)\n data[data == 0.] 
= np.nan\n\n im = ax.imshow(data, cmap=cmap, vmin=vmin, vmax=vmax, **plt_kwargs)\n\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_axis_off()\n\n if cbar:\n colorbar = plt.colorbar(im)\n colorbar.outline.set_visible(False)\n\n\ndef _smooth_data(data, sigma):\n \"\"\"Smooth data for plotting, using a gaussian kernel.\n\n Parameters\n ----------\n data : 2d array\n Data to smooth.\n sigma : float\n Standard deviation of the gaussian kernel to apply for smoothing.\n\n Returns\n -------\n data : 2d array\n The smoothed data.\n \"\"\"\n\n data = deepcopy(data)\n data[np.isnan(data)] = 0\n\n data = gaussian_filter(data, sigma=sigma)\n\n return data\n"
] | [
[
"numpy.random.exponential"
],
[
"numpy.isnan",
"matplotlib.pyplot.colorbar",
"scipy.ndimage.gaussian_filter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
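The `poisson_train` generator above draws inter-spike intervals from an exponential distribution with mean 1/frequency and accumulates them until the duration is exceeded. A minimal list-returning sketch of the same idea is given below; the function name and seeding are ours, not part of the spiketools API.

# Sketch of a homogeneous Poisson spike train (same idea as poisson_train above).
import numpy as np

def poisson_spikes(frequency, duration, start_time=0.0, seed=None):
    rng = np.random.default_rng(seed)
    spikes, t = [], start_time
    while True:
        t += rng.exponential(1.0 / frequency)   # exponential ISI with mean 1/frequency
        if t > duration:
            return np.array(spikes)
        spikes.append(t)

spikes = poisson_spikes(20, 3, seed=0)           # ~60 spikes expected at 20 Hz over 3 s
print(len(spikes), spikes[:5])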
kslin/miRNA_models | [
"5b034b036e5aa10ab62f91f8adccec473e29ec34",
"5b034b036e5aa10ab62f91f8adccec473e29ec34"
] | [
"get_features/tf_utils.py",
"get_features/write_tfrecords_utr.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom scipy import stats\nimport tensorflow as tf\n\n\n### TFRecords Functions ###\ndef _float_feature(value):\n \"\"\"Returns a float_list from a float / double.\"\"\"\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\ndef _int64_feature(value):\n \"\"\"Returns an int64_list from a bool / enum / int / uint.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\ndef _bytes_feature(value):\n \"\"\"Returns a bytes_list from a string / byte.\"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _string_feature(value):\n \"\"\"Returns a bytes_list from a list of strings.\"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))",
"from optparse import OptionParser\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nimport utils\nimport tf_utils\n\nnp.set_printoptions(threshold=np.inf, linewidth=200)\npd.options.mode.chained_assignment = None\n\n\nif __name__ == '__main__':\n\n parser = OptionParser()\n parser.add_option(\"--tpm_file\", dest=\"TPM_FILE\", help=\"file with TPM data\")\n parser.add_option(\"--feature_file\", dest=\"FEATURE_FILE\", help=\"file with features\")\n parser.add_option(\"--mirseqs\", dest=\"MIR_SEQS\", help=\"tsv with miRNAs and their sequences\")\n parser.add_option(\"--mirlen\", dest=\"MIRLEN\", type=int)\n parser.add_option(\"--passenger\", dest=\"PASSENGER\", help=\"use passenger strand\", default=False, action='store_true')\n parser.add_option(\"--outfile\", dest=\"OUTFILE\", help=\"location for tfrecords\")\n\n (options, args) = parser.parse_args()\n\n # read miRNA DATA and filter for ones to keep\n MIRNAS = pd.read_csv(options.MIR_SEQS, sep='\\t')\n MIRNAS = MIRNAS[MIRNAS['use_tpms']]\n ALL_GUIDES = sorted(list(MIRNAS['mir'].values))\n\n MIR_DICT = {}\n ALL_MIRS = []\n for row in MIRNAS.iterrows():\n guide_seq = row[1]['guide_seq']\n pass_seq = row[1]['pass_seq']\n MIR_DICT[row[1]['mir']] = {\n 'mirseq': guide_seq,\n 'site8': utils.rev_comp(guide_seq[1:8]) + 'A',\n 'one_hot': utils.one_hot_encode(guide_seq[:options.MIRLEN])\n }\n ALL_MIRS.append(row[1]['mir'])\n if options.PASSENGER:\n MIR_DICT[row[1]['mir'] + '*'] = {\n 'mirseq': pass_seq,\n 'site8': utils.rev_comp(pass_seq[1:8]) + 'A',\n 'one_hot': utils.one_hot_encode(pass_seq[:options.MIRLEN])\n }\n ALL_MIRS.append(row[1]['mir'] + '*')\n\n ALL_MIRS = sorted(ALL_MIRS)\n print(\"Using mirs: {}\".format(ALL_MIRS))\n\n # read in features\n ALL_FEATS = []\n for mir in ALL_MIRS:\n mir = mir.replace('*', '_pass')\n temp = pd.read_csv(options.FEATURE_FILE.replace('MIR', mir), sep='\\t')\n\n # fill in SA_bg for noncanon sites\n mean_SA_diff = np.nanmean(temp['logSA_diff'])\n temp['logSA_diff'] = temp['logSA_diff'].fillna(mean_SA_diff)\n\n ALL_FEATS.append(temp)\n\n ALL_FEATS = pd.concat(ALL_FEATS, sort=False)\n print(len(ALL_FEATS))\n\n # get rid of noncanonical sites in ORF\n ALL_FEATS = ALL_FEATS[(ALL_FEATS['stype'] != 'no site') | (~ALL_FEATS['in_ORF'])]\n print(len(ALL_FEATS))\n\n # only take 3p-pairing scores for canonical sites\n ALL_FEATS['Threep_canon'] = ALL_FEATS['Threep'] * (ALL_FEATS['stype'] != 'no site')\n\n # read in expression data\n TPM = pd.read_csv(options.TPM_FILE, sep='\\t', index_col=0)\n for mir in ALL_GUIDES:\n if mir not in TPM.columns:\n raise ValueError('{} given in mirseqs file but not in TPM file.'.format(mir))\n\n num_batches = 11\n TPM['batch'] = [ix % num_batches for ix in TPM['ix']]\n keep_cols = ['in_ORF', 'logSA_diff', 'Threep_canon', 'PCT']\n\n with tf.python_io.TFRecordWriter(options.OUTFILE) as tfwriter:\n for ix, row in enumerate(TPM.iterrows()):\n\n # print progress\n if ix % 100 == 0:\n print(\"Processed {}/{} transcripts\".format(ix, len(TPM)))\n\n transcript = row[0]\n feat_temp = ALL_FEATS[ALL_FEATS['transcript'] == transcript]\n\n feature_dict = {\n 'transcript': tf_utils._bytes_feature(transcript.encode('utf-8')),\n 'batch': tf_utils._int64_feature([row[1]['batch']]),\n 'utr3_length': tf_utils._float_feature([row[1]['utr3_length']]),\n 'orf_length': tf_utils._float_feature([row[1]['orf_length']]),\n }\n\n for guide in ALL_GUIDES:\n feature_dict['{}_tpm'.format(guide)] = tf_utils._float_feature([row[1][guide]])\n\n for mir in ALL_MIRS:\n\n site8 = MIR_DICT[mir]['site8']\n 
mirseq = MIR_DICT[mir]['mirseq']\n\n feature_dict['{}_mir_1hot'.format(mir)] = tf_utils._float_feature(utils.one_hot_encode(mirseq[:options.MIRLEN]))\n\n feat_mir_temp = feat_temp[feat_temp['mir'] == mir].sort_values('loc')\n\n nsites = len(feat_mir_temp)\n if nsites > 0:\n keep_seqs = feat_mir_temp['12mer'].values\n long_seq = ''.join(keep_seqs)\n\n feature_dict['{}_seqs_1hot'.format(mir)] = tf_utils._float_feature(utils.one_hot_encode(long_seq))\n feature_dict['{}_ts7_features'.format(mir)] = tf_utils._float_feature(list(feat_mir_temp[keep_cols].values.flatten()))\n\n else:\n feature_dict['{}_seqs_1hot'.format(mir)] = tf_utils._float_feature([])\n feature_dict['{}_ts7_features'.format(mir)] = tf_utils._float_feature([])\n nsites = 0\n\n feature_dict['{}_nsites'.format(mir)] = tf_utils._int64_feature([nsites])\n\n # Create a Features message using tf.train.Example.\n example_proto = tf.train.Example(features=tf.train.Features(feature=feature_dict))\n example_proto = example_proto.SerializeToString()\n\n tfwriter.write(example_proto)\n"
] | [
[
"tensorflow.train.BytesList",
"tensorflow.train.FloatList",
"tensorflow.train.Int64List"
],
[
"pandas.concat",
"pandas.read_csv",
"numpy.set_printoptions",
"tensorflow.python_io.TFRecordWriter",
"numpy.nanmean",
"tensorflow.train.Features"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
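The helpers in tf_utils.py above wrap raw values into tf.train.Feature protos before serialization. The sketch below round-trips one toy Example through a TFRecord file using the TF2 `tf.io` API (the original script uses the older `tf.python_io` writer); the transcript ID and output path are made up for illustration.

# Round-trip of a single tf.train.Example, mirroring the serialization pattern above.
import tensorflow as tf

def _float_feature(value):
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))

def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

feature_dict = {
    'transcript': _bytes_feature(b'ENST00000000001'),   # hypothetical transcript ID
    'utr3_length': _float_feature([1234.0]),
}
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))

with tf.io.TFRecordWriter('/tmp/example.tfrecord') as writer:   # assumed output path
    writer.write(example.SerializeToString())

parsed = tf.io.parse_single_example(
    next(iter(tf.data.TFRecordDataset('/tmp/example.tfrecord'))),
    {'transcript': tf.io.FixedLenFeature([], tf.string),
     'utr3_length': tf.io.FixedLenFeature([1], tf.float32)})
print(parsed['transcript'].numpy(), parsed['utr3_length'].numpy())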
dewloosh/dewloosh-solid | [
"dbd6757ddd1373df870ccd99f5ee791c08d342cb",
"dbd6757ddd1373df870ccd99f5ee791c08d342cb"
] | [
"src/dewloosh/solid/fourier/preproc.py",
"src/dewloosh/solid/fem/structure/linemesh.py"
] | [
"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom numba import njit, prange\nfrom numpy import ndarray, pi as PI\n\nfrom dewloosh.math import squeeze\nfrom dewloosh.math.array import atleast3d\n\n\n@squeeze(True)\ndef lhs_Navier(size: tuple, shape: tuple, *args, D: ndarray,\n S: ndarray = None, model: str = 'mindlin', **kwargs):\n \"\"\"\n Returns coefficient matrix for a Navier solution, for a single or \n multiple left-hand sides.\n\n Parameters\n ----------\n size : tuple\n Tuple of floats, containing the sizes of the rectagle.\n\n shape : tuple\n Tuple of integers, containing the number of harmonic terms\n included in both directions.\n\n D : numpy.ndarray\n 2d or 3d float array of bending stiffnesses.\n\n S : numpy.ndarray, Optional\n 2d or 3d float array of shear stiffnesses. Default is None.\n\n squeeze : boolean, optional\n Removes single-dimensional entries from the shape of the \n resulting array. Default is True.\n\n Returns\n -------\n numpy.ndarray\n 3d or 4d float array of coefficients. The shape depends on\n the shape of the input parameters.\n \"\"\"\n if model.lower() in ['mindlin', 'm']:\n return lhs_Mindlin(size, shape, atleast3d(D), atleast3d(S))\n elif model.lower() in ['kirchhoff', 'k']:\n return lhs_Kirchhoff(size, shape, atleast3d(D))\n\n\n@njit(nogil=True, parallel=True, cache=True)\ndef lhs_Mindlin(size: tuple, shape: tuple, D: np.ndarray, S: np.ndarray):\n \"\"\"\n JIT compiled function, that returns coefficient matrices for a Navier \n solution for multiple left-hand sides.\n\n Parameters\n ----------\n size : tuple\n Tuple of floats, containing the sizes of the rectagle.\n\n shape : tuple\n Tuple of integers, containing the number of harmonic terms\n included in both directions.\n\n D : numpy.ndarray\n 3d float array of bending stiffnesses.\n\n S : numpy.ndarray\n 3d float array of shear stiffnesses.\n\n Returns\n -------\n numpy.ndarray\n 4d float array of coefficients.\n \"\"\"\n Lx, Ly = size\n nLHS = D.shape[0]\n M, N = shape\n res = np.zeros((nLHS, M * N, 3, 3), dtype=D.dtype)\n for iLHS in prange(nLHS):\n D11, D12, D22, D66 = D[iLHS, 0, 0], D[iLHS, 0, 1], \\\n D[iLHS, 1, 1], D[iLHS, 2, 2]\n S44, S55 = S[iLHS, 0, 0], S[iLHS, 1, 1]\n for m in prange(1, M + 1):\n for n in prange(1, N + 1):\n iMN = (m - 1) * N + n - 1\n res[iLHS, iMN, 0, 0] = -PI**2*D22*n**2 / \\\n Ly**2 - PI**2*D66*m**2/Lx**2 - S44\n res[iLHS, iMN, 0, 1] = PI**2*D12*m * \\\n n/(Lx*Ly) + PI**2*D66*m*n/(Lx*Ly)\n res[iLHS, iMN, 0, 2] = PI*S44*n/Ly\n res[iLHS, iMN, 1, 0] = -PI**2*D12*m * \\\n n/(Lx*Ly) - PI**2*D66*m*n/(Lx*Ly)\n res[iLHS, iMN, 1, 1] = PI**2*D11*m**2 / \\\n Lx**2 + PI**2*D66*n**2/Ly**2 + S55\n res[iLHS, iMN, 1, 2] = PI*S55*m/Lx\n res[iLHS, iMN, 2, 0] = -PI*S44*n/Ly\n res[iLHS, iMN, 2, 1] = PI*S55*m/Lx\n res[iLHS, iMN, 2, 2] = PI**2*S44 * \\\n n**2/Ly**2 + PI**2*S55*m**2/Lx**2\n return res\n\n\n@njit(nogil=True, parallel=True, cache=True)\ndef lhs_Kirchhoff(size: tuple, shape: tuple, D: np.ndarray):\n \"\"\"\n JIT compiled function, that returns coefficient matrices for a Navier \n solution for multiple left-hand sides.\n\n Parameters\n ----------\n size : tuple\n Tuple of floats, containing the sizes of the rectagle.\n\n shape : tuple\n Tuple of integers, containing the number of harmonic terms\n included in both directions.\n\n D : numpy.ndarray\n 3d float array of bending stiffnesses.\n\n Returns\n -------\n numpy.ndarray\n 2d float array of coefficients.\n \"\"\"\n Lx, Ly = size\n nLHS = D.shape[0]\n M, N = shape\n res = np.zeros((nLHS, M * N), dtype=D.dtype)\n for iLHS in 
prange(nLHS):\n D11, D12, D22, D66 = D[iLHS, 0, 0], D[iLHS, 0, 1], \\\n D[iLHS, 1, 1], D[iLHS, 2, 2]\n for m in prange(1, M + 1):\n for n in prange(1, N + 1):\n iMN = (m - 1) * N + n - 1\n res[iLHS, iMN] = PI**4*D11*m**4/Lx**4 + \\\n 2*PI**4*D12*m**2*n**2/(Lx**2*Ly**2) + \\\n PI**4*D22*n**4/Ly**4 + \\\n 4*PI**4*D66*m**2*n**2/(Lx**2*Ly**2)\n return res\n",
"# -*- coding: utf-8 -*-\nimport numpy as np\n\nfrom dewloosh.geom.config import __haspyvista__, __hasplotly__, __hasmatplotlib__\n\nfrom ..mesh import FemMesh\nfrom ..cells.bernoulli2 import Bernoulli2\nfrom ..cells.bernoulli3 import Bernoulli3\n\n__cache = True\n\n\nif __haspyvista__:\n import pyvista as pv\n\n\nclass LineMesh(FemMesh):\n \n _cell_classes_ = {\n 2: Bernoulli2,\n 3: Bernoulli3,\n }\n\n def __init__(self, *args, areas=None, connectivity=None, **kwargs): \n \n super().__init__(*args, **kwargs)\n \n if self.celldata is not None:\n nE = len(self.celldata)\n if areas is None:\n areas = np.ones(nE)\n else:\n assert len(areas.shape) == 1, \\\n \"'areas' must be a 1d float or integer numpy array!\"\n self.celldata.db['areas'] = areas\n \n if connectivity is not None:\n if isinstance(connectivity, np.ndarray):\n assert len(connectivity.shape) == 3\n assert connectivity.shape[0] == nE\n assert connectivity.shape[1] == 2\n assert connectivity.shape[2] == self.__class__.NDOFN\n self.celldata.db['conn'] = connectivity\n \n def masses(self, *args, **kwargs):\n blocks = self.cellblocks(*args, inclusive=True, **kwargs)\n vmap = map(lambda b: b.celldata.masses(), blocks)\n return np.concatenate(list(vmap))\n \n def mass(self, *args, **kwargs):\n return np.sum(self.masses(*args, **kwargs))\n \n def plot(self, *args, as_tubes=True, radius=0.1, **kwargs):\n if not as_tubes:\n return super().plot(*args, **kwargs)\n else:\n self.to_pv(as_tubes=True, radius=radius).plot(smooth_shading=True)\n\n def to_pv(self, *args, as_tubes=True, radius=0.1, **kwargs):\n \"\"\"\n Returns the mesh as a `pyvista` object.\n \"\"\"\n assert __haspyvista__\n if not as_tubes:\n return super().to_pv(*args, **kwargs)\n else:\n poly = pv.PolyData()\n poly.points = self.coords()\n topo = self.topology()\n lines = np.full((len(topo), 3), 2, dtype=int)\n lines[:, 1:] = topo\n poly.lines = lines\n return poly.tube(radius=radius)\n\n def to_plotly(self):\n assert __hasplotly__\n raise NotImplementedError\n\n def to_mpl(self):\n \"\"\"\n Returns the mesh as a `matplotlib` figure.\n \"\"\"\n assert __hasmatplotlib__\n raise NotImplementedError\n\n\nclass BernoulliFrame(LineMesh):\n \n NDOFN = 6\n \n\nif __name__ == '__main__':\n pass\n"
] | [
[
"numpy.zeros"
],
[
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
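The Kirchhoff branch of the row above reduces each harmonic pair (m, n) to a single scalar coefficient. As a plain-NumPy cross-check of that formula — no numba and no dewloosh imports; the function name `kirchhoff_coefficients` and the example stiffness values are my own illustrative assumptions, not part of the library — a minimal sketch:

    import numpy as np

    def kirchhoff_coefficients(size, shape, D):
        """Pure-NumPy mirror of the Kirchhoff coefficient formula above.

        size  : (Lx, Ly) plate dimensions
        shape : (M, N) number of harmonic terms in x and y
        D     : (3, 3) bending stiffness matrix for a single left-hand side
        """
        Lx, Ly = size
        M, N = shape
        D11, D12, D22, D66 = D[0, 0], D[0, 1], D[1, 1], D[2, 2]
        # harmonic indices m = 1..M (rows) and n = 1..N (columns)
        m = np.arange(1, M + 1)[:, None]
        n = np.arange(1, N + 1)[None, :]
        res = (np.pi**4 * D11 * m**4 / Lx**4
               + 2 * np.pi**4 * D12 * m**2 * n**2 / (Lx**2 * Ly**2)
               + np.pi**4 * D22 * n**4 / Ly**4
               + 4 * np.pi**4 * D66 * m**2 * n**2 / (Lx**2 * Ly**2))
        # row-major flatten reproduces iMN = (m - 1) * N + (n - 1)
        return res.reshape(M * N)

    # example: a made-up orthotropic stiffness, 2 x 3 harmonic terms
    D = np.array([[1.0, 0.3, 0.0],
                  [0.3, 1.0, 0.0],
                  [0.0, 0.0, 0.35]])
    print(kirchhoff_coefficients((2.0, 3.0), (2, 3), D))

Because the reshape follows the same iMN ordering as the JIT kernel, the output can be compared element-wise against `lhs_Kirchhoff` for a single left-hand side.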
13060923171/Crawl-Project2 | [
"effab1bf31979635756fc272a7bcc666bb499be2",
"effab1bf31979635756fc272a7bcc666bb499be2"
] | [
"土流网/雷达图.py",
"台海舆论-数据分析(可视化系统)/demo/Terrace_Line.py"
] | [
"import pandas as pd\nfrom pyecharts.charts import Radar\nfrom pyecharts import options as opts\nfrom pyecharts.globals import CurrentConfig,ThemeType\n\ndf = pd.read_excel('real_estate_info.xlsx')['规划用途']\ndatas = df.value_counts()\nitems = datas.index.tolist()\ncolors = ['#FF0000', '#FF4500', '#00FA9A', '#FFFFF0', '#FFD700']\n#radaritem:雷达图数据项配置\nlabels = [opts.RadarIndicatorItem(name=items[i],max_=100,color=colors[i])for i in range(len(items))]\nvalue = [int(j) for j in datas.values]\nradar = (\n Radar(init_opts=opts.InitOpts(theme=ThemeType.DARK))\n .add_schema(\n schema=labels\n )\n .add(\n series_name='土地规划用途占比(%)',\n data = [[round(x/sum(value) * 100,3)for x in value]],\n areastyle_opts=opts.AreaStyleOpts(opacity=0.5,color='blue') #区域填充颜色\n )\n .set_global_opts()\n .render('radar.html')\n)",
"import pandas as pd\nimport pyecharts.options as opts\nfrom pyecharts.charts import Line\nfrom pyecharts.globals import ThemeType\n\ndef weibo_sum():\n df1 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/1.xlsx').loc[:,['from']]\n sum_weibo = []\n for d1 in df1['from']:\n d1 = str(d1)\n d1 = d1[0:8]\n sum_weibo.append(d1)\n\n df2 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/2.xlsx').loc[:,['from']]\n for d2 in df2['from']:\n d2 = str(d2)\n d2 = d2[0:8]\n if 'nan' not in d2:\n sum_weibo.append(d2)\n\n df3 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/3.xlsx').loc[:,['from']]\n for d3 in df3['from']:\n d3 = str(d3)\n d3 = d3[0:8]\n if 'nan' not in d3:\n sum_weibo.append(d3)\n\n df4 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/4.xlsx').loc[:,['from']]\n for d4 in df4['from']:\n d4 = str(d4)\n d4 = d4[0:8]\n if 'nan' not in d4:\n sum_weibo.append(d4)\n\n df5 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/4.xlsx').loc[:,['from']]\n for d5 in df5['from']:\n d5 = str(d5)\n d5 = d5[0:8]\n if 'nan' not in d5:\n sum_weibo.append(d5)\n\n df6 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/6.xlsx').loc[:,['from']]\n for d6 in df6['from']:\n d6 = str(d6)\n d6 = d6[0:8]\n if 'nan' not in d6:\n sum_weibo.append(d6)\n\n df7 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/7.xlsx').loc[:, ['from']]\n for d7 in df7['from']:\n d7 = str(d7)\n d7 = d7[0:8]\n if 'nan' not in d7:\n sum_weibo.append(d7)\n\n df8 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/8.xlsx').loc[:, ['from']]\n for d8 in df8['from']:\n d8 = str(d8)\n d8 = d8[0:8]\n if 'nan' not in d8:\n sum_weibo.append(d8)\n\n df9 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/9.xlsx').loc[:, ['from']]\n for d9 in df9['from']:\n d9 = str(d9)\n d9 = d9[0:8]\n if 'nan' not in d9:\n sum_weibo.append(d9)\n\n df10 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/10.xlsx').loc[:, ['from']]\n for d10 in df10['from']:\n d10 = str(d10)\n d10 = d10[0:8]\n if 'nan' not in d10:\n sum_weibo.append(d10)\n\n df11 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/11.xlsx').loc[:, ['from']]\n for d11 in df11['from']:\n d11 = str(d11)\n d11 = d11[0:8]\n if 'nan' not in d11:\n sum_weibo.append(d11)\n\n df12 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/12.xlsx').loc[:, ['from']]\n for d12 in df12['from']:\n d12 = str(d12)\n d12 = d12[0:8]\n if 'nan' not in d12:\n sum_weibo.append(d12)\n\n df13 = pd.read_excel('./台湾文本-数据/微博/两岸关系微博.xlsx').loc[:, ['from']]\n for d13 in df13['from']:\n d13 = str(d13)\n d13 = d13[0:8]\n if 'nan' not in d13:\n sum_weibo.append(d13)\n\n df14 = pd.read_excel('./台湾文本-数据/微博/微博中国内政.xlsx').loc[:, ['from']]\n for d14 in df14['from']:\n d14 = str(d14)\n d14 = d14[0:8]\n if 'nan' not in d14:\n sum_weibo.append(d14)\n\n df15 = pd.read_excel('./台湾文本-数据/微博/微博分裂势力.xlsx').loc[:, ['from']]\n for d15 in df15['from']:\n d15 = str(d15)\n d15 = d15[0:8]\n if 'nan' not in d15:\n sum_weibo.append(d15)\n\n df16 = pd.read_excel('./台湾文本-数据/微博/微博台军.xlsx').loc[:, ['from']]\n for d16 in df16['from']:\n d16 = str(d16)\n d16 = d16[0:8]\n if 'nan' not in d16:\n sum_weibo.append(d16)\n\n df17 = pd.read_excel('./台湾文本-数据/微博/微博台湾政治.xlsx').loc[:, ['from']]\n for d17 in df17['from']:\n d17 = str(d17)\n d17 = d17[0:8]\n if 'nan' not in d17:\n sum_weibo.append(d17)\n\n df18 = pd.read_excel('./台湾文本-数据/微博/微博台湾海峡.xlsx').loc[:, ['from']]\n for d18 in df18['from']:\n d18 = str(d18)\n d18 = d18[0:8]\n if 'nan' not in d18:\n sum_weibo.append(d18)\n\n df19 = pd.read_excel('./台湾文本-数据/微博/微博台湾牌.xlsx').loc[:, ['from']]\n for d19 in df19['from']:\n d19 = str(d19)\n d19 = d19[0:8]\n if 'nan' not in d19:\n sum_weibo.append(d19)\n\n df20 = 
pd.read_excel('./台湾文本-数据/微博/微博台湾疫情.xlsx').loc[:, ['from']]\n for d20 in df20['from']:\n d20 = str(d20)\n d20 = d20[0:8]\n if 'nan' not in d20:\n sum_weibo.append(d20)\n\n df21 = pd.read_excel('./台湾文本-数据/微博/微博台湾经济.xlsx').loc[:, ['from']]\n for d21 in df21['from']:\n d21 = str(d21)\n d21 = d21[0:8]\n if 'nan' not in d21:\n sum_weibo.append(d21)\n\n df22 = pd.read_excel('./台湾文本-数据/微博/微博台独.xlsx').loc[:, ['from']]\n for d22 in df22['from']:\n d22 = str(d22)\n d22 = d22[0:8]\n if 'nan' not in d22:\n sum_weibo.append(d22)\n\n df23 = pd.read_excel('./台湾文本-数据/微博/微博和平统一.xlsx').loc[:, ['from']]\n for d23 in df23['from']:\n d23 = str(d23)\n d23 = d23[0:8]\n if 'nan' not in d23:\n sum_weibo.append(d23)\n\n df24 = pd.read_excel('./台湾文本-数据/微博/微博拜登台湾.xlsx').loc[:, ['from']]\n for d24 in df24['from']:\n d24 = str(d24)\n d24 = d24[0:8]\n if 'nan' not in d24:\n sum_weibo.append(d24)\n\n df25 = pd.read_excel('./台湾文本-数据/微博/微博武统.xlsx').loc[:, ['from']]\n for d25 in df25['from']:\n d25 = str(d25)\n d25 = d25[0:8]\n if 'nan' not in d25:\n sum_weibo.append(d25)\n\n df26 = pd.read_excel('./台湾文本-数据/微博/微博特朗普台湾.xlsx').loc[:, ['from']]\n for d26 in df26['from']:\n d26 = str(d26)\n d26 = d26[0:8]\n if 'nan' not in d26:\n sum_weibo.append(d26)\n\n df27 = pd.read_excel('./台湾文本-数据/微博/微博美台.xlsx').loc[:, ['from']]\n for d27 in df27['from']:\n d27 = str(d27)\n d27 = d27[0:8]\n if 'nan' not in d27:\n sum_weibo.append(d27)\n\n df28 = pd.read_excel('./台湾文本-数据/微博/微博蔡英文.xlsx').loc[:, ['from1']]\n for d28 in df28['from1']:\n d28 = str(d28)\n d28 = d28[0:8]\n if 'nan' not in d28:\n sum_weibo.append(d28)\n\n df29 = pd.read_excel('./台湾文本-数据/微博/微博领土主权.xlsx').loc[:, ['from']]\n for d29 in df29['from']:\n d29 = str(d29)\n d29 = d29[0:8]\n if 'nan' not in d29:\n sum_weibo.append(d29)\n\n d = {}\n for s in sum_weibo:\n d[s] = d.get(s, 0) + 1\n ls = list(d.items())\n ls.sort(key=lambda x: x[0], reverse=True)\n del ls[-10:]\n del ls[0]\n return ls\n\ndef huanqiuw_sum():\n df = pd.read_excel('./台湾文本-数据/环球网/环球网台海.xlsx').loc[:, ['time']]\n sum_list = []\n for d in df['time']:\n d = str(d)\n d = d[0:7]\n if 'nan' not in d:\n sum_list.append(d)\n d = {}\n for s in sum_list:\n d[s] = d.get(s, 0) + 1\n ls = list(d.items())\n ls.sort(key=lambda x: x[0], reverse=False)\n return ls\n\ndef zhongguotaiwanwang():\n df1 = pd.read_excel('./台湾文本-数据/中国台湾网/两岸.xlsx').loc[:, ['info']]\n sum_list = []\n for d1 in df1['info']:\n d1 = str(d1)\n d1 = d1[0:7]\n if 'nan' not in d1:\n sum_list.append(d1)\n\n df2 = pd.read_excel('./台湾文本-数据/中国台湾网/两岸快评.xlsx').loc[:, ['info']]\n for d2 in df2['info']:\n d2 = str(d2)\n d2 = d2[0:7]\n if 'nan' not in d2:\n sum_list.append(d2)\n\n df3 = pd.read_excel('./台湾文本-数据/中国台湾网/台商.xlsx').loc[:, ['info']]\n for d3 in df3['info']:\n d3 = str(d3)\n d3 = d3[0:7]\n if 'nan' not in d3:\n sum_list.append(d3)\n\n df4 = pd.read_excel('./台湾文本-数据/中国台湾网/台湾自2020年12月-2021.1.4时事.xlsx').loc[:, ['info']]\n for d4 in df4['info']:\n d4 = str(d4)\n d4 = d4[0:7]\n if 'nan' not in d4:\n sum_list.append(d4)\n\n df5 = pd.read_excel('./台湾文本-数据/中国台湾网/文化.xlsx').loc[:, ['info']]\n for d5 in df5['info']:\n d5 = str(d5)\n d5 = d5[0:7]\n if 'nan' not in d5:\n sum_list.append(d5)\n\n df6 = pd.read_excel('./台湾文本-数据/中国台湾网/海峡时评.xlsx').loc[:, ['info']]\n for d6 in df6['info']:\n d6 = str(d6)\n d6 = d6[0:7]\n if 'nan' not in d6:\n sum_list.append(d6)\n\n df7 = pd.read_excel('./台湾文本-数据/中国台湾网/经贸.xlsx').loc[:, ['info']]\n for d7 in df7['info']:\n d7 = str(d7)\n d7 = d7[0:7]\n if 'nan' not in d7:\n sum_list.append(d7)\n\n df8 = 
pd.read_excel('./台湾文本-数据/中国台湾网/网友专栏.xlsx').loc[:, ['日期时间']]\n for d8 in df8['日期时间']:\n d8 = str(d8).replace('年','-')\n d8 = d8[0:7]\n if 'nan' not in d8:\n sum_list.append(d8)\n\n df9 = pd.read_excel('./台湾文本-数据/中国台湾网/网友快言.xlsx').loc[:, ['info']]\n for d9 in df9['info']:\n d9 = str(d9)\n d9 = d9[0:7]\n if 'nan' not in d9:\n sum_list.append(d9)\n\n df10 = pd.read_excel('./台湾文本-数据/中国台湾网/萧萧话两岸.xlsx').loc[:, ['info']]\n for d10 in df10['info']:\n d10 = str(d10)\n d10 = d10[0:7]\n if 'nan' not in d10:\n sum_list.append(d10)\n\n df11 = pd.read_excel('./台湾文本-数据/中国台湾网/部委.xlsx').loc[:, ['info']]\n for d11 in df11['info']:\n d11 = str(d11)\n d11 = d11[0:7]\n if 'nan' not in d11:\n sum_list.append(d11)\n\n d = {}\n for s in sum_list:\n d[s] = d.get(s, 0) + 1\n ls = list(d.items())\n ls.sort(key=lambda x: x[0], reverse=True)\n del ls[-13:]\n return ls\n\ndef zhihu():\n df = pd.read_excel('./台湾文本-数据/知乎/知乎台海局势的数据.xlsx').loc[:, ['ContentItem-action']]\n sum_list = []\n for d in df['ContentItem-action']:\n d = str(d)\n d = d[0:7]\n if 'nan' not in d:\n sum_list.append(d)\n d = {}\n for s in sum_list:\n d[s] = d.get(s, 0) + 1\n ls = list(d.items())\n ls.sort(key=lambda x: x[0], reverse=False)\n del ls[0:13]\n del ls[-2:]\n return ls\n\ndef ribao_sum():\n sum_list = []\n df = pd.read_excel('./台湾文本-数据/中国日报网/中国日报网两岸关系.xlsx').loc[:, ['日期时间']]\n for d in df['日期时间']:\n d = str(d)\n d = d.replace('年', '-')\n d = d[0:7]\n if 'nan' not in d:\n sum_list.append(d)\n\n df2 = pd.read_excel('./台湾文本-数据/中国日报网/中国日报网台海局势.xlsx').loc[:, ['日期时间']]\n for d2 in df2['日期时间']:\n d2 = str(d2)\n d2 = d2.replace('年', '-')\n d2 = d2[0:7]\n if 'nan' not in d2:\n sum_list.append(d2)\n\n df3 = pd.read_excel('./台湾文本-数据/中国日报网/中国日报网台湾牌.xlsx').loc[:, ['日期时间']]\n for d3 in df3['日期时间']:\n d3 = str(d3)\n d3 = d3.replace('年', '-')\n d3 = d3[0:7]\n if 'nan' not in d3:\n sum_list.append(d3)\n\n df4 = pd.read_excel('./台湾文本-数据/中国日报网/中国日报网和平统一.xlsx').loc[:, ['日期时间']]\n for d4 in df4['日期时间']:\n d4 = str(d4)\n d4 = d4.replace('年', '-')\n d4 = d4[0:7]\n if 'nan' not in d4:\n sum_list.append(d4)\n\n df5 = pd.read_excel('./台湾文本-数据/中国日报网/微博民进党.xlsx').loc[:, ['from']]\n for d5 in df5['from']:\n d5 = str(d5)\n d5 = d5.replace('年','-')\n d5 = d5[0:7]\n if 'nan' not in d5:\n sum_list.append(d5)\n\n d = {}\n for s in sum_list:\n d[s] = d.get(s, 0) + 1\n ls = list(d.items())\n ls.sort(key=lambda x: x[0], reverse=False)\n return ls\n\ndef zhongxing_sum():\n sum_list = []\n df = pd.read_excel('./台湾文本-数据/中新网/中新网两岸关系.xlsx').loc[:, ['日期时间']]\n for d in df['日期时间']:\n d = str(d)\n d = d[0:7]\n if 'nan' not in d:\n sum_list.append(d)\n\n df2 = pd.read_excel('./台湾文本-数据/中新网/中新网台海局势.xlsx').loc[:, ['日期时间']]\n for d2 in df2['日期时间']:\n d2 = str(d2)\n d2 = d2[0:7]\n if 'nan' not in d2:\n sum_list.append(d2)\n\n df3 = pd.read_excel('./台湾文本-数据/中新网/中新网台军.xlsx').loc[:, ['日期时间']]\n for d3 in df3['日期时间']:\n d3 = str(d3)\n d3 = d3[0:7]\n if 'nan' not in d3:\n sum_list.append(d3)\n\n df4 = pd.read_excel('./台湾文本-数据/中新网/中新网台湾政治.xlsx').loc[:, ['日期时间']]\n for d4 in df4['日期时间']:\n d4 = str(d4)\n d4 = d4[0:7]\n if 'nan' not in d4:\n sum_list.append(d4)\n\n df5 = pd.read_excel('./台湾文本-数据/中新网/中新网台湾海峡.xlsx').loc[:, ['日期时间']]\n for d5 in df5['日期时间']:\n d5 = str(d5)\n d5 = d5[0:7]\n if 'nan' not in d5:\n sum_list.append(d5)\n\n df6 = pd.read_excel('./台湾文本-数据/中新网/中新网台湾牌.xlsx').loc[:, ['日期时间']]\n for d6 in df6['日期时间']:\n d6 = str(d6)\n d6 = d6[0:7]\n if 'nan' not in d6:\n sum_list.append(d6)\n\n df7 = pd.read_excel('./台湾文本-数据/中新网/中新网台湾疫情.xlsx').loc[:, ['日期时间']]\n for d7 in df7['日期时间']:\n d7 = 
str(d7)\n d7 = d7[0:7]\n if 'nan' not in d7:\n sum_list.append(d7)\n\n df8 = pd.read_excel('./台湾文本-数据/中新网/中新网台湾经济.xlsx').loc[:, ['日期时间']]\n for d8 in df8['日期时间']:\n d8 = str(d8)\n d8 = d8[0:7]\n if 'nan' not in d8:\n sum_list.append(d8)\n\n df9 = pd.read_excel('./台湾文本-数据/中新网/中新网台独.xlsx').loc[:, ['日期时间']]\n for d9 in df9['日期时间']:\n d9 = str(d9)\n d9 = d9[0:7]\n if 'nan' not in d9:\n sum_list.append(d9)\n\n df10 = pd.read_excel('./台湾文本-数据/中新网/中新网和平统一.xlsx').loc[:, ['日期时间']]\n for d10 in df10['日期时间']:\n d10 = str(d10)\n d10 = d10[0:7]\n if 'nan' not in d10:\n sum_list.append(d10)\n\n df11 = pd.read_excel('./台湾文本-数据/中新网/中新网武统.xlsx').loc[:, ['日期时间']]\n for d11 in df11['日期时间']:\n d11 = str(d11)\n d11 = d11[0:7]\n if 'nan' not in d11:\n sum_list.append(d11)\n\n df12 = pd.read_excel('./台湾文本-数据/中新网/中新网民进党.xlsx').loc[:, ['日期时间']]\n for d12 in df12['日期时间']:\n d12 = str(d12)\n d12 = d12[0:7]\n if 'nan' not in d12:\n sum_list.append(d12)\n\n df13 = pd.read_excel('./台湾文本-数据/中新网/中新网美台.xlsx').loc[:, ['日期时间']]\n for d13 in df13['日期时间']:\n d13 = str(d13)\n d13 = d13[0:7]\n if 'nan' not in d13:\n sum_list.append(d13)\n\n df14 = pd.read_excel('./台湾文本-数据/中新网/中新网蔡英文.xlsx').loc[:, ['日期时间']]\n for d14 in df14['日期时间']:\n d14 = str(d14)\n d14 = d14[0:7]\n if 'nan' not in d14:\n sum_list.append(d14)\n\n d = {}\n for s in sum_list:\n d[s] = d.get(s, 0) + 1\n ls = list(d.items())\n ls.sort(key=lambda x: x[0], reverse=False)\n del ls[0:46]\n return ls\n\n\ndef jinri_sum():\n sum_list = []\n df = pd.read_excel('./台湾文本-数据/今日头条/今日头条两岸关系.xlsx').loc[:, ['lbtn1']]\n for d in df['lbtn1']:\n d = str(d)\n if 'nan' not in d:\n sum_list.append(d)\n\n df2 = pd.read_excel('./台湾文本-数据/今日头条/今日头条台军.xlsx').loc[:, ['lbtn1']]\n for d2 in df2['lbtn1']:\n d2 = str(d2)\n if 'nan' not in d2:\n sum_list.append(d2)\n\n df3 = pd.read_excel('./台湾文本-数据/今日头条/今日头条台海局势.xlsx').loc[:, ['time']]\n for d3 in df3['time']:\n d3 = str(d3)\n if 'nan' not in d3:\n sum_list.append(d3)\n\n df4 = pd.read_excel('./台湾文本-数据/今日头条/今日头条台湾政治.xlsx').loc[:, ['lbtn1']]\n for d4 in df4['lbtn1']:\n d4 = str(d4)\n if 'nan' not in d4:\n sum_list.append(d4)\n\n df5 = pd.read_excel('./台湾文本-数据/今日头条/今日头条台湾海峡.xlsx').loc[:, ['lbtn1']]\n for d5 in df5['lbtn1']:\n d5 = str(d5)\n if 'nan' not in d5:\n sum_list.append(d5)\n\n df6 = pd.read_excel('./台湾文本-数据/今日头条/今日头条台湾牌.xlsx').loc[:, ['lbtn1']]\n for d6 in df6['lbtn1']:\n d6 = str(d6)\n if 'nan' not in d6:\n sum_list.append(d6)\n\n df7 = pd.read_excel('./台湾文本-数据/今日头条/今日头条台湾疫情.xlsx').loc[:, ['lbtn1']]\n for d7 in df7['lbtn1']:\n d7 = str(d7)\n if 'nan' not in d7:\n sum_list.append(d7)\n\n df8 = pd.read_excel('./台湾文本-数据/今日头条/今日头条台独.xlsx').loc[:, ['lbtn1']]\n for d8 in df8['lbtn1']:\n d8 = str(d8)\n if 'nan' not in d8:\n sum_list.append(d8)\n\n df9 = pd.read_excel('./台湾文本-数据/今日头条/今日头条和平统一.xlsx').loc[:, ['lbtn1']]\n for d9 in df9['lbtn1']:\n d9 = str(d9)\n if 'nan' not in d9:\n sum_list.append(d9)\n\n df10 = pd.read_excel('./台湾文本-数据/今日头条/今日头条拜登台湾.xlsx').loc[:, ['lbtn1']]\n for d10 in df10['lbtn1']:\n d10 = str(d10)\n if 'nan' not in d10:\n sum_list.append(d10)\n\n df11 = pd.read_excel('./台湾文本-数据/今日头条/今日头条武统.xlsx').loc[:, ['lbtn1']]\n for d11 in df11['lbtn1']:\n d11 = str(d11)\n if 'nan' not in d11:\n sum_list.append(d11)\n\n df12 = pd.read_excel('./台湾文本-数据/今日头条/今日头条民进党.xlsx').loc[:, ['lbtn1']]\n for d12 in df12['lbtn1']:\n d12 = str(d12)\n if 'nan' not in d12:\n sum_list.append(d12)\n\n df13= pd.read_excel('./台湾文本-数据/今日头条/今日头条特朗普台湾.xlsx').loc[:, ['lbtn1']]\n for d13 in df13['lbtn1']:\n d13 = str(d13)\n if 'nan' not in d13:\n 
sum_list.append(d13)\n\n df14 = pd.read_excel('./台湾文本-数据/今日头条/今日头条美台.xlsx').loc[:, ['lbtn1']]\n for d14 in df14['lbtn1']:\n d14 = str(d14)\n if 'nan' not in d14:\n sum_list.append(d14)\n\n df15 = pd.read_excel('./台湾文本-数据/今日头条/今日头条蔡英文.xlsx').loc[:, ['lbtn1']]\n for d15 in df15['lbtn1']:\n d15 = str(d15)\n if 'nan' not in d15:\n sum_list.append(d15)\n\n df16 = pd.read_excel('./台湾文本-数据/今日头条/台湾经济.xlsx').loc[:, ['lbtn1']]\n for d16 in df16['lbtn1']:\n d16 = str(d16)\n if 'nan' not in d16:\n sum_list.append(d16)\n\n d = {}\n for s in sum_list:\n d[s] = d.get(s, 0) + 1\n ls = list(d.items())\n ls.sort(key=lambda x: x[0], reverse=False)\n\n return ls\n\ndef guangming_sum():\n sum_list = []\n df = pd.read_excel('./台湾文本-数据/光明网/光明网两岸关系.xlsx').loc[:, ['日期时间']]\n for d in df['日期时间']:\n d = str(d)\n d = d[0:7]\n if 'nan' not in d:\n sum_list.append(d)\n\n df2 = pd.read_excel('./台湾文本-数据/光明网/光明网台军.xlsx').loc[:, ['m-news-box2']]\n for d2 in df2['m-news-box2']:\n d2 = str(d2)\n d2 = d2[0:7]\n if 'nan' not in d2:\n sum_list.append(d2)\n\n\n\n df4 = pd.read_excel('./台湾文本-数据/光明网/光明网台湾政治.xlsx').loc[:, ['m-news-box1']]\n for d4 in df4['m-news-box1']:\n d4 = str(d4)\n d4 = d4[0:7]\n if 'nan' not in d4:\n sum_list.append(d4)\n\n df5 = pd.read_excel('./台湾文本-数据/光明网/光明网台湾海峡.xlsx').loc[:, ['m-news-box1']]\n for d5 in df5['m-news-box1']:\n d5 = str(d5)\n d5 = d5[0:7]\n if 'nan' not in d5:\n sum_list.append(d5)\n\n df6 = pd.read_excel('./台湾文本-数据/光明网/光明网台湾牌.xlsx').loc[:, ['日期时间']]\n for d6 in df6['日期时间']:\n d6 = str(d6)\n d6 = d6[0:7]\n if 'nan' not in d6:\n sum_list.append(d6)\n\n df7 = pd.read_excel('./台湾文本-数据/光明网/光明网台湾疫情.xlsx').loc[:, ['m-news-box1']]\n for d7 in df7['m-news-box1']:\n d7 = str(d7)\n d7 = d7[0:7]\n if 'nan' not in d7:\n sum_list.append(d7)\n\n df8 = pd.read_excel('./台湾文本-数据/光明网/光明网台湾经济.xlsx').loc[:, ['m-news-box1']]\n for d8 in df8['m-news-box1']:\n d8 = str(d8)\n d8 = d8[0:7]\n if 'nan' not in d8:\n sum_list.append(d8)\n\n df9 = pd.read_excel('./台湾文本-数据/光明网/光明网台独.xlsx').loc[:, ['m-news-box1']]\n for d9 in df9['m-news-box1']:\n d9 = str(d9)\n d9 = d9[0:7]\n if 'nan' not in d9:\n sum_list.append(d9)\n\n df10 = pd.read_excel('./台湾文本-数据/光明网/光明网和平统一.xlsx').loc[:, ['日期时间']]\n for d10 in df10['日期时间']:\n d10 = str(d10)\n d10 = d10[0:7]\n if 'nan' not in d10:\n sum_list.append(d10)\n\n df11 = pd.read_excel('./台湾文本-数据/光明网/光明网武统.xlsx').loc[:, ['m-news-box1']]\n for d11 in df11['m-news-box1']:\n d11 = str(d11)\n d11 = d11[0:7]\n if 'nan' not in d11:\n sum_list.append(d11)\n\n df12 = pd.read_excel('./台湾文本-数据/光明网/光明网民进党.xlsx').loc[:, ['m-news-box1']]\n for d12 in df12['m-news-box1']:\n d12 = str(d12)\n d12 = d12[0:7]\n if 'nan' not in d12:\n sum_list.append(d12)\n\n df13 = pd.read_excel('./台湾文本-数据/光明网/光明网蔡英文.xlsx').loc[:, ['m-news-box1']]\n for d13 in df13['m-news-box1']:\n d13 = str(d13)\n d13 = d13[0:7]\n if 'nan' not in d13:\n sum_list.append(d13)\n\n d = {}\n for s in sum_list:\n d[s] = d.get(s, 0) + 1\n ls = list(d.items())\n ls.sort(key=lambda x: x[0], reverse=False)\n del ls[0:20]\n return ls\n\n\ndef fenghuan_sum():\n sum_list = []\n df = pd.read_excel('./台湾文本-数据/凤凰网/凤凰台军.xlsx').loc[:, ['字段2']]\n for d in df['字段2']:\n d = str(d)\n d = d[0:7]\n if 'nan' not in d:\n sum_list.append(d)\n\n\n df3 = pd.read_excel('./台湾文本-数据/凤凰网/凤凰台湾海峡.xlsx').loc[:, ['字段2']]\n for d3 in df3['字段2']:\n d3 = str(d3)\n d3 = d3[0:7]\n if 'nan' not in d3:\n sum_list.append(d3)\n\n\n df6 = pd.read_excel('./台湾文本-数据/凤凰网/凤凰台独.xlsx').loc[:, ['字段2']]\n for d6 in df6['字段2']:\n d6 = str(d6)\n d6 = d6[0:7]\n if 'nan' not in d6:\n 
sum_list.append(d6)\n\n df9 = pd.read_excel('./台湾文本-数据/凤凰网/凤凰民进党.xlsx').loc[:, ['字段2']]\n for d9 in df9['字段2']:\n d9 = str(d9)\n d9 = d9[0:7]\n if 'nan' not in d9:\n sum_list.append(d9)\n\n df11 = pd.read_excel('./台湾文本-数据/凤凰网/凤凰网两岸关系.xlsx').loc[:, ['字段2']]\n for d11 in df11['字段2']:\n d11 = str(d11)\n d11 = d11[0:7]\n if 'nan' not in d11:\n sum_list.append(d11)\n\n df14 = pd.read_excel('./台湾文本-数据/凤凰网/凤凰网和平统一.xlsx').loc[:, ['字段2']]\n for d14 in df14['字段2']:\n d14 = str(d14)\n d14 = d14[0:7]\n if 'nan' not in d14:\n sum_list.append(d14)\n\n\n d = {}\n for s in sum_list:\n d[s] = d.get(s, 0) + 1\n ls = list(d.items())\n ls.sort(key=lambda x: x[0], reverse=False)\n del ls[-18:]\n del ls[0:36]\n ls.append(('2021/01',36))\n return ls\n\ndef xinhua_sum():\n sum_list = []\n df = pd.read_excel('./台湾文本-数据/新华网/新华网两岸关系.xlsx').loc[:, ['日期时间']]\n for d in df['日期时间']:\n d = str(d)\n d = d.replace('新华网\\\\u2003\\\\t\\\\t\\\\t\\\\t', '')\n d = d[0:7]\n if 'nan' not in d:\n sum_list.append(d)\n\n\n df3 = pd.read_excel('./台湾文本-数据/新华网/新华网台海局势.xlsx').loc[:, ['newstime']]\n for d3 in df3['newstime']:\n d3 = str(d3)\n d3 = d3.replace('新华网\\\\u2003\\\\t\\\\t\\\\t\\\\t', '')\n d3 = d3[0:7]\n if 'nan' not in d3:\n sum_list.append(d3)\n\n df4 = pd.read_excel('./台湾文本-数据/新华网/新华网台湾政治.xlsx').loc[:, ['newstime']]\n for d4 in df4['newstime']:\n d4 = str(d4)\n d4 = d4.replace('新华网\\\\u2003\\\\t\\\\t\\\\t\\\\t', '')\n d4 = d4[0:7]\n if 'nan' not in d4:\n sum_list.append(d4)\n\n df5 = pd.read_excel('./台湾文本-数据/新华网/新华网台湾海峡.xlsx').loc[:, ['newstime']]\n for d5 in df5['newstime']:\n d5 = str(d5)\n d5 = d5.replace('新华网\\\\u2003\\\\t\\\\t\\\\t\\\\t', '')\n d5 = d5[0:7]\n if 'nan' not in d5:\n sum_list.append(d5)\n\n df6 = pd.read_excel('./台湾文本-数据/新华网/新华网台湾牌.xlsx').loc[:, ['newstime']]\n for d6 in df6['newstime']:\n d6 = str(d6)\n d6 = d6.replace('新华网\\\\u2003\\\\t\\\\t\\\\t\\\\t', '')\n d6 = d6[0:7]\n if 'nan' not in d6:\n sum_list.append(d6)\n\n df7 = pd.read_excel('./台湾文本-数据/新华网/新华网台湾疫情.xlsx').loc[:, ['newstime']]\n for d7 in df7['newstime']:\n d7 = str(d7)\n d7 = d7.replace('新华网\\\\u2003\\\\t\\\\t\\\\t\\\\t', '')\n d7 = d7[0:7]\n if 'nan' not in d7:\n sum_list.append(d7)\n\n df8 = pd.read_excel('./台湾文本-数据/新华网/新华网台湾经济.xlsx').loc[:, ['newstime']]\n for d8 in df8['newstime']:\n d8 = str(d8)\n d8 = d8.replace('新华网\\\\u2003\\\\t\\\\t\\\\t\\\\t', '')\n d8 = d8[0:7]\n if 'nan' not in d8:\n sum_list.append(d8)\n\n df9 = pd.read_excel('./台湾文本-数据/新华网/新华网台独.xlsx').loc[:, ['newstime']]\n for d9 in df9['newstime']:\n d9 = str(d9)\n d9 = d9.replace('新华网\\\\u2003\\\\t\\\\t\\\\t\\\\t', '')\n d9 = d9[0:7]\n if 'nan' not in d9:\n sum_list.append(d9)\n\n df10 = pd.read_excel('./台湾文本-数据/新华网/新华网和平统一.xlsx').loc[:, ['newstime']]\n for d10 in df10['newstime']:\n d10 = str(d10)\n d10 = d10.replace('新华网\\\\u2003\\\\t\\\\t\\\\t\\\\t', '')\n d10 = d10[0:7]\n if 'nan' not in d10:\n sum_list.append(d10)\n\n df11 = pd.read_excel('./台湾文本-数据/新华网/新华网武统.xlsx').loc[:, ['newstime']]\n for d11 in df11['newstime']:\n d11 = str(d11)\n d11 = d11.replace('新华网\\\\u2003\\\\t\\\\t\\\\t\\\\t', '')\n d11 = d11[0:7]\n if 'nan' not in d11:\n sum_list.append(d11)\n\n df12 = pd.read_excel('./台湾文本-数据/新华网/新华网民进党.xlsx').loc[:, ['newstime']]\n for d12 in df12['newstime']:\n d12 = str(d12)\n d12 = d12.replace('新华网\\\\u2003\\\\t\\\\t\\\\t\\\\t', '')\n d12 = d12[0:7]\n if 'nan' not in d12:\n sum_list.append(d12)\n\n df13 = pd.read_excel('./台湾文本-数据/新华网/新华网美台.xlsx').loc[:, ['newstime']]\n for d13 in df13['newstime']:\n d13 = str(d13)\n d13 = 
d13.replace('新华网\\\\u2003\\\\t\\\\t\\\\t\\\\t', '')\n d13 = d13[0:7]\n if 'nan' not in d13:\n sum_list.append(d13)\n\n df14 = pd.read_excel('./台湾文本-数据/新华网/新华网蔡英文.xlsx').loc[:, ['newstime']]\n for d14 in df14['newstime']:\n d14 = str(d14)\n d14 = d14.replace('新华网\\\\u2003\\\\t\\\\t\\\\t\\\\t', '')\n d14 = d14[0:7]\n d14 = d14.replace('新华网\\\\u2003\\\\t\\\\t\\\\t\\\\t','')\n if 'nan' not in d14:\n sum_list.append(d14)\n\n d = {}\n for s in sum_list:\n d[s] = d.get(s, 0) + 1\n ls = list(d.items())\n ls.sort(key=lambda x: x[0], reverse=False)\n del ls[-13:]\n del ls[0:10]\n ls.append(('2020-02', 0))\n ls.append(('2020-03', 0))\n ls.append(('2020-04', 0))\n ls.append(('2020-05', 0))\n ls.append(('2020-06', 0))\n ls.append(('2020-07', 0))\n ls.append(('2020-10', 0))\n ls.sort(key=lambda x: x[0], reverse=False)\n return ls\n\n\ndef terrace_line():\n list1 = weibo_sum()\n list1.sort(key=lambda x: x[0], reverse=False)\n x_data1 = []\n y_data1 = []\n for l in list1:\n x = l[0].replace('年','-').replace('月','')\n x_data1.append(x)\n y_data1.append(l[1])\n\n list2 = huanqiuw_sum()\n x_data2 = []\n y_data2 = []\n for l in list2:\n x_data2.append(l[0])\n y_data2.append(l[1])\n y_data2.insert(0,0)\n y_data2.insert(0,0)\n y_data2.insert(0,0)\n list3 = zhongguotaiwanwang()\n x_data3 = []\n y_data3 = []\n list3.sort(key=lambda x: x[0], reverse=False)\n for l in list3:\n x_data3.append(l[0])\n y_data3.append(l[1])\n list4 = zhihu()\n x_data4 = []\n y_data4 = []\n for l in list4:\n x_data4.append(l[0])\n y_data4.append(l[1])\n y_data4.insert(1,0)\n\n list5 = ribao_sum()\n x_data5 = []\n y_data5 = []\n list5.sort(key=lambda x: x[0], reverse=False)\n for l in list5:\n x_data5.append(l[0])\n y_data5.append(l[1])\n\n\n list6 = zhongxing_sum()\n x_data6 = []\n y_data6 = []\n list6.sort(key=lambda x: x[0], reverse=False)\n for l in list6:\n x_data6.append(l[0])\n y_data6.append(l[1])\n\n\n list7 = jinri_sum()\n x_data7 = ['2020-01', '2020-02', '2020-03', '2020-04', '2020-05', '2020-06', '2020-07', '2020-08', '2020-09', '2020-10', '2020-11', '2020-12', '2021-01']\n y_data7 = [315, 255, 172, 84, 85, 30,42,36,64,14, 3, 141, 308]\n\n\n list8 = guangming_sum()\n x_data8 = []\n y_data8 = []\n list8.sort(key=lambda x: x[0], reverse=False)\n for l in list8:\n x_data8.append(l[0])\n y_data8.append(l[1])\n\n list9 = fenghuan_sum()\n x_data9 = []\n y_data9 = []\n list9.sort(key=lambda x: x[0], reverse=False)\n for l in list9:\n x = l[0].replace('/','-')\n x_data9.append(x)\n y_data9.append(l[1])\n\n list10 = xinhua_sum()\n x_data10 = []\n y_data10 = []\n list10.sort(key=lambda x: x[0], reverse=False)\n for l in list10:\n x_data10.append(l[0])\n y_data10.append(l[1])\n\n return x_data3,y_data1,y_data2,y_data3,y_data4,y_data5,y_data6,y_data7,y_data8,y_data9,y_data10\n# y_data2[0], y_data2[1],y_data2[2] = None, None,None\n# y_data4[1] = None\n# c = (\n# Line()\n# .add_xaxis(xaxis_data=x_data3)\n# .add_yaxis(\n# series_name=\"微博\",\n# symbol=\"emptyCircle\",\n# is_symbol_show=True,\n# color=\"#F2D7D5\",\n# y_axis=y_data1,\n# label_opts=opts.LabelOpts(is_show=False),\n# linestyle_opts=opts.LineStyleOpts(width=3)\n# )\n# .add_yaxis(\n# series_name=\"环球网\",\n# symbol=\"emptyCircle\",\n# is_symbol_show=True,\n# color=\"#C0392B\",\n# y_axis=y_data2,\n# label_opts=opts.LabelOpts(is_show=False),\n# linestyle_opts=opts.LineStyleOpts(width=3)\n# )\n# .add_yaxis(\n# series_name=\"中国台湾网\",\n# symbol=\"emptyCircle\",\n# is_symbol_show=True,\n# color=\"#641E16\",\n# y_axis=y_data3,\n# label_opts=opts.LabelOpts(is_show=False),\n# 
linestyle_opts=opts.LineStyleOpts(width=3)\n# )\n# .add_yaxis(\n# series_name=\"知乎\",\n# symbol=\"emptyCircle\",\n# is_symbol_show=True,\n# color=\"#9B59B6\",\n# y_axis=y_data4,\n# label_opts=opts.LabelOpts(is_show=False),\n# linestyle_opts=opts.LineStyleOpts(width=3)\n# )\n# .add_yaxis(\n# series_name=\"中国日报网\",\n# symbol=\"emptyCircle\",\n# is_symbol_show=True,\n# color=\"#512E5F\",\n# y_axis=y_data5,\n# label_opts=opts.LabelOpts(is_show=False),\n# linestyle_opts=opts.LineStyleOpts(width=3)\n# )\n# .add_yaxis(\n# series_name=\"中新网\",\n# symbol=\"emptyCircle\",\n# is_symbol_show=True,\n# color=\"#2980B9\",\n# y_axis=y_data6,\n# label_opts=opts.LabelOpts(is_show=False),\n# linestyle_opts=opts.LineStyleOpts(width=3)\n# )\n# .add_yaxis(\n# series_name=\"今日头条\",\n# symbol=\"emptyCircle\",\n# is_symbol_show=True,\n# color=\"#154360\",\n# y_axis=y_data7,\n# label_opts=opts.LabelOpts(is_show=False),\n# linestyle_opts=opts.LineStyleOpts(width=3)\n# )\n# .add_yaxis(\n# series_name=\"光明网\",\n# symbol=\"emptyCircle\",\n# is_symbol_show=True,\n# color=\"#3498DB\",\n# y_axis=y_data8,\n# label_opts=opts.LabelOpts(is_show=False),\n# linestyle_opts=opts.LineStyleOpts(width=3)\n# )\n# .add_yaxis(\n# series_name=\"凤凰网\",\n# symbol=\"emptyCircle\",\n# is_symbol_show=True,\n# color=\"#1ABC9C\",\n# y_axis=y_data9,\n# label_opts=opts.LabelOpts(is_show=False),\n# linestyle_opts=opts.LineStyleOpts(width=3)\n# )\n# .add_yaxis(\n# series_name=\"新华网\",\n# symbol=\"emptyCircle\",\n# is_symbol_show=True,\n# color=\"#0E6251\",\n# y_axis=y_data10,\n# label_opts=opts.LabelOpts(is_show=False),\n# linestyle_opts=opts.LineStyleOpts(width=3)\n# )\n# .set_global_opts(\n# title_opts=opts.TitleOpts(title=\"各大平台台海局势热度\"),\n# tooltip_opts=opts.TooltipOpts(trigger=\"axis\"),\n# yaxis_opts=opts.AxisOpts(\n# type_=\"value\",\n# axistick_opts=opts.AxisTickOpts(is_show=True),\n# splitline_opts=opts.SplitLineOpts(is_show=True),\n# ),\n# xaxis_opts=opts.AxisOpts(type_=\"category\", boundary_gap=False, axisline_opts=opts.AxisLineOpts(\n# is_on_zero=False, linestyle_opts=opts.LineStyleOpts(color=\"#d14a61\")\n# )),\n# )\n# .render(\"./templates/各大平台台海热度折线图.html\")\n# )\n#\n#\n# if __name__ == '__main__':\n# terrace_line()"
] | [
[
"pandas.read_excel"
],
[
"pandas.read_excel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
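Terrace_Line.py in the row above repeats the same read-truncate-filter-count pattern once per spreadsheet. A hedged sketch of how that pattern could be expressed once — the function name, parameters, and the commented usage path are illustrative assumptions, not code from the repository:

    from collections import Counter
    import pandas as pd

    def monthly_counts(files, width=7):
        """Count how many rows fall in each month across several spreadsheets.

        files : iterable of (path, column) pairs, e.g. the .xlsx files and the
                date columns iterated over in weibo_sum()/zhongxing_sum() above.
        width : number of leading characters of the date string to keep
                (7 -> 'YYYY-MM', 8 as used for the Weibo 'from' column).
        """
        counter = Counter()
        for path, column in files:
            series = pd.read_excel(path)[column].astype(str)
            # normalise 'YYYY年MM' style dates, then truncate to the month
            months = series.str.replace('年', '-', regex=False).str[:width]
            counter.update(m for m in months if 'nan' not in m)
        return sorted(counter.items())

    # hypothetical usage mirroring one of the functions above
    # print(monthly_counts([('./台湾文本-数据/中新网/中新网台独.xlsx', '日期时间')]))

Each of the *_sum() functions above then amounts to one such call with a different list of (path, column) pairs, followed by its own ad-hoc trimming of the sorted result.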
ybayle/SMC2017 | [
"c3131872d86d03abdd954c23e6d3cc224d9bc5f2"
] | [
"src/bayle.py"
] | [
"# -*- coding: utf-8 -*-\n#!/usr/bin/python\n#\n# Author Yann Bayle\n# E-mail [email protected]\n# License MIT\n# Created 01/12/2016\n# Updated 01/12/2016\n# Version 1.0.0\n#\n\n\"\"\"\nDescription of bayle.py\n======================\n\n0 Input the local extracted features from YAAFE\n 13 MFCC per frame\n 186 musical pieces as train set\n1 Computes delta and double delta (39 features per frame)\n2 Gather global mean (39 features per musical pieces)\n3 train on mfcc & deltas (39 feat/frame) to output global predictions\n4 Use global preds to compute song and instru n-grams and histogramm\n which add 70 feat/track\n lead to a total of 109 feat/track\n5 Fit on 109x186\n6 predict (or predict_proba) on 41491 track \n\n:Example:\n\nsource activate py27\nipython\nrun bayle.py -d /media/sf_github/yann/train/\n\n..todo::\n\n\"\"\"\n\nimport multiprocessing\nimport webbrowser\nimport utils\nimport numpy as np\nfrom sklearn.svm import SVC\nfrom sklearn import linear_model\nimport sys\nfrom functools import partial\nimport time\nfrom sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport re\nimport os\nimport sys\nimport csv\nimport time\nimport utils\nimport argparse\nfrom datetime import date\nfrom collections import Counter\nfrom matplotlib.cm import ScalarMappable\nfrom matplotlib.colors import Normalize\nfrom matplotlib.colorbar import ColorbarBase\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport joblib\nfrom sklearn.ensemble import RandomForestClassifier\nimport librosa\n\nimport os\nimport sys\nimport json\nimport math\nimport utils\nimport random\nimport joblib\nfrom pprint import pprint\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import precision_score, recall_score, f1_score\nfrom sklearn.model_selection import train_test_split, StratifiedKFold\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn import datasets\nfrom sklearn import svm\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.cross_validation import KFold, cross_val_score\nfrom statistics import mean, stdev\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier, GradientBoostingClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis, LinearDiscriminantAnalysis\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\nfrom sklearn import linear_model\nfrom sklearn.tree import DecisionTreeClassifier\n\nimport classify\n# import reproduce\n\ndef arr2str(data, separator=\",\"):\n return separator.join(str(x) for x in data)\n\ndef str2arr(data):\n return np.array(data).astype(np.float)\n\ndef read_gts(filename, separator=\"\\t\"):\n track_gts = {}\n with open(filename, \"r\") as filep:\n for line in filep:\n line = line.split(separator)\n track_gts[line[0]] = line[1][:-1]\n return track_gts\n\ndef match_feat_with_song_gt(dir_feat, dir_gts):\n \"\"\"Description of match_feat_gt\n\n Use groundtruth 
created by \n http://www.mathieuramona.com/wp/data/jamendo/ \n\n associate to local features\n csv 7041 lines yaafe\n lab 326.973 sec ramona\n Definition of YAAFE from \n http://yaafe.sourceforge.net/features.html\n \"\"\"\n utils.print_success(\"Matching local feat to song/instru groundtruths\")\n dir_feat = utils.abs_path_dir(dir_feat)\n dir_gts = utils.abs_path_dir(dir_gts)\n block_size = 1024.\n step_size = 512.\n fech = 22050.\n frame_size_ms = block_size / fech\n filenames = [fn for fn in os.listdir(dir_gts)]\n for index, filename in enumerate(filenames):\n utils.print_progress_start(str(index) + \"/\" + str(len(filenames)) + \" \" + filename)\n # gather groundtruths\n groundtruths = []\n with open(dir_gts + filename, \"r\") as filep:\n for row in filep:\n line = row.split(\" \")\n end = float(line[1])\n if \"no\" in line[2]:\n tag = \",i\\n\"\n else:\n tag = \",s\\n\"\n groundtruths.append([end, tag])\n gt_len = len(groundtruths)\n overflow = False\n gt_index = 0\n cpt = 0\n # Write features & groundtruths to file\n str_to_write = \"\"\n feat_fn = filename.split(\".\")[0]\n feat_fn += \".wav.mfcc.csv\"\n with open(dir_feat + feat_fn, \"r\") as filep:\n for index, line in enumerate(filep):\n # todo cleanup\n if gt_index < gt_len:\n if frame_size_ms * index > groundtruths[gt_index][0]:\n gt_index += 1\n if gt_index < gt_len:\n str_to_write += line[:-1] + groundtruths[gt_index][1]\n with open(dir_feat + feat_fn, \"w\") as filep:\n filep.write(str_to_write)\n utils.print_progress_end()\n\ndef match_feat_with_instru_gt(indir, outdir):\n \"\"\"Description of match_feat_gt\n\n Apply instru groundtruth to CCmixter and MedleyDB\n \"\"\"\n utils.print_success(\"Matching local features to instrumental groundtruths\")\n indir = utils.abs_path_dir(indir) + \"/\"\n outdir = utils.abs_path_dir(outdir) + \"/\"\n filenames = [fn for fn in os.listdir(indir)]\n for filename in filenames:\n outfile = open(outdir + filename, \"w\")\n with open(indir + filename, \"r\") as filep:\n for line in filep:\n outfile.write(line[:-1] + \" i\\n\")\n outfile.close()\n\ndef process_local_feat(indir, file_gts_track, outdir_local, out_feat_global, train):\n \"\"\"Description of process_local_feat\n Add delta and double delta to MFCCs\n \"\"\"\n \n utils.print_success(\"Processing local features\")\n \n # Preprocess arg\n indir = utils.abs_path_dir(indir)\n file_gts_track = utils.abs_path_file(file_gts_track)\n filelist = os.listdir(indir)\n outdir_local = utils.abs_path_dir(outdir_local)\n\n track_gts = {}\n with open(file_gts_track, \"r\") as filep:\n for line in filep:\n line = line.split(\",\")\n if train:\n index = line[0]\n else:\n index = line[0] + \".wav.mfcc.csv\"\n track_gts[index] = line[1][:-1]\n\n for index, filename in enumerate(filelist):\n utils.print_progress_start(str(index) + \"/\" + str(len(filelist)) + \" \" + filename)\n if filename in track_gts:\n mfccs = []\n groundtruths = []\n with open(indir + filename, \"r\") as filep:\n next(filep)\n next(filep)\n next(filep)\n next(filep)\n next(filep)\n for line in filep:\n line = line.split(\",\")\n mfccs.append(str2arr(line[:-1]))\n if train:\n groundtruths.append(line[-1][:-1])\n mfccs = np.array(mfccs)\n delta_mfcc = librosa.feature.delta(mfccs)\n delta2_mfcc = librosa.feature.delta(mfccs, order=2)\n # Write local features in outdir_local\n with open(outdir_local + filename, \"w\") as filep:\n gt_to_write = \"\"\n if \"i\" in track_gts[filename]:\n gt_to_write = \",i\"\n elif \"s\" in track_gts[filename]:\n # postpone frame groundtruth annotationa to 
another function later in the code\n gt_to_write = \"\"\n else:\n utils.print_warning(\"bayle.py line 231 local frame groundtruth undefined\")\n if train:\n for a, b, c, d in zip(mfccs, delta_mfcc, delta2_mfcc, groundtruths):\n filep.write(arr2str(a) + \",\" + arr2str(b) + \",\" + arr2str(c) + \",\" + d + \"\\n\")\n else:\n for a, b, c in zip(mfccs, delta_mfcc, delta2_mfcc):\n filep.write(arr2str(a) + \",\" + arr2str(b) + \",\" + arr2str(c) + gt_to_write + \"\\n\")\n # # Write global features in out_feat_global\n # with open(out_feat_global, \"a\") as filep:\n # filep.write(filename + \",\" +\n # arr2str(np.mean(mfccs, axis=0)) + \",\" + \n # arr2str(np.mean(delta_mfcc, axis=0)) + \",\" + \n # arr2str(np.mean(delta2_mfcc, axis=0)) + \",\" + \n # track_gts[filename] + \"\\n\")\n utils.print_progress_end()\n utils.print_success(\"Adding local groundtruths to Songs in Jamendo thanks to Ramona annotations\")\n match_feat_with_song_gt(dir_feat=outdir_local, dir_gts=\"groundtruths/frame_annot_jamendo_ramona/\")\n utils.print_success(\"Done\")\n\ndef column(matrix, i):\n return [row[i] for row in matrix]\n\ndef ngram_proba(local_pred, threshold=0.5, above_threshold=True):\n \"\"\"\n n-gram creation\n \"\"\"\n cpt_ngram = 0\n nb_ngram = 30\n ngrams = [0,] * nb_ngram\n for pred in local_pred:\n if above_threshold:\n condition = pred > threshold\n else:\n condition = pred <= threshold\n if condition:\n cpt_ngram += 1\n else:\n if cpt_ngram < nb_ngram:\n ngrams[cpt_ngram] += 1\n else:\n ngrams[nb_ngram-1] += 1\n cpt_ngram = 0\n nb_tag_sing = float(sum(ngrams))\n if nb_tag_sing > 0.:\n ngrams = [float(x) / nb_tag_sing for x in ngrams]\n # utils.print_error(ngrams)\n return ','.join(str(x) for x in ngrams)\n\ndef ngram(preds, tag):\n \"\"\"Description of ngram\n \"\"\"\n cpt_ngram = 0\n nb_ngram = 30\n ngrams = [0,] * nb_ngram\n for pred in preds:\n if tag in pred:\n cpt_ngram += 1\n else:\n if cpt_ngram < nb_ngram:\n ngrams[cpt_ngram] += 1\n else:\n ngrams[nb_ngram-1] += 1\n cpt_ngram = 0\n nb_tag = float(sum(ngrams))\n if nb_tag > 0.:\n ngrams = [float(x) / nb_tag for x in ngrams]\n return ','.join(str(x) for x in ngrams)\n\ndef create_track_feat_testset(folder, infile, outfile, model_file, train=False):\n \"\"\"Description of create_track_feat_testset\n Need to read each test file\n compute deltas on mfcc in the ram\n predict and predict_proba \n generate song and instru ngrams and histograms\n Add the mean of mfcc+deltas\n append 109 features vector in feat_track/feat_test.csv\n \"\"\"\n\n utils.print_success(\"Create track feat testset\")\n folder = utils.abs_path_dir(folder)\n infile = utils.abs_path_file(infile)\n clf = joblib.load(model_file)\n track_gts = read_gts(infile, separator=\",\")\n for index, filename in enumerate(track_gts):\n utils.print_progress_start(str(index+1) + \"/\" + str(len(track_gts)) + \" \" + filename)\n mfccs = []\n mfccs_1 = []\n extension = \"\"\n if train:\n extension = \"\"\n else:\n extension += \"_audio_full_mono_22k\"\n extension += \".wav.mfcc.csv\"\n with open(folder + filename + extension, \"r\") as filep:\n if train:\n next(filep)\n next(filep)\n next(filep)\n next(filep)\n next(filep)\n for line in filep:\n if train:\n line = line.split(\",\")\n else:\n line = line.split(\" \")\n mfccs_1.append(str2arr(line[:-1]))\n # if train:\n # mfccs.append(str2arr(line[:-1]))\n # else:\n # mfccs.append(str2arr(line[0:]))\n mfccs = np.array(mfccs_1)\n delta_mfcc = librosa.feature.delta(mfccs)\n delta2_mfcc = librosa.feature.delta(mfccs, order=2)\n tmp = np.append(mfccs, 
delta_mfcc, axis=1)\n features = np.append(tmp, delta2_mfcc, axis=1)\n preds_proba = clf.predict_proba(features)\n\n # Histogramm\n nb_hist_class = 10\n numbers = column(preds_proba, 0)\n hist_pred = np.histogram(numbers, nb_hist_class)\n hist_pred_norm = hist_pred[0] / float(sum(hist_pred[0]))\n\n ngram_threshold = 0.5\n song_ngram_proba = ngram_proba(local_pred=numbers, threshold=ngram_threshold, above_threshold=True)\n instru_ngram_proba = ngram_proba(local_pred=numbers, threshold=ngram_threshold, above_threshold=False)\n \n preds = clf.predict(features)\n song_ngram = ngram(preds, \"s\")\n instru_ngram = ngram(preds, \"i\")\n\n with open(outfile, \"a\") as filep:\n filep.write(filename[:12] + \",\" +\n arr2str(np.mean(mfccs, axis=0)) + \",\" + \n arr2str(np.mean(delta_mfcc, axis=0)) + \",\" + \n arr2str(np.mean(delta2_mfcc, axis=0)) + \",\" + \n arr2str(hist_pred_norm) + \",\" +\n song_ngram_proba + \",\" + \n instru_ngram_proba + \",\" +\n song_ngram + \",\" + \n instru_ngram + \",\" +\n track_gts[filename] + \"\\n\")\n utils.print_progress_end()\n\ndef figures1bd(indir, file_gts_track):\n \"\"\"Description of figures1bd\n\n infile is formated like:\n /media/sf_github/yann/train/01 - 01 Les Jardins Japonais.wav.mfcc.csv\n feat1 feat2 ... featn tag1\n feat1 feat2 ... featn tag2\n ...\n feat1 feat2 ... featn tag2\n\n 0 Input the local extracted features from YAAFE\n 13 MFCC per frame\n 186 musical pieces as train set\n 1 Computes delta and double delta (39 features per frame)\n 2 Gather global mean (39 features per musical pieces)\n 3 train on mfcc & deltas (39 feat/frame) to output global predictions\n 4 Use global preds to compute song and instru n-grams and histogramm\n which add 70 feat/track\n lead to a total of 109 feat/track\n 5 Fit on 109x186\n 6 predict (or predict_proba) on 41491 track \n \"\"\"\n\n # Preprocess arg\n indir = utils.abs_path_dir(indir)\n file_gts_track = utils.abs_path_file(file_gts_track)\n feat_frame_train = \"feat_frame_train/\"\n utils.create_dir(feat_frame_train)\n feat_frame_test = \"feat_frame_test/\"\n utils.create_dir(feat_frame_test)\n outdir_global = \"feat_track/\"\n utils.create_dir(outdir_global)\n feat_train = outdir_global + \"train.csv\"\n feat_test = outdir_global + \"test.csv\"\n models_dir = \"models/\"\n utils.create_dir(models_dir)\n loc_feat_testset_dirpath = \"/media/sf_DATA/Datasets/Simbals/yaafe/results/processed/\"\n filelist_test = \"filelist_test.tsv\"\n filelist_train = \"filelist_train.tsv\"\n models_global = \"models_track/\"\n utils.create_dir(models_global)\n\n # process_local_feat(indir, file_gts_track, feat_frame_train, feat_train, train=True) \n # classify.create_models(outdir=models_dir, train_dir=feat_frame_train, separator=\",\")\n # create_track_feat_testset(indir, filelist_train, feat_train, train=True)\n\n # 15h28m44s to 19h08m28s Done in 13184117ms\n # create_track_feat_testset(loc_feat_testset_dirpath, filelist_test, feat_test) \n\n # classify.create_models(outdir=models_global, train_file=feat_train)\n # classify.test_models_parallel(\n # models_dir=models_global,\n # out_dir=\"results/\",\n # test_file=feat_test)\n \n # Display results\n reproduce.plot_results(\"results/\")\n\ndef figure1a(file_gts_track):\n \"\"\"Description of figure1a\n \"\"\"\n outdir_global = \"feat_track/\"\n utils.create_dir(outdir_global)\n feat_train = outdir_global + \"train.csv\"\n\n # process_local_feat(indir, file_gts_track, feat_frame_train, feat_train, train=True) \n classify.cross_validation(feat_train, n_folds=5)\n \ndef 
figure2(indir, file_gts_track):\n \"\"\"Description of figure2\n\n Method to maintain 100 percent of precision and to maximize recall.\n \"\"\"\n pass\n\n\ndef read_file_bayle(filename):\n \"\"\"Description of read_file\n\n train/test example line:\n filename,feat1,feat2,...,featn,tag\n \"\"\"\n filename = utils.abs_path_file(filename)\n filenames = []\n groundtruths = []\n features = []\n with open(filename, \"r\") as filep:\n for row in filep:\n line = row.split(\",\")\n filenames.append(line[0])\n features.append([float(i) for i in line[1:-1]])\n gt = line[-1]\n while \"\\n\" in gt or \"\\r\" in gt:\n gt = gt [:-1]\n groundtruths.append(gt)\n return filenames, features, groundtruths\n\ndef column(matrix, i):\n return [row[i] for row in matrix]\n\ndef process_results(train, test):\n train_fn, train_features, train_groundtruths = read_file_bayle(train)\n test_fn, test_features, test_groundtruths = read_file_bayle(test)\n step = 0.1\n # for weight in np.arange(0.0, 1.0, step):\n # inside_clf = RandomForestClassifier(random_state=2)\n inside_clf = DecisionTreeClassifier(random_state=2)\n # class_weight={\"i\":weight, \"s\":1-weight})\n clf = AdaBoostClassifier(\n random_state=2,#with 4 98%precision song class\n base_estimator=inside_clf)\n clf.fit(train_features, train_groundtruths)\n predictions = clf.predict(test_features)\n print(\"Accuracy \" + str(accuracy_score(test_groundtruths, predictions)))\n print(\"F-Measure \" + str(f1_score(test_groundtruths, predictions, average=\"weighted\")))\n print(\"Precision \" + str(precision_score(test_groundtruths, predictions, average=None)))\n print(\"Recall \" + str(recall_score(test_groundtruths, predictions, average=None)))\n print(\"F-Measure \" + str(f1_score(test_groundtruths, predictions, average=None)))\n\n # predictions = [1.0 if i==\"s\" else 0.0 for i in predictions]\n predictions = column(clf.predict_proba(test_features), 0)\n outdir = \"predictions/\"\n with open(outdir + \"Bayle.csv\", \"w\") as filep:\n for name, pred in zip(test_fn, predictions):\n filep.write(name + \",\" + str(1.0 - float(pred)) + \"\\n\")\n\ndef new_algo_final(indir, file_gts_track):\n utils.print_success(\"Approx. 
time ~6 hours.\")\n # Preprocess arg\n indir = utils.abs_path_dir(indir)\n file_gts_track = utils.abs_path_file(file_gts_track)\n dir_tmp = utils.create_dir(utils.create_dir(\"src/tmp\") + \"bayle\")\n feat_frame_train = utils.create_dir(dir_tmp + \"feat_frame_train\")\n feat_frame_test = utils.create_dir(dir_tmp + \"feat_frame_test\")\n outdir_global = utils.create_dir(dir_tmp + \"feat_track\")\n feat_train = outdir_global + \"train.csv\"\n feat_test = outdir_global + \"test.csv\"\n models_dir = utils.create_dir(dir_tmp + \"models\")\n loc_feat_testset_dirpath = \"features/database2/\"\n filelist_train = \"groundtruths/database1.csv\"\n filelist_test = \"groundtruths/database2.csv\"\n models_global = utils.create_dir(dir_tmp + \"models_track\")\n\n process_local_feat(indir, file_gts_track, outdir_local=feat_frame_train, out_feat_global=feat_train, train=False)\n classify.create_models(outdir=models_dir, train_dir=feat_frame_train, separator=\",\", classifiers=\"RandomForest\")\n\n \"\"\"\n Create features at track scale for the train set\n Features: MFCC + Delta + Double Delta + ngrams + hist\n \"\"\"\n model_file = \"src/tmp/bayle/models/RandomForest/RandomForest.pkl\"\n model_file = \"/media/sf_DATA/ReproducibleResearchIEEE2017/src/tmp/bayle/models/RandomForest/RandomForest.pkl\"\n create_track_feat_testset(indir, filelist_train, feat_train, model_file, train=True)\n\n # # 15h28m44s to 19h08m28s Done in 13184117ms\n create_track_feat_testset(loc_feat_testset_dirpath, filelist_test, feat_test, model_file) \n\n classify.create_models(outdir=models_global, train_file=feat_train, classifiers=\"RandomForest\")\n process_results(feat_train, feat_test)\n\ndef main():\n begin = int(round(time.time() * 1000))\n PARSER = argparse.ArgumentParser(description=\"Bayle et al. (2017) algorithm\")\n PARSER.add_argument(\n \"-d\",\n \"--indir\",\n help=\"input dir containing all local features extracted by YAAFE\",\n type=str,\n default=\"/media/sf_github/yann/train/\",\n metavar=\"indir\")\n PARSER.add_argument(\n \"-i\",\n \"--gts\",\n help=\"input file containing all track groundtruths\",\n type=str,\n default=\"filelist_train.tsv\")\n\n indir = \"features/database1/\"\n file_gts_track = \"groundtruths/database1.csv\"\n new_algo_final(indir, file_gts_track)\n # figure1a(PARSER.parse_args().gts)\n # figures1bd(PARSER.parse_args().indir, PARSER.parse_args().gts)\n # figure2(PARSER.parse_args().indir, PARSER.parse_args().gts)\n \n # Local feat processing\n\n # Global feat processing\n # bayle_fig3()\n\n utils.print_success(\"Done in \" + str(int(round(time.time() * 1000)) - begin) + \"ms\")\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"sklearn.metrics.recall_score",
"sklearn.metrics.precision_score",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.ensemble.AdaBoostClassifier",
"numpy.append",
"numpy.mean",
"sklearn.metrics.f1_score",
"numpy.array",
"numpy.histogram",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
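The 30-dimensional "n-gram" features that bayle.py appends per track are normalised run-length histograms of the frame-level predictions. A condensed restatement of ngram() for illustration — `run_length_histogram` and the toy frame sequence are mine; the 30-bin size matches the original:

    from collections import Counter

    def run_length_histogram(preds, tag, nb_bins=30):
        """Condensed version of the ngram() feature in bayle.py above.

        Counts the lengths of consecutive runs of `tag` in the frame-level
        predictions (a run is only recorded when it is broken by another
        label, exactly as in the original), clips lengths to nb_bins - 1,
        and normalises the counts to sum to one.
        """
        counts = Counter()
        run = 0
        for pred in preds:
            if tag in pred:
                run += 1
            else:
                counts[min(run, nb_bins - 1)] += 1
                run = 0
        total = sum(counts.values())
        return [counts.get(k, 0) / total if total else 0.0
                for k in range(nb_bins)]

    # toy frame predictions: 's' = sung frame, 'i' = instrumental frame
    frames = ['s', 's', 'i', 's', 's', 's', 'i', 'i', 's', 'i']
    print(run_length_histogram(frames, 's')[:5])  # bins 0..4 of the 30-dim feature

As in the original helper, a run that is still open when the sequence ends is not recorded, and consecutive frames of the other class contribute to bin 0.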
pjchungmd/StemmlerProject | [
"bffcfbf5dc9171f0d62160f058306a2bb6cce2eb"
] | [
"outputs.py"
] | [
"import re\nimport pickle\nimport matplotlib.pyplot as plt\n\nfrom icd9cms.icd9 import search\n\nwith open(\"../data/pcode_dict.txt\", \"rb\") as fp: \n icd9_pcode_dict = pickle.load(fp)\n\ndef plot_scores(score_df, model, bins=100, ft='X_scores'):\n \n #rng = [score_df[ft].min(), score_df[ft].mean() + 2*score_df[ft].std()]\n rng = [score_df[ft].quantile(0.05), score_df[ft].quantile(0.95)]\n \n y = score_df[score_df['Y_test']==1][ft]\n d = y.quantile(0.75)\n #d = model.offset_[0]\n \n plt.hist(score_df[score_df['Y_test']==1][ft], bins=bins, range=rng, alpha=0.7, label='Generated')\n plt.hist(score_df[score_df['Y_test']==0][ft], bins=bins, range=rng, alpha=0.7, label='NTDB')\n #plt.axvline(x=d, color='k', linestyle='--')\n plt.axvline(x=d, color='k', linestyle='--')\n plt.legend(loc='upper right')\n plt.show()\n \n return rng, d\n\n\ndef print_seq_dsc(seq, pcodes=False):\n cds = seq.split()\n tp = 'START'\n for c in cds:\n if c == '<START>':\n print('=' * 9 + ' START ' + '=' * 9)\n elif c == '<DSTART>':\n tp = 'DX'\n print('=' * 10 + ' DXS ' + '=' * 10)\n elif c == '<PSTART>':\n if pcodes:\n return\n tp = 'PR'\n print('=' * 10 + ' PRS ' + '=' * 10)\n elif c == '<END>':\n print('=' * 10 + ' END ' + '=' * 10)\n return\n elif c == '<UNK>':\n print(f'{c}:Unknown Code')\n else:\n if tp == 'DX':\n d = search(c)\n if d:\n print(d)\n elif tp == 'PR':\n pr_cd = re.sub(r'\\.', '', c)\n if pr_cd in icd9_pcode_dict:\n print(f\"{pr_cd}:{icd9_pcode_dict[pr_cd]}\")\n else:\n print(f'{pr_cd}:Unknown Code')\n else:\n continue\n \n\ndef string_seq_dsc(seq, pcodes=False):\n cds = seq.split()\n tp = 'START'\n \n s = ''\n for c in cds:\n if c == '<START>':\n s += '=' * 9 + ' START ' + '=' * 9 + '\\n'\n elif c == '<DSTART>':\n tp = 'DX'\n s += '=' * 10 + ' DXS ' + '=' * 10 + '\\n'\n elif c == '<PSTART>':\n if pcodes:\n return s\n tp = 'PR'\n s += '=' * 10 + ' PRS ' + '=' * 10 + '\\n'\n elif c == '<END>':\n s += '=' * 10 + ' END ' + '=' * 10 + '\\n'\n return s\n elif c == '<UNK>':\n s += f'{c}:Unknown Code' + '\\n'\n else:\n if tp == 'DX':\n d = search(c)\n if d:\n s += str(d) + '\\n'\n elif tp == 'PR':\n pr_cd = re.sub(r'\\.', '', c)\n if pr_cd in icd9_pcode_dict:\n s += f\"{pr_cd}:{icd9_pcode_dict[pr_cd]}\" + '\\n'\n else:\n s += f'{pr_cd}:Unknown Code' + '\\n'\n elif tp == 'START':\n if c == 'E812.0':\n s += \"E812.0:Other motor vehicle traffic accident involving collision with motor vehicle injuring driver of motor vehicle other than motorcycle\\n\"\n if c == 'E885.9':\n s += \"E885.9:Accidental fall from other slipping tripping or stumbling\\n\"\n if c == 'E966.0':\n s += \"E966.0:Assault by cutting and piercing instrument\\n\"\n if c == 'E965.4':\n s += \"E965.4:Assault by firearms and explosives, Other and unspecified firearm\\n\"\n if c == 'E924.0':\n s += \"E924.0:Accident caused by hot substance or object, caustic or corrosive material, and steam, Hot liquids and vapors, including steam\\n\"\n else:\n continue"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
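plot_scores() in the row above expects a DataFrame with a score column (default 'X_scores') and a binary 'Y_test' label column. A synthetic stand-in that satisfies that schema — all values below are invented, and the call itself is left commented because importing outputs.py also requires the icd9cms package and the pickled procedure-code dictionary, which are not reproduced here:

    import numpy as np
    import pandas as pd

    # Synthetic stand-in for the frame plot_scores() consumes: one score column
    # ('X_scores') and a binary label column ('Y_test', 1 = generated, 0 = NTDB).
    gen = np.random.default_rng(0)
    score_df = pd.DataFrame({
        "X_scores": np.concatenate([gen.normal(0.0, 1.0, 500),    # class 0 scores
                                    gen.normal(1.5, 1.0, 500)]),  # class 1 scores
        "Y_test": np.concatenate([np.zeros(500, dtype=int),
                                  np.ones(500, dtype=int)]),
    })
    # plot_range, threshold = plot_scores(score_df, model=None)

With such a frame, plot_scores overlays the two histograms between the 5th and 95th percentiles of the scores and marks the 75th percentile of the label-1 scores with a dashed line; the `model` argument is unused in the version shown (the offset_-based threshold is commented out), so passing None works.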
kwadwo00/ceviche | [
"a1ee155304c4679b262e4fdf8c8a28bc4d060ec8"
] | [
"ceviche/utils.py"
] | [
"import numpy as np\nimport scipy.sparse as sp\nimport copy\nimport autograd.numpy as npa\nimport matplotlib.pylab as plt\nfrom autograd.extend import primitive, vspace, defvjp, defjvp\n\n\"\"\" Useful functions \"\"\"\n\n\n\"\"\" ==================== SPARSE MATRIX UTILITIES ==================== \"\"\"\n\ndef make_sparse(entries, indices, shape):\n \"\"\"Construct a sparse csr matrix\n Args:\n entries: numpy array with shape (M,) giving values for non-zero\n matrix entries.\n indices: numpy array with shape (2, M) giving x and y indices for\n non-zero matrix entries.\n shape: shape of resulting matrix\n Returns:\n sparse, complex, matrix with specified values\n \"\"\" \n coo = sp.coo_matrix((entries, indices), shape=shape, dtype=npa.complex128)\n return coo.tocsr()\n\ndef get_entries_indices(csr_matrix):\n # takes sparse matrix and returns the entries and indeces in form compatible with 'make_sparse'\n shape = csr_matrix.shape\n coo_matrix = csr_matrix.tocoo()\n entries = csr_matrix.data\n cols = coo_matrix.col\n rows = coo_matrix.row\n indices = npa.vstack((rows, cols))\n return entries, indices\n\ndef transpose_indices(indices):\n # returns the transposed indices for transpose sparse matrix creation\n return npa.flip(indices, axis=0)\n\ndef block_4(A, B, C, D):\n \"\"\" Constructs a big matrix out of four sparse blocks\n returns [A B]\n [C D]\n \"\"\"\n left = sp.vstack([A, C])\n right = sp.vstack([B, D])\n return sp.hstack([left, right]) \n\ndef make_IO_matrices(indices, N):\n \"\"\" Makes matrices that relate the sparse matrix entries to their locations in the matrix\n The kth column of I is a 'one hot' vector specifing the k-th entries row index into A\n The kth column of J is a 'one hot' vector specifing the k-th entries columnn index into A\n O = J^T is for notational convenience.\n Armed with a vector of M entries 'a', we can construct the sparse matrix 'A' as:\n A = I @ diag(a) @ O\n where 'diag(a)' is a (MxM) matrix with vector 'a' along its diagonal.\n In index notation:\n A_ij = I_ik * a_k * O_kj\n In an opposite way, given sparse matrix 'A' we can strip out the entries `a` using the IO matrices as follows:\n a = diag(I^T @ A @ O^T)\n In index notation:\n a_k = I_ik * A_ij * O_kj\n \"\"\"\n M = indices.shape[1] # number of indices in the matrix\n entries_1 = npa.ones(M) # M entries of all 1's\n ik, jk = indices # separate i and j components of the indices\n indices_I = npa.vstack((ik, npa.arange(M))) # indices into the I matrix\n indices_J = npa.vstack((jk, npa.arange(M))) # indices into the J matrix\n I = make_sparse(entries_1, indices_I, shape=(N, M)) # construct the I matrix\n J = make_sparse(entries_1, indices_J, shape=(N, M)) # construct the J matrix\n O = J.T # make O = J^T matrix for consistency with my notes.\n return I, O\n\n\n\"\"\" ==================== DATA GENERATION UTILITIES ==================== \"\"\"\n\ndef make_rand(N):\n # makes a random vector of size N with elements between -0.5 and 0.5\n return npa.random.random(N) - 0.5\n\ndef make_rand_complex(N):\n # makes a random complex-valued vector of size N with re and im parts between -0.5 and 0.5\n return make_rand(N) + 1j * make_rand(N)\n\ndef make_rand_indeces(N, M):\n # make M random indeces into an NxN matrix\n return npa.random.randint(low=0, high=N, size=(2, M))\n\ndef make_rand_entries_indices(N, M):\n # make M random indeces and corresponding entries\n entries = make_rand_complex(M)\n indices = make_rand_indeces(N, M)\n return entries, indices\n\ndef make_rand_sparse(N, M):\n # make a random sparse matrix 
of shape '(N, N)' and 'M' non-zero elements\n entries, indices = make_rand_entries_indices(N, M)\n return make_sparse(entries, indices, shape=(N, N))\n\ndef make_rand_sparse_density(N, density=1):\n \"\"\" Makes a sparse NxN matrix, another way to do it with density \"\"\"\n return sp.random(N, N, density=density) + 1j * sp.random(N, N, density=density)\n\n\n\"\"\" ==================== NUMERICAL DERIVAITVES ==================== \"\"\"\n\ndef der_num(fn, arg, index, delta):\n # numerical derivative of `fn(arg)` with respect to `index` into arg and numerical step size `delta`\n arg_i_for = arg.copy()\n arg_i_back = arg.copy()\n arg_i_for[index] += delta / 2\n arg_i_back[index] -= delta / 2\n df_darg = (fn(arg_i_for) - fn(arg_i_back)) / delta\n return df_darg\n\ndef grad_num(fn, arg, delta=1e-6):\n # take a (complex) numerical gradient of function 'fn' with argument 'arg' with step size 'delta'\n N = arg.size\n grad = npa.zeros((N,), dtype=npa.complex128)\n f0 = fn(arg)\n for i in range(N):\n grad[i] = der_num(fn, arg, i, delta) # real part\n grad[i] += der_num(fn, arg, i, 1j * delta) # imaginary part\n return grad\n\ndef jac_num(fn, arg, step_size=1e-7):\n \"\"\" DEPRICATED: use 'numerical' in jacobians.py instead\n numerically differentiate `fn` w.r.t. its argument `arg` \n `arg` can be a numpy array of arbitrary shape\n `step_size` can be a number or an array of the same shape as `arg` \"\"\"\n\n in_array = float_2_array(arg).flatten()\n out_array = float_2_array(fn(arg)).flatten()\n\n m = in_array.size\n n = out_array.size\n shape = (m, n)\n jacobian = np.zeros(shape)\n\n for i in range(m):\n input_i = in_array.copy()\n input_i[i] += step_size\n arg_i = input_i.reshape(in_array.shape)\n output_i = fn(arg_i).flatten()\n grad_i = (output_i - out_array) / step_size\n jacobian[i, :] = get_value(grad_i)\n\n return jacobian\n\n\"\"\" ==================== FDTD AND FDFD UTILITIES ==================== \"\"\"\n\ndef grid_center_to_xyz(Q_mid, averaging=True):\n \"\"\" Computes the interpolated value of the quantity Q_mid felt at the Ex, Ey, Ez positions of the Yee latice\n Returns these three components\n \"\"\"\n\n # initialize\n Q_xx = copy.copy(Q_mid)\n Q_yy = copy.copy(Q_mid)\n Q_zz = copy.copy(Q_mid)\n\n # if averaging, set the respective xx, yy, zz components to the midpoint in the Yee lattice.\n if averaging:\n\n # get the value from the middle of the next cell over\n Q_x_r = npa.roll(Q_mid, shift=1, axis=0)\n Q_y_r = npa.roll(Q_mid, shift=1, axis=1)\n Q_z_r = npa.roll(Q_mid, shift=1, axis=2)\n\n # average with the two middle values\n Q_xx = (Q_mid + Q_x_r)/2\n Q_yy = (Q_mid + Q_y_r)/2\n Q_zz = (Q_mid + Q_z_r)/2\n\n return Q_xx, Q_yy, Q_zz\n\n\ndef grid_xyz_to_center(Q_xx, Q_yy, Q_zz):\n \"\"\" Computes the interpolated value of the quantitys Q_xx, Q_yy, Q_zz at the center of Yee latice\n Returns these three components\n \"\"\"\n\n # compute the averages\n Q_xx_avg = (Q_xx.astype('float') + npa.roll(Q_xx, shift=1, axis=0))/2\n Q_yy_avg = (Q_yy.astype('float') + npa.roll(Q_yy, shift=1, axis=1))/2\n Q_zz_avg = (Q_zz.astype('float') + npa.roll(Q_zz, shift=1, axis=2))/2\n\n return Q_xx_avg, Q_yy_avg, Q_zz_avg\n\ndef vec_zz_to_xy(info_dict, vec_zz, grid_averaging=True):\n \"\"\" does grid averaging on z vector vec_zz \"\"\"\n arr_zz = vec_zz.reshape(info_dict['shape'])[:,:,None]\n arr_xx, arr_yy, _ = grid_center_to_xyz(arr_zz, averaging=grid_averaging)\n vec_xx, vec_yy = arr_xx.flatten(), arr_yy.flatten()\n return vec_xx, vec_yy\n\n\"\"\" ===================== TESTING AND DEBUGGING 
===================== \"\"\"\n\ndef float_2_array(x):\n if not isinstance(x, np.ndarray):\n return np.array([x])\n else:\n return x\n\ndef reshape_to_ND(arr, N):\n \"\"\" Adds dimensions to arr until it is dimension N\n \"\"\"\n\n ND = len(arr.shape)\n if ND > N:\n raise ValueError(\"array is larger than {} dimensional, given shape {}\".format(N, arr.shape))\n extra_dims = (N - ND) * (1,)\n return arr.reshape(arr.shape + extra_dims)\n\n\n\"\"\" ========================= TOOLS USEFUL FOR WORKING WITH AUTOGRAD ====================== \"\"\"\n\n\ndef get_value(x):\n if type(x) == npa.numpy_boxes.ArrayBox:\n return x._value\n else:\n return x\n\nget_value_arr = np.vectorize(get_value)\n\n\ndef get_shape(x):\n \"\"\" Gets the shape of x, even if it is not an array \"\"\"\n if isinstance(x, float) or isinstance(x, int):\n return (1,)\n elif isinstance(x, tuple) or isinstance(x, list):\n return (len(x),)\n else:\n return vspace(x).shape\n\n\ndef vjp_maker_num(fn, arg_inds, steps):\n \"\"\" Makes a vjp_maker for the numerical derivative of a function `fn`\n w.r.t. argument at position `arg_ind` using step sizes `steps` \"\"\"\n\n def vjp_single_arg(ia):\n arg_ind = arg_inds[ia]\n step = steps[ia]\n\n def vjp_maker(fn_out, *args):\n shape = args[arg_ind].shape\n num_p = args[arg_ind].size\n step = steps[ia]\n\n def vjp(v):\n\n vjp_num = np.zeros(num_p)\n for ip in range(num_p):\n args_new = list(args)\n args_rav = args[arg_ind].flatten()\n args_rav[ip] += step\n args_new[arg_ind] = args_rav.reshape(shape)\n dfn_darg = (fn(*args_new) - fn_out)/step\n vjp_num[ip] = np.sum(v * dfn_darg)\n\n return vjp_num\n\n return vjp\n\n return vjp_maker\n\n vjp_makers = []\n for ia in range(len(arg_inds)):\n vjp_makers.append(vjp_single_arg(ia=ia))\n\n return tuple(vjp_makers)\n\n\n\"\"\" =================== PLOTTING AND MEASUREMENT OF FDTD =================== \"\"\"\n\n\ndef aniplot(F, source, steps, component='Ez', num_panels=10):\n \"\"\" Animate an FDTD (F) with `source` for `steps` time steps.\n display the `component` field components at `num_panels` equally spaced.\n \"\"\"\n F.initialize_fields()\n\n # initialize the plot\n f, ax_list = plt.subplots(1, num_panels, figsize=(20*num_panels,20))\n Nx, Ny, _ = F.eps_r.shape\n ax_index = 0\n\n # fdtd time loop\n for t_index in range(steps):\n fields = F.forward(Jz=source(t_index))\n\n # if it's one of the num_panels panels\n if t_index % (steps // num_panels) == 0:\n\n if ax_index < num_panels: # extra safety..sometimes tries to access num_panels-th elemet of ax_list, leading to error\n\n print('working on axis {}/{} for time step {}'.format(ax_index, num_panels, t_index))\n\n # grab the axis\n ax = ax_list[ax_index]\n\n # plot the fields\n im_t = ax.pcolormesh(np.zeros((Nx, Ny)), cmap='RdBu')\n max_E = np.abs(fields[component]).max()\n im_t.set_array(fields[component][:, :, 0].ravel().T)\n im_t.set_clim([-max_E / 2.0, max_E / 2.0])\n ax.set_title('time = {} seconds'.format(F.dt*t_index))\n\n # update the axis\n ax_index += 1\n plt.show()\n\n\ndef measure_fields(F, source, steps, probes, component='Ez'):\n \"\"\" Returns a time series of the measured `component` fields from FDFD `F`\n driven by `source and measured at `probe`.\n \"\"\"\n F.initialize_fields()\n if not isinstance(probes, list):\n probes = [probes]\n N_probes = len(probes)\n measured = np.zeros((steps, N_probes))\n for t_index in range(steps):\n if t_index % (steps//20) == 0:\n print('{:.2f} % done'.format(float(t_index)/steps*100.0))\n fields = F.forward(Jz=source(t_index))\n for probe_index, 
probe in enumerate(probes):\n field_probe = np.sum(fields[component] * probe)\n measured[t_index, probe_index] = field_probe\n return measured\n\n\ndef imarr(arr):\n \"\"\" puts array 'arr' into form ready to plot \"\"\"\n arr_value = get_value(arr)\n arr_plot = arr_value.copy()\n if len(arr.shape) == 3:\n arr_plot = arr_plot[:,:,0]\n return np.flipud(arr_plot.T)\n\n\n\"\"\" ====================== FOURIER TRANSFORMS ======================\"\"\"\n\nfrom autograd.extend import primitive, defjvp\nfrom numpy.fft import fft, fftfreq\n\n\n@primitive\ndef my_fft(x): \n \"\"\" \n Wrapper for numpy's FFT, so I can add a primitive to it\n FFT(x) is like a DFT matrix (D) dot with x\n \"\"\"\n return np.fft.fft(x)\n\n\ndef fft_grad(g, ans, x):\n \"\"\" \n Define the jacobian-vector product of my_fft(x)\n The gradient of FFT times g is the vjp\n ans = fft(x) = D @ x\n jvp(fft(x))(g) = d{fft}/d{x} @ g\n = D @ g\n Therefore, it looks like the FFT of g\n \"\"\"\n return np.fft.fft(g)\n\ndefjvp(my_fft, fft_grad)\n\n\ndef get_spectrum(series, dt):\n \"\"\" Get FFT of series \"\"\"\n\n steps = len(series)\n times = np.arange(steps) * dt\n\n # reshape to be able to multiply by hamming window\n series = series.reshape((steps, -1))\n\n # multiply with hamming window to get rid of numerical errors\n hamming_window = np.hamming(steps).reshape((steps, 1))\n signal_f = my_fft(hamming_window * series)\n\n freqs = np.fft.fftfreq(steps, d=dt)\n return freqs, signal_f\n\n\ndef get_max_power_freq(series, dt):\n\n freqs, signal_f = get_spectrum(series, dt)\n return freqs[np.argmax(signal_f)]\n\n\ndef get_spectral_power(series, dt):\n\n freqs, signal_f = get_spectrum(series, dt)\n return freqs, np.square(np.abs(signal_f))\n\n\ndef plot_spectral_power(series, dt, f_top=2e14):\n steps = len(series)\n freqs, signal_f_power = get_spectral_power(series, dt)\n\n # only plot half (other is redundant)\n plt.plot(freqs[:steps//2], signal_f_power[:steps//2])\n plt.xlim([0, f_top])\n plt.xlabel('frequency (Hz)')\n plt.ylabel('power (|signal|^2)')\n plt.show()\n"
] | [
[
"numpy.flipud",
"scipy.sparse.random",
"numpy.hamming",
"scipy.sparse.vstack",
"scipy.sparse.coo_matrix",
"matplotlib.pylab.xlim",
"numpy.arange",
"numpy.argmax",
"matplotlib.pylab.plot",
"numpy.zeros",
"numpy.fft.fftfreq",
"scipy.sparse.hstack",
"numpy.array",
"numpy.sum",
"matplotlib.pylab.show",
"numpy.abs",
"numpy.fft.fft",
"numpy.vectorize",
"matplotlib.pylab.subplots",
"matplotlib.pylab.ylabel",
"matplotlib.pylab.xlabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
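The make_IO_matrices docstring in the row above explains how a sparse matrix A is recovered from its entries a and index lists via A = I @ diag(a) @ O. A minimal standalone check of that identity, using only scipy.sparse (sizes and variable names here are illustrative, not taken from the dataset):

import numpy as np
import scipy.sparse as sp

N, M = 6, 10                                            # matrix size, number of entries
rng = np.random.default_rng(0)
entries = rng.random(M) + 1j * rng.random(M)            # M complex entries 'a'
indices = rng.integers(0, N, size=(2, M))               # (row, col) index for each entry

# A built directly from (entries, indices); coo_matrix sums duplicate indices
A = sp.coo_matrix((entries, indices), shape=(N, N)).tocsr()

# 'one hot' index matrices I and J as in make_IO_matrices, with O = J.T
ik, jk = indices
I = sp.coo_matrix((np.ones(M), (ik, np.arange(M))), shape=(N, M)).tocsr()
J = sp.coo_matrix((np.ones(M), (jk, np.arange(M))), shape=(N, M)).tocsr()
O = J.T

A_rebuilt = I @ sp.diags(entries) @ O                   # A = I @ diag(a) @ O
assert abs(A - A_rebuilt).max() < 1e-12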
BarracudaPff/code-golf-data-pythpn | [
"42e8858c2ebc6a061012bcadb167d29cebb85c5e",
"42e8858c2ebc6a061012bcadb167d29cebb85c5e",
"42e8858c2ebc6a061012bcadb167d29cebb85c5e"
] | [
"parallax/parallax/examples/skip_thoughts/skip_distributed_driver.py",
"models/research/syntaxnet/dragnn/tools/parser_trainer.py",
"models/research/object_detection/dataset_tools/create_oid_tf_record.py"
] | [
"import sys\nimport os\nimport json\nimport time\nimport tensorflow as tf\nfrom tensorflow.core.protobuf import config_pb2\nimport parallax\nimport parallax_config\nimport configuration\nimport skip_thoughts_model\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string(\"data_path\", None, \"\"\"Where to training/test data is stored.\"\"\")\ntf.app.flags.DEFINE_string(\"input_file_pattern\", \"\", \"\"\"File pattern of train data\"\"\")\ntf.app.flags.DEFINE_integer(\"batch_size\", 128, \"\"\"Batch_size\"\"\")\ntf.app.flags.DEFINE_string(\"resource_info_file\", os.path.abspath(os.path.join(os.path.dirname(__file__), \".\", \"resource_info\")), \"Filename containing cluster information\")\ntf.app.flags.DEFINE_integer(\"max_steps\", 1000000, \"\"\"Number of iterations to run for each workers.\"\"\")\ntf.app.flags.DEFINE_integer(\"log_frequency\", 100, \"\"\"How many steps between two runop logs.\"\"\")\ntf.app.flags.DEFINE_boolean(\"sync\", True, \"\")\ndef main(_):\n\tsingle_gpu_graph = tf.Graph()\n\twith single_gpu_graph.as_default():\n\t\tmodel_config = configuration.model_config(input_file_pattern=FLAGS.input_file_pattern, batch_size=FLAGS.batch_size)\n\t\ttraining_config = configuration.training_config()\n\t\tmodel = skip_thoughts_model.SkipThoughtsModel(model_config, mode=\"train\")\n\t\tmodel.build()\n\t\tif training_config.learning_rate_decay_factor > 0:\n\t\t\tlearning_rate = tf.train.exponential_decay(learning_rate=float(training_config.learning_rate), global_step=model.global_step, decay_steps=training_config.learning_rate_decay_steps, decay_rate=training_config.learning_rate_decay_factor, staircase=False)\n\t\telse:\n\t\t\tlearning_rate = tf.constant(training_config.learning_rate)\n\t\toptimizer = tf.train.AdamOptimizer(learning_rate)\n\t\ttrain_tensor = tf.contrib.slim.learning.create_train_op(total_loss=model.total_loss, optimizer=optimizer, global_step=model.global_step, clip_gradient_norm=training_config.clip_gradient_norm)\n\tdef run(sess, num_workers, worker_id, num_replicas_per_worker):\n\t\tfetches = {\"global_step\": model.global_step, \"cost\": model.total_loss, \"train_op\": train_tensor}\n\t\tstart = time.time()\n\t\tfor i in range(FLAGS.max_steps):\n\t\t\tresults = sess.run(fetches)\n\t\t\tif i % FLAGS.log_frequency == 0:\n\t\t\t\tend = time.time()\n\t\t\t\tthroughput = float(FLAGS.log_frequency) / float(end - start)\n\t\t\t\tparallax.log.info(\"global step: %d, loss: %f, throughput: %f steps/sec\" % (results[\"global_step\"][0], results[\"cost\"][0], throughput))\n\t\t\t\tstart = time.time()\n\tsess, num_workers, worker_id, num_replicas_per_worker = parallax.parallel_run(single_gpu_graph, FLAGS.resource_info_file, sync=FLAGS.sync, parallax_config=parallax_config.build_config())\n\trun(sess, num_workers, worker_id, num_replicas_per_worker)\nif __name__ == \"__main__\":\n\ttf.logging.set_verbosity(tf.logging.INFO)\n\ttf.app.run()",
"\"\"\"A program to train a tensorflow neural net parser from a conll file.\"\"\"\nimport os\nimport os.path\nimport random\nimport time\nimport tensorflow as tf\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import tf_logging as logging\nfrom google.protobuf import text_format\nfrom syntaxnet.ops import gen_parser_ops\nfrom syntaxnet import task_spec_pb2\nfrom syntaxnet import sentence_pb2\nfrom dragnn.protos import spec_pb2\nfrom dragnn.python import evaluation\nfrom dragnn.python import graph_builder\nfrom dragnn.python import lexicon\nfrom dragnn.python import sentence_io\nfrom dragnn.python import spec_builder\nfrom dragnn.python import trainer_lib\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_string(\"tf_master\", \"\", \"TensorFlow execution engine to connect to.\")\nflags.DEFINE_string(\"resource_path\", \"\", \"Path to constructed resources.\")\nflags.DEFINE_string(\"tensorboard_dir\", \"\", \"Directory for TensorBoard logs output.\")\nflags.DEFINE_string(\"checkpoint_filename\", \"\", \"Filename to save the best checkpoint to.\")\nflags.DEFINE_string(\"training_corpus_path\", \"\", \"Path to training data.\")\nflags.DEFINE_string(\"dev_corpus_path\", \"\", \"Path to development set data.\")\nflags.DEFINE_bool(\"compute_lexicon\", False, \"\")\nflags.DEFINE_bool(\"projectivize_training_set\", True, \"\")\nflags.DEFINE_integer(\"batch_size\", 4, \"Batch size.\")\nflags.DEFINE_integer(\"report_every\", 200, \"Report cost and training accuracy every this many steps.\")\ndef main(unused_argv):\n\tlogging.set_verbosity(logging.INFO)\n\tif not gfile.IsDirectory(FLAGS.resource_path):\n\t\tgfile.MakeDirs(FLAGS.resource_path)\n\tif FLAGS.compute_lexicon:\n\t\tlogging.info(\"Computing lexicon...\")\n\t\tlexicon.build_lexicon(FLAGS.resource_path, FLAGS.training_corpus_path)\n\tchar2word = spec_builder.ComponentSpecBuilder(\"char_lstm\")\n\tchar2word.set_network_unit(name=\"wrapped_units.LayerNormBasicLSTMNetwork\", hidden_layer_sizes=\"256\")\n\tchar2word.set_transition_system(name=\"char-shift-only\", left_to_right=\"true\")\n\tchar2word.add_fixed_feature(name=\"chars\", fml=\"char-input.text-char\", embedding_dim=16)\n\tchar2word.fill_from_resources(FLAGS.resource_path, FLAGS.tf_master)\n\tlookahead = spec_builder.ComponentSpecBuilder(\"lookahead\")\n\tlookahead.set_network_unit(name=\"wrapped_units.LayerNormBasicLSTMNetwork\", hidden_layer_sizes=\"256\")\n\tlookahead.set_transition_system(name=\"shift-only\", left_to_right=\"false\")\n\tlookahead.add_link(source=char2word, fml=\"input.last-char-focus\", embedding_dim=32)\n\tlookahead.fill_from_resources(FLAGS.resource_path, FLAGS.tf_master)\n\ttagger = spec_builder.ComponentSpecBuilder(\"tagger\")\n\ttagger.set_network_unit(name=\"wrapped_units.LayerNormBasicLSTMNetwork\", hidden_layer_sizes=\"256\")\n\ttagger.set_transition_system(name=\"tagger\")\n\ttagger.add_token_link(source=lookahead, fml=\"input.focus\", embedding_dim=32)\n\ttagger.fill_from_resources(FLAGS.resource_path, FLAGS.tf_master)\n\tparser = spec_builder.ComponentSpecBuilder(\"parser\")\n\tparser.set_network_unit(name=\"FeedForwardNetwork\", hidden_layer_sizes=\"256\", layer_norm_hidden=\"True\")\n\tparser.set_transition_system(name=\"arc-standard\")\n\tparser.add_token_link(source=lookahead, fml=\"input.focus\", embedding_dim=32)\n\tparser.add_token_link(source=tagger, fml=\"input.focus stack.focus stack(1).focus\", embedding_dim=32)\n\tparser.add_link(source=parser, name=\"rnn-stack\", fml=\"stack.focus stack(1).focus\", 
source_translator=\"shift-reduce-step\", embedding_dim=32)\n\tparser.fill_from_resources(FLAGS.resource_path, FLAGS.tf_master)\n\tmaster_spec = spec_pb2.MasterSpec()\n\tmaster_spec.component.extend([char2word.spec, lookahead.spec, tagger.spec, parser.spec])\n\tlogging.info(\"Constructed master spec: %s\", str(master_spec))\n\thyperparam_config = spec_pb2.GridPoint()\n\thyperparam_config.decay_steps = 128000\n\thyperparam_config.learning_rate = 0.001\n\thyperparam_config.learning_method = \"adam\"\n\thyperparam_config.adam_beta1 = 0.9\n\thyperparam_config.adam_beta2 = 0.9\n\thyperparam_config.adam_eps = 0.0001\n\thyperparam_config.gradient_clip_norm = 1\n\thyperparam_config.self_norm_alpha = 1.0\n\thyperparam_config.use_moving_average = True\n\thyperparam_config.dropout_rate = 0.7\n\thyperparam_config.seed = 1\n\tgraph = tf.Graph()\n\twith graph.as_default():\n\t\tbuilder = graph_builder.MasterBuilder(master_spec, hyperparam_config)\n\t\tcomponent_targets = spec_builder.default_targets_from_spec(master_spec)\n\t\ttrainers = [builder.add_training_from_config(target) for target in component_targets]\n\t\tassert len(trainers) == 2\n\t\tannotator = builder.add_annotation()\n\t\tbuilder.add_saver()\n\ttraining_set = sentence_io.ConllSentenceReader(FLAGS.training_corpus_path, projectivize=FLAGS.projectivize_training_set).corpus()\n\tdev_set = sentence_io.ConllSentenceReader(FLAGS.dev_corpus_path, projectivize=False).corpus()\n\tlogging.info(\"Training on %d sentences.\", len(training_set))\n\tlogging.info(\"Tuning on %d sentences.\", len(dev_set))\n\tpretrain_steps = [100, 0]\n\ttagger_steps = 1000\n\ttrain_steps = [tagger_steps, 8 * tagger_steps]\n\ttf.logging.info(\"Creating TensorFlow checkpoint dir...\")\n\tgfile.MakeDirs(os.path.dirname(FLAGS.checkpoint_filename))\n\tsummary_writer = trainer_lib.get_summary_writer(FLAGS.tensorboard_dir)\n\twith tf.Session(FLAGS.tf_master, graph=graph) as sess:\n\t\tsess.run(tf.global_variables_initializer())\n\t\ttrainer_lib.run_training(sess, trainers, annotator, evaluation.parser_summaries, pretrain_steps, train_steps, training_set, dev_set, dev_set, FLAGS.batch_size, summary_writer, FLAGS.report_every, builder.saver, FLAGS.checkpoint_filename)\nif __name__ == \"__main__\":\n\ttf.app.run()",
"r\"\"\"Creates TFRecords of Open Images dataset for object detection.\nExample usage:\n python object_detection/dataset_tools/create_oid_tf_record.py \\\n --input_annotations_csv=/path/to/input/annotations-human-bbox.csv \\\n --input_images_directory=/path/to/input/image_pixels_directory \\\n --input_label_map=/path/to/input/labels_bbox_545.labelmap \\\n --output_tf_record_path_prefix=/path/to/output/prefix.tfrecord\nCSVs with bounding box annotations and image metadata (including the image URLs)\ncan be downloaded from the Open Images GitHub repository:\nhttps://github.com/openimages/dataset\nThis script will include every image found in the input_images_directory in the\noutput TFRecord, even if the image has no corresponding bounding box annotations\nin the input_annotations_csv.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nimport contextlib2\nimport pandas as pd\nimport tensorflow as tf\nfrom object_detection.dataset_tools import oid_tfrecord_creation\nfrom object_detection.utils import label_map_util\ntf.flags.DEFINE_string(\"input_annotations_csv\", None, \"Path to CSV containing image bounding box annotations\")\ntf.flags.DEFINE_string(\"input_images_directory\", None, \"Directory containing the image pixels \" \"downloaded from the OpenImages GitHub repository.\")\ntf.flags.DEFINE_string(\"input_label_map\", None, \"Path to the label map proto\")\ntf.flags.DEFINE_string(\"output_tf_record_path_prefix\", None, \"Path to the output TFRecord. The shard index and the number of shards \" \"will be appended for each output shard.\")\ntf.flags.DEFINE_integer(\"num_shards\", 100, \"Number of TFRecord shards\")\nFLAGS = tf.flags.FLAGS\ndef main(_):\n\ttf.logging.set_verbosity(tf.logging.INFO)\n\trequired_flags = [\"input_annotations_csv\", \"input_images_directory\", \"input_label_map\", \"output_tf_record_path_prefix\"]\n\tfor flag_name in required_flags:\n\t\tif not getattr(FLAGS, flag_name):\n\t\t\traise ValueError(\"Flag --{} is required\".format(flag_name))\n\tlabel_map = label_map_util.get_label_map_dict(FLAGS.input_label_map)\n\tall_annotations = pd.read_csv(FLAGS.input_annotations_csv)\n\tall_images = tf.gfile.Glob(os.path.join(FLAGS.input_images_directory, \"*.jpg\"))\n\tall_image_ids = [os.path.splitext(os.path.basename(v))[0] for v in all_images]\n\tall_image_ids = pd.DataFrame({\"ImageID\": all_image_ids})\n\tall_annotations = pd.concat([all_annotations, all_image_ids])\n\ttf.logging.log(tf.logging.INFO, \"Found %d images...\", len(all_image_ids))\n\twith contextlib2.ExitStack() as tf_record_close_stack:\n\t\toutput_tfrecords = oid_tfrecord_creation.open_sharded_output_tfrecords(tf_record_close_stack, FLAGS.output_tf_record_path_prefix, FLAGS.num_shards)\n\t\tfor counter, image_data in enumerate(all_annotations.groupby(\"ImageID\")):\n\t\t\ttf.logging.log_every_n(tf.logging.INFO, \"Processed %d images...\", 1000, counter)\n\t\t\timage_id, image_annotations = image_data\n\t\t\timage_path = os.path.join(FLAGS.input_images_directory, image_id + \".jpg\")\n\t\t\twith tf.gfile.Open(image_path) as image_file:\n\t\t\t\tencoded_image = image_file.read()\n\t\t\ttf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(image_annotations, label_map, encoded_image)\n\t\t\tif tf_example:\n\t\t\t\tshard_idx = long(image_id, 16) % FLAGS.num_shards\n\t\t\t\toutput_tfrecords[shard_idx].write(tf_example.SerializeToString())\nif __name__ == \"__main__\":\n\ttf.app.run()"
] | [
[
"tensorflow.contrib.slim.learning.create_train_op",
"tensorflow.Graph",
"tensorflow.constant",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.logging.set_verbosity",
"tensorflow.train.AdamOptimizer",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.app.run"
],
[
"tensorflow.python.platform.tf_logging.set_verbosity",
"tensorflow.Graph",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.platform.gfile.MakeDirs",
"tensorflow.global_variables_initializer",
"tensorflow.logging.info",
"tensorflow.Session",
"tensorflow.python.platform.gfile.IsDirectory",
"tensorflow.app.run"
],
[
"pandas.concat",
"pandas.read_csv",
"tensorflow.gfile.Open",
"tensorflow.flags.DEFINE_string",
"tensorflow.logging.log_every_n",
"pandas.DataFrame",
"tensorflow.logging.set_verbosity",
"tensorflow.flags.DEFINE_integer",
"tensorflow.app.run"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
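One detail worth flagging in the create_oid_tf_record.py snippet above: the shard assignment uses long(image_id, 16), which exists only in Python 2. A hedged Python 3 sketch of the same hex-id-to-shard mapping (the image id below is a made-up placeholder):

num_shards = 100                              # matches the script's --num_shards default
image_id = "000026e7ee790996"                 # hypothetical OpenImages-style hex id
shard_idx = int(image_id, 16) % num_shards    # Python 3 replacement for long(image_id, 16)
print(shard_idx)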
AHaryanto/azure-automl-mlops | [
"4448ce0733ca45dab3bf47907ed264536b45a453"
] | [
"src/deployment/consume.py"
] | [
"import json\nimport os\nimport sys\n\nimport pandas as pd\nfrom azureml.core.webservice import AksWebservice\nfrom loguru import logger\n\nsys.path.append(os.getcwd())\nimport config as f # noqa: E402\n\n# Load test data\ndata_path = os.path.join('data', 'regression_kaggle_retail_data_analytics',\n 'processed', 'test_data_sample_100.csv')\ntest_df = pd.read_csv(data_path)\nx_test_df = test_df.drop(['Weekly_Sales'], axis=1)\nprint(x_test_df)\n\ndata_dict = x_test_df.to_dict()\ndata_json = json.dumps(data_dict)\n\n# Retrieve a Webservice\naks_service = AksWebservice(\n workspace=f.ws,\n name=f.params['aks_endpoint_name'])\nlogger.debug('Found Webservice {} in {}'.format(\n aks_service.name, aks_service.workspace.name))\n\n# Call the Webservice with the provided input\ny_pred = aks_service.run(input_data=data_json)\ny_pred_df = pd.DataFrame.from_dict(y_pred)\nprint(y_pred_df)\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
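The consume.py snippet above serialises the feature frame with to_dict() followed by json.dumps before calling the AKS endpoint. A minimal pandas-only sketch of that round trip (toy columns, not the real retail schema; note that JSON turns the integer row index into string keys):

import json
import pandas as pd

x_test = pd.DataFrame({"Store": [1, 2], "Temperature": [42.3, 38.5]})   # toy features only
payload = json.dumps(x_test.to_dict())            # body posted to the webservice
restored = pd.DataFrame(json.loads(payload))      # what a scoring script can rebuild
print(restored)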
jorgimello/meta-learning-fer | [
"793610ae8471f794a6837930d8bb51866c1f7c02",
"793610ae8471f794a6837930d8bb51866c1f7c02"
] | [
"data/ck/check_data.py",
"data/generate_training_data.py"
] | [
"import numpy as np\nimport os, cv2\n\nimgs = np.load('test_set_ck_extended_no_resize.npy')\nlbls = np.load('test_labels_ck_extended_no_resize.npy')\n\nfor i in range(imgs.shape[0]):\n\tprint (lbls[i])\n\tcv2.imshow('img', imgs[i])\n\tcv2.waitKey(0)\n",
"# Source: https://github.com/Microsoft/FERPlus\n\n#\n# Copyright (c) Microsoft. All rights reserved.\n# Licensed under the MIT license. See LICENSE.md file in the project root for full license information.\n#\n\nimport os\nimport csv\nimport argparse\nimport numpy as np\nfrom itertools import islice\nfrom PIL import Image\n\n# List of folders for training, validation and test.\nfolder_names = {'Training' : 'FER2013Train',\n 'PublicTest' : 'FER2013Valid',\n 'PrivateTest': 'FER2013Test'}\n\ndef str_to_image(image_blob):\n ''' Convert a string blob to an image object. '''\n image_string = image_blob.split(' ')\n image_data = np.asarray(image_string, dtype=np.uint8).reshape(48,48)\n return Image.fromarray(image_data)\n\ndef main(base_folder, fer_path, ferplus_path):\n '''\n Generate PNG image files from the combined fer2013.csv and fer2013new.csv file. The generated files\n are stored in their corresponding folder for the trainer to use.\n \n Args:\n base_folder(str): The base folder that contains 'FER2013Train', 'FER2013Valid' and 'FER2013Test'\n subfolder.\n fer_path(str): The full path of fer2013.csv file.\n ferplus_path(str): The full path of fer2013new.csv file.\n '''\n \n print(\"Start generating ferplus images.\")\n \n for key, value in folder_names.items():\n folder_path = os.path.join(base_folder, value)\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n \n ferplus_entries = []\n with open(ferplus_path,'r') as csvfile:\n ferplus_rows = csv.reader(csvfile, delimiter=',')\n for row in islice(ferplus_rows, 1, None):\n ferplus_entries.append(row)\n \n index = 0\n with open(fer_path,'r') as csvfile:\n fer_rows = csv.reader(csvfile, delimiter=',')\n for row in islice(fer_rows, 1, None):\n ferplus_row = ferplus_entries[index]\n file_name = ferplus_row[1].strip()\n if len(file_name) > 0:\n image = str_to_image(row[1])\n image_path = os.path.join(base_folder, folder_names[row[2]], file_name)\n image.save(image_path, compress_level=0) \n index += 1 \n \n print(\"Done...\")\n \nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \n \"--base_folder\", \n type = str, \n help = \"Base folder containing the training, validation and testing folder.\", \n required = True)\n parser.add_argument(\"-fer\", \n \"--fer_path\", \n type = str,\n help = \"Path to the original fer2013.csv file.\",\n required = True)\n \n parser.add_argument(\"-ferplus\", \n \"--ferplus_path\", \n type = str,\n help = \"Path to the new fer2013new.csv file.\",\n required = True) \n\n args = parser.parse_args()\n main(args.base_folder, args.fer_path, args.ferplus_path)"
] | [
[
"numpy.load"
],
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
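The str_to_image helper in generate_training_data.py above turns the fer2013.csv 'pixels' column, a space-separated string of 48x48 grey values, back into an image. A small self-contained sketch of that conversion with a synthetic pixel blob standing in for a CSV row:

import numpy as np
from PIL import Image

blob = " ".join(str(v % 256) for v in range(48 * 48))        # synthetic stand-in for a CSV row
pixels = np.asarray(blob.split(" "), dtype=np.uint8).reshape(48, 48)
img = Image.fromarray(pixels)                                # 48x48 greyscale PIL image
print(img.size)                                              # (48, 48)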
Mithril-NTU/reco-gym | [
"5b494eb719d528096751c886b9ab21b1dbcab20f"
] | [
"recogym/envs/abstract.py"
] | [
"from abc import ABC\n\nimport gym\nimport numpy as np\nimport pandas as pd\nfrom gym.spaces import Discrete\nfrom numpy.random.mtrand import RandomState\nfrom scipy.special import expit as sigmoid\nfrom tqdm import trange\n\nfrom .configuration import Configuration\nfrom .context import DefaultContext\nfrom .features.time import DefaultTimeGenerator\nfrom .observation import Observation\nfrom .session import OrganicSessions\nfrom ..agents import Agent\n\n# Arguments shared between all environments.\n\nenv_args = {\n 'num_products': 10,\n 'num_users': 100,\n 'random_seed': np.random.randint(2 ** 31 - 1),\n 'random_seed_for_user': None, # if set, the random seed for user embedding generation will be changed.\n # Markov State Transition Probabilities.\n 'prob_leave_bandit': 0.01,\n 'prob_leave_organic': 0.01,\n 'prob_bandit_to_organic': 0.05,\n 'prob_organic_to_bandit': 0.25,\n 'normalize_beta': False,\n 'with_ps_all': False\n}\n\n\n# Static function for squashing values between 0 and 1.\ndef f(mat, offset=5):\n \"\"\"Monotonic increasing function as described in toy.pdf.\"\"\"\n return sigmoid(mat - offset)\n\n\n# Magic numbers for Markov states.\norganic = 0\nbandit = 1\nstop = 2\n\n\nclass AbstractEnv(gym.Env, ABC):\n\n def __init__(self):\n gym.Env.__init__(self)\n ABC.__init__(self)\n\n self.first_step = True\n self.config = None\n self.state = None\n self.current_user_id = None\n self.current_time = None\n self.empty_sessions = OrganicSessions()\n\n def reset_random_seed(self, epoch=0):\n # Initialize Random State.\n assert (self.config.random_seed is not None)\n self.rng = RandomState(self.config.random_seed + epoch)\n if self.config.random_seed_for_user is not None:\n assert isinstance(self.config.random_seed_for_user, int)\n self.user_rng = RandomState(self.config.random_seed_for_user + epoch)\n\n def init_gym(self, args):\n\n self.config = Configuration(args)\n\n # Defining Action Space.\n self.action_space = Discrete(self.config.num_products)\n\n if 'time_generator' not in args:\n self.time_generator = DefaultTimeGenerator(self.config)\n else:\n self.time_generator = self.config.time_generator\n\n # Setting random seed for the first time.\n self.reset_random_seed()\n\n if 'agent' not in args:\n self.agent = None\n else:\n self.agent = self.config.agent\n\n # Setting any static parameters such as transition probabilities.\n self.set_static_params()\n\n # Set random seed for second time, ensures multiple epochs possible.\n self.reset_random_seed()\n\n def reset(self, user_id=0):\n # Current state.\n self.first_step = True\n self.state = organic # Manually set first state as Organic.\n\n self.time_generator.reset()\n if self.agent:\n self.agent.reset()\n\n self.current_time = self.time_generator.new_time()\n self.current_user_id = user_id\n\n # Record number of times each product seen for static policy calculation.\n self.organic_views = np.zeros(self.config.num_products)\n\n def generate_organic_sessions(self):\n\n # Initialize session.\n session = OrganicSessions()\n\n while self.state == organic:\n # Add next product view.\n self.update_product_view()\n session.next(\n DefaultContext(self.current_time, self.current_user_id),\n self.product_view\n )\n\n # Update markov state.\n self.update_state()\n\n return session\n\n def step(self, action_id):\n \"\"\"\n\n Parameters\n ----------\n action_id : int between 1 and num_products indicating which\n product recommended (aka which ad shown)\n\n Returns\n -------\n observation, reward, done, info : tuple\n observation (tuple) :\n a tuple 
of values (is_organic, product_view)\n is_organic - True if Markov state is `organic`,\n False if Markov state `bandit` or `stop`.\n product_view - if Markov state is `organic` then it is an int\n between 1 and P where P is the number of\n products otherwise it is None.\n reward (tuple) :\n a tuple of values (click, ctr), ctr is click-through-rate which\n means the probability of user clicking.\n if the previous state was\n `bandit` - then reward is (1, ctr) if the user clicked on the ad\n you recommended otherwise (0, ctr)\n `organic` - then reward is (None, None)\n done (bool) :\n whether it's time to reset the environment again.\n An episode is over at the end of a user's timeline (all of\n their organic and bandit sessions)\n info (dict) :\n this is unused, it's always an empty dict\n \"\"\"\n\n # No information to return.\n info = {}\n\n if self.first_step:\n assert (action_id is None)\n self.first_step = False\n sessions = self.generate_organic_sessions()\n return (\n Observation(\n DefaultContext(\n self.current_time,\n self.current_user_id\n ),\n sessions\n ),\n (None, None),\n self.state == stop,\n info\n )\n\n assert (action_id is not None)\n # Calculate reward from action.\n reward = self.draw_click(action_id) # (click ,ctr)\n\n self.update_state()\n\n # Markov state dependent logic.\n if self.state == organic:\n sessions = self.generate_organic_sessions()\n else:\n sessions = self.empty_sessions\n\n return (\n Observation(\n DefaultContext(self.current_time, self.current_user_id),\n sessions\n ),\n reward,\n self.state == stop,\n info\n )\n\n def step_offline(self, observation, reward, done):\n \"\"\"Call step function wih the policy implemented by a particular Agent.\"\"\"\n\n if self.first_step:\n action = None\n else:\n assert (hasattr(self, 'agent'))\n assert (observation is not None)\n if self.agent:\n action = self.agent.act(observation, reward, done)\n else:\n # Select a Product randomly.\n action = {\n 't': observation.context().time(),\n 'u': observation.context().user(),\n 'a': np.int16(self.rng.choice(self.config.num_products)),\n 'ps': 1.0 / self.config.num_products,\n 'ps-a': (\n np.ones(self.config.num_products) / self.config.num_products\n if self.config.with_ps_all else\n ()\n ),\n }\n\n if done:\n reward = self.draw_click(action['a']) # (click ,ctr)\n return (\n action,\n Observation(\n DefaultContext(self.current_time, self.current_user_id),\n self.empty_sessions\n ),\n reward,\n done,\n None\n )\n else:\n observation, reward, done, info = self.step(\n action['a'] if action is not None else None\n )\n\n return action, observation, reward, done, info\n\n def generate_logs(\n self,\n num_offline_users: int,\n agent: Agent = None,\n num_organic_offline_users: int = 0\n ):\n \"\"\"\n Produce logs of applying an Agent in the Environment for the specified amount of Users.\n If the Agent is not provided, then the default Agent is used that randomly selects an Action.\n \"\"\"\n\n if agent:\n old_agent = self.agent\n self.agent = agent\n\n data = {\n 't': [],\n 'u': [],\n 'z': [],\n 'v': [],\n 'a': [],\n 'c': [],\n 'ctr': [],\n 'ps': [],\n 'ps-a': [],\n }\n\n def _store_organic(observation):\n assert (observation is not None)\n assert (observation.sessions() is not None)\n for session in observation.sessions():\n data['t'].append(session['t'])\n data['u'].append(session['u'])\n data['z'].append('organic')\n data['v'].append(session['v'])\n data['a'].append(None)\n data['c'].append(None)\n data['ctr'].append(None)\n data['ps'].append(None)\n 
data['ps-a'].append(None)\n\n def _store_bandit(action, reward):\n if action:\n assert (reward is not None)\n data['t'].append(action['t'])\n data['u'].append(action['u'])\n data['z'].append('bandit')\n data['v'].append(None)\n data['a'].append(action['a'])\n data['c'].append(reward[0])\n data['ctr'].append(reward[1])\n data['ps'].append(action['ps'])\n data['ps-a'].append(action['ps-a'] if 'ps-a' in action else ())\n\n unique_user_id = 0\n for _ in trange(num_organic_offline_users, desc='Organic Users'):\n self.reset(unique_user_id)\n unique_user_id += 1\n observation, _, _, _ = self.step(None)\n _store_organic(observation)\n\n for _ in trange(num_offline_users, desc='Users'):\n self.reset(unique_user_id)\n unique_user_id += 1\n observation, reward, done, _ = self.step(None)\n\n while not done:\n _store_organic(observation)\n action, observation, reward, done, _ = self.step_offline(\n observation, reward, done\n )\n _store_bandit(action, reward)\n\n _store_organic(observation)\n\n data['t'] = np.array(data['t'], dtype=np.float32)\n data['u'] = pd.array(data['u'], dtype=pd.UInt32Dtype())\n data['v'] = pd.array(data['v'], dtype=pd.UInt32Dtype())\n data['a'] = pd.array(data['a'], dtype=pd.UInt32Dtype())\n data['c'] = np.array(data['c'], dtype=np.float32)\n data['ctr'] = np.array(data['ctr'], dtype=np.float32)\n\n if agent:\n self.agent = old_agent\n\n return pd.DataFrame().from_dict(data)\n\n def generate_gt(\n self,\n num_offline_users: int,\n ):\n data = {\n 't': [],\n 'u': [],\n 'z': [],\n 'v': [],\n 'a': [],\n 'c': [],\n 'ctr': [],\n 'ps': [],\n 'ps-a': [],\n }\n\n def _store_organic(observation):\n assert (observation is not None)\n assert (observation.sessions() is not None)\n for session in observation.sessions():\n data['t'].append(session['t'])\n data['u'].append(session['u'])\n data['z'].append('organic')\n data['v'].append(session['v'])\n data['a'].append(None)\n data['c'].append(None)\n data['ctr'].append(None)\n data['ps'].append(None)\n data['ps-a'].append(None)\n\n def _store_bandit(action, reward):\n if action:\n assert (reward is not None)\n data['t'].append(action['t'])\n data['u'].append(action['u'])\n data['z'].append('bandit')\n data['v'].append(None)\n data['a'].append(action['a'])\n data['c'].append(reward[0])\n data['ctr'].append(reward[1])\n data['ps'].append(action['ps'])\n data['ps-a'].append(action['ps-a'] if 'ps-a' in action else ())\n\n unique_user_id = 0\n all_actions = np.arange(self.config.num_products)\n for _ in trange(num_offline_users, desc='Users'):\n self.reset(unique_user_id)\n unique_user_id += 1\n observation, reward, done, _ = self.step(None)\n\n while not done:\n _store_organic(observation)\n for action in all_actions:\n if action == 0:\n observation, reward, done, info = self.step(0)\n else:\n reward = self.draw_click(action)\n action = {\n 't': observation.context().time(),\n 'u': observation.context().user(),\n 'a': action,\n 'ps': 1.0,\n 'ps-a': (\n np.ones(self.config.num_products) / self.config.num_products\n if self.config.with_ps_all else\n ()\n ),\n }\n _store_bandit(action, reward)\n _store_organic(observation)\n\n data['t'] = np.array(data['t'], dtype=np.float32)\n data['u'] = pd.array(data['u'], dtype=pd.UInt32Dtype())\n data['v'] = pd.array(data['v'], dtype=pd.UInt32Dtype())\n data['a'] = pd.array(data['a'], dtype=pd.UInt32Dtype())\n data['c'] = np.array(data['c'], dtype=np.float32)\n data['ctr'] = np.array(data['ctr'], dtype=np.float32)\n\n return pd.DataFrame().from_dict(data)\n"
] | [
[
"scipy.special.expit",
"numpy.arange",
"pandas.UInt32Dtype",
"pandas.DataFrame",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.random.mtrand.RandomState",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
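generate_logs in the row above stores user and product ids that may be None using pandas' nullable unsigned-integer dtype, pd.array(..., dtype=pd.UInt32Dtype()). A tiny illustration of why: missing values stay as <NA> instead of silently promoting the column to float.

import pandas as pd

views = pd.array([3, None, 7], dtype=pd.UInt32Dtype())
print(views)         # IntegerArray: [3, <NA>, 7]
print(views.dtype)   # UInt32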
AyrtonB/National-Grid-Data-Portal | [
"b70cc684ecb92b82ba3de63fadfbf0caee30c943"
] | [
"build/lib/NGDataPortal/NGDataPortal.py"
] | [
"\"\"\"\nImports\n\"\"\"\nimport pandas as pd\nimport warnings\nimport requests\nimport json\nimport os\n\n\"\"\"\nMain Scripts\n\"\"\"\n## Loading static files\nresource_filepath = os.path.join(os.path.dirname(__file__), 'stream_to_resource_id.json')\nwith open(resource_filepath, 'r') as fp:\n stream_to_id_map = json.load(fp)\n \n## Main class\nclass Wrapper(): \n def NG_request(self, params={}): \n url_root = 'https://national-grid-admin.ckan.io/api/3/action/datastore_search'\n\n params.update({'resource_id':self.resource_id})\n\n if 'sql' in params.keys():\n url_root += '_sql'\n\n r = requests.get(url_root, params=params)\n\n return r\n\n def raise_(self, err_txt, error=ValueError): \n raise error(err_txt)\n\n def check_request_success(self, r_json):\n if r_json['success'] == False:\n \n if 'message' in r_json['error'].keys():\n err_msg = r_json['error']['message']\n else:\n err_msg = r_json['error']['info']['orig']\n\n self.raise_(err_msg)\n\n date_between = lambda self, dt_col, start_date, end_date: f'SELECT * from \"{self.resource_id}\" WHERE \"{dt_col}\" BETWEEN \\'{start_date}\\'::timestamp AND \\'{end_date}\\'::timestamp ORDER BY \"{dt_col}\"' \n date_less_than = lambda self, dt_col, date: f'SELECT * from \"{self.resource_id}\" WHERE \"{dt_col}\" < \\'{date}\\'::timestamp ORDER BY \"{dt_col}\"' \n date_greater_than = lambda self, dt_col, date: f'SELECT * from \"{self.resource_id}\" WHERE \"{dt_col}\" > \\'{date}\\'::timestamp ORDER BY \"{dt_col}\"' \n\n def form_dt_rng_sql_query(self, dt_col, start_date=None, end_date=None):\n start_end_date_exist = (start_date!=None, end_date!=None)\n\n func_map = {\n (False, False) : {'error' : 'A start and/or end date should be passed'},\n (True, True) : self.date_between(dt_col, start_date, end_date),\n (False, True) : self.date_less_than(dt_col, end_date),\n (True, False) : self.date_greater_than(dt_col, start_date),\n }\n\n sql = func_map[start_end_date_exist]\n\n if not isinstance(sql, str):\n self.raise_(sql['error'])\n\n return sql\n\n def query_API(self, params={}, start_date=None, end_date=None, dt_col=None, sql='', return_raw=False):\n ## Handling SQL queries\n if start_date or end_date:\n if sql != '':\n warnings.warn('The start and end date query will overwrite the provided SQL')\n\n if not dt_col:\n warnings.warn('If a start or end date has been provided the \\'dt_col\\' parameter must be provided')\n\n sql = self.form_dt_rng_sql_query(dt_col, start_date=start_date, end_date=end_date)\n params.update({'sql':sql})\n\n elif sql != '':\n params.update({'sql':sql})\n \n elif 'sql' in params.keys():\n params.pop('sql')\n\n ## Making the request\n r = self.NG_request(params=params)\n\n if return_raw == True:\n return r\n\n ## Checking and parsing the response\n r_json = r.json()\n self.check_request_success(r_json)\n\n df = pd.DataFrame(r_json['result']['records'])\n\n return df\n \n def assign_stream(self, stream):\n self.stream = stream\n self.resource_id = stream_to_id_map[self.stream]\n \n def __init__(self, stream):\n self.assign_stream(stream)\n self.streams = list(stream_to_id_map.keys()) \n\n \nif __name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
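The Wrapper class above targets CKAN's datastore_search_sql endpoint and builds its date-range filter as a PostgreSQL query string (the date_between lambda). A sketch of the string it produces, with placeholder identifiers since no real resource id or column name appears here:

resource_id = "example-resource-id"     # placeholder, not a real National Grid resource id
dt_col, start, end = "SETTLEMENT_DATE", "2020-01-01", "2020-01-02"   # hypothetical column/dates
sql = (f'SELECT * from "{resource_id}" WHERE "{dt_col}" '
       f"BETWEEN '{start}'::timestamp AND '{end}'::timestamp "
       f'ORDER BY "{dt_col}"')
print(sql)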
sergiopasra/glue | [
"c25a217a122a11818382672c99cb21f57a30636f",
"c25a217a122a11818382672c99cb21f57a30636f",
"c25a217a122a11818382672c99cb21f57a30636f"
] | [
"glue/app/qt/tests/test_preferences.py",
"glue/core/data_exporters/tests/test_gridded_fits.py",
"glue/dialogs/component_arithmetic/qt/component_arithmetic.py"
] | [
"from __future__ import absolute_import, division, print_function\n\nimport os\n\nimport pytest\nimport numpy as np\nfrom mock import patch, MagicMock\nfrom matplotlib.colors import ColorConverter\n\nfrom glue import custom_viewer\nfrom glue.tests.helpers import PYSIDE2_INSTALLED # noqa\nfrom glue.core import HubListener, Application, Data, DataCollection\nfrom glue.core.message import SettingsChangeMessage\nfrom qtpy import QtWidgets\nfrom glue.app.qt.preferences import PreferencesDialog\nfrom glue.app.qt import GlueApplication\nfrom glue.viewers.scatter.qt import ScatterViewer\nfrom glue.viewers.image.qt import ImageViewer\nfrom glue.viewers.histogram.qt import HistogramViewer\nfrom glue.plugins.dendro_viewer.qt import DendrogramViewer\n\nrgb = ColorConverter().to_rgb\n\n\nclass TestPreferences():\n\n def setup_method(self, method):\n self.app = Application()\n\n def test_no_change(self):\n\n # If we don't change anything, settings should be preserved\n\n with patch('glue.config.settings') as settings:\n\n settings.FOREGROUND_COLOR = 'red'\n settings.BACKGROUND_COLOR = (0, 0.5, 1)\n settings.DATA_COLOR = (1, 0.5, 0.25)\n settings.DATA_ALPHA = 0.3\n\n dialog = PreferencesDialog(self.app)\n dialog.show()\n\n assert dialog.theme == 'Custom'\n\n dialog.accept()\n\n assert rgb(settings.FOREGROUND_COLOR) == (1, 0, 0)\n assert rgb(settings.BACKGROUND_COLOR) == (0, 0.5, 1)\n assert rgb(settings.DATA_COLOR) == (1, 0.5, 0.25)\n assert settings.DATA_ALPHA == 0.3\n\n def test_theme_autodetect(self):\n\n # If we don't change anything, settings should be preserved\n\n with patch('glue.config.settings') as settings:\n\n settings.FOREGROUND_COLOR = 'white'\n settings.BACKGROUND_COLOR = 'black'\n settings.DATA_COLOR = '0.75'\n settings.DATA_ALPHA = 0.8\n\n dialog = PreferencesDialog(self.app)\n dialog.show()\n assert dialog.theme == 'White on Black'\n dialog.accept()\n\n settings.FOREGROUND_COLOR = 'black'\n settings.BACKGROUND_COLOR = 'white'\n settings.DATA_COLOR = '0.35'\n settings.DATA_ALPHA = 0.8\n\n dialog = PreferencesDialog(self.app)\n dialog.show()\n assert dialog.theme == 'Black on White'\n dialog.accept()\n\n def test_themes(self):\n\n # Check that themes work\n\n with patch('glue.config.settings') as settings:\n\n settings.FOREGROUND_COLOR = 'red'\n settings.BACKGROUND_COLOR = (0, 0.5, 1)\n settings.DATA_COLOR = (1, 0.5, 0.25)\n settings.DATA_ALPHA = 0.3\n\n dialog = PreferencesDialog(self.app)\n dialog.show()\n dialog.theme = 'White on Black'\n dialog.accept()\n\n assert rgb(settings.FOREGROUND_COLOR) == (1, 1, 1)\n assert rgb(settings.BACKGROUND_COLOR) == (0, 0, 0)\n assert rgb(settings.DATA_COLOR) == (0.75, 0.75, 0.75)\n assert settings.DATA_ALPHA == 0.8\n\n dialog = PreferencesDialog(self.app)\n dialog.show()\n dialog.theme = 'Black on White'\n dialog.accept()\n\n assert rgb(settings.FOREGROUND_COLOR) == (0, 0, 0)\n assert rgb(settings.BACKGROUND_COLOR) == (1, 1, 1)\n assert rgb(settings.DATA_COLOR) == (0.35, 0.35, 0.35)\n assert settings.DATA_ALPHA == 0.8\n\n def test_custom_changes(self):\n\n # Check that themes work\n\n with patch('glue.config.settings') as settings:\n\n settings.FOREGROUND_COLOR = 'red'\n settings.BACKGROUND_COLOR = (0, 0.5, 1)\n settings.DATA_COLOR = (1, 0.5, 0.25)\n settings.DATA_ALPHA = 0.3\n\n dialog = PreferencesDialog(self.app)\n dialog.show()\n dialog.foreground = (0, 1, 1)\n dialog.accept()\n\n assert rgb(settings.FOREGROUND_COLOR) == (0, 1, 1)\n assert rgb(settings.BACKGROUND_COLOR) == (0, 0.5, 1)\n assert rgb(settings.DATA_COLOR) == (1, 0.5, 0.25)\n assert 
settings.DATA_ALPHA == 0.3\n\n dialog = PreferencesDialog(self.app)\n dialog.show()\n dialog.background = (1, 0, 1)\n dialog.accept()\n\n assert rgb(settings.FOREGROUND_COLOR) == (0, 1, 1)\n assert rgb(settings.BACKGROUND_COLOR) == (1, 0, 1)\n assert rgb(settings.DATA_COLOR) == (1, 0.5, 0.25)\n assert settings.DATA_ALPHA == 0.3\n\n dialog = PreferencesDialog(self.app)\n dialog.show()\n dialog.data_color = (1, 1, 0.5)\n dialog.accept()\n\n assert rgb(settings.FOREGROUND_COLOR) == (0, 1, 1)\n assert rgb(settings.BACKGROUND_COLOR) == (1, 0, 1)\n assert rgb(settings.DATA_COLOR) == (1, 1, 0.5)\n assert settings.DATA_ALPHA == 0.3\n\n dialog = PreferencesDialog(self.app)\n dialog.show()\n dialog.data_alpha = 0.4\n dialog.accept()\n\n assert rgb(settings.FOREGROUND_COLOR) == (0, 1, 1)\n assert rgb(settings.BACKGROUND_COLOR) == (1, 0, 1)\n assert rgb(settings.DATA_COLOR) == (1, 1, 0.5)\n assert settings.DATA_ALPHA == 0.4\n\n def test_custom_pane(self):\n\n settings = MagicMock()\n\n class CustomPreferences(QtWidgets.QWidget):\n\n def __init__(self, parent=None):\n\n super(CustomPreferences, self).__init__(parent=parent)\n\n self.layout = QtWidgets.QFormLayout()\n\n self.option1 = QtWidgets.QLineEdit()\n self.option2 = QtWidgets.QLineEdit()\n\n self.layout.addRow(\"Option 1\", self.option1)\n self.layout.addRow(\"Option 2\", self.option2)\n\n self.setLayout(self.layout)\n\n def finalize(self):\n settings.OPTION1 = \"Monty\"\n settings.OPTION2 = \"Python\"\n\n preference_panes = [('Custom', CustomPreferences)]\n\n with patch('glue.config.preference_panes', preference_panes):\n\n dialog = PreferencesDialog(self.app)\n dialog.show()\n dialog.accept()\n\n assert settings.OPTION1 == \"Monty\"\n assert settings.OPTION2 == \"Python\"\n\n def test_settings_change_message(self):\n\n # Make sure that a SettingsChangeMessage gets emitted when settings\n # change in the dialog\n\n class TestListener(HubListener):\n\n def __init__(self, hub):\n hub.subscribe(self, SettingsChangeMessage,\n handler=self.receive_message)\n self.received = []\n\n def receive_message(self, message):\n self.received.append(message)\n\n listener = TestListener(self.app._hub)\n\n with patch('glue.config.settings') as settings:\n\n settings.FOREGROUND_COLOR = 'red'\n settings.BACKGROUND_COLOR = (0, 0.5, 1)\n settings.DATA_COLOR = (1, 0.5, 0.25)\n settings.DATA_ALPHA = 0.3\n\n dialog = PreferencesDialog(self.app)\n dialog.show()\n dialog.foreground = (0, 1, 1)\n dialog.accept()\n\n assert len(listener.received) == 1\n assert listener.received[0].settings == ('FOREGROUND_COLOR', 'BACKGROUND_COLOR')\n\n def test_save_to_disk(self, tmpdir):\n\n with patch('glue.config.settings') as settings:\n with patch('glue.config.CFG_DIR', tmpdir.strpath):\n\n settings.FOREGROUND_COLOR = 'red'\n settings.BACKGROUND_COLOR = (0, 0.5, 1)\n settings.DATA_COLOR = (1, 0.5, 0.25)\n settings.DATA_ALPHA = 0.3\n\n dialog = PreferencesDialog(self.app)\n dialog.show()\n dialog.save_to_disk = False\n dialog.accept()\n\n assert not os.path.exists(os.path.join(tmpdir.strpath, 'settings.cfg'))\n\n dialog = PreferencesDialog(self.app)\n dialog.show()\n dialog.save_to_disk = True\n dialog.accept()\n\n assert os.path.exists(os.path.join(tmpdir.strpath, 'settings.cfg'))\n\n\ndef assert_axes_background(axes, color):\n assert axes.patch.get_facecolor() == color\n assert axes.figure.get_facecolor() == color\n\n\ndef assert_axes_foreground(axes, color):\n\n if hasattr(axes, 'coords'):\n # TODO: fix this in WCSAxes\n assert axes.coords.frame._color == color\n for coord in 
axes.coords:\n assert coord.ticks.get_color() == color\n assert coord.ticklabels.get_color() == color\n assert coord.axislabels.get_color() == color\n else:\n for spine in axes.spines.values():\n assert spine.get_edgecolor() == color\n for tick in axes.xaxis.get_ticklines() + axes.yaxis.get_ticklines():\n assert tick.get_color() == color\n for label in axes.xaxis.get_ticklabels() + axes.yaxis.get_ticklabels():\n assert label.get_color() == color\n assert axes.xaxis.label.get_color() == color\n assert axes.yaxis.label.get_color() == color\n\n\ndef _generate_custom_viewer():\n\n example = custom_viewer('Test Plot', x='att(x)', y='att(y)')\n\n @example.plot_data\n def plot_data(axes, x, y, style):\n axes.plot(x, y)\n\n @example.plot_subset\n def plot_subset(axes, x, y, style):\n axes.plot(x, y)\n\n @example.setup\n def setup(axes):\n pass\n\n from glue.config import qt_client\n for viewer in qt_client.members:\n if viewer.LABEL == 'Test Plot':\n return viewer\n\n raise Exception(\"Failed to find custom viewer in qt_client\")\n\n\[email protected]('PYSIDE2_INSTALLED')\ndef test_foreground_background_settings():\n\n d_1d = Data(x=np.random.random(100), y=np.random.random(100), label='Data 1d')\n d_2d = Data(x=np.random.random((100, 100)), y=np.random.random((100, 100)), label='Data 2d')\n\n dc = DataCollection([d_1d, d_2d])\n\n app = GlueApplication(dc)\n\n # Make sure that settings change existing viewers, so we create a bunch of\n # viewers here.\n\n scatter1 = app.new_data_viewer(ScatterViewer)\n scatter1.add_data(d_1d)\n\n image1 = app.new_data_viewer(ImageViewer)\n image1.add_data(d_2d)\n\n histogram1 = app.new_data_viewer(HistogramViewer)\n histogram1.add_data(d_1d)\n\n dendrogram1 = app.new_data_viewer(DendrogramViewer)\n\n example_custom = _generate_custom_viewer()\n\n custom1 = app.new_data_viewer(example_custom)\n\n RED = (1, 0, 0, 0.5)\n GREEN = (0, 1, 0, 0.6)\n\n app.show()\n\n with patch('glue.config.settings') as settings:\n\n settings.FOREGROUND_COLOR = 'black'\n settings.BACKGROUND_COLOR = 'white'\n settings.DATA_COLOR = '0.5'\n settings.DATA_ALPHA = 0.5\n\n dialog = PreferencesDialog(app)\n dialog.show()\n dialog.background = RED\n dialog.foreground = GREEN\n dialog.accept()\n\n assert_axes_background(scatter1.axes, RED)\n assert_axes_background(image1.axes, RED)\n assert_axes_background(histogram1.axes, RED)\n assert_axes_background(dendrogram1.axes, RED)\n assert_axes_background(custom1.axes, RED)\n\n assert_axes_foreground(scatter1.axes, GREEN)\n assert_axes_foreground(image1.axes, GREEN)\n assert_axes_foreground(histogram1.axes, GREEN)\n assert_axes_foreground(dendrogram1.axes, GREEN)\n assert_axes_foreground(custom1.axes, GREEN)\n\n # Now make sure that new viewers also inherit these settings\n\n scatter2 = app.new_data_viewer(ScatterViewer)\n scatter2.add_data(d_1d)\n\n image2 = app.new_data_viewer(ImageViewer)\n image2.add_data(d_2d)\n\n histogram2 = app.new_data_viewer(HistogramViewer)\n histogram2.add_data(d_1d)\n\n dendrogram2 = app.new_data_viewer(DendrogramViewer)\n custom2 = app.new_data_viewer(example_custom)\n\n assert_axes_background(scatter2.axes, RED)\n assert_axes_background(image2.axes, RED)\n assert_axes_background(histogram2.axes, RED)\n assert_axes_background(dendrogram2.axes, RED)\n assert_axes_background(custom2.axes, RED)\n\n assert_axes_foreground(scatter2.axes, GREEN)\n assert_axes_foreground(image2.axes, GREEN)\n assert_axes_foreground(histogram2.axes, GREEN)\n assert_axes_foreground(dendrogram2.axes, GREEN)\n assert_axes_foreground(custom2.axes, 
GREEN)\n\n app.close()\n",
"from __future__ import absolute_import, division, print_function\n\nimport numpy as np\nfrom glue.core import Data\nfrom glue.core.coordinates import WCSCoordinates\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\n\nfrom ..gridded_fits import fits_writer\n\n\ndef test_fits_writer_data(tmpdir):\n\n filename = tmpdir.join('test1.fits').strpath\n\n data = Data(x=np.arange(6).reshape(2, 3),\n y=(np.arange(6) * 2).reshape(2, 3))\n\n fits_writer(filename, data)\n\n with fits.open(filename) as hdulist:\n assert len(hdulist) == 2\n np.testing.assert_equal(hdulist['x'].data, data['x'])\n np.testing.assert_equal(hdulist['y'].data, data['y'])\n\n # Only write out some components\n\n filename = tmpdir.join('test2.fits').strpath\n\n fits_writer(filename, data, components=[data.id['x']])\n\n with fits.open(filename) as hdulist:\n assert len(hdulist) == 1\n np.testing.assert_equal(hdulist['x'].data, data['x'])\n\n\ndef test_component_unit_header(tmpdir):\n from astropy import units as u\n filename = tmpdir.join('test3.fits').strpath\n\n data = Data(x=np.arange(6).reshape(2, 3),\n y=(np.arange(6) * 2).reshape(2, 3),\n z=(np.arange(6) * 2).reshape(2, 3))\n\n wcs = WCS()\n data.coords = WCSCoordinates(wcs=wcs)\n\n unit1 = data.get_component(\"x\").units = u.m / u.s\n unit2 = data.get_component(\"y\").units = u.Jy\n unit3 = data.get_component(\"z\").units = \"\"\n\n fits_writer(filename, data)\n\n with fits.open(filename) as hdulist:\n assert len(hdulist) == 3\n bunit = hdulist['x'].header.get('BUNIT')\n assert u.Unit(bunit) == unit1\n\n bunit = hdulist['y'].header.get('BUNIT')\n assert u.Unit(bunit) == unit2\n\n bunit = hdulist['z'].header.get('BUNIT')\n assert bunit == unit3\n\n\ndef test_fits_writer_subset(tmpdir):\n\n filename = tmpdir.join('test').strpath\n\n data = Data(x=np.arange(6).reshape(2, 3),\n y=(np.arange(6) * 2).reshape(2, 3))\n\n subset = data.new_subset()\n subset.subset_state = data.id['x'] > 2\n\n fits_writer(filename, subset)\n\n with fits.open(filename) as hdulist:\n assert np.all(np.isnan(hdulist['x'].data[0]))\n assert np.all(np.isnan(hdulist['y'].data[0]))\n np.testing.assert_equal(hdulist['x'].data[1], data['x'][1])\n np.testing.assert_equal(hdulist['y'].data[1], data['y'][1])\n",
"from __future__ import absolute_import, division, print_function\n\nimport os\nfrom collections import defaultdict, Counter\n\nfrom qtpy import QtWidgets, QtGui\nfrom qtpy.QtCore import Qt\n\nfrom glue.external.echo import SelectionCallbackProperty\nfrom glue.external.echo.qt import connect_combo_selection\nfrom glue.core import ComponentID\nfrom glue.core.parse import ParsedComponentLink, ParsedCommand\nfrom glue.utils.qt import load_ui\nfrom glue.core.message import NumericalDataChangedMessage\n\nfrom glue.dialogs.component_arithmetic.qt.equation_editor import EquationEditorDialog\n\n__all__ = ['ArithmeticEditorWidget']\n\n\nclass ArithmeticEditorWidget(QtWidgets.QDialog):\n\n data = SelectionCallbackProperty()\n\n def __init__(self, data_collection=None, parent=None):\n\n super(ArithmeticEditorWidget, self).__init__(parent=parent)\n\n self.ui = load_ui('component_arithmetic.ui', self,\n directory=os.path.dirname(__file__))\n\n self.list = self.ui.list_derived_components\n\n self.data_collection = data_collection\n\n self._components_derived = defaultdict(list)\n self._components_other = defaultdict(list)\n self._state = defaultdict(dict)\n\n for data in data_collection:\n\n # First find all derived components (only ones based on arithmetic\n # expressions)\n\n self._components_derived[data] = []\n\n for cid in data.derived_components:\n comp = data.get_component(cid)\n if isinstance(comp.link, ParsedComponentLink):\n comp_state = {}\n comp_state['cid'] = cid\n comp_state['label'] = cid.label\n comp_state['equation'] = comp.link._parsed\n self._state[data][cid] = comp_state\n self._components_derived[data].append(cid)\n\n # Keep track of all other components\n\n self._components_other[data] = []\n\n for cid in data.components:\n if cid not in self._components_derived[data]:\n self._components_other[data].append(cid)\n\n # Populate data combo\n ArithmeticEditorWidget.data.set_choices(self, list(self.data_collection))\n ArithmeticEditorWidget.data.set_display_func(self, lambda x: x.label)\n connect_combo_selection(self, 'data', self.ui.combosel_data)\n\n self.ui.combosel_data.setCurrentIndex(0)\n self.ui.combosel_data.currentIndexChanged.connect(self._update_component_lists)\n self._update_component_lists()\n\n self.ui.button_add_derived.clicked.connect(self._add_derived_component)\n self.ui.button_edit_derived.clicked.connect(self._edit_derived_component)\n self.ui.button_remove_derived.clicked.connect(self._remove_derived_component)\n\n self.ui.list_derived_components.itemSelectionChanged.connect(self._update_selection_derived)\n\n self._update_selection_derived()\n\n self.ui.list_derived_components.itemChanged.connect(self._update_state)\n self.ui.list_derived_components.order_changed.connect(self._update_state)\n self.ui.list_derived_components.itemDoubleClicked.connect(self._edit_derived_component)\n\n self.ui.button_ok.clicked.connect(self.accept)\n self.ui.button_cancel.clicked.connect(self.reject)\n\n def _update_selection_derived(self):\n enabled = self.list.selected_cid is not None\n self.button_edit_derived.setEnabled(enabled)\n self.button_remove_derived.setEnabled(enabled)\n\n def _update_component_lists(self, *args):\n\n # This gets called when the data is changed and we need to update the\n # components shown in the lists.\n\n self.list.blockSignals(True)\n\n mapping = {}\n for cid in self.data.components:\n mapping[cid] = cid.label\n\n self.list.clear()\n for cid in self._components_derived[self.data]:\n label = self._state[self.data][cid]['label']\n if 
self._state[self.data][cid]['equation'] is None:\n expression = ''\n else:\n expression = self._state[self.data][cid]['equation'].render(mapping)\n self.list.add_cid_and_label(cid, [label, expression], editable=False)\n\n self.list.blockSignals(False)\n\n self._validate()\n\n def _validate(self):\n\n # Construct a list of all labels for the current dataset so that\n # we can check which ones are duplicates\n labels = [c.label for c in self._components_other[self.data]]\n labels.extend([c['label'] for c in self._state[self.data].values()])\n if len(labels) == 0:\n return\n label_count = Counter(labels)\n\n # It's possible that the duplicates are entirely for components not\n # shown in this editor, so we keep track here of whether an invalid\n # component has been found.\n invalid = False\n\n if label_count.most_common(1)[0][1] > 1:\n\n # If we are here, there are duplicates somewhere in the list\n # of components.\n\n brush_red = QtGui.QBrush(Qt.red)\n brush_black = QtGui.QBrush(Qt.black)\n\n self.list.blockSignals(True)\n\n for item in self.list:\n label = item.text(0)\n if label_count[label] > 1:\n item.setForeground(0, brush_red)\n invalid = True\n else:\n item.setForeground(0, brush_black)\n\n self.list.blockSignals(False)\n\n if invalid:\n self.ui.label_status.setStyleSheet('color: red')\n self.ui.label_status.setText('Error: some components have duplicate names')\n self.ui.button_ok.setEnabled(False)\n self.ui.combosel_data.setEnabled(False)\n else:\n self.ui.label_status.setStyleSheet('')\n self.ui.label_status.setText('')\n self.ui.button_ok.setEnabled(True)\n self.ui.combosel_data.setEnabled(True)\n\n def _update_state(self, *args):\n self._components_derived[self.data] = []\n for item in self.list:\n cid = item.data(0, Qt.UserRole)\n self._state[self.data][cid]['label'] = item.text(0)\n self._components_derived[self.data].append(cid)\n self._update_component_lists()\n\n def _remove_derived_component(self, *args):\n cid = self.list.selected_cid\n if cid is not None:\n self._components_derived[self.data].remove(cid)\n self._state[self.data].pop(cid)\n self._update_component_lists()\n\n def _add_derived_component(self, *args):\n\n comp_state = {}\n comp_state['cid'] = ComponentID('')\n comp_state['label'] = ''\n comp_state['equation'] = None\n\n self._components_derived[self.data].append(comp_state['cid'])\n self._state[self.data][comp_state['cid']] = comp_state\n\n self._update_component_lists()\n\n self.list.select_cid(comp_state['cid'])\n\n result = self._edit_derived_component()\n\n if not result: # user cancelled\n self._components_derived[self.data].remove(comp_state['cid'])\n self._state[self.data].pop(comp_state['cid'])\n self._update_component_lists()\n\n def _edit_derived_component(self, event=None):\n\n derived_item = self.list.selected_item\n\n if derived_item is None:\n return False\n\n derived_cid = self.list.selected_cid\n\n # Note, we put the pixel/world components last as it's most likely the\n # user wants to use one of the main components.\n mapping = {}\n references = {}\n for cid in (self.data.main_components +\n self.data.pixel_component_ids +\n self.data.world_component_ids):\n if cid is not derived_cid:\n mapping[cid] = cid.label\n references[cid.label] = cid\n\n label = self._state[self.data][derived_cid]['label']\n\n if self._state[self.data][derived_cid]['equation'] is None:\n equation = None\n else:\n equation = self._state[self.data][derived_cid]['equation'].render(mapping)\n\n dialog = EquationEditorDialog(label=label, equation=equation, 
references=references, parent=self)\n dialog.setWindowFlags(self.windowFlags() | Qt.Window)\n dialog.setFocus()\n dialog.raise_()\n dialog.exec_()\n\n if dialog.final_expression is None:\n return False\n\n name, equation = dialog.get_final_label_and_parsed_command()\n self._state[self.data][derived_cid]['label'] = name\n self._state[self.data][derived_cid]['equation'] = equation\n derived_item.setText(0, name)\n\n # Make sure we update the component list here since the equation may\n # have changed and we need to update the preview\n self._update_component_lists()\n\n return True\n\n def accept(self):\n\n for data in self._components_derived:\n\n cids_derived = self._components_derived[data]\n cids_other = self._components_other[data]\n cids_all = cids_other + cids_derived\n cids_existing = data.components\n components = dict((cid.uuid, cid) for cid in data.components)\n\n # First deal with renaming of components\n for cid_new in cids_derived:\n label = self._state[data][cid_new]['label']\n if label != cid_new.label:\n cid_new.label = label\n\n # Second deal with the removal of components\n for cid_old in cids_existing:\n if not any(cid_old is cid_new for cid_new in cids_all):\n data.remove_component(cid_old)\n\n # Third, update/add arithmetic expressions as needed\n for cid_new in cids_derived:\n if any(cid_new is cid_old for cid_old in cids_existing):\n comp = data.get_component(cid_new)\n if comp.link._parsed._cmd != self._state[data][cid_new]['equation']._cmd:\n comp.link._parsed._cmd = self._state[data][cid_new]['equation']._cmd\n comp.link._parsed._references = components\n if data.hub:\n msg = NumericalDataChangedMessage(data)\n data.hub.broadcast(msg)\n else:\n pc = ParsedCommand(self._state[data][cid_new]['equation']._cmd, components)\n link = ParsedComponentLink(cid_new, pc)\n data.add_component_link(link)\n\n # Findally, reorder components as needed\n data.reorder_components(cids_all)\n\n super(ArithmeticEditorWidget, self).accept()\n\n\nif __name__ == \"__main__\": # pragma: nocover\n\n from glue.utils.qt import get_qapp\n app = get_qapp()\n\n import numpy as np\n\n from glue.core.data import Data\n from glue.core.data_collection import DataCollection\n\n x = np.random.random((5, 5))\n y = x * 3\n dc = DataCollection()\n dc.append(Data(label='test1', x=x, y=y))\n dc.append(Data(label='test2', a=x, b=y))\n\n widget = ArithmeticEditorWidget(dc)\n widget.exec_()\n"
] | [
[
"numpy.random.random",
"matplotlib.colors.ColorConverter"
],
[
"numpy.isnan",
"numpy.testing.assert_equal",
"numpy.arange"
],
[
"numpy.random.random"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Quandela/Perceval | [
"677f03808f9fbd3b4390aa4ef8cf6a5e4ec9f83a"
] | [
"perceval/backends/cliffords2017.py"
] | [
"# MIT License\n#\n# Copyright (c) 2022 Quandela\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom .template import Backend\n\nimport numpy as np\nimport quandelibc as qc\nfrom perceval.utils import BasicState\n\n\ndef _square(x):\n return abs((x*x).real)\n\ndef _get_scale(w):\n return max([max(abs(x.real), abs(x.imag)) for x in w])\n\nclass CliffordClifford2017Backend(Backend):\n name = \"CliffordClifford2017\"\n supports_symbolic = False\n supports_circuit_computing = False\n\n def prob_be(self, input_state, output_state, n=None, output_idx=None):\n raise NotImplementedError\n\n def sample(self, input_state):\n # prepare Us that is a m*n matrix\n m = self._m\n n = input_state.n\n fs = [0]*m\n Us = np.zeros((n, m), dtype=np.complex128)\n # build Us while transposing it\n rowidx = 0\n for ik in range(self._m):\n for i in range(input_state[ik]):\n Us[rowidx, :] = self._U[:, ik]\n rowidx += 1\n if n > 1:\n A = Us[np.random.permutation(n), :]\n else:\n A = Us\n w = _square(A[0, :])\n mode_seq = [np.random.choice(np.arange(0, m), p=w/sum(w), size=1)[0]]\n fs[mode_seq[0]] = 1\n for mode_limit in range(2, n+1):\n # permanents of sub-matrices using Laplace-type expansion (arXiv:1505.05486)\n sub_perm = np.array(qc.sub_permanents_cx(np.copy(np.reshape(A[0:mode_limit, mode_seq],\n (-1, mode_limit-1)))))\n sub_perm /= _get_scale(sub_perm)\n # generate next mode from there\n perm_vector = np.dot(sub_perm.transpose(), A[0:mode_limit])\n w = _square(perm_vector)\n next_mode = np.random.choice(np.arange(0, m), p=w/sum(w), size=1)[0]\n mode_seq.append(next_mode)\n fs[next_mode] += 1\n return BasicState(fs)\n"
] | [
[
"numpy.reshape",
"numpy.arange",
"numpy.random.permutation",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gorogoroumaru/optuna | [
"2d27f9086eae6b839fcddea061d848b12f5adf46"
] | [
"optuna/samplers/tpe/sampler.py"
] | [
"import math\n\nimport numpy as np\nimport scipy.special\n\nfrom optuna import distributions\nfrom optuna.pruners import HyperbandPruner\nfrom optuna.samplers import base\nfrom optuna.samplers import random\nfrom optuna.samplers.tpe.parzen_estimator import _ParzenEstimator\nfrom optuna.samplers.tpe.parzen_estimator import _ParzenEstimatorParameters\nfrom optuna import structs\nfrom optuna.structs import StudyDirection\nfrom optuna import type_checking\n\nif type_checking.TYPE_CHECKING:\n from typing import Any # NOQA\n from typing import Callable # NOQA\n from typing import Dict # NOQA\n from typing import List # NOQA\n from typing import Optional # NOQA\n from typing import Tuple # NOQA\n\n from optuna.distributions import BaseDistribution # NOQA\n from optuna.structs import FrozenTrial # NOQA\n from optuna.study import Study # NOQA\n\nEPS = 1e-12\n\n\ndef default_gamma(x):\n # type: (int) -> int\n\n return min(int(np.ceil(0.1 * x)), 25)\n\n\ndef hyperopt_default_gamma(x):\n # type: (int) -> int\n\n return min(int(np.ceil(0.25 * np.sqrt(x))), 25)\n\n\ndef default_weights(x):\n # type: (int) -> np.ndarray\n\n if x == 0:\n return np.asarray([])\n elif x < 25:\n return np.ones(x)\n else:\n ramp = np.linspace(1.0 / x, 1.0, num=x - 25)\n flat = np.ones(25)\n return np.concatenate([ramp, flat], axis=0)\n\n\nclass TPESampler(base.BaseSampler):\n \"\"\"Sampler using TPE (Tree-structured Parzen Estimator) algorithm.\n\n This sampler is based on *independent sampling*.\n See also :class:`~optuna.samplers.BaseSampler` for more details of 'independent sampling'.\n\n On each trial, for each parameter, TPE fits one Gaussian Mixture Model (GMM) ``l(x)`` to\n the set of parameter values associated with the best objective values, and another GMM\n ``g(x)`` to the remaining parameter values. It chooses the parameter value ``x`` that\n maximizes the ratio ``l(x)/g(x)``.\n\n For further information about TPE algorithm, please refer to the following papers:\n\n - `Algorithms for Hyper-Parameter Optimization\n <https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf>`_\n - `Making a Science of Model Search: Hyperparameter Optimization in Hundreds of\n Dimensions for Vision Architectures <http://proceedings.mlr.press/v28/bergstra13.pdf>`_\n\n Example:\n\n .. testcode::\n\n import optuna\n from optuna.samplers import TPESampler\n\n def objective(trial):\n x = trial.suggest_uniform('x', -10, 10)\n return x**2\n\n study = optuna.create_study(sampler=TPESampler())\n study.optimize(objective, n_trials=10)\n\n \"\"\"\n\n def __init__(\n self,\n consider_prior=True, # type: bool\n prior_weight=1.0, # type: float\n consider_magic_clip=True, # type: bool\n consider_endpoints=False, # type: bool\n n_startup_trials=10, # type: int\n n_ei_candidates=24, # type: int\n gamma=default_gamma, # type: Callable[[int], int]\n weights=default_weights, # type: Callable[[int], np.ndarray]\n seed=None, # type: Optional[int]\n ):\n # type: (...) 
-> None\n\n self._parzen_estimator_parameters = _ParzenEstimatorParameters(\n consider_prior, prior_weight, consider_magic_clip, consider_endpoints, weights\n )\n self._prior_weight = prior_weight\n self._n_startup_trials = n_startup_trials\n self._n_ei_candidates = n_ei_candidates\n self._gamma = gamma\n self._weights = weights\n\n self._rng = np.random.RandomState(seed)\n self._random_sampler = random.RandomSampler(seed=seed)\n\n def infer_relative_search_space(self, study, trial):\n # type: (Study, FrozenTrial) -> Dict[str, BaseDistribution]\n\n return {}\n\n def sample_relative(self, study, trial, search_space):\n # type: (Study, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]\n\n return {}\n\n def sample_independent(self, study, trial, param_name, param_distribution):\n # type: (Study, FrozenTrial, str, BaseDistribution) -> Any\n\n values, scores = _get_observation_pairs(study, param_name, trial)\n\n n = len(values)\n\n if n < self._n_startup_trials:\n return self._random_sampler.sample_independent(\n study, trial, param_name, param_distribution\n )\n\n below_param_values, above_param_values = self._split_observation_pairs(values, scores)\n\n if isinstance(param_distribution, distributions.UniformDistribution):\n return self._sample_uniform(param_distribution, below_param_values, above_param_values)\n elif isinstance(param_distribution, distributions.LogUniformDistribution):\n return self._sample_loguniform(\n param_distribution, below_param_values, above_param_values\n )\n elif isinstance(param_distribution, distributions.DiscreteUniformDistribution):\n return self._sample_discrete_uniform(\n param_distribution, below_param_values, above_param_values\n )\n elif isinstance(param_distribution, distributions.IntUniformDistribution):\n return self._sample_int(param_distribution, below_param_values, above_param_values)\n elif isinstance(param_distribution, distributions.CategoricalDistribution):\n index = self._sample_categorical_index(\n param_distribution, below_param_values, above_param_values\n )\n return param_distribution.choices[index]\n else:\n distribution_list = [\n distributions.UniformDistribution.__name__,\n distributions.LogUniformDistribution.__name__,\n distributions.DiscreteUniformDistribution.__name__,\n distributions.IntUniformDistribution.__name__,\n distributions.CategoricalDistribution.__name__,\n ]\n raise NotImplementedError(\n \"The distribution {} is not implemented. \"\n \"The parameter distribution should be one of the {}\".format(\n param_distribution, distribution_list\n )\n )\n\n def _split_observation_pairs(\n self,\n config_vals, # type: List[float]\n loss_vals, # type: List[Tuple[float, float]]\n ):\n # type: (...) 
-> Tuple[np.ndarray, np.ndarray]\n\n config_vals = np.asarray(config_vals)\n loss_vals = np.asarray(loss_vals, dtype=[(\"step\", float), (\"score\", float)])\n\n n_below = self._gamma(len(config_vals))\n loss_ascending = np.argsort(loss_vals)\n below = config_vals[np.sort(loss_ascending[:n_below])]\n above = config_vals[np.sort(loss_ascending[n_below:])]\n return below, above\n\n def _sample_uniform(self, distribution, below, above):\n # type: (distributions.UniformDistribution, np.ndarray, np.ndarray) -> float\n\n low = distribution.low\n high = distribution.high\n return self._sample_numerical(low, high, below, above)\n\n def _sample_loguniform(self, distribution, below, above):\n # type: (distributions.LogUniformDistribution, np.ndarray, np.ndarray) -> float\n\n low = distribution.low\n high = distribution.high\n return self._sample_numerical(low, high, below, above, is_log=True)\n\n def _sample_discrete_uniform(self, distribution, below, above):\n # type:(distributions.DiscreteUniformDistribution, np.ndarray, np.ndarray) -> float\n\n q = distribution.q\n r = distribution.high - distribution.low\n # [low, high] is shifted to [0, r] to align sampled values at regular intervals.\n low = 0 - 0.5 * q\n high = r + 0.5 * q\n\n # Shift below and above to [0, r]\n above -= distribution.low\n below -= distribution.low\n\n best_sample = self._sample_numerical(low, high, below, above, q=q) + distribution.low\n return min(max(best_sample, distribution.low), distribution.high)\n\n def _sample_int(self, distribution, below, above):\n # type: (distributions.IntUniformDistribution, np.ndarray, np.ndarray) -> int\n\n d = distributions.DiscreteUniformDistribution(\n low=distribution.low, high=distribution.high, q=distribution.step\n )\n return int(self._sample_discrete_uniform(d, below, above))\n\n def _sample_numerical(\n self,\n low, # type: float\n high, # type: float\n below, # type: np.ndarray\n above, # type: np.ndarray\n q=None, # type: Optional[float]\n is_log=False, # type: bool\n ):\n # type: (...) 
-> float\n\n if is_log:\n low = np.log(low)\n high = np.log(high)\n below = np.log(below)\n above = np.log(above)\n\n size = (self._n_ei_candidates,)\n\n parzen_estimator_below = _ParzenEstimator(\n mus=below, low=low, high=high, parameters=self._parzen_estimator_parameters\n )\n samples_below = self._sample_from_gmm(\n parzen_estimator=parzen_estimator_below,\n low=low,\n high=high,\n q=q,\n is_log=is_log,\n size=size,\n )\n log_likelihoods_below = self._gmm_log_pdf(\n samples=samples_below,\n parzen_estimator=parzen_estimator_below,\n low=low,\n high=high,\n q=q,\n is_log=is_log,\n )\n\n parzen_estimator_above = _ParzenEstimator(\n mus=above, low=low, high=high, parameters=self._parzen_estimator_parameters\n )\n\n log_likelihoods_above = self._gmm_log_pdf(\n samples=samples_below,\n parzen_estimator=parzen_estimator_above,\n low=low,\n high=high,\n q=q,\n is_log=is_log,\n )\n\n return float(\n TPESampler._compare(\n samples=samples_below, log_l=log_likelihoods_below, log_g=log_likelihoods_above\n )[0]\n )\n\n def _sample_categorical_index(self, distribution, below, above):\n # type: (distributions.CategoricalDistribution, np.ndarray, np.ndarray) -> int\n\n choices = distribution.choices\n below = list(map(int, below))\n above = list(map(int, above))\n upper = len(choices)\n size = (self._n_ei_candidates,)\n\n weights_below = self._weights(len(below))\n counts_below = np.bincount(below, minlength=upper, weights=weights_below)\n weighted_below = counts_below + self._prior_weight\n weighted_below /= weighted_below.sum()\n samples_below = self._sample_from_categorical_dist(weighted_below, size)\n log_likelihoods_below = TPESampler._categorical_log_pdf(samples_below, weighted_below)\n\n weights_above = self._weights(len(above))\n counts_above = np.bincount(above, minlength=upper, weights=weights_above)\n weighted_above = counts_above + self._prior_weight\n weighted_above /= weighted_above.sum()\n log_likelihoods_above = TPESampler._categorical_log_pdf(samples_below, weighted_above)\n\n return int(\n TPESampler._compare(\n samples=samples_below, log_l=log_likelihoods_below, log_g=log_likelihoods_above\n )[0]\n )\n\n def _sample_from_gmm(\n self,\n parzen_estimator, # type: _ParzenEstimator\n low, # type: float\n high, # type: float\n q=None, # type: Optional[float]\n size=(), # type: Tuple\n is_log=False, # type: bool\n ):\n # type: (...) -> np.ndarray\n\n weights = parzen_estimator.weights\n mus = parzen_estimator.mus\n sigmas = parzen_estimator.sigmas\n weights, mus, sigmas = map(np.asarray, (weights, mus, sigmas))\n n_samples = np.prod(size)\n\n if low >= high:\n raise ValueError(\n \"The 'low' should be lower than the 'high'. \"\n \"But (low, high) = ({}, {}).\".format(low, high)\n )\n samples = np.asarray([], dtype=float)\n while samples.size < n_samples:\n active = np.argmax(self._rng.multinomial(1, weights))\n draw = self._rng.normal(loc=mus[active], scale=sigmas[active])\n if low <= draw < high:\n samples = np.append(samples, draw)\n\n samples = np.reshape(samples, size)\n\n if is_log:\n samples = np.exp(samples)\n\n if q is None:\n return samples\n else:\n return np.round(samples / q) * q\n\n def _gmm_log_pdf(\n self,\n samples, # type: np.ndarray\n parzen_estimator, # type: _ParzenEstimator\n low, # type: float\n high, # type: float\n q=None, # type: Optional[float]\n is_log=False, # type: bool\n ):\n # type: (...) 
-> np.ndarray\n\n weights = parzen_estimator.weights\n mus = parzen_estimator.mus\n sigmas = parzen_estimator.sigmas\n samples, weights, mus, sigmas = map(np.asarray, (samples, weights, mus, sigmas))\n if samples.size == 0:\n return np.asarray([], dtype=float)\n if weights.ndim != 1:\n raise ValueError(\n \"The 'weights' should be 2-dimension. \"\n \"But weights.shape = {}\".format(weights.shape)\n )\n if mus.ndim != 1:\n raise ValueError(\n \"The 'mus' should be 2-dimension. \" \"But mus.shape = {}\".format(mus.shape)\n )\n if sigmas.ndim != 1:\n raise ValueError(\n \"The 'sigmas' should be 2-dimension. \" \"But sigmas.shape = {}\".format(sigmas.shape)\n )\n _samples = samples\n samples = _samples.flatten()\n\n p_accept = np.sum(\n weights\n * (\n TPESampler._normal_cdf(high, mus, sigmas)\n - TPESampler._normal_cdf(low, mus, sigmas)\n )\n )\n\n if q is None:\n jacobian = samples[:, None] if is_log else np.ones(samples.shape)[:, None]\n if is_log:\n distance = np.log(samples[:, None]) - mus\n else:\n distance = samples[:, None] - mus\n mahalanobis = (distance / np.maximum(sigmas, EPS)) ** 2\n Z = np.sqrt(2 * np.pi) * sigmas * jacobian\n coefficient = weights / Z / p_accept\n return_val = TPESampler._logsum_rows(-0.5 * mahalanobis + np.log(coefficient))\n else:\n probabilities = np.zeros(samples.shape, dtype=float)\n cdf_func = TPESampler._log_normal_cdf if is_log else TPESampler._normal_cdf\n for w, mu, sigma in zip(weights, mus, sigmas):\n if is_log:\n upper_bound = np.minimum(samples + q / 2.0, np.exp(high))\n lower_bound = np.maximum(samples - q / 2.0, np.exp(low))\n lower_bound = np.maximum(0, lower_bound)\n else:\n upper_bound = np.minimum(samples + q / 2.0, high)\n lower_bound = np.maximum(samples - q / 2.0, low)\n inc_amt = w * cdf_func(upper_bound, mu, sigma)\n inc_amt -= w * cdf_func(lower_bound, mu, sigma)\n probabilities += inc_amt\n return_val = np.log(probabilities + EPS) - np.log(p_accept + EPS)\n\n return_val.shape = _samples.shape\n return return_val\n\n def _sample_from_categorical_dist(self, probabilities, size):\n # type: (np.ndarray, Tuple[int]) -> np.ndarray\n\n if probabilities.size == 1 and isinstance(probabilities[0], np.ndarray):\n probabilities = probabilities[0]\n probabilities = np.asarray(probabilities)\n\n if size == (0,):\n return np.asarray([], dtype=float)\n assert len(size)\n assert probabilities.ndim == 1\n\n n_draws = int(np.prod(size))\n sample = self._rng.multinomial(n=1, pvals=probabilities, size=int(n_draws))\n assert sample.shape == size + (probabilities.size,)\n return_val = np.dot(sample, np.arange(probabilities.size))\n return_val.shape = size\n return return_val\n\n @classmethod\n def _categorical_log_pdf(\n cls,\n sample, # type: np.ndarray\n p, # type: np.ndarray\n ):\n # type: (...) -> np.ndarray\n\n if sample.size:\n return np.log(np.asarray(p)[sample])\n else:\n return np.asarray([])\n\n @classmethod\n def _compare(cls, samples, log_l, log_g):\n # type: (np.ndarray, np.ndarray, np.ndarray) -> np.ndarray\n\n samples, log_l, log_g = map(np.asarray, (samples, log_l, log_g))\n if samples.size:\n score = log_l - log_g\n if samples.size != score.size:\n raise ValueError(\n \"The size of the 'samples' and that of the 'score' \"\n \"should be same. 
\"\n \"But (samples.size, score.size) = ({}, {})\".format(samples.size, score.size)\n )\n\n best = np.argmax(score)\n return np.asarray([samples[best]] * samples.size)\n else:\n return np.asarray([])\n\n @classmethod\n def _logsum_rows(cls, x):\n # type: (np.ndarray) -> np.ndarray\n\n x = np.asarray(x)\n m = x.max(axis=1)\n return np.log(np.exp(x - m[:, None]).sum(axis=1)) + m\n\n @classmethod\n def _normal_cdf(cls, x, mu, sigma):\n # type: (float, np.ndarray, np.ndarray) -> np.ndarray\n\n mu, sigma = map(np.asarray, (mu, sigma))\n denominator = x - mu\n numerator = np.maximum(np.sqrt(2) * sigma, EPS)\n z = denominator / numerator\n return 0.5 * (1 + scipy.special.erf(z))\n\n @classmethod\n def _log_normal_cdf(cls, x, mu, sigma):\n # type: (float, np.ndarray, np.ndarray) -> np.ndarray\n\n mu, sigma = map(np.asarray, (mu, sigma))\n if x < 0:\n raise ValueError(\"Negative argument is given to _lognormal_cdf. x: {}\".format(x))\n denominator = np.log(np.maximum(x, EPS)) - mu\n numerator = np.maximum(np.sqrt(2) * sigma, EPS)\n z = denominator / numerator\n return 0.5 + 0.5 * scipy.special.erf(z)\n\n @staticmethod\n def hyperopt_parameters():\n # type: () -> Dict[str, Any]\n \"\"\"Return the the default parameters of hyperopt (v0.1.2).\n\n :class:`~optuna.samplers.TPESampler` can be instantiated with the parameters returned\n by this method.\n\n Example:\n\n Create a :class:`~optuna.samplers.TPESampler` instance with the default\n parameters of `hyperopt <https://github.com/hyperopt/hyperopt/tree/0.1.2>`_.\n\n .. testcode::\n\n import optuna\n from optuna.samplers import TPESampler\n\n def objective(trial):\n x = trial.suggest_uniform('x', -10, 10)\n return x**2\n\n sampler = TPESampler(**TPESampler.hyperopt_parameters())\n study = optuna.create_study(sampler=sampler)\n study.optimize(objective, n_trials=10)\n\n Returns:\n A dictionary containing the default parameters of hyperopt.\n\n \"\"\"\n\n return {\n \"consider_prior\": True,\n \"prior_weight\": 1.0,\n \"consider_magic_clip\": True,\n \"consider_endpoints\": False,\n \"n_startup_trials\": 20,\n \"n_ei_candidates\": 24,\n \"gamma\": hyperopt_default_gamma,\n \"weights\": default_weights,\n }\n\n\ndef _get_observation_pairs(study, param_name, trial):\n # type: (Study, str, FrozenTrial) -> Tuple[List[float], List[Tuple[float, float]]]\n \"\"\"Get observation pairs from the study.\n\n This function collects observation pairs from the complete or pruned trials of the study.\n The trials that don't contain the parameter named ``param_name`` are excluded\n from the result.\n\n An observation pair fundamentally consists of a parameter value and an objective value.\n However, due to the pruning mechanism of Optuna, final objective values are not always\n available. 
Therefore, this function uses intermediate values in addition to the final\n ones, and reports the value with its step count as ``(-step, value)``.\n Consequently, the structure of the observation pair is as follows:\n ``(param_value, (-step, value))``.\n\n The second element of an observation pair is used to rank observations in\n ``_split_observation_pairs`` method (i.e., observations are sorted lexicographically by\n ``(-step, value)``).\n \"\"\"\n\n sign = 1\n if study.direction == StudyDirection.MAXIMIZE:\n sign = -1\n\n if isinstance(study.pruner, HyperbandPruner):\n # Create `_BracketStudy` to use trials that have the same bracket id.\n pruner = study.pruner # type: HyperbandPruner\n study = pruner._create_bracket_study(study, pruner._get_bracket_id(study, trial))\n\n values = []\n scores = []\n for trial in study.get_trials(deepcopy=False):\n if param_name not in trial.params:\n continue\n\n if trial.state is structs.TrialState.COMPLETE and trial.value is not None:\n score = (-float(\"inf\"), sign * trial.value)\n elif trial.state is structs.TrialState.PRUNED:\n if len(trial.intermediate_values) > 0:\n step, intermediate_value = max(trial.intermediate_values.items())\n if math.isnan(intermediate_value):\n score = (-step, float(\"inf\"))\n else:\n score = (-step, sign * intermediate_value)\n else:\n score = (float(\"inf\"), 0.0)\n else:\n continue\n\n distribution = trial.distributions[param_name]\n param_value = distribution.to_internal_repr(trial.params[param_name])\n values.append(param_value)\n scores.append(score)\n\n return values, scores\n"
] | [
[
"numpy.minimum",
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"numpy.concatenate",
"numpy.round",
"numpy.exp",
"numpy.reshape",
"numpy.arange",
"numpy.ceil",
"numpy.argmax",
"numpy.zeros",
"numpy.log",
"numpy.append",
"numpy.argsort",
"numpy.random.RandomState",
"numpy.maximum",
"numpy.sort",
"numpy.ones",
"numpy.bincount",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
warner-benjamin/fastai | [
"ceeba805f43e6258e7131d78706859f45c342575"
] | [
"fastai/layers.py"
] | [
"# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_layers.ipynb (unless otherwise specified).\n\n\nfrom __future__ import annotations\n\n\n__all__ = ['module', 'Identity', 'Lambda', 'PartialLambda', 'Flatten', 'ToTensorBase', 'View', 'ResizeBatch',\n 'Debugger', 'sigmoid_range', 'SigmoidRange', 'AdaptiveConcatPool1d', 'AdaptiveConcatPool2d', 'PoolType',\n 'adaptive_pool', 'PoolFlatten', 'NormType', 'BatchNorm', 'InstanceNorm', 'BatchNorm1dFlat', 'LinBnDrop',\n 'sigmoid', 'sigmoid_', 'vleaky_relu', 'init_default', 'init_linear', 'ConvLayer', 'AdaptiveAvgPool',\n 'MaxPool', 'AvgPool', 'trunc_normal_', 'Embedding', 'SelfAttention', 'PooledSelfAttention2d',\n 'SimpleSelfAttention', 'icnr_init', 'PixelShuffle_ICNR', 'sequential', 'SequentialEx', 'MergeLayer', 'Cat',\n 'SimpleCNN', 'ProdLayer', 'inplace_relu', 'SEModule', 'ResBlock', 'SEBlock', 'SEResNeXtBlock',\n 'SeparableBlock', 'TimeDistributed', 'swish', 'Swish', 'MishJitAutoFn', 'mish', 'Mish', 'ParameterModule',\n 'children_and_parameters', 'has_children', 'flatten_model', 'NoneReduce', 'in_channels']\n\n# Cell\n#nbdev_comment from __future__ import annotations\nfrom .imports import *\nfrom .torch_imports import *\nfrom .torch_core import *\nfrom torch.nn.utils import weight_norm, spectral_norm\n\n# Cell\ndef module(*flds, **defaults):\n \"Decorator to create an `nn.Module` using `f` as `forward` method\"\n pa = [inspect.Parameter(o, inspect.Parameter.POSITIONAL_OR_KEYWORD) for o in flds]\n pb = [inspect.Parameter(k, inspect.Parameter.POSITIONAL_OR_KEYWORD, default=v)\n for k,v in defaults.items()]\n params = pa+pb\n all_flds = [*flds,*defaults.keys()]\n\n def _f(f):\n class c(nn.Module):\n def __init__(self, *args, **kwargs):\n super().__init__()\n for i,o in enumerate(args): kwargs[all_flds[i]] = o\n kwargs = merge(defaults,kwargs)\n for k,v in kwargs.items(): setattr(self,k,v)\n __repr__ = basic_repr(all_flds)\n forward = f\n c.__signature__ = inspect.Signature(params)\n c.__name__ = c.__qualname__ = f.__name__\n c.__doc__ = f.__doc__\n return c\n return _f\n\n# Cell\n@module()\ndef Identity(self, x):\n \"Do nothing at all\"\n return x\n\n# Cell\n@module('func')\ndef Lambda(self, x):\n \"An easy way to create a pytorch layer for a simple `func`\"\n return self.func(x)\n\n# Cell\nclass PartialLambda(Lambda):\n \"Layer that applies `partial(func, **kwargs)`\"\n def __init__(self, func, **kwargs):\n super().__init__(partial(func, **kwargs))\n self.repr = f'{func.__name__}, {kwargs}'\n\n def forward(self, x): return self.func(x)\n def __repr__(self): return f'{self.__class__.__name__}({self.repr})'\n\n# Cell\n@module(full=False)\ndef Flatten(self, x):\n \"Flatten `x` to a single dimension, e.g. at end of a model. 
`full` for rank-1 tensor\"\n return TensorBase(x.view(-1) if self.full else x.view(x.size(0), -1))\n\n# Cell\n@module(tensor_cls=TensorBase)\ndef ToTensorBase(self, x):\n \"Remove `tensor_cls` to x\"\n return self.tensor_cls(x)\n\n# Cell\nclass View(Module):\n \"Reshape `x` to `size`\"\n def __init__(self, *size): self.size = size\n def forward(self, x): return x.view(self.size)\n\n# Cell\nclass ResizeBatch(Module):\n \"Reshape `x` to `size`, keeping batch dim the same size\"\n def __init__(self, *size): self.size = size\n def forward(self, x): return x.view((x.size(0),) + self.size)\n\n# Cell\n@module()\ndef Debugger(self,x):\n \"A module to debug inside a model.\"\n set_trace()\n return x\n\n# Cell\ndef sigmoid_range(x, low, high):\n \"Sigmoid function with range `(low, high)`\"\n return torch.sigmoid(x) * (high - low) + low\n\n# Cell\n@module('low','high')\ndef SigmoidRange(self, x):\n \"Sigmoid module with range `(low, high)`\"\n return sigmoid_range(x, self.low, self.high)\n\n# Cell\nclass AdaptiveConcatPool1d(Module):\n \"Layer that concats `AdaptiveAvgPool1d` and `AdaptiveMaxPool1d`\"\n def __init__(self, size=None):\n self.size = size or 1\n self.ap = nn.AdaptiveAvgPool1d(self.size)\n self.mp = nn.AdaptiveMaxPool1d(self.size)\n def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)\n\n# Cell\nclass AdaptiveConcatPool2d(Module):\n \"Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`\"\n def __init__(self, size=None):\n self.size = size or 1\n self.ap = nn.AdaptiveAvgPool2d(self.size)\n self.mp = nn.AdaptiveMaxPool2d(self.size)\n def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)\n\n# Cell\nclass PoolType: Avg,Max,Cat = 'Avg','Max','Cat'\n\n# Cell\ndef adaptive_pool(pool_type):\n return nn.AdaptiveAvgPool2d if pool_type=='Avg' else nn.AdaptiveMaxPool2d if pool_type=='Max' else AdaptiveConcatPool2d\n\n# Cell\nclass PoolFlatten(nn.Sequential):\n \"Combine `nn.AdaptiveAvgPool2d` and `Flatten`.\"\n def __init__(self, pool_type=PoolType.Avg): super().__init__(adaptive_pool(pool_type)(1), Flatten())\n\n# Cell\nNormType = Enum('NormType', 'Batch BatchZero Weight Spectral Instance InstanceZero')\n\n# Cell\ndef _get_norm(prefix, nf, ndim=2, zero=False, **kwargs):\n \"Norm layer with `nf` features and `ndim` initialized depending on `norm_type`.\"\n assert 1 <= ndim <= 3\n bn = getattr(nn, f\"{prefix}{ndim}d\")(nf, **kwargs)\n if bn.affine:\n bn.bias.data.fill_(1e-3)\n bn.weight.data.fill_(0. 
if zero else 1.)\n return bn\n\n# Cell\n@delegates(nn.BatchNorm2d)\ndef BatchNorm(nf, ndim=2, norm_type=NormType.Batch, **kwargs):\n \"BatchNorm layer with `nf` features and `ndim` initialized depending on `norm_type`.\"\n return _get_norm('BatchNorm', nf, ndim, zero=norm_type==NormType.BatchZero, **kwargs)\n\n# Cell\n@delegates(nn.InstanceNorm2d)\ndef InstanceNorm(nf, ndim=2, norm_type=NormType.Instance, affine=True, **kwargs):\n \"InstanceNorm layer with `nf` features and `ndim` initialized depending on `norm_type`.\"\n return _get_norm('InstanceNorm', nf, ndim, zero=norm_type==NormType.InstanceZero, affine=affine, **kwargs)\n\n# Cell\nclass BatchNorm1dFlat(nn.BatchNorm1d):\n \"`nn.BatchNorm1d`, but first flattens leading dimensions\"\n def forward(self, x):\n if x.dim()==2: return super().forward(x)\n *f,l = x.shape\n x = x.contiguous().view(-1,l)\n return super().forward(x).view(*f,l)\n\n# Cell\nclass LinBnDrop(nn.Sequential):\n \"Module grouping `BatchNorm1d`, `Dropout` and `Linear` layers\"\n def __init__(self, n_in, n_out, bn=True, p=0., act=None, lin_first=False):\n layers = [BatchNorm(n_out if lin_first else n_in, ndim=1)] if bn else []\n if p != 0: layers.append(nn.Dropout(p))\n lin = [nn.Linear(n_in, n_out, bias=not bn)]\n if act is not None: lin.append(act)\n layers = lin+layers if lin_first else layers+lin\n super().__init__(*layers)\n\n# Cell\ndef sigmoid(input, eps=1e-7):\n \"Same as `torch.sigmoid`, plus clamping to `(eps,1-eps)\"\n return input.sigmoid().clamp(eps,1-eps)\n\n# Cell\ndef sigmoid_(input, eps=1e-7):\n \"Same as `torch.sigmoid_`, plus clamping to `(eps,1-eps)\"\n return input.sigmoid_().clamp_(eps,1-eps)\n\n# Cell\nfrom torch.nn.init import kaiming_uniform_,uniform_,xavier_uniform_,normal_\n\n# Cell\ndef vleaky_relu(input, inplace=True):\n \"`F.leaky_relu` with 0.3 slope\"\n return F.leaky_relu(input, negative_slope=0.3, inplace=inplace)\n\n# Cell\nfor o in F.relu,nn.ReLU,F.relu6,nn.ReLU6,F.leaky_relu,nn.LeakyReLU:\n o.__default_init__ = kaiming_uniform_\n\n# Cell\nfor o in F.sigmoid,nn.Sigmoid,F.tanh,nn.Tanh,sigmoid,sigmoid_:\n o.__default_init__ = xavier_uniform_\n\n# Cell\ndef init_default(m, func=nn.init.kaiming_normal_):\n \"Initialize `m` weights with `func` and set `bias` to 0.\"\n if func and hasattr(m, 'weight'): func(m.weight)\n with torch.no_grad(): nested_callable(m, 'bias.fill_')(0.)\n return m\n\n# Cell\ndef init_linear(m, act_func=None, init='auto', bias_std=0.01):\n if getattr(m,'bias',None) is not None and bias_std is not None:\n if bias_std != 0: normal_(m.bias, 0, bias_std)\n else: m.bias.data.zero_()\n if init=='auto':\n if act_func in (F.relu_,F.leaky_relu_): init = kaiming_uniform_\n else: init = nested_callable(act_func, '__class__.__default_init__')\n if init == noop: init = getcallable(act_func, '__default_init__')\n if callable(init): init(m.weight)\n\n# Cell\ndef _conv_func(ndim=2, transpose=False):\n \"Return the proper conv `ndim` function, potentially `transposed`.\"\n assert 1 <= ndim <=3\n return getattr(nn, f'Conv{\"Transpose\" if transpose else \"\"}{ndim}d')\n\n# Cell\ndefaults.activation=nn.ReLU\n\n# Cell\nclass ConvLayer(nn.Sequential):\n \"Create a sequence of convolutional (`ni` to `nf`), ReLU (if `use_activ`) and `norm_type` layers.\"\n @delegates(nn.Conv2d)\n def __init__(self, ni, nf, ks=3, stride=1, padding=None, bias=None, ndim=2, norm_type=NormType.Batch, bn_1st=True,\n act_cls=defaults.activation, transpose=False, init='auto', xtra=None, bias_std=0.01, **kwargs):\n if padding is None: padding = ((ks-1)//2 if not 
transpose else 0)\n bn = norm_type in (NormType.Batch, NormType.BatchZero)\n inn = norm_type in (NormType.Instance, NormType.InstanceZero)\n if bias is None: bias = not (bn or inn)\n conv_func = _conv_func(ndim, transpose=transpose)\n conv = conv_func(ni, nf, kernel_size=ks, bias=bias, stride=stride, padding=padding, **kwargs)\n act = None if act_cls is None else act_cls()\n init_linear(conv, act, init=init, bias_std=bias_std)\n if norm_type==NormType.Weight: conv = weight_norm(conv)\n elif norm_type==NormType.Spectral: conv = spectral_norm(conv)\n layers = [conv]\n act_bn = []\n if act is not None: act_bn.append(act)\n if bn: act_bn.append(BatchNorm(nf, norm_type=norm_type, ndim=ndim))\n if inn: act_bn.append(InstanceNorm(nf, norm_type=norm_type, ndim=ndim))\n if bn_1st: act_bn.reverse()\n layers += act_bn\n if xtra: layers.append(xtra)\n super().__init__(*layers)\n\n# Cell\ndef AdaptiveAvgPool(sz=1, ndim=2):\n \"nn.AdaptiveAvgPool layer for `ndim`\"\n assert 1 <= ndim <= 3\n return getattr(nn, f\"AdaptiveAvgPool{ndim}d\")(sz)\n\n# Cell\ndef MaxPool(ks=2, stride=None, padding=0, ndim=2, ceil_mode=False):\n \"nn.MaxPool layer for `ndim`\"\n assert 1 <= ndim <= 3\n return getattr(nn, f\"MaxPool{ndim}d\")(ks, stride=stride, padding=padding)\n\n# Cell\ndef AvgPool(ks=2, stride=None, padding=0, ndim=2, ceil_mode=False):\n \"nn.AvgPool layer for `ndim`\"\n assert 1 <= ndim <= 3\n return getattr(nn, f\"AvgPool{ndim}d\")(ks, stride=stride, padding=padding, ceil_mode=ceil_mode)\n\n# Cell\ndef trunc_normal_(x, mean=0., std=1.):\n \"Truncated normal initialization (approximation)\"\n # From https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/12\n return x.normal_().fmod_(2).mul_(std).add_(mean)\n\n# Cell\nclass Embedding(nn.Embedding):\n \"Embedding layer with truncated normal initialization\"\n def __init__(self, ni, nf, std=0.01):\n super().__init__(ni, nf)\n trunc_normal_(self.weight.data, std=std)\n\n# Cell\nclass SelfAttention(Module):\n \"Self attention layer for `n_channels`.\"\n def __init__(self, n_channels):\n self.query,self.key,self.value = [self._conv(n_channels, c) for c in (n_channels//8,n_channels//8,n_channels)]\n self.gamma = nn.Parameter(tensor([0.]))\n\n def _conv(self,n_in,n_out):\n return ConvLayer(n_in, n_out, ks=1, ndim=1, norm_type=NormType.Spectral, act_cls=None, bias=False)\n\n def forward(self, x):\n #Notation from the paper.\n size = x.size()\n x = x.view(*size[:2],-1)\n f,g,h = self.query(x),self.key(x),self.value(x)\n beta = F.softmax(torch.bmm(f.transpose(1,2), g), dim=1)\n o = self.gamma * torch.bmm(h, beta) + x\n return o.view(*size).contiguous()\n\n# Cell\nclass PooledSelfAttention2d(Module):\n \"Pooled self attention layer for 2d.\"\n def __init__(self, n_channels):\n self.n_channels = n_channels\n self.query,self.key,self.value = [self._conv(n_channels, c) for c in (n_channels//8,n_channels//8,n_channels//2)]\n self.out = self._conv(n_channels//2, n_channels)\n self.gamma = nn.Parameter(tensor([0.]))\n\n def _conv(self,n_in,n_out):\n return ConvLayer(n_in, n_out, ks=1, norm_type=NormType.Spectral, act_cls=None, bias=False)\n\n def forward(self, x):\n n_ftrs = x.shape[2]*x.shape[3]\n f = self.query(x).view(-1, self.n_channels//8, n_ftrs)\n g = F.max_pool2d(self.key(x), [2,2]).view(-1, self.n_channels//8, n_ftrs//4)\n h = F.max_pool2d(self.value(x), [2,2]).view(-1, self.n_channels//2, n_ftrs//4)\n beta = F.softmax(torch.bmm(f.transpose(1, 2), g), -1)\n o = self.out(torch.bmm(h, beta.transpose(1,2)).view(-1, self.n_channels//2, x.shape[2], 
x.shape[3]))\n return self.gamma * o + x\n\n# Cell\ndef _conv1d_spect(ni:int, no:int, ks:int=1, stride:int=1, padding:int=0, bias:bool=False):\n \"Create and initialize a `nn.Conv1d` layer with spectral normalization.\"\n conv = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias)\n nn.init.kaiming_normal_(conv.weight)\n if bias: conv.bias.data.zero_()\n return spectral_norm(conv)\n\n# Cell\nclass SimpleSelfAttention(Module):\n def __init__(self, n_in:int, ks=1, sym=False):\n self.sym,self.n_in = sym,n_in\n self.conv = _conv1d_spect(n_in, n_in, ks, padding=ks//2, bias=False)\n self.gamma = nn.Parameter(tensor([0.]))\n\n def forward(self,x):\n if self.sym:\n c = self.conv.weight.view(self.n_in,self.n_in)\n c = (c + c.t())/2\n self.conv.weight = c.view(self.n_in,self.n_in,1)\n\n size = x.size()\n x = x.view(*size[:2],-1)\n\n convx = self.conv(x)\n xxT = torch.bmm(x,x.permute(0,2,1).contiguous())\n o = torch.bmm(xxT, convx)\n o = self.gamma * o + x\n return o.view(*size).contiguous()\n\n# Cell\ndef icnr_init(x, scale=2, init=nn.init.kaiming_normal_):\n \"ICNR init of `x`, with `scale` and `init` function\"\n ni,nf,h,w = x.shape\n ni2 = int(ni/(scale**2))\n k = init(x.new_zeros([ni2,nf,h,w])).transpose(0, 1)\n k = k.contiguous().view(ni2, nf, -1)\n k = k.repeat(1, 1, scale**2)\n return k.contiguous().view([nf,ni,h,w]).transpose(0, 1)\n\n# Cell\nclass PixelShuffle_ICNR(nn.Sequential):\n \"Upsample by `scale` from `ni` filters to `nf` (default `ni`), using `nn.PixelShuffle`.\"\n def __init__(self, ni, nf=None, scale=2, blur=False, norm_type=NormType.Weight, act_cls=defaults.activation):\n super().__init__()\n nf = ifnone(nf, ni)\n layers = [ConvLayer(ni, nf*(scale**2), ks=1, norm_type=norm_type, act_cls=act_cls, bias_std=0),\n nn.PixelShuffle(scale)]\n if norm_type == NormType.Weight:\n layers[0][0].weight_v.data.copy_(icnr_init(layers[0][0].weight_v.data))\n layers[0][0].weight_g.data.copy_(((layers[0][0].weight_v.data**2).sum(dim=[1,2,3])**0.5)[:,None,None,None])\n else:\n layers[0][0].weight.data.copy_(icnr_init(layers[0][0].weight.data))\n if blur: layers += [nn.ReplicationPad2d((1,0,1,0)), nn.AvgPool2d(2, stride=1)]\n super().__init__(*layers)\n\n# Cell\ndef sequential(*args):\n \"Create an `nn.Sequential`, wrapping items with `Lambda` if needed\"\n if len(args) != 1 or not isinstance(args[0], OrderedDict):\n args = list(args)\n for i,o in enumerate(args):\n if not isinstance(o,nn.Module): args[i] = Lambda(o)\n return nn.Sequential(*args)\n\n# Cell\nclass SequentialEx(Module):\n \"Like `nn.Sequential`, but with ModuleList semantics, and can access module input\"\n def __init__(self, *layers): self.layers = nn.ModuleList(layers)\n\n def forward(self, x):\n res = x\n for l in self.layers:\n res.orig = x\n nres = l(res)\n # We have to remove res.orig to avoid hanging refs and therefore memory leaks\n res.orig, nres.orig = None, None\n res = nres\n return res\n\n def __getitem__(self,i): return self.layers[i]\n def append(self,l): return self.layers.append(l)\n def extend(self,l): return self.layers.extend(l)\n def insert(self,i,l): return self.layers.insert(i,l)\n\n# Cell\nclass MergeLayer(Module):\n \"Merge a shortcut with the result of the module by adding them or concatenating them if `dense=True`.\"\n def __init__(self, dense:bool=False): self.dense=dense\n def forward(self, x): return torch.cat([x,x.orig], dim=1) if self.dense else (x+x.orig)\n\n# Cell\nclass Cat(nn.ModuleList):\n \"Concatenate layers outputs over a given dim\"\n def __init__(self, layers, dim=1):\n 
self.dim=dim\n super().__init__(layers)\n def forward(self, x): return torch.cat([l(x) for l in self], dim=self.dim)\n\n# Cell\nclass SimpleCNN(nn.Sequential):\n \"Create a simple CNN with `filters`.\"\n def __init__(self, filters, kernel_szs=None, strides=None, bn=True):\n nl = len(filters)-1\n kernel_szs = ifnone(kernel_szs, [3]*nl)\n strides = ifnone(strides , [2]*nl)\n layers = [ConvLayer(filters[i], filters[i+1], kernel_szs[i], stride=strides[i],\n norm_type=(NormType.Batch if bn and i<nl-1 else None)) for i in range(nl)]\n layers.append(PoolFlatten())\n super().__init__(*layers)\n\n# Cell\nclass ProdLayer(Module):\n \"Merge a shortcut with the result of the module by multiplying them.\"\n def forward(self, x): return x * x.orig\n\n# Cell\ninplace_relu = partial(nn.ReLU, inplace=True)\n\n# Cell\ndef SEModule(ch, reduction, act_cls=defaults.activation):\n nf = math.ceil(ch//reduction/8)*8\n return SequentialEx(nn.AdaptiveAvgPool2d(1),\n ConvLayer(ch, nf, ks=1, norm_type=None, act_cls=act_cls),\n ConvLayer(nf, ch, ks=1, norm_type=None, act_cls=nn.Sigmoid),\n ProdLayer())\n\n# Cell\nclass ResBlock(Module):\n \"Resnet block from `ni` to `nh` with `stride`\"\n @delegates(ConvLayer.__init__)\n def __init__(self, expansion, ni, nf, stride=1, groups=1, reduction=None, nh1=None, nh2=None, dw=False, g2=1,\n sa=False, sym=False, norm_type=NormType.Batch, act_cls=defaults.activation, ndim=2, ks=3,\n pool=AvgPool, pool_first=True, **kwargs):\n norm2 = (NormType.BatchZero if norm_type==NormType.Batch else\n NormType.InstanceZero if norm_type==NormType.Instance else norm_type)\n if nh2 is None: nh2 = nf\n if nh1 is None: nh1 = nh2\n nf,ni = nf*expansion,ni*expansion\n k0 = dict(norm_type=norm_type, act_cls=act_cls, ndim=ndim, **kwargs)\n k1 = dict(norm_type=norm2, act_cls=None, ndim=ndim, **kwargs)\n convpath = [ConvLayer(ni, nh2, ks, stride=stride, groups=ni if dw else groups, **k0),\n ConvLayer(nh2, nf, ks, groups=g2, **k1)\n ] if expansion == 1 else [\n ConvLayer(ni, nh1, 1, **k0),\n ConvLayer(nh1, nh2, ks, stride=stride, groups=nh1 if dw else groups, **k0),\n ConvLayer(nh2, nf, 1, groups=g2, **k1)]\n if reduction: convpath.append(SEModule(nf, reduction=reduction, act_cls=act_cls))\n if sa: convpath.append(SimpleSelfAttention(nf,ks=1,sym=sym))\n self.convpath = nn.Sequential(*convpath)\n idpath = []\n if ni!=nf: idpath.append(ConvLayer(ni, nf, 1, act_cls=None, ndim=ndim, **kwargs))\n if stride!=1: idpath.insert((1,0)[pool_first], pool(stride, ndim=ndim, ceil_mode=True))\n self.idpath = nn.Sequential(*idpath)\n self.act = defaults.activation(inplace=True) if act_cls is defaults.activation else act_cls()\n\n def forward(self, x): return self.act(self.convpath(x) + self.idpath(x))\n\n# Cell\ndef SEBlock(expansion, ni, nf, groups=1, reduction=16, stride=1, **kwargs):\n return ResBlock(expansion, ni, nf, stride=stride, groups=groups, reduction=reduction, nh1=nf*2, nh2=nf*expansion, **kwargs)\n\n# Cell\ndef SEResNeXtBlock(expansion, ni, nf, groups=32, reduction=16, stride=1, base_width=4, **kwargs):\n w = math.floor(nf * (base_width / 64)) * groups\n return ResBlock(expansion, ni, nf, stride=stride, groups=groups, reduction=reduction, nh2=w, **kwargs)\n\n# Cell\ndef SeparableBlock(expansion, ni, nf, reduction=16, stride=1, base_width=4, **kwargs):\n return ResBlock(expansion, ni, nf, stride=stride, reduction=reduction, nh2=nf*2, dw=True, **kwargs)\n\n# Cell\ndef _stack_tups(tuples, stack_dim=1):\n \"Stack tuple of tensors along `stack_dim`\"\n return tuple(torch.stack([t[i] for t in tuples], 
dim=stack_dim) for i in range_of(tuples[0]))\n\n# Cell\nclass TimeDistributed(Module):\n \"Applies `module` over `tdim` identically for each step, use `low_mem` to compute one at a time.\"\n def __init__(self, module, low_mem=False, tdim=1):\n store_attr()\n\n def forward(self, *tensors, **kwargs):\n \"input x with shape:(bs,seq_len,channels,width,height)\"\n if self.low_mem or self.tdim!=1:\n return self.low_mem_forward(*tensors, **kwargs)\n else:\n #only support tdim=1\n inp_shape = tensors[0].shape\n bs, seq_len = inp_shape[0], inp_shape[1]\n out = self.module(*[x.view(bs*seq_len, *x.shape[2:]) for x in tensors], **kwargs)\n return self.format_output(out, bs, seq_len)\n\n def low_mem_forward(self, *tensors, **kwargs):\n \"input x with shape:(bs,seq_len,channels,width,height)\"\n seq_len = tensors[0].shape[self.tdim]\n args_split = [torch.unbind(x, dim=self.tdim) for x in tensors]\n out = []\n for i in range(seq_len):\n out.append(self.module(*[args[i] for args in args_split]), **kwargs)\n if isinstance(out[0], tuple):\n return _stack_tups(out, stack_dim=self.tdim)\n return torch.stack(out, dim=self.tdim)\n\n def format_output(self, out, bs, seq_len):\n \"unstack from batchsize outputs\"\n if isinstance(out, tuple):\n return tuple(out_i.view(bs, seq_len, *out_i.shape[1:]) for out_i in out)\n return out.view(bs, seq_len,*out.shape[1:])\n\n def __repr__(self):\n return f'TimeDistributed({self.module})'\n\n# Cell\nfrom torch.jit import script\n\n# Cell\n@script\ndef _swish_jit_fwd(x): return x.mul(torch.sigmoid(x))\n\n@script\ndef _swish_jit_bwd(x, grad_output):\n x_sigmoid = torch.sigmoid(x)\n return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid)))\n\nclass _SwishJitAutoFn(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x):\n ctx.save_for_backward(x)\n return _swish_jit_fwd(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n x = ctx.saved_variables[0]\n return _swish_jit_bwd(x, grad_output)\n\n# Cell\ndef swish(x, inplace=False): return _SwishJitAutoFn.apply(x)\n\n# Cell\nclass Swish(Module):\n def forward(self, x): return _SwishJitAutoFn.apply(x)\n\n# Cell\n@script\ndef _mish_jit_fwd(x): return x.mul(torch.tanh(F.softplus(x)))\n\n@script\ndef _mish_jit_bwd(x, grad_output):\n x_sigmoid = torch.sigmoid(x)\n x_tanh_sp = F.softplus(x).tanh()\n return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp))\n\nclass MishJitAutoFn(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x):\n ctx.save_for_backward(x)\n return _mish_jit_fwd(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n x = ctx.saved_variables[0]\n return _mish_jit_bwd(x, grad_output)\n\n# Cell\ndef mish(x): return F.mish(x) if torch.__version__ >= '1.9' else MishJitAutoFn.apply(x)\n\n# Cell\nclass Mish(Module):\n def forward(self, x): return MishJitAutoFn.apply(x)\n\n# Cell\nif ismin_torch('1.9'): Mish = nn.Mish\n\n# Cell\nfor o in swish,Swish,mish,Mish: o.__default_init__ = kaiming_uniform_\n\n# Cell\nclass ParameterModule(Module):\n \"Register a lone parameter `p` in a module.\"\n def __init__(self, p): self.val = p\n def forward(self, x): return x\n\n# Cell\ndef children_and_parameters(m):\n \"Return the children of `m` and its direct parameters not registered in modules.\"\n children = list(m.children())\n children_p = sum([[id(p) for p in c.parameters()] for c in m.children()],[])\n for p in m.parameters():\n if id(p) not in children_p: children.append(ParameterModule(p))\n return children\n\n# Cell\ndef has_children(m):\n try: next(m.children())\n except StopIteration: 
return False\n return True\n\n# Cell\ndef flatten_model(m):\n \"Return the list of all submodules and parameters of `m`\"\n return sum(map(flatten_model,children_and_parameters(m)),[]) if has_children(m) else [m]\n\n# Cell\nclass NoneReduce():\n \"A context manager to evaluate `loss_func` with none reduce.\"\n def __init__(self, loss_func): self.loss_func,self.old_red = loss_func,None\n\n def __enter__(self):\n if hasattr(self.loss_func, 'reduction'):\n self.old_red = self.loss_func.reduction\n self.loss_func.reduction = 'none'\n return self.loss_func\n else: return partial(self.loss_func, reduction='none')\n\n def __exit__(self, type, value, traceback):\n if self.old_red is not None: self.loss_func.reduction = self.old_red\n\n# Cell\ndef in_channels(m):\n \"Return the shape of the first weight layer in `m`.\"\n try: return next(l.weight.shape[1] for l in flatten_model(m) if nested_attr(l,'weight.ndim',-1)==4)\n except StopIteration as e: e.args = [\"No weight layer\"]; raise"
] | [
[
"torch.nn.utils.weight_norm",
"torch.nn.utils.spectral_norm",
"torch.nn.init.normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Ian-Goodall-Halliwell/projected_gan | [
"cf7d61c44cc6a48de7c1ea1d90ba9fff4d55d8f2"
] | [
"training/training_loop.py"
] | [
"# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n#\n# modified by Axel Sauer for \"Projected GANs Converge Faster\"\n#\n\n\n\"\"\"Main training loop.\"\"\"\n\nimport os\nimport time\nimport copy\nimport json\nimport dill\nimport psutil\nimport PIL.Image\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport dnnlib\nimport pickle\nfrom torch_utils import misc\nfrom torch_utils import training_stats\nfrom torch_utils.ops import conv2d_gradfix\nfrom torch_utils.ops import grid_sample_gradfix\n\nimport legacy\nfrom metrics import metric_main\n\n#----------------------------------------------------------------------------\n\ndef setup_snapshot_image_grid(training_set, random_seed=0):\n rnd = np.random.RandomState(random_seed)\n gw = np.clip(7680 // training_set.image_shape[2], 7, 32)\n gh = np.clip(4320 // training_set.image_shape[1], 4, 32)\n\n # No labels => show random subset of training samples.\n if not training_set.has_labels:\n all_indices = list(range(len(training_set)))\n rnd.shuffle(all_indices)\n grid_indices = [all_indices[i % len(all_indices)] for i in range(gw * gh)]\n\n else:\n # Group training samples by label.\n label_groups = dict() # label => [idx, ...]\n for idx in range(len(training_set)):\n label = tuple(training_set.get_details(idx).raw_label.flat[::-1])\n if label not in label_groups:\n label_groups[label] = []\n label_groups[label].append(idx)\n\n # Reorder.\n label_order = sorted(label_groups.keys())\n for label in label_order:\n rnd.shuffle(label_groups[label])\n\n # Organize into grid.\n grid_indices = []\n for y in range(gh):\n label = label_order[y % len(label_order)]\n indices = label_groups[label]\n grid_indices += [indices[x % len(indices)] for x in range(gw)]\n label_groups[label] = [indices[(i + gw) % len(indices)] for i in range(len(indices))]\n\n # Load data.\n images, labels = zip(*[training_set[i] for i in grid_indices])\n return (gw, gh), np.stack(images), np.stack(labels)\n\n#----------------------------------------------------------------------------\n\ndef save_image_grid(img, fname, drange, grid_size):\n lo, hi = drange\n img = np.asarray(img, dtype=np.float32)\n img = (img - lo) * (255 / (hi - lo))\n img = np.rint(img).clip(0, 255).astype(np.uint8)\n\n gw, gh = grid_size\n _N, C, H, W = img.shape\n img = img.reshape([gh, gw, C, H, W])\n img = img.transpose(0, 3, 1, 4, 2)\n img = img.reshape([gh * H, gw * W, C])\n\n assert C in [1, 3]\n if C == 1:\n PIL.Image.fromarray(img[:, :, 0], 'L').save(fname)\n if C == 3:\n PIL.Image.fromarray(img, 'RGB').save(fname)\n\n#----------------------------------------------------------------------------\n\ndef training_loop(\n run_dir = '.', # Output directory.\n training_set_kwargs = {}, # Options for training set.\n data_loader_kwargs = {}, # Options for torch.utils.data.DataLoader.\n G_kwargs = {}, # Options for generator network.\n D_kwargs = {}, # Options for discriminator network.\n G_opt_kwargs = {}, # Options for generator optimizer.\n D_opt_kwargs = {}, # Options for discriminator optimizer.\n loss_kwargs = {}, # Options for loss function.\n metrics = [], # Metrics to evaluate during training.\n 
random_seed = 0, # Global random seed.\n num_gpus = 1, # Number of GPUs participating in the training.\n rank = 0, # Rank of the current process in [0, num_gpus[.\n batch_size = 4, # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus.\n batch_gpu = 4, # Number of samples processed at a time by one GPU.\n ema_kimg = 10, # Half-life of the exponential moving average (EMA) of generator weights.\n ema_rampup = 0.05, # EMA ramp-up coefficient. None = no rampup.\n G_reg_interval = None, # How often to perform regularization for G? None = disable lazy regularization.\n D_reg_interval = 16, # How often to perform regularization for D? None = disable lazy regularization.\n total_kimg = 25000, # Total length of the training, measured in thousands of real images.\n kimg_per_tick = 4, # Progress snapshot interval.\n image_snapshot_ticks = 50, # How often to save image snapshots? None = disable.\n network_snapshot_ticks = 50, # How often to save network snapshots? None = disable.\n resume_pkl = None, # Network pickle to resume training from.\n resume_kimg = 0, # First kimg to report when resuming training.\n cudnn_benchmark = True, # Enable torch.backends.cudnn.benchmark?\n abort_fn = None, # Callback function for determining whether to abort training. Must return consistent results across ranks.\n progress_fn = None, # Callback function for updating training progress. Called for all ranks.\n restart_every = -1, # Time interval in seconds to exit code\n):\n # Initialize.\n start_time = time.time()\n device = torch.device('cuda', rank)\n np.random.seed(random_seed * num_gpus + rank)\n torch.manual_seed(random_seed * num_gpus + rank)\n torch.backends.cudnn.benchmark = cudnn_benchmark # Improves training speed.\n torch.backends.cuda.matmul.allow_tf32 = False # Improves numerical accuracy.\n torch.backends.cudnn.allow_tf32 = False # Improves numerical accuracy.\n conv2d_gradfix.enabled = True # Improves training speed.\n grid_sample_gradfix.enabled = True # Avoids errors with the augmentation pipe.\n __RESTART__ = torch.tensor(0., device=device) # will be broadcasted to exit loop\n __CUR_NIMG__ = torch.tensor(resume_kimg * 1000, dtype=torch.long, device=device)\n __CUR_TICK__ = torch.tensor(0, dtype=torch.long, device=device)\n __BATCH_IDX__ = torch.tensor(0, dtype=torch.long, device=device)\n __PL_MEAN__ = torch.zeros([], device=device)\n best_fid = 9999\n\n # Load training set.\n if rank == 0:\n print('Loading training set...')\n training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset\n training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed)\n training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs))\n if rank == 0:\n print()\n print('Num images: ', len(training_set))\n print('Image shape:', training_set.image_shape)\n print('Label shape:', training_set.label_shape)\n print()\n\n # Construct networks.\n if rank == 0:\n print('Constructing networks...')\n common_kwargs = dict(c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels)\n G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module\n D = dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of 
torch.nn.Module\n G_ema = copy.deepcopy(G).eval()\n\n # Check for existing checkpoint\n ckpt_pkl = None\n if restart_every > 0 and os.path.isfile(misc.get_ckpt_path(run_dir)):\n ckpt_pkl = resume_pkl = misc.get_ckpt_path(run_dir)\n\n # Resume from existing pickle.\n if (resume_pkl is not None) and (rank == 0):\n print(f'Resuming test from \"{resume_pkl}\"')\n with dnnlib.util.open_url(resume_pkl) as f:\n resume_data = legacy.load_network_pkl(f)\n for name, module in [('G', G), ('D', D), ('G_ema', G_ema)]:\n misc.copy_params_and_buffers(resume_data[name], module, require_all=False)\n\n if ckpt_pkl is not None: # Load ticks\n __CUR_NIMG__ = resume_data['progress']['cur_nimg'].to(device)\n __CUR_TICK__ = resume_data['progress']['cur_tick'].to(device)\n __BATCH_IDX__ = resume_data['progress']['batch_idx'].to(device)\n __PL_MEAN__ = resume_data['progress'].get('pl_mean', torch.zeros([])).to(device)\n best_fid = resume_data['progress']['best_fid'] # only needed for rank == 0\n\n # Print network summary tables.\n if rank == 0:\n z = torch.empty([batch_gpu, G.z_dim], device=device)\n c = torch.empty([batch_gpu, G.c_dim], device=device)\n img = misc.print_module_summary(G, [z, c])\n misc.print_module_summary(D, [img, c])\n\n # Distribute across GPUs.\n if rank == 0:\n print(f'Distributing across {num_gpus} GPUs...')\n for module in [G, D, G_ema]:\n if module is not None and num_gpus > 1:\n for param in misc.params_and_buffers(module):\n torch.distributed.broadcast(param, src=0)\n\n # Setup training phases.\n if rank == 0:\n print('Setting up training phases...')\n loss = dnnlib.util.construct_class_by_name(device=device, G=G, G_ema=G_ema, D=D, **loss_kwargs) # subclass of training.loss.Loss\n phases = []\n for name, module, opt_kwargs, reg_interval in [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]:\n if reg_interval is None:\n opt = dnnlib.util.construct_class_by_name(params=module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer\n phases += [dnnlib.EasyDict(name=name+'both', module=module, opt=opt, interval=1)]\n else: # Lazy regularization.\n mb_ratio = reg_interval / (reg_interval + 1)\n opt_kwargs = dnnlib.EasyDict(opt_kwargs)\n opt_kwargs.lr = opt_kwargs.lr * mb_ratio\n opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas]\n opt = dnnlib.util.construct_class_by_name(module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer\n phases += [dnnlib.EasyDict(name=name+'main', module=module, opt=opt, interval=1)]\n phases += [dnnlib.EasyDict(name=name+'reg', module=module, opt=opt, interval=reg_interval)]\n for phase in phases:\n phase.start_event = None\n phase.end_event = None\n if rank == 0:\n phase.start_event = torch.cuda.Event(enable_timing=True)\n phase.end_event = torch.cuda.Event(enable_timing=True)\n\n # Export sample images.\n grid_size = None\n grid_z = None\n grid_c = None\n if rank == 0:\n print('Exporting sample images...')\n grid_size, images, labels = setup_snapshot_image_grid(training_set=training_set)\n save_image_grid(images, os.path.join(run_dir, 'reals.png'), drange=[0,255], grid_size=grid_size)\n\n grid_z = torch.randn([labels.shape[0], G.z_dim], device=device).split(batch_gpu)\n grid_c = torch.from_numpy(labels).to(device).split(batch_gpu)\n images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()\n\n save_image_grid(images, os.path.join(run_dir, 'fakes_init.png'), drange=[-1,1], grid_size=grid_size)\n\n # Initialize logs.\n if rank == 0:\n 
print('Initializing logs...')\n stats_collector = training_stats.Collector(regex='.*')\n stats_metrics = dict()\n stats_jsonl = None\n stats_tfevents = None\n if rank == 0:\n stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt')\n try:\n import torch.utils.tensorboard as tensorboard\n stats_tfevents = tensorboard.SummaryWriter(run_dir)\n except ImportError as err:\n print('Skipping tfevents export:', err)\n\n # Train.\n if rank == 0:\n print(f'Training for {total_kimg} kimg...')\n print()\n if num_gpus > 1: # broadcast loaded states to all\n torch.distributed.broadcast(__CUR_NIMG__, 0)\n torch.distributed.broadcast(__CUR_TICK__, 0)\n torch.distributed.broadcast(__BATCH_IDX__, 0)\n torch.distributed.broadcast(__PL_MEAN__, 0)\n torch.distributed.barrier() # ensure all processes received this info\n cur_nimg = __CUR_NIMG__.item()\n cur_tick = __CUR_TICK__.item()\n tick_start_nimg = cur_nimg\n tick_start_time = time.time()\n maintenance_time = tick_start_time - start_time\n batch_idx = __BATCH_IDX__.item()\n if progress_fn is not None:\n progress_fn(cur_nimg // 1000, total_kimg)\n if hasattr(loss, 'pl_mean'):\n loss.pl_mean.copy_(__PL_MEAN__)\n while True:\n\n with torch.autograd.profiler.record_function('data_fetch'):\n phase_real_img, phase_real_c = next(training_set_iterator)\n phase_real_img = (phase_real_img.to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu)\n phase_real_c = phase_real_c.to(device).split(batch_gpu)\n all_gen_z = torch.randn([len(phases) * batch_size, G.z_dim], device=device)\n all_gen_z = [phase_gen_z.split(batch_gpu) for phase_gen_z in all_gen_z.split(batch_size)]\n all_gen_c = [training_set.get_label(np.random.randint(len(training_set))) for _ in range(len(phases) * batch_size)]\n all_gen_c = torch.from_numpy(np.stack(all_gen_c)).pin_memory().to(device)\n all_gen_c = [phase_gen_c.split(batch_gpu) for phase_gen_c in all_gen_c.split(batch_size)]\n\n # Execute training phases.\n for phase, phase_gen_z, phase_gen_c in zip(phases, all_gen_z, all_gen_c):\n if batch_idx % phase.interval != 0:\n continue\n if phase.start_event is not None:\n phase.start_event.record(torch.cuda.current_stream(device))\n\n # Accumulate gradients.\n phase.opt.zero_grad(set_to_none=True)\n phase.module.requires_grad_(True)\n\n if phase.name in ['Dmain', 'Dboth', 'Dreg']:\n phase.module.feature_network.requires_grad_(False)\n\n for real_img, real_c, gen_z, gen_c in zip(phase_real_img, phase_real_c, phase_gen_z, phase_gen_c):\n \n loss.accumulate_gradients(phase=phase.name, real_img=real_img, real_c=real_c, gen_z=gen_z, gen_c=gen_c, gain=phase.interval, cur_nimg=cur_nimg)\n phase.module.requires_grad_(False)\n\n # Update weights.\n with torch.autograd.profiler.record_function(phase.name + '_opt'):\n params = [param for param in phase.module.parameters() if param.grad is not None]\n # for para in params:\n # print(para)\n if len(params) > 0:\n flat = torch.cat([param.grad.flatten() for param in params])\n if num_gpus > 1:\n torch.distributed.all_reduce(flat)\n flat /= num_gpus\n misc.nan_to_num(flat, nan=0, posinf=1e5, neginf=-1e5, out=flat)\n grads = flat.split([param.numel() for param in params])\n for param, grad in zip(params, grads):\n param.grad = grad.reshape(param.shape)\n phase.opt.step()\n\n # Phase done.\n if phase.end_event is not None:\n phase.end_event.record(torch.cuda.current_stream(device))\n\n # Update G_ema.\n with torch.autograd.profiler.record_function('Gema'):\n ema_nimg = ema_kimg * 1000\n if ema_rampup is not None:\n ema_nimg = min(ema_nimg, cur_nimg * 
ema_rampup)\n ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8))\n for p_ema, p in zip(G_ema.parameters(), G.parameters()):\n p_ema.copy_(p.lerp(p_ema, ema_beta))\n for b_ema, b in zip(G_ema.buffers(), G.buffers()):\n b_ema.copy_(b)\n\n # Update state.\n cur_nimg += batch_size\n batch_idx += 1\n\n # Perform maintenance tasks once per tick.\n done = (cur_nimg >= total_kimg * 1000)\n if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000):\n continue\n\n # Print status line, accumulating the same information in training_stats.\n tick_end_time = time.time()\n fields = []\n fields += [f\"tick {training_stats.report0('Progress/tick', cur_tick):<5d}\"]\n fields += [f\"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}\"]\n fields += [f\"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}\"]\n fields += [f\"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}\"]\n fields += [f\"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}\"]\n fields += [f\"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}\"]\n fields += [f\"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}\"]\n fields += [f\"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}\"]\n fields += [f\"reserved {training_stats.report0('Resources/peak_gpu_mem_reserved_gb', torch.cuda.max_memory_reserved(device) / 2**30):<6.2f}\"]\n fields += [f\"gpumemc {training_stats.report0('Resources/peak_gpu_mem_gbc', torch.cuda.memory_allocated(device) / 2**30):<6.2f}\"]\n fields += [f\"reservedc {training_stats.report0('Resources/peak_gpu_mem_reserved_gbc', torch.cuda.memory_reserved(device) / 2**30):<6.2f}\"]\n torch.cuda.reset_peak_memory_stats()\n training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60))\n training_stats.report0('Timing/total_days', (tick_end_time - start_time) / (24 * 60 * 60))\n if rank == 0:\n print(' '.join(fields))\n\n # Check for abort.\n if (not done) and (abort_fn is not None) and abort_fn():\n done = True\n if rank == 0:\n print()\n print('Aborting...')\n\n # Check for restart.\n if (rank == 0) and (restart_every > 0) and (time.time() - start_time > restart_every):\n print('Restart job...')\n __RESTART__ = torch.tensor(1., device=device)\n if num_gpus > 1:\n torch.distributed.broadcast(__RESTART__, 0)\n if __RESTART__:\n done = True\n print(f'Process {rank} leaving...')\n if num_gpus > 1:\n torch.distributed.barrier()\n\n # Save image snapshot.\n if (rank == 0) and (image_snapshot_ticks is not None) and (done or cur_tick % image_snapshot_ticks == 0):\n images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()\n save_image_grid(images, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}.png'), drange=[-1,1], grid_size=grid_size)\n\n # Save network snapshot.\n snapshot_pkl = None\n snapshot_data = None\n if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0):\n snapshot_data = dict(G=G, D=D, G_ema=G_ema, training_set_kwargs=dict(training_set_kwargs))\n for key, value in snapshot_data.items():\n if isinstance(value, torch.nn.Module):\n snapshot_data[key] = value\n del value # conserve memory\n\n # Save Checkpoint if needed\n 
if (rank == 0) and (restart_every > 0) and (network_snapshot_ticks is not None) and (\n done or cur_tick % network_snapshot_ticks == 0):\n snapshot_pkl = misc.get_ckpt_path(run_dir)\n # save as tensors to avoid error for multi GPU\n snapshot_data['progress'] = {\n 'cur_nimg': torch.LongTensor([cur_nimg]),\n 'cur_tick': torch.LongTensor([cur_tick]),\n 'batch_idx': torch.LongTensor([batch_idx]),\n 'best_fid': best_fid,\n }\n if hasattr(loss, 'pl_mean'):\n snapshot_data['progress']['pl_mean'] = loss.pl_mean.cpu()\n\n with open(snapshot_pkl, 'wb') as f:\n pickle.dump(snapshot_data, f)\n\n # Evaluate metrics.\n # if (snapshot_data is not None) and (len(metrics) > 0):\n if cur_tick and (snapshot_data is not None) and (len(metrics) > 0):\n if rank == 0:\n print('Evaluating metrics...')\n for metric in metrics:\n result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G_ema'], run_dir=run_dir, cur_nimg=cur_nimg,\n dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device)\n if rank == 0:\n metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl)\n stats_metrics.update(result_dict.results)\n\n # save best fid ckpt\n snapshot_pkl = os.path.join(run_dir, f'best_model.pkl')\n cur_nimg_txt = os.path.join(run_dir, f'best_nimg.txt')\n if rank == 0:\n if 'fid50k_full' in stats_metrics and stats_metrics['fid50k_full'] < best_fid:\n best_fid = stats_metrics['fid50k_full']\n\n with open(snapshot_pkl, 'wb') as f:\n dill.dump(snapshot_data, f)\n # save curr iteration number (directly saving it to pkl leads to problems with multi GPU)\n with open(cur_nimg_txt, 'w') as f:\n f.write(str(cur_nimg))\n\n del snapshot_data # conserve memory\n\n # Collect statistics.\n for phase in phases:\n value = []\n if (phase.start_event is not None) and (phase.end_event is not None) and \\\n not (phase.start_event.cuda_event == 0 and phase.end_event.cuda_event == 0): # Both events were not initialized yet, can happen with restart\n phase.end_event.synchronize()\n value = phase.start_event.elapsed_time(phase.end_event)\n training_stats.report0('Timing/' + phase.name, value)\n stats_collector.update()\n stats_dict = stats_collector.as_dict()\n\n # Update logs.\n timestamp = time.time()\n if stats_jsonl is not None:\n fields = dict(stats_dict, timestamp=timestamp)\n stats_jsonl.write(json.dumps(fields) + '\\n')\n stats_jsonl.flush()\n if stats_tfevents is not None:\n global_step = int(cur_nimg / 1e3)\n walltime = timestamp - start_time\n for name, value in stats_dict.items():\n stats_tfevents.add_scalar(name, value.mean, global_step=global_step, walltime=walltime)\n for name, value in stats_metrics.items():\n stats_tfevents.add_scalar(f'Metrics/{name}', value, global_step=global_step, walltime=walltime)\n stats_tfevents.flush()\n if progress_fn is not None:\n progress_fn(cur_nimg // 1000, total_kimg)\n\n # Update state.\n cur_tick += 1\n tick_start_nimg = cur_nimg\n tick_start_time = time.time()\n maintenance_time = tick_start_time - tick_end_time\n if done:\n break\n\n # Done.\n if rank == 0:\n print()\n print('Exiting...')\n\n#----------------------------------------------------------------------------\n"
] | [
[
"torch.autograd.profiler.record_function",
"torch.distributed.broadcast",
"torch.zeros",
"numpy.asarray",
"torch.utils.data.DataLoader",
"torch.utils.tensorboard.SummaryWriter",
"torch.device",
"torch.cuda.max_memory_reserved",
"torch.cuda.memory_reserved",
"numpy.clip",
"torch.randn",
"torch.from_numpy",
"numpy.stack",
"torch.tensor",
"torch.distributed.barrier",
"torch.LongTensor",
"torch.empty",
"torch.cuda.current_stream",
"torch.cuda.Event",
"numpy.rint",
"numpy.random.RandomState",
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.max_memory_allocated",
"torch.cuda.reset_peak_memory_stats",
"torch.distributed.all_reduce",
"torch.cuda.memory_allocated"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
QuatZo/HideNSeek-DeepReinforcementLearning-PyGame | [
"fa20ae1c518a3f72e9e37dafea33697ef82e2557"
] | [
"hidenseek/ext/engine.py"
] | [
"import pygame\nimport math\nimport copy\nfrom objects.controllable import Hiding, Seeker\nfrom objects.fixed import Wall\nfrom ext.supportive import Point, Collision, MapGenerator\nimport random\nfrom ext.loggers import LOGGING_DASHES, logger_engine, logger_hiding, logger_seeker\nimport numpy as np\nfrom ext.config import config\n\n\nclass HideNSeek(object):\n \"\"\"\n Engine Class for Hide'n'Seek Game\n\n Attributes\n ----------\n width : int\n width of the game window\n height : int\n height of the game window\n fps : int\n amount of fps you want to lock on\n map_path : string\n path to the map, with *.bmp extension\n clock : pygame.time.Clock\n pygame Clock objects to lock FPS and use timer\n screen : pygame.display\n pygame module to control display window and screen\n dt : float\n time per frame (in miliseconds)\n duration : float\n gameplay maximum duration (in ticks), if no other game over event\n agent_env : dict\n stores agents local environments, which includes walls and enemies\n p_hide_cfg : configparser Object\n config for Hiding Agent\n p_seek_cfg : configparser Object\n config for Seeker Agent \n player_seek : hidenseek.objects.controllable.Seeker\n instance of Seeker Agent\n player_hide : hidenseek.objects.controllable.Hiding\n instance of Hiding Agent\n players_group : pygame.sprite.Group\n group of Sprites for Agents (hidenseek.objects.controllable.Seeker, hidenseek.objects.controllable.Hiding)\n walls_group : pygame.sprite.Group\n group of sprites for Walls (hidenseek.objects.fixed.Wall)\n\n Methods\n -------\n init():\n initializes game environment object, don't confuse with built-in __init__ method\n reset():\n resets game environment\n game_over():\n checks whenever game should be ended or not\n step():\n called every frame, does everything to make game running\n _draw_agent_vision(agent, screen):\n draws agent POV to the given screen\n render(mode='human', close=False)\n renders game depending on given mode; close argument closes the pygame instance\n \"\"\"\n\n def __init__(self, config):\n \"\"\"\n Constructs all neccesary attributes for the HideNSeek Object\n\n Parameters\n ----------\n config : configparser Object\n contains config file in configparser (dict-like) Object\n \"\"\"\n\n self.fps = config['GAME'].getint('FPS', fallback=30)\n self.map_path = config['GAME'].get(\n 'MAP_PATH', fallback='fallback_map') + '.bmp'\n self.clock = None\n self.screen = None\n self.dt = None\n self.cfg = config['GAME']\n self.duration = None\n self.p_hide_cfg = config['AGENT_HIDING']\n self.p_seek_cfg = config['AGENT_SEEKER']\n self.agent_env = {}\n\n logger_engine.info(\"Initializing Game Engine\")\n logger_engine.info(f\"\\tFPS: {self.fps}\")\n\n def init(self, walls, seeker, hider, width, height):\n \"\"\"\n Initializes game environment, which means creating Agents & their POV, \n adding them to Sprite Group and creating Sprite Group for Walls\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n\n self.duration = self.cfg.getint('DURATION', fallback=60)\n self.clock = pygame.time.Clock()\n\n self.width, self.height = width, height\n logger_engine.info(f\"\\tResolution: {self.width}x{self.height}\")\n\n logger_engine.info(\"\\tWalls Sprite Group\")\n self.walls_group = pygame.sprite.Group()\n\n self.walls_group.add(walls)\n self.player_seek = seeker\n self.player_hide = hider\n\n logger_engine.info(\"Initializing Environment Objects\")\n self.player_seek.update_vision({'walls': [], 'enemy': None, })\n self.player_hide.update_vision({'walls': [], 
'enemy': None, })\n\n self._calc_local_env()\n\n logger_engine.info(\"\\tSeeker Vision\")\n self.player_seek.update_vision(self.agent_env['p_seek'])\n\n logger_engine.info(\"\\tHiding Vision\")\n self.player_hide.update_vision(self.agent_env['p_hide'])\n\n logger_engine.info(\"\\tAgents Sprite Group\")\n self.players_group = pygame.sprite.Group()\n self.players_group.add(self.player_seek)\n self.players_group.add(self.player_hide)\n\n def reset(self):\n \"\"\"\n Resets the environment by running init() function\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n\n # it just uses init function\n logger_engine.info(f\"{LOGGING_DASHES} Resetting Game {LOGGING_DASHES}\")\n self.init()\n\n def game_over(self):\n \"\"\"\n Whenever game should end or not. Events:\n - checks whether game duration exceeded given time\n - checks the collision between 2 Agents by using 2 methods: AABB and - if first return POSSIBLE collision - SAT\n\n Parameters\n ----------\n None\n\n Returns\n -------\n is_over : boolean\n whether game ended\n winner : string or None\n who won the game, if the game ended\n \"\"\"\n\n if self.duration <= 0:\n return True, \"HIDING\"\n\n if Collision.aabb(self.player_seek.pos, (self.player_seek.width, self.player_seek.height), self.player_hide.pos, (self.player_hide.width, self.player_hide.height)):\n logger_engine.info(\n \"Rectangle collision, checking Polygon Collision by using SAT Method.\")\n if Collision.sat(self.player_seek.get_abs_vertices(), self.player_hide.get_abs_vertices()):\n logger_engine.info(\"Polygon Collision! Ending the game!\")\n return True, \"SEEKER\"\n return False, None\n\n def _can_create_wall(self, wall, enemy):\n \"\"\"\n Checks whether can add Wall (if there is no collision between newly created wall, local walls and enemies)\n\n Parameters\n ----------\n wall : hidenseek.objects.fixed.Wall\n newly created Wall\n enemy : hidenseek.objects.controllable.Player (Hiding or Seeker) or None\n if enemy in local environment then Player instance, else None\n\n Returns\n -------\n can : bool\n if can create wall\n \"\"\"\n\n # check if dynamically created POV lines are shorter than eyesight -- if yes, then it's not possible to create a Wall\n local_wall_edges = self.player_hide.reduce_wall_edges(\n self.agent_env['p_hide']['walls'])\n wall_vertices = wall.get_abs_vertices()\n wall_edges = [wall_vertices[0], wall.pos,\n wall_vertices[3]] # only closer edges & center\n\n vision_ray_points = [[self.player_hide.pos, wall_edge]\n for wall_edge in wall_edges] + [[self.player_hide.pos, self.player_hide.vision_top]]\n for ray in vision_ray_points:\n ray_dist = ray[0].distance(ray[1])\n for local_wall_edge in local_wall_edges:\n p = Collision.line_intersection(ray, local_wall_edge)\n if p and p.distance(ray[0]) < ray_dist:\n logger_hiding.info(\n f\"\\tCouldn't add Wall #{self.player_hide.walls_counter + 1}, because something is on the way.\")\n return False\n\n for _wall in self.agent_env['p_hide']['walls']:\n if Collision.aabb(wall.pos, (wall.width, wall.height), _wall.pos, (_wall.width, _wall.height)):\n if Collision.sat(wall.get_abs_vertices(), _wall.get_abs_vertices()):\n logger_hiding.info(\n f\"\\tCouldn't add Wall #{self.player_hide.walls_counter + 1}, because it would overlap with other Wall.\")\n return False\n\n if enemy and Collision.aabb(enemy.pos, (enemy.width, enemy.height), wall.pos, (wall.width, wall.height)):\n if Collision.sat(self.player_hide.get_abs_vertices(), enemy.get_abs_vertices()):\n logger_hiding.info(\n f\"\\tCouldn't add 
Wall #{self.player_hide.walls_counter + 1}, because it would overlap with Enemy Agent\")\n return False\n return True\n\n def _add_wall(self):\n \"\"\"\n Adds Wall\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n\n if self.player_hide.walls_counter < self.player_hide.walls_max and not self.player_hide.wall_timer:\n logger_hiding.info(\n f\"\\tAdding Wall #{self.player_hide.walls_counter + 1}\")\n\n wall_pos = copy.deepcopy(self.player_hide.pos)\n wall_size = (max(int(self.player_hide.width / 10), 2),\n max(int(self.player_hide.height / 2), 2)) # minimum 2x2 Wall\n vision_arc_range = np.sqrt((self.player_hide.vision_top.x - self.player_hide.pos.x) * (self.player_hide.vision_top.x - self.player_hide.pos.x) + (\n self.player_hide.vision_top.y - self.player_hide.pos.y) * (self.player_hide.vision_top.y - self.player_hide.pos.y))\n # vision arc range - 1.5 wall width, so the wall is always created inside PoV.\n wall_pos.x = wall_pos.x + vision_arc_range - \\\n (1.5 * wall_size[0])\n wall_pos = Point.triangle_unit_circle_relative(\n self.player_hide.direction, self.player_hide.pos, wall_pos)\n\n wall = Wall(self.player_hide, wall_pos.x,\n wall_pos.y, wall_size, self.cfg['GRAPHICS_PATH_WALL_OWNER'])\n logger_hiding.info(f\"\\t\\tPosition: {wall_pos}\")\n wall._rotate(self.player_hide.direction, wall_pos)\n if self._can_create_wall(wall, self.agent_env['p_hide']['enemy']):\n self.player_hide.walls_counter += 1\n logger_hiding.info(\n f\"\\tAdded wall #{self.player_hide.walls_counter}\")\n self.walls_group.add(wall)\n self.player_hide.wall_timer = copy.deepcopy(\n self.player_hide.wall_timer_init)\n else:\n del wall\n\n def _remove_wall(self):\n \"\"\"\n Removes (randomly chosen) Wall\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n if self.agent_env['p_seek']['walls'] and not self.player_seek.wall_timer:\n # remove closest in local env\n enemy_walls = [\n wall for wall in self.agent_env['p_seek']['walls'] if wall.owner]\n if enemy_walls:\n wall_dist = [self.player_seek.pos.distance(\n wall.pos) for wall in enemy_walls]\n closest = wall_dist.index(min(wall_dist))\n delete_wall = enemy_walls[closest]\n delete_wall.owner.walls_counter -= 1\n self.walls_group.remove(delete_wall)\n del delete_wall\n self.player_seek.wall_timer = self.player_seek.wall_timer_init\n\n def _reduce_agent_cooldown(self, agent):\n \"\"\"\n Reduces agent cooldown by 1 frame every update\n\n Parameters\n ----------\n agent : hidenseek.objects.controllable.Player\n Agent instance\n\n Returns\n -------\n None\n \"\"\"\n\n if agent.wall_timer > 0:\n agent.wall_timer -= 1\n # for negative it's 0, for positive - higher than 0, needed if time-based cooldown (i.e. 5s) instead of frame-based (i.e. 
500 frames)\n agent.wall_timer = max(agent.wall_timer, 0)\n\n def _calc_local_env(self):\n \"\"\"\n Calculates local environments for Agents\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n\n self.agent_env['p_seek'] = {\n 'walls': Collision.get_objects_in_local_env(self.walls_group, self.player_seek.pos, self.player_seek.vision_radius, self.player_seek.direction, self.player_seek.ray_objects),\n 'enemy': self.player_hide if Collision.get_objects_in_local_env([self.player_hide], self.player_seek.pos, self.player_seek.vision_radius, self.player_seek.direction, self.player_seek.ray_objects) else None,\n }\n self.agent_env['p_hide'] = {\n 'walls': Collision.get_objects_in_local_env(self.walls_group, self.player_hide.pos, self.player_hide.vision_radius, self.player_hide.direction, self.player_hide.ray_objects),\n 'enemy': self.player_seek if Collision.get_objects_in_local_env([self.player_seek], self.player_hide.pos, self.player_hide.vision_radius, self.player_hide.direction, self.player_hide.ray_objects) else None,\n }\n\n def step(self):\n \"\"\"\n Runs every frame, does:\n overwrites screen\n locks fps\n updates Agents (depending on the Agent Action) based on previously calculated Agent local environments\n creates local environment for Agents\n updates Agents POV\n updates game duration\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n\n # clean screen\n logger_engine.info(\"New Frame\")\n logger_engine.debug(\"\\tLocking FPS\")\n self.dt = self.clock.tick_busy_loop(self.fps)\n logger_engine.info(f\"\\tFPS: {self.clock.get_fps()}\")\n\n self._reduce_agent_cooldown(self.player_seek)\n self._reduce_agent_cooldown(self.player_hide)\n\n logger_engine.debug(\"\\tTaking actions\")\n new_action_seek = copy.deepcopy(\n random.choice(self.player_seek.actions))\n new_action_hide = copy.deepcopy(\n random.choice(self.player_hide.actions))\n\n if new_action_seek['type'] == 'remove_wall':\n self._remove_wall()\n else:\n self.player_seek.update(\n new_action_seek, self.agent_env['p_seek'], logger_seeker)\n\n if new_action_hide['type'] == 'add_wall':\n self._add_wall()\n else:\n self.player_hide.update(\n new_action_hide, self.agent_env['p_hide'], logger_hiding)\n\n logger_engine.debug(\"\\tCalculating Local Environments\")\n self._calc_local_env()\n\n logger_engine.debug(\"\\tUpdating vision\")\n self.player_seek.update_vision(self.agent_env['p_seek'])\n self.player_hide.update_vision(self.agent_env['p_hide'])\n\n self.duration -= 1\n logger_engine.info(f\"\\tLeft: {self.duration} frames\")\n\n def _draw_agent_vision(self, agent, screen):\n \"\"\"\n Function used only in HideNSeek class. Draws Agent POV on given Screen\n\n Parameters\n ----------\n agent : hidenseek.objects.controllable.Player\n agent instance, may be Player, Hiding or Seeker\n screen : pygame.Display\n game window\n\n Returns\n -------\n None\n \"\"\"\n\n pygame.draw.line(screen, (0, 255, 0), (agent.pos.x, agent.pos.y),\n (agent.vision_top.x, agent.vision_top.y), 1)\n ray_obj = agent.ray_points # without square object\n for obj in ray_obj:\n pygame.draw.line(screen,\n (255, 85, 55),\n (agent.pos.x, agent.pos.y),\n (obj.x, obj.y)\n )\n\n def _draw_agent(self, agent, screen):\n \"\"\"\n Function used only in HideNSeek class. 
Draws Agent POV on given Screen\n\n Parameters\n ----------\n agent : hidenseek.objects.controllable.Player\n agent instance, may be Player, Hiding or Seeker\n screen : pygame.Display\n game window\n\n Returns\n -------\n None\n \"\"\"\n\n # Copy and then rotate the original image.\n copied_sprite = agent.sprites[agent.image_index].copy()\n copied_sprite = pygame.transform.scale(copied_sprite, (agent.width, agent.height))\n copied_sprite = pygame.transform.rotate(\n copied_sprite, -agent.direction * 180 / math.pi)\n\n copied_sprite_rect = copied_sprite.get_rect()\n copied_sprite_rect.center = (agent.pos.x, agent.pos.y)\n screen.blit(copied_sprite, copied_sprite_rect)\n\n agent.image = pygame.Surface((agent.width, agent.height))\n agent.image.set_colorkey((0, 0, 0))\n\n def render(self, mode='human', close=False):\n \"\"\"\n Renders game based on the mode. Raises Exception if unexpected render mode.\n\n Parameters\n ----------\n mode : string\n mode in which game should be rendered (graphic, console, rgb_array) \n close : boolean\n whether pygame instance should be shutdown\n\n Returns\n -------\n None\n \"\"\"\n\n if mode == 'human':\n if close:\n pygame.quit()\n return\n if not self.screen:\n pygame.init()\n self.screen = pygame.display.set_mode(\n (self.width, self.height), 0, 32)\n\n self.screen.fill((0, 0, 0))\n self.walls_group.draw(self.screen)\n\n if self.cfg.getint('DRAW_POV', fallback=1):\n self._draw_agent_vision(self.player_seek, self.screen)\n self._draw_agent_vision(self.player_hide, self.screen)\n self._draw_agent(self.player_hide, self.screen)\n self._draw_agent(self.player_seek, self.screen)\n\n self.players_group.draw(self.screen)\n\n pygame.display.update()\n elif mode == 'console':\n pass\n else:\n raise Exception(\n \"Unexpected render mode, available: 'human', 'console', 'rgb_array'\")\n"
] | [
[
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PyTorchRL/pytorchrl | [
"055843ab58a06ba1f77da73082be6f23cf453ddd",
"055843ab58a06ba1f77da73082be6f23cf453ddd"
] | [
"pytorchrl/agent/env/openai_baselines_dependencies/vec_env/dummy_vec_env.py",
"pytorchrl/agent/env/openai_baselines_dependencies/Monitor.py"
] | [
"import numpy as np\nfrom pytorchrl.agent.env.openai_baselines_dependencies.vec_env.vec_env import VecEnv\nfrom pytorchrl.agent.env.openai_baselines_dependencies.vec_env.util import copy_obs_dict, dict_to_obs, obs_space_info\n\n\nclass DummyVecEnv(VecEnv):\n \"\"\"\n VecEnv that does runs multiple environments sequentially, that is,\n the step and reset commands are send to one environment at a time.\n Useful when debugging and when num_env == 1 (in the latter case,\n avoids communication overhead)\n \"\"\"\n def __init__(self, env_fns):\n \"\"\"\n Arguments:\n env_fns: iterable of callables functions that build environments\n \"\"\"\n self.envs = [fn() for fn in env_fns]\n env = self.envs[0]\n VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)\n obs_space = env.observation_space\n self.keys, shapes, dtypes = obs_space_info(obs_space)\n\n self.buf_obs = { k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys }\n self.buf_dones = np.zeros((self.num_envs,), dtype=np.bool)\n self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)\n self.buf_infos = [{} for _ in range(self.num_envs)]\n self.actions = None\n self.spec = self.envs[0].spec\n\n def step_async(self, actions):\n listify = True\n try:\n if len(actions) == self.num_envs:\n listify = False\n except TypeError:\n pass\n\n if not listify:\n self.actions = actions\n else:\n assert self.num_envs == 1, \"actions {} is either not a list or has a wrong size - cannot match to {} environments\".format(actions, self.num_envs)\n self.actions = [actions]\n\n def step_wait(self):\n for e in range(self.num_envs):\n action = self.actions[e]\n # if isinstance(self.envs[e].action_space, spaces.Discrete):\n # action = int(action)\n\n obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[e].step(action)\n if self.buf_dones[e]:\n obs = self.envs[e].reset()\n self._save_obs(e, obs)\n return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones),\n self.buf_infos.copy())\n\n def reset(self):\n for e in range(self.num_envs):\n obs = self.envs[e].reset()\n self._save_obs(e, obs)\n return self._obs_from_buf()\n\n def reset_single_env(self, num_env):\n for e in range(self.num_envs):\n obs = self.envs[e].reset()\n self._save_obs(e, obs)\n return self._obs_from_buf()\n\n def _save_obs(self, e, obs):\n for k in self.keys:\n if k is None:\n self.buf_obs[k][e] = obs\n else:\n self.buf_obs[k][e] = obs[k]\n\n def _obs_from_buf(self):\n return dict_to_obs(copy_obs_dict(self.buf_obs))\n\n def get_images(self):\n return [env.render(mode='rgb_array') for env in self.envs]\n\n def render(self, mode='human'):\n if self.num_envs == 1:\n return self.envs[0].render(mode=mode)\n else:\n return super().render(mode=mode)",
"import csv\nimport time\nimport json\nfrom glob import glob\nimport os.path as osp\nfrom gym.core import Wrapper\n\n\nclass Monitor(Wrapper):\n EXT = \"monitor.csv\"\n f = None\n\n def __init__(self, env, filename, allow_early_resets=False, reset_keywords=(), info_keywords=()):\n Wrapper.__init__(self, env=env)\n self.tstart = time.time()\n if filename:\n self.results_writer = ResultsWriter(filename,\n header={\"t_start\": time.time(), 'env_id' : env.spec and env.spec.id},\n extra_keys=reset_keywords + info_keywords\n )\n else:\n self.results_writer = None\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards = None\n self.needs_reset = True\n self.episode_rewards = []\n self.episode_lengths = []\n self.episode_times = []\n self.total_steps = 0\n self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()\n\n def reset(self, **kwargs):\n self.reset_state()\n for k in self.reset_keywords:\n v = kwargs.get(k)\n if v is None:\n raise ValueError('Expected you to pass kwarg %s into reset'%k)\n self.current_reset_info[k] = v\n return self.env.reset(**kwargs)\n\n def reset_state(self):\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\"Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)\")\n self.rewards = []\n self.needs_reset = False\n\n\n def step(self, action):\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n ob, rew, done, info = self.env.step(action)\n self.update(ob, rew, done, info)\n return (ob, rew, done, info)\n\n def update(self, ob, rew, done, info):\n self.rewards.append(rew)\n if done:\n self.needs_reset = True\n eprew = sum(self.rewards)\n eplen = len(self.rewards)\n epinfo = {\"r\": round(eprew, 6), \"l\": eplen, \"t\": round(time.time() - self.tstart, 6)}\n for k in self.info_keywords:\n epinfo[k] = info[k]\n self.episode_rewards.append(eprew)\n self.episode_lengths.append(eplen)\n self.episode_times.append(time.time() - self.tstart)\n epinfo.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(epinfo)\n assert isinstance(info, dict)\n if isinstance(info, dict):\n info['episode'] = epinfo\n\n self.total_steps += 1\n\n def close(self):\n super(Monitor, self).close()\n if self.f is not None:\n self.f.close()\n\n def get_total_steps(self):\n return self.total_steps\n\n def get_episode_rewards(self):\n return self.episode_rewards\n\n def get_episode_lengths(self):\n return self.episode_lengths\n\n def get_episode_times(self):\n return self.episode_times\n\nclass LoadMonitorResultsError(Exception):\n pass\n\n\nclass ResultsWriter(object):\n def __init__(self, filename, header='', extra_keys=()):\n self.extra_keys = extra_keys\n assert filename is not None\n if not filename.endswith(Monitor.EXT):\n if osp.isdir(filename):\n filename = osp.join(filename, Monitor.EXT)\n else:\n filename = filename + \".\" + Monitor.EXT\n self.f = open(filename, \"wt\")\n if isinstance(header, dict):\n header = '# {} \\n'.format(json.dumps(header))\n self.f.write(header)\n self.logger = csv.DictWriter(self.f, fieldnames=('r', 'l', 't')+tuple(extra_keys))\n self.logger.writeheader()\n self.f.flush()\n\n def write_row(self, epinfo):\n if self.logger:\n self.logger.writerow(epinfo)\n self.f.flush()\n\n\ndef get_monitor_files(dir):\n return glob(osp.join(dir, \"*\" + 
Monitor.EXT))\n\ndef load_results(dir):\n import pandas\n monitor_files = (\n glob(osp.join(dir, \"*monitor.json\")) +\n glob(osp.join(dir, \"*monitor.csv\"))) # get both csv and (old) json files\n if not monitor_files:\n raise LoadMonitorResultsError(\"no monitor files of the form *%s found in %s\" % (Monitor.EXT, dir))\n dfs = []\n headers = []\n for fname in monitor_files:\n with open(fname, 'rt') as fh:\n if fname.endswith('csv'):\n firstline = fh.readline()\n if not firstline:\n continue\n assert firstline[0] == '#'\n header = json.loads(firstline[1:])\n df = pandas.read_csv(fh, index_col=None)\n headers.append(header)\n elif fname.endswith('json'): # Deprecated json format\n episodes = []\n lines = fh.readlines()\n header = json.loads(lines[0])\n headers.append(header)\n for line in lines[1:]:\n episode = json.loads(line)\n episodes.append(episode)\n df = pandas.DataFrame(episodes)\n else:\n assert 0, 'unreachable'\n df['t'] += header['t_start']\n dfs.append(df)\n df = pandas.concat(dfs)\n df.sort_values('t', inplace=True)\n df.reset_index(inplace=True)\n df['t'] -= min(header['t_start'] for header in headers)\n df.headers = headers # HACK to preserve backwards compatibility\n return df"
] | [
[
"numpy.copy",
"numpy.zeros"
],
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
sfwyly/NVAE | [
"e944d375945fa50fd71ef5ddfbc97b3159e68aec"
] | [
"encoder.py"
] | [
"\r\nimport tensorflow as tf\r\nimport tensorflow.keras.models as models\r\nimport tensorflow.keras.layers as layers\r\n\r\nfrom common import EncoderResidualBlock, Swish\r\n\r\n\r\nclass ConvBlock(layers.Layer):\r\n\r\n def __init__(self, in_channels, out_channels):\r\n\r\n super(ConvBlock, self).__init__()\r\n\r\n self._seq = models.Sequential([\r\n layers.Conv2D(out_channels, kernel_size=3, padding=\"same\"), # in_channels -> out_channels\r\n layers.Conv2D(out_channels // 2, kernel_size =1),\r\n layers.BatchNormalization(), Swish(),\r\n layers.Conv2D(out_channels, kernel_size=3, strides = 2, padding = \"same\"),\r\n layers.BatchNormalization(), Swish()\r\n ])\r\n\r\n def call(self, x):\r\n\r\n return self._seq(x)\r\n\r\n\r\nclass EncoderBlock(layers.Layer):\r\n\r\n def __init__(self, channels):\r\n\r\n super(EncoderBlock, self).__init__()\r\n\r\n self.channels = channels\r\n self.modules = []\r\n\r\n for i in range(len(channels) - 1):\r\n self.modules.append(ConvBlock(channels[i], channels[i+1]))\r\n def call(self, x):\r\n\r\n for module in self.modules:\r\n x = module(x)\r\n return x\r\n\r\nclass Encoder(layers.Layer):\r\n\r\n def __init__(self, z_dim):\r\n\r\n super(Encoder, self).__init__()\r\n self.encoder_blocks = [\r\n EncoderBlock([3, z_dim //16, z_dim //8]),\r\n EncoderBlock([z_dim//8, z_dim//4, z_dim//2]),\r\n EncoderBlock([z_dim//2, z_dim])\r\n ]\r\n\r\n self.encoder_residual_blocks = [\r\n EncoderResidualBlock(z_dim // 8),\r\n EncoderResidualBlock(z_dim // 2),\r\n EncoderResidualBlock(z_dim)\r\n ]\r\n\r\n self.condition_x = models.Sequential([\r\n Swish(), layers.Conv2D(z_dim *2, kernel_size = 1) # z_dim -> z_dim * 2\r\n ])\r\n\r\n def call(self, x):\r\n xs = []\r\n last_x = x\r\n\r\n for e, r in zip(self.encoder_blocks, self.encoder_residual_blocks):\r\n x = r(e(x))\r\n last_x = x\r\n xs.append(x)\r\n\r\n mu, log_var = tf.split(self.condition_x(last_x), 2, -1)\r\n\r\n return mu, log_var, xs[:-1][::-1]\r\n\r\n"
] | [
[
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.BatchNormalization"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |