repo_name (string, lengths 6–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
carsecond/AeroPy | [
"81685f364abd9536fc62dce114f14bef191dab8c"
] | [
"aeropy/structural/stable_solution.py"
] | [
"import numpy as np\nfrom scipy.optimize import minimize\n\n\nclass mesh_1D():\n def __init__(self, alpha=[1, 1], alpha_nodes=[0, 1], mesh_n=10, x2=0):\n self.n = mesh_n\n self.x_p = np.linspace(0, 1, mesh_n)\n self.dx_p = 1./(mesh_n-1)\n self.alpha = alpha\n self.alpha_nodes = alpha_nodes\n self.x2 = x2\n\n if self.alpha_nodes[0] != 0 or self.alpha_nodes[-1] != 1:\n raise Exception('Need to define alpha_x for whole domain')\n\n if len(self.alpha_nodes) != len(alpha):\n raise Exception('Number of alpha nodes and values must be same')\n if list(self.alpha_nodes) != sorted(self.alpha_nodes):\n raise Exception('Alpha nodes must be in increasing order')\n\n self.nodes_xc = self.mesh_child()\n\n def mesh_child(self):\n self.n = len(self.x_p)\n x = self.x_p\n counter = 0\n self.x_c = self.x_p.copy()\n self.alpha_x = []\n for i in range(len(self.alpha_nodes)-1):\n\n alpha_i = self.alpha_nodes[i]\n alpha_f = self.alpha_nodes[i+1]\n x_filtered = x[x >= alpha_i]\n x_filtered = x_filtered[x_filtered < alpha_f]\n self.x_c[counter:counter+len(x_filtered)] = self.alpha[i]*x_filtered\n counter += len(x_filtered)\n self.alpha_x += len(x_filtered)*[self.alpha[i]]\n # Add last\n self.alpha_x.append(self.alpha[-1])\n self.alpha_x = np.array(self.alpha_x)\n self.x_c[-1] *= self.alpha[-1]\n\n\nclass properties():\n def __init__(self, young=70e9, poisson=.3, dimensions=[0.01, 0.01],\n crosssection='square'):\n \"\"\"Stores calculates material or structural properties. If 'square',\n dimensions = [width, height]\"\"\"\n self.young = young\n self.poisson = poisson\n self.dimensions = dimensions\n self.length = 1\n\n if crosssection == 'square':\n self.area = dimensions[0]*dimensions[1]\n self.inertia = dimensions[0]*dimensions[1]**3/12.\n if young < 0 or poisson < 0 or poisson > 1 or self.area <= 0:\n raise Exception('Material properties need to make sense')\n\n\nclass boundary_conditions():\n def __init__(self, concentrated_load=np.array([[0, 0, 0], ]), load_x=[1],\n distributed_load = 0):\n self.concentrated_load = concentrated_load\n self.concentrated_x = load_x\n self.concentrated_n = len(concentrated_load)\n self.distributed_load = None\n\n self.distributed_load = distributed_load\n\n if len(concentrated_load) != len(load_x):\n raise Exception('load values and x lists have to match')\n\nclass euler_bernoulle():\n def __init__(self, properties, load, load_type, geometry,\n x = np.linspace(0,1), boundaries = {'x':[], 'u':[]}):\n self.properties = properties\n self.load = load\n self.load_type = load_type\n self.g = geometry\n self.g.x1_grid = x\n self.boundaries = boundaries\n\n def analytical_solutions(self):\n if self.load_type == 'concentrated':\n bp = self.properties\n self.g.D = self.load/(6*bp.young*bp.inertia) * \\\n np.array([0, 0, 3, -1])\n elif self.load_type == 'distributed':\n bp = self.properties\n c2 = 6*(bp.length**2)*self.load/(24*bp.young*bp.inertia)\n c3 = -4*(bp.length)*self.load/(24*bp.young*bp.inertia)\n c4 = (1)*self.load/(24*bp.young*bp.inertia)\n self.g.D = np.array([0, 0, c2, c3, c4])\n\n def free_energy(self):\n bp = self.properties\n if self.load_type == 'concentrated':\n raise NotImplementedError\n elif self.load_type == 'distributed':\n ddu = self.g.x3(self.g.x1_grid, 'x11')\n self.phi = bp.young*bp.inertia/2*ddu**2\n\n def strain_energy(self):\n self.U = np.trapz(self.phi, self.g.x1_grid)\n\n def work(self):\n u = self.g.x3(self.g.x1_grid)\n if self.load_type == 'concentrated':\n self.W = self.load*u[-1]\n elif self.load_type == 'distributed':\n self.W = np.trapz(self.load*u, 
self.g.x1_grid)\n\n def residual(self):\n self.R = self.U - self.W\n\n def minimum_potential(self, x0=[0,0,0]):\n def to_optimize(x):\n self.g.D = [0,0] + list(x)\n self.work()\n self.free_energy()\n self.strain_energy()\n self.residual()\n return self.R\n\n # With bounds\n bounds = np.array(((-0.01,0.01),)*len(x0))\n\n res = minimize(to_optimize, x0, bounds=bounds)\n self.g.D = [0,0] + list(res.x)\n self.R = res.fun\n return(res.x, res.fun)\n\nclass structure():\n def __init__(self, geometry_parent, geometry_child, mesh, properties,\n bc, model='beam'):\n # Defining geometries\n self.g_p = geometry_parent\n self.g_c = geometry_child\n\n self.model = model\n self.bc = bc\n self.mesh = mesh\n self.properties = properties\n\n # storage varaibles\n self.opt_x = []\n self.opt_f = []\n\n def u(self, input=None, diff=None):\n # If for a one time run, run for new input and revert back to original\n # input\n if input is not None:\n stored_x_p = self.mesh.x_p\n self.mesh.x_p = input\n self.mesh.mesh_child()\n\n parent = self.g_p.r(input=self.mesh.x_p, x2=self.mesh.x2,\n input_type='x1', diff=diff)\n child = self.g_c.r(input=self.mesh.x_c, x2=self.mesh.x2,\n input_type='x1', diff=diff)\n\n self.cosine_direction(diff=None)\n # Taking into consideration extension of the beam\n child[0] *= self.mesh.alpha_x\n output = child - parent\n\n for i in range(self.mesh.n):\n output[:, i] = np.matmul(self.R[i], output[:, i])\n if diff is 'x1':\n dR = self.cosine_direction(diff='x1')\n parent = self.g_p.r(input=self.mesh.x_p, x2=self.mesh.x2,\n input_type='x1', diff=None)\n child = self.g_c.r(input=self.mesh.x_c, x2=self.mesh.x2,\n input_type='x1', diff=None)\n position_delta = child - parent\n output[:, i] += np.matmul(dR[i], position_delta[:, i])\n\n if input is not None:\n self.mesh.x_p = stored_x_p\n self.mesh.mesh_child()\n return(output)\n\n def calculate_position(self, input=None, diff=None):\n # If for a one time run, run for new input and revert back to original\n # input\n if input is not None:\n stored_x_p = self.mesh.x_p\n self.mesh.x_p = input\n self.mesh.mesh_child()\n\n self.r_p = self.g_p.r(input=self.mesh.x_p, x2=self.mesh.x2,\n input_type='x1', diff=diff)\n self.r_c = self.g_c.r(input=self.mesh.x_c, x2=self.mesh.x2,\n input_type='x1', diff=diff)\n if diff is not None:\n r_p, r_c = self.r_p, self.r_c\n self.calculate_position(input=input, diff=None)\n return(r_p, r_c)\n if input is not None:\n self.mesh.x_p = stored_x_p\n self.mesh.mesh_child()\n\n def cosine_direction(self, diff=None, g=None):\n \"\"\"Calculate cosine matrix between a rectangular cartesian system and\n a curvilinear(or given) coordinate system. Returns matrix with shape\n (number of nodes, Ri, Rj). Different from rest of code where (x,y,n)\"\"\"\n def dot(a, b):\n a_1, a_2 = a\n b_1, b_2 = b\n return(a_1*b_1 + a_2*b_2)\n self.R = np.zeros((self.mesh.n, 2, 2))\n e = np.eye(2)\n for k in range(self.mesh.n):\n for i in range(2):\n for j in range(2):\n gi = e[i]\n if g is None:\n gj = self.g_p.g(j+1, np.array([self.mesh.x_c[k]]),\n diff=diff)\n else:\n gj = g[j]\n\n self.R[k][i][j] = dot(gi, gj)\n return(self.R)\n\n def uij(self, i, j, diff=None, input_type='x1'):\n '''Indexes here are from 1 to n. 
So +=1 compared to rest'''\n # TODO: makes this more optimal(calculating u multiple times)\n\n ui_j = self.u(diff='x%i' % (j))[i-1]\n ui = self.u()[i-1]\n\n for l in range(1, 3):\n ui_j += self.g_c.christoffel(i, j, l, self.mesh.x_p,\n self.mesh.x2)*ui\n\n return(ui_j)\n\n def strain(self):\n # For cartesian\n self.epsilon = np.zeros([2, 2, self.mesh.n])\n for i in range(2):\n for j in range(2):\n ii = i + 1\n jj = j + 1\n self.epsilon[i][j] = .5*(self.uij(ii, jj) +\n self.uij(jj, ii))\n # for m in range(2):\n # mm = m + 1\n # self.epsilon[i][j] -= .5*(self.uij(mm, jj) *\n # self.uij(mm, ii))\n return(self.epsilon)\n\n def stress(self, loading_condition='uniaxial'):\n E = self.properties.young\n nu = self.properties.poisson\n if loading_condition == 'uniaxial':\n self.sigma = E*self.epsilon\n if loading_condition == 'plane_stress':\n self.sigma = E/(1-nu**2)*(1-nu)*self.epsilon\n for k in range(2):\n self.sigma[k][k] += E/(1-nu**2)*nu*self.epsilon[k][k]\n return(self.sigma)\n\n def strain_energy(self):\n energy = 0\n for i in range(len(self.sigma)):\n for j in range(len(self.sigma[i])):\n for k in range(len(self.sigma[i][j])):\n if k == 0 or k == self.mesh.n - 1:\n multiplier = .5*self.properties.area*self.mesh.dx_p/2.\n else:\n multiplier = .5*self.properties.area*self.mesh.dx_p\n energy += multiplier*self.sigma[i][j][k]*self.epsilon[i][j][k]\n return(energy)\n\n def work(self):\n energy = 0\n for i in range(self.bc.concentrated_n):\n u = self.u(np.array([self.bc.concentrated_x[i]]))\n for j in range(2):\n energy += self.bc.concentrated_load[i][j] * u[j][0]\n return(energy)\n\n def residual(self, input=None, input_type = 'Strain',\n loading_condition = None, input_function = None):\n if input is not None:\n if input_function is not None:\n input = input_function(input)\n if input_type is 'Strain':\n self.mesh.alpha = 1+np.array(input)\n self.mesh.mesh_child()\n elif input_type is 'Geometry':\n self.g_c.a = input\n self.mesh.mesh_child()\n self.calculate_position()\n self.strain()\n self.stress(loading_condition = loading_condition)\n energy = self.strain_energy() - self.work()\n return(energy)\n\n def find_stable(self, x0=[0], bounds=None, input_type = 'Strain',\n loading_condition = 'uniaxial', input_function = lambda x:x):\n def _callback(x):\n input = (bounds[:,1]-bounds[:,0])*x + bounds[:,0]\n self.opt_x.append(input)\n self.opt_f.append(self.residual(input, input_type = input_type,\n loading_condition = loading_condition,\n input_function = input_function))\n def to_optimize(x):\n input = (bounds[:,1]-bounds[:,0])*x + bounds[:,0]\n output = self.residual(input, input_type = input_type,\n loading_condition = loading_condition,\n input_function = input_function)\n\n # print(output, input)\n return output\n\n # With bounds\n try:\n x0_nd = (x0-bounds[:,0]) / (bounds[:,1]-bounds[:,0])\n bounds_nd = np.array([[-1,1],]*len(x0))\n\n self.opt_x = [x0]\n self.opt_f = [to_optimize(x0_nd)]\n\n res = minimize(to_optimize, x0_nd, bounds=bounds_nd, callback=_callback)\n except(TypeError):\n bounds = np.array(((0,1),)*len(x0))\n self.opt_x = [x0]\n self.opt_f = [to_optimize(x0)]\n\n res = minimize(to_optimize, x0, callback=_callback)\n x = (bounds[:,1]-bounds[:,0])*res.x + bounds[:,0]\n return(x, res.fun)\n\n def sweep_strains(self, strains, strains_x, reorder=None,\n loading_condition = 'uniaxial'):\n energy_list = []\n residual_list = []\n n = len(strains)\n for i in range(n):\n self.mesh.alpha = 1 + strains[i]\n self.mesh.alpha_x = strains_x\n self.mesh.mesh_child()\n self.strain()\n 
self.stress()\n residual_list.append(self.residual(input_type = 'Strain',\n loading_condition = loading_condition))\n energy_list.append(self.strain_energy())\n if reorder is not None:\n residual_list = np.resize(residual_list, reorder)\n energy_list = np.resize(energy_list, reorder)\n return(energy_list, residual_list)\n\n def sweep_geometries(self, geom_variables, input_function, reorder=None,\n loading_condition = 'plane_stress'):\n energy_list = []\n residual_list = []\n n = len(geom_variables)\n for i in range(n):\n print(i)\n input = geom_variables[i]\n residual_list.append(self.residual(input, input_type = 'Geometry',\n input_function = input_function,\n loading_condition = loading_condition))\n energy_list.append(self.strain_energy())\n if reorder is not None:\n residual_list = np.resize(residual_list, reorder)\n energy_list = np.resize(energy_list, reorder)\n return(energy_list, residual_list)\n"
] | [
[
"numpy.resize",
"numpy.linspace",
"numpy.eye",
"numpy.matmul",
"scipy.optimize.minimize",
"numpy.array",
"numpy.zeros",
"numpy.trapz"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
drkwdck/Codec | [
"5cf7f2b3070f50a84b5e37d9333c34b23295f563"
] | [
"MetricsCalculator.py"
] | [
"import numpy as np\n\n\nclass MetricsCalculator:\n @staticmethod\n def PSNR(original: np.ndarray, compressed: np.ndarray) -> float:\n mse = np.mean((original - compressed) ** 2)\n if mse == 0:\n return 100\n max_pixel = 255.0\n psnr = 20 * np.log10(max_pixel / np.sqrt(mse))\n return psnr\n"
] | [
[
"numpy.mean",
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tatarize/svgpathtools | [
"3bd88b02b482491e13707602352c00dfa51d10f4"
] | [
"test/test_parsing.py"
] | [
"# Note: This file was taken mostly as is from the svg.path module (v 2.0)\nfrom __future__ import division, absolute_import, print_function\nimport unittest\nfrom svgpathtools import *\nimport svgpathtools\nimport numpy as np\n\n\ndef construct_rotation_tf(a, x, y):\n a = a * np.pi / 180.0\n tf_offset = np.identity(3)\n tf_offset[0:2, 2:3] = np.array([[x], [y]])\n tf_rotate = np.identity(3)\n tf_rotate[0:2, 0:2] = np.array([[np.cos(a), -np.sin(a)],\n [np.sin(a), np.cos(a)]])\n tf_offset_neg = np.identity(3)\n tf_offset_neg[0:2, 2:3] = np.array([[-x], [-y]])\n\n return tf_offset.dot(tf_rotate).dot(tf_offset_neg)\n\n\nclass TestParser(unittest.TestCase):\n\n def test_svg_examples(self):\n \"\"\"Examples from the SVG spec\"\"\"\n path1 = parse_path('M 100 100 L 300 100 L 200 300 z')\n self.assertEqual(path1, Path(Line(100 + 100j, 300 + 100j),\n Line(300 + 100j, 200 + 300j),\n Line(200 + 300j, 100 + 100j)))\n self.assertTrue(path1.isclosed())\n\n # for Z command behavior when there is multiple subpaths\n path1 = parse_path('M 0 0 L 50 20 M 100 100 L 300 100 L 200 300 z')\n self.assertEqual(path1, Path(Line(0 + 0j, 50 + 20j),\n Line(100 + 100j, 300 + 100j),\n Line(300 + 100j, 200 + 300j),\n Line(200 + 300j, 100 + 100j)))\n\n path1 = parse_path('M 100 100 L 200 200')\n path2 = parse_path('M100 100L200 200')\n self.assertEqual(path1, path2)\n\n path1 = parse_path('M 100 200 L 200 100 L -100 -200')\n path2 = parse_path('M 100 200 L 200 100 -100 -200')\n self.assertEqual(path1, path2)\n\n path1 = parse_path(\"\"\"M100,200 C100,100 250,100 250,200\n S400,300 400,200\"\"\")\n self.assertEqual(path1, Path(CubicBezier(100 + 200j,\n 100 + 100j,\n 250 + 100j,\n 250 + 200j),\n CubicBezier(250 + 200j,\n 250 + 300j,\n 400 + 300j,\n 400 + 200j)))\n\n path1 = parse_path('M100,200 C100,100 400,100 400,200')\n self.assertEqual(path1, Path(CubicBezier(100 + 200j,\n 100 + 100j,\n 400 + 100j,\n 400 + 200j)))\n\n path1 = parse_path('M100,500 C25,400 475,400 400,500')\n self.assertEqual(path1, Path(CubicBezier(100 + 500j,\n 25 + 400j,\n 475 + 400j,\n 400 + 500j)))\n\n path1 = parse_path('M100,800 C175,700 325,700 400,800')\n self.assertEqual(path1, Path(CubicBezier(100 + 800j,\n 175 + 700j,\n 325 + 700j,\n 400 + 800j)))\n\n path1 = parse_path('M600,200 C675,100 975,100 900,200')\n self.assertEqual(path1, Path(CubicBezier(600 + 200j,\n 675 + 100j,\n 975 + 100j,\n 900 + 200j)))\n\n path1 = parse_path('M600,500 C600,350 900,650 900,500')\n self.assertEqual(path1, Path(CubicBezier(600 + 500j,\n 600 + 350j,\n 900 + 650j,\n 900 + 500j)))\n\n path1 = parse_path(\"\"\"M600,800 C625,700 725,700 750,800\n S875,900 900,800\"\"\")\n self.assertEqual(path1, Path(CubicBezier(600 + 800j,\n 625 + 700j,\n 725 + 700j,\n 750 + 800j),\n CubicBezier(750 + 800j,\n 775 + 900j,\n 875 + 900j,\n 900 + 800j)))\n\n path1 = parse_path('M200,300 Q400,50 600,300 T1000,300')\n self.assertEqual(path1, Path(QuadraticBezier(200 + 300j,\n 400 + 50j,\n 600 + 300j),\n QuadraticBezier(600 + 300j,\n 800 + 550j,\n 1000 + 300j)))\n\n path1 = parse_path('M300,200 h-150 a150,150 0 1,0 150,-150 z')\n self.assertEqual(path1, Path(Line(300 + 200j, 150 + 200j),\n Arc(150 + 200j, 150 + 150j, 0, 1, 0, 300 + 50j),\n Line(300 + 50j, 300 + 200j)))\n\n path1 = parse_path('M275,175 v-150 a150,150 0 0,0 -150,150 z')\n self.assertEqual(path1,\n Path(Line(275 + 175j, 275 + 25j),\n Arc(275 + 25j, 150 + 150j, 0, 0, 0, 125 + 175j),\n Line(125 + 175j, 275 + 175j)))\n\n path1 = parse_path(\"\"\"M600,350 l 50,-25\n a25,25 -30 0,1 50,-25 l 50,-25\n a25,50 -30 0,1 50,-25 l 
50,-25\n a25,75 -30 0,1 50,-25 l 50,-25\n a25,100 -30 0,1 50,-25 l 50,-25\"\"\")\n self.assertEqual(path1,\n Path(Line(600 + 350j, 650 + 325j),\n Arc(650 + 325j, 25 + 25j, -30, 0, 1, 700 + 300j),\n Line(700 + 300j, 750 + 275j),\n Arc(750 + 275j, 25 + 50j, -30, 0, 1, 800 + 250j),\n Line(800 + 250j, 850 + 225j),\n Arc(850 + 225j, 25 + 75j, -30, 0, 1, 900 + 200j),\n Line(900 + 200j, 950 + 175j),\n Arc(950 + 175j, 25 + 100j, -30, 0, 1, 1000 + 150j),\n Line(1000 + 150j, 1050 + 125j)))\n\n def test_others(self):\n # Other paths that need testing:\n\n # Relative moveto:\n path1 = parse_path('M 0 0 L 50 20 m 50 80 L 300 100 L 200 300 z')\n self.assertEqual(path1, Path(Line(0 + 0j, 50 + 20j),\n Line(100 + 100j, 300 + 100j),\n Line(300 + 100j, 200 + 300j),\n Line(200 + 300j, 100 + 100j)))\n\n # Initial smooth and relative CubicBezier\n path1 = parse_path(\"\"\"M100,200 s 150,-100 150,0\"\"\")\n self.assertEqual(path1,\n Path(CubicBezier(100 + 200j,\n 100 + 200j,\n 250 + 100j,\n 250 + 200j)))\n\n # Initial smooth and relative QuadraticBezier\n path1 = parse_path(\"\"\"M100,200 t 150,0\"\"\")\n self.assertEqual(path1,\n Path(QuadraticBezier(100 + 200j,\n 100 + 200j,\n 250 + 200j)))\n\n # Relative QuadraticBezier\n path1 = parse_path(\"\"\"M100,200 q 0,0 150,0\"\"\")\n self.assertEqual(path1,\n Path(QuadraticBezier(100 + 200j,\n 100 + 200j,\n 250 + 200j)))\n\n def test_negative(self):\n \"\"\"You don't need spaces before a minus-sign\"\"\"\n path1 = parse_path('M100,200c10-5,20-10,30-20')\n path2 = parse_path('M 100 200 c 10 -5 20 -10 30 -20')\n self.assertEqual(path1, path2)\n\n def test_numbers(self):\n \"\"\"Exponents and other number format cases\"\"\"\n # It can be e or E, the plus is optional, and a minimum of\n # +/-3.4e38 must be supported.\n path1 = parse_path('M-3.4e38 3.4E+38L-3.4E-38,3.4e-38')\n path2 = Path(Line(-3.4e+38 + 3.4e+38j, -3.4e-38 + 3.4e-38j))\n self.assertEqual(path1, path2)\n\n def test_errors(self):\n self.assertRaises(ValueError, parse_path,\n 'M 100 100 L 200 200 Z 100 200')\n\n\n def test_transform(self):\n\n tf_matrix = svgpathtools.parser.parse_transform(\n 'matrix(1.0 2.0 3.0 4.0 5.0 6.0)')\n expected_tf_matrix = np.identity(3)\n expected_tf_matrix[0:2, 0:3] = np.array([[1.0, 3.0, 5.0],\n [2.0, 4.0, 6.0]])\n self.assertTrue(np.array_equal(expected_tf_matrix, tf_matrix))\n\n # Try a test with no y specified\n expected_tf_translate = np.identity(3)\n expected_tf_translate[0, 2] = -36\n self.assertTrue(np.array_equal(\n expected_tf_translate,\n svgpathtools.parser.parse_transform('translate(-36)')\n ))\n\n # Now specify y\n expected_tf_translate[1, 2] = 45.5\n tf_translate = svgpathtools.parser.parse_transform(\n 'translate(-36 45.5)')\n self.assertTrue(np.array_equal(expected_tf_translate, tf_translate))\n\n # Try a test with no y specified\n expected_tf_scale = np.identity(3)\n expected_tf_scale[0, 0] = 10\n expected_tf_scale[1, 1] = 10\n self.assertTrue(np.array_equal(\n expected_tf_scale,\n svgpathtools.parser.parse_transform('scale(10)')\n ))\n\n # Now specify y\n expected_tf_scale[1, 1] = 0.5\n tf_scale = svgpathtools.parser.parse_transform('scale(10 0.5)')\n self.assertTrue(np.array_equal(expected_tf_scale, tf_scale))\n\n tf_rotation = svgpathtools.parser.parse_transform('rotate(-10 50 100)')\n expected_tf_rotation = construct_rotation_tf(-10, 50, 100)\n self.assertTrue(np.array_equal(expected_tf_rotation, tf_rotation))\n\n # Try a test with no offset specified\n self.assertTrue(np.array_equal(\n construct_rotation_tf(50, 0, 0),\n 
svgpathtools.parser.parse_transform('rotate(50)')\n ))\n\n expected_tf_skewx = np.identity(3)\n expected_tf_skewx[0, 1] = np.tan(40.0 * np.pi/180.0)\n tf_skewx = svgpathtools.parser.parse_transform('skewX(40)')\n self.assertTrue(np.array_equal(expected_tf_skewx, tf_skewx))\n\n expected_tf_skewy = np.identity(3)\n expected_tf_skewy[1, 0] = np.tan(30.0 * np.pi / 180.0)\n tf_skewy = svgpathtools.parser.parse_transform('skewY(30)')\n self.assertTrue(np.array_equal(expected_tf_skewy, tf_skewy))\n\n self.assertTrue(np.array_equal(\n tf_rotation.dot(tf_translate).dot(tf_skewx).dot(tf_scale),\n svgpathtools.parser.parse_transform(\n \"\"\"rotate(-10 50 100)\n translate(-36 45.5)\n skewX(40)\n scale(10 0.5)\"\"\")\n ))\n\n def test_pathd_init(self):\n path0 = Path('')\n path1 = parse_path(\"M 100 100 L 300 100 L 200 300 z\")\n path2 = Path(\"M 100 100 L 300 100 L 200 300 z\")\n self.assertEqual(path1, path2)\n\n path1 = parse_path(\"m 100 100 L 300 100 L 200 300 z\", current_pos=50+50j)\n path2 = Path(\"m 100 100 L 300 100 L 200 300 z\")\n self.assertNotEqual(path1, path2)\n\n path1 = parse_path(\"m 100 100 L 300 100 L 200 300 z\")\n path2 = Path(\"m 100 100 L 300 100 L 200 300 z\", current_pos=50 + 50j)\n self.assertNotEqual(path1, path2)\n\n path1 = parse_path(\"m 100 100 L 300 100 L 200 300 z\", current_pos=50 + 50j)\n path2 = Path(\"m 100 100 L 300 100 L 200 300 z\", current_pos=50 + 50j)\n self.assertEqual(path1, path2)\n\n path1 = parse_path(\"m 100 100 L 300 100 L 200 300 z\", 50+50j)\n path2 = Path(\"m 100 100 L 300 100 L 200 300 z\")\n self.assertNotEqual(path1, path2)\n\n path1 = parse_path(\"m 100 100 L 300 100 L 200 300 z\")\n path2 = Path(\"m 100 100 L 300 100 L 200 300 z\", 50 + 50j)\n self.assertNotEqual(path1, path2)\n\n path1 = parse_path(\"m 100 100 L 300 100 L 200 300 z\", 50 + 50j)\n path2 = Path(\"m 100 100 L 300 100 L 200 300 z\", 50 + 50j)\n self.assertEqual(path1, path2)\n\n def test_issue_99(self):\n p = Path(\"M 100 250 S 200 200 200 250 300 300 300 250\")\n self.assertEqual(p.d(useSandT=True), 'M 100.0,250.0 S 200.0,200.0 200.0,250.0 S 300.0,300.0 300.0,250.0')\n self.assertEqual(p.d(),\n 'M 100.0,250.0 C 100.0,250.0 200.0,200.0 200.0,250.0 C 200.0,300.0 300.0,300.0 300.0,250.0')\n self.assertNotEqual(p.d(),\n 'M 100.0,250.0 C 100.0,250.0 200.0,200.0 200.0,250.0 C 200.0,250.0 300.0,300.0 300.0,250.0')\n"
] | [
[
"numpy.array_equal",
"numpy.cos",
"numpy.sin",
"numpy.tan",
"numpy.identity",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gafzan/AlgorithmicTradingStrategies | [
"80e7f97bf75b22de52130de5ad247342488b27f4"
] | [
"database/bloomberg.py"
] | [
"\"\"\"\nbloomberg.py\n\"\"\"\ntry:\n import pdblp\nexcept ModuleNotFoundError:\n print(\"No Bloomberg acces found (when importing 'pdblp')\")\n\ntry:\n from xbbg import blp\nexcept ModuleNotFoundError:\n print(\"No Bloomberg acces found (when importing 'xbbg')\")\n\nimport pandas as pd\nfrom datetime import datetime\nimport logging\n\n# my modules\nfrom general_tools import list_grouper, progression_bar\nfrom excel_tools import save_df\nfrom database.config_database import __TICKER_ELIGIBILITY_FOLDER__\n\n# logger\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s : %(module)s : %(funcName)s : %(message)s')\nstream_handler = logging.StreamHandler()\nstream_handler.setFormatter(formatter)\nlogger.addHandler(stream_handler)\n\n\nclass BloombergConnection:\n\n batch_size = 100\n\n def __init__(self, use_debug: bool=True):\n self.con = pdblp.BCon(debug=use_debug, port=8194, timeout=10000)\n self.con.start()\n self.default_start_date = '19500101'\n\n def reconnect(self):\n logger.warning('Reconnecting to Bloomberg due to run time error.')\n self.con.start()\n\n def get_last_price(self, tickers: {str, list}, start_date: datetime = None, end_date: datetime = None):\n \"\"\"\n Get a DataFrame with close prices (PX_LAST) for the given ticker(s) between the given dates.\n :param tickers: can be a string or list of strings\n :param start_date: datetime.datetime. If None, the start date will be set to a default date (e.g. 1 jan 1950)\n :param end_date: datetime.datetime. If None, the end date will be set to today\n :return: DataFrame\n \"\"\"\n last_price_bbg_df = self._get_daily_data(tickers, 'PX_LAST', start_date, end_date)\n return last_price_bbg_df\n\n def get_volume(self, tickers: {str, list}, start_date: datetime = None, end_date: datetime = None):\n \"\"\"\n Get a DataFrame with volumes (PX_VOLUME) for the given ticker(s) between the given dates.\n :param tickers: can be a string or list of strings\n :param start_date: datetime.datetime. If None, the start date will be set to a default date (e.g. 1 jan 1950)\n :param end_date: datetime.datetime. If None, the end date will be set to today\n :return: DataFrame\n \"\"\"\n last_price_bbg_df = self._get_daily_data(tickers, 'PX_VOLUME', start_date, end_date)\n return last_price_bbg_df\n\n def get_daily_data(self, tickers: {str, list}, field: {str, list}, start_date: datetime = None, end_date: datetime = None):\n \"\"\"\n Get a containing data for the given field(s) and ticker(s) between the given dates.\n :param tickers: can be a string or list of strings\n :param start_date: datetime.datetime. If None, the start date will be set to a default date (e.g. 1 jan 1950)\n :param end_date: datetime.datetime. If None, the end date will be set to today\n :return: DataFrame\n \"\"\"\n last_price_bbg_df = self._get_daily_data(tickers, field, start_date, end_date)\n return last_price_bbg_df\n\n def _get_daily_data(self, tickers: {str, list}, field: {str, list}, start_date: datetime = None, end_date: datetime = None):\n \"\"\"\n Get a containing data for the given field(s) and ticker(s) between the given dates.\n :param tickers: can be a string or list of strings\n :param start_date: datetime.datetime. If None, the start date will be set to a default date (e.g. 1 jan 1950)\n :param end_date: datetime.datetime. 
If None, the end date will be set to today\n :return: DataFrame\n \"\"\"\n # adjust inputs\n if isinstance(tickers, str):\n tickers = [tickers]\n tickers = self.add_bbg_ticker_suffix(tickers)\n if end_date is None:\n end_date = datetime.today()\n\n # logger\n if type(field) == str:\n logger.debug('Downloading {} data from Bloomberg.'.format(field.upper().replace(' ', '_')))\n else:\n field = [fld.upper().replace(' ', '_') for fld in field]\n logger.debug('Downloading %s data from Bloomberg.' % ', '.join(field))\n\n # loop through the batches of tickers\n daily_data_bbg_df = None\n ticker_batches = list_grouper(tickers, self.batch_size)\n counter = 1\n for ticker_sub_list in ticker_batches:\n progression_bar(counter, len(ticker_batches))\n\n # get the data from Bloomberg\n while True:\n try:\n sub_daily_data_bbg_df = self.con.bdh(ticker_sub_list, field, self.bbg_date(start_date), self.bbg_date(end_date))\n break\n except RuntimeError:\n self.reconnect()\n sub_daily_data_bbg_df = self.con.bdh(ticker_sub_list, field, self.bbg_date(start_date), self.bbg_date(end_date))\n\n if daily_data_bbg_df is None:\n daily_data_bbg_df = sub_daily_data_bbg_df\n else:\n daily_data_bbg_df = pd.concat([daily_data_bbg_df, sub_daily_data_bbg_df], join='outer', axis=1)\n counter += 1\n if isinstance(field, str):\n daily_data_bbg_df.columns = daily_data_bbg_df.columns.droplevel(1) # remove the field name\n return daily_data_bbg_df\n\n def get_dividend_data(self, tickers: {str, list}, start_date: datetime = None, end_date: datetime = None, do_pivot: bool = True):\n \"\"\"\n Get a DataFrame containing dividend amount for the given tickers between the given dates (ex-dividend dates)\n :param tickers: can be a string or list of strings\n :param start_date: datetime.datetime. If None, the start date will be set to a default date (e.g. 1 jan 1950)\n :param end_date: datetime.datetime. 
If None, the end date will be set to today\n :param do_pivot: if true, DataFRame is pivoted: index = ex-dividend date, values = dividend amount, columns = tickers\n :return: DataFrame\n \"\"\"\n # adjust inputs\n if isinstance(tickers, str):\n tickers = [tickers]\n tickers = self.add_bbg_ticker_suffix(tickers)\n if end_date is None:\n end_date = datetime.today()\n\n logger.debug('Downloading dividend data from Bloomberg.')\n\n # loop through the batches of tickers\n dividend_bbg_df = None\n ticker_batches = list_grouper(tickers, self.batch_size)\n counter = 1\n for ticker_sub_list in ticker_batches:\n progression_bar(counter, len(ticker_batches))\n # get the data from Bloomberg\n while True:\n try:\n sub_dividend_bbg_df = blp.bds(ticker_sub_list, 'DVD_HIST', 'Header=Y')\n break\n except RuntimeError:\n self.reconnect()\n sub_dividend_bbg_df = blp.bds(ticker_sub_list, 'DVD_HIST', 'Header=Y')\n try:\n sub_dividend_bbg_df = sub_dividend_bbg_df[['ex_date', 'dividend_amount']].copy()\n except KeyError: # in case there are no dividends\n pass\n else:\n sub_dividend_bbg_df.reset_index(inplace=True)\n if dividend_bbg_df is None:\n dividend_bbg_df = sub_dividend_bbg_df\n else:\n dividend_bbg_df = pd.concat([dividend_bbg_df, sub_dividend_bbg_df], ignore_index=True, sort=False)\n counter += 1\n if dividend_bbg_df is None:\n return\n\n # only include eligible ex dividend dates\n dividend_bbg_df = self._adjust_dividend_df(tickers, dividend_bbg_df, start_date, end_date, do_pivot)\n return dividend_bbg_df\n\n @staticmethod\n def _adjust_dividend_df(tickers: list, dividend_df: pd.DataFrame, start_date: {datetime, None}, end_date: {datetime, None}, do_pivot: bool):\n \"\"\"\n Adjusts and re-formats when applicable a DataFrame with dividend data\n :param tickers: can be a string or list of strings\n :param dividend_df: DataFrame with dividend data\n :param start_date: If None, the start date will be set to a default date (e.g. 1 jan 1950)\n :param end_date: If None, the end date will be set to today\n :param do_pivot: If true, DataFRame is pivoted: index = ex-dividend date, values = dividend amount, columns = tickers\n :return: DataFrame\n \"\"\"\n # only include eligible ex dividend dates\n dividend_df.sort_values(by=['ex_date'], inplace=True) # sort the dates\n dividend_df['ex_date'] = pd.to_datetime(dividend_df['ex_date']) # convert the dates to datetime\n if start_date is not None:\n dividend_df = dividend_df[dividend_df['ex_date'] >= start_date]\n if end_date is not None:\n dividend_df = dividend_df[dividend_df['ex_date'] <= end_date]\n\n # for each ticker sum dividend payments on same ex-dates\n dividend_df = dividend_df.groupby(by=['ticker', 'ex_date'])['dividend_amount'].sum()\n dividend_df = dividend_df.reset_index()\n\n if do_pivot:\n dividend_df = pd.pivot_table(dividend_df, values='dividend_amount', index='ex_date', columns='ticker')\n # add back ticker(s) that does not pay any dividends\n missing_tickers = list(set(tickers).difference(list(dividend_df)))\n dividend_df = dividend_df.reindex(columns=list(dividend_df) + missing_tickers)\n dividend_df = dividend_df[tickers]\n return dividend_df\n\n def get_underlying_information(self, tickers: {str, list}, field: {str, list}):\n \"\"\"\n Returns a DataFrame with underlying information e.g. sector names for the given ticker(s)\n :param tickers: van be a string or list of strings\n :param field: non-case sensitive string (e.g. 
GICS_SECTOR_NAME)\n :return: DataFrame\n \"\"\"\n if isinstance(tickers, str):\n tickers = [tickers]\n tickers = self.add_bbg_ticker_suffix(tickers)\n if isinstance(field, str):\n field = [field]\n field = [fld.upper().replace(' ', '_') for fld in field] # capital cases and replace blanks with underscore\n\n while True:\n try:\n underlying_data_bbg_df = self.con.ref(tickers, field)\n break\n except RuntimeError:\n self.reconnect()\n underlying_data_bbg_df = self.con.ref(tickers, field)\n\n underlying_data_bbg_pivot_df = pd.pivot_table(underlying_data_bbg_df, values='value', index='ticker',\n columns='field', aggfunc=lambda x: ' '.join(str(v) for v in x))\n return underlying_data_bbg_pivot_df\n\n def get_index_members(self, index_ticker: str, observation_date: {datetime, list}=None,\n add_bbg_suffix: bool = True) -> {list, dict}:\n \"\"\"\n Returns a list or a dictionary (key = datetime, values = list of tickers) of index members for the given ticker\n and the observation date(s). If Observation dates are not given, script returns the index members as of today.\n :param index_ticker: string\n :param observation_date: datetime or list of datetime\n :param add_bbg_suffix: bool\n :return: list of strings or dictionary\n \"\"\"\n # handle various observation date inputs\n if observation_date is None:\n observation_date = datetime.today()\n elif isinstance(observation_date, pd.DatetimeIndex):\n observation_date = [obs_date for obs_date in observation_date]\n if type(observation_date) != list:\n return_dict = False\n observation_date = [observation_date]\n else:\n return_dict = True\n result_dict = {}\n for obs_date in observation_date:\n logger.debug('Donwloading historical index members of {} observed at {} from Bloomberg.'.format(index_ticker.upper(), obs_date))\n bbg_obs_date = self.bbg_date(obs_date)\n bulk_data_bbg = self.con.bulkref(index_ticker, 'INDX_MWEIGHT_HIST', [('END_DATE_OVERRIDE', bbg_obs_date)])\n index_members = bulk_data_bbg[bulk_data_bbg['name'] == 'Index Member']['value'].values\n if add_bbg_suffix:\n index_members = self.add_bbg_ticker_suffix(index_members)\n result_dict.update({obs_date: index_members})\n if return_dict:\n return result_dict\n else:\n return result_dict[observation_date[0]]\n\n def get_index_inclusion_df(self, index_ticker: str, observation_calendar: pd.DatetimeIndex,\n add_bbg_suffix: bool = True) -> pd.DataFrame:\n \"\"\"\n Returns a DataFrame with tickers in alphabetic order as column headers and observation dates as index. 
Vaue is 1\n if the ticker is included in the index for that particular observation date, else 0.\n :param index_ticker: str\n :param observation_calendar: pd.DatetimeIndex\n :param add_bbg_suffix: bool\n :return: pd.DataFrame\n \"\"\"\n obs_date_ticker_list_dict = self.get_index_members(index_ticker, observation_calendar, add_bbg_suffix)\n \n # get all the unique tickers and sort in alphabetic order\n tickers = []\n for ticker_list in obs_date_ticker_list_dict.values():\n tickers.extend(ticker_list)\n tickers = list(set(tickers))\n tickers.sort()\n \n # loop through all observation dates and set value to 1 if ticker (column) exists in the index, else 0.\n result_df = pd.DataFrame(columns=tickers)\n for obs_date in observation_calendar:\n tickers_in_index = obs_date_ticker_list_dict[obs_date]\n ticker_inclusivity = [1 if ticker in tickers_in_index else 0 for ticker in tickers]\n result_df.loc[obs_date] = ticker_inclusivity\n\n # remove the first rows where all columns are zero\n cum_num_tickers = result_df.sum(axis=1).cumsum()\n eligible_index = cum_num_tickers[cum_num_tickers > 0].index\n return result_df.loc[eligible_index, :]\n\n def get_futures_chain(self, generic_futures_index_ticker: str):\n \"\"\"\n Returns a list of tickers of underlying futures contracts that are part of a generic futures index\n :param generic_futures_index_ticker: string\n :return: list of strings\n \"\"\"\n bulk_data_bbg = self.con.bulkref(generic_futures_index_ticker, 'FUT_CHAIN_LAST_TRADE_DATES',\n [('INCLUDE_EXPIRED_CONTRACTS', 'Y')])\n bulk_data_bbg = bulk_data_bbg[bulk_data_bbg['name'] == \"Future's Ticker\"]\n futures_tickers = list(bulk_data_bbg['value'].values)\n return futures_tickers\n\n # TODO method to load fundamental data from companies\n def get_fundamental_data(self):\n pass\n\n def bbg_date(self, input_date) -> str:\n \"\"\"\n Converts the date to Bloomberg format: 'YYYYMMDD'\n \"\"\"\n if input_date is None:\n return self.default_start_date\n input_date_str = str(input_date)\n day = input_date_str[8:10]\n month = input_date_str[5:7]\n year = input_date_str[:4]\n return year + month + day\n\n def bbg_today(self):\n return self.bbg_date(datetime.today())\n\n @staticmethod\n def convert_bbg_date_to_date(bbg_date: str)->datetime:\n \"\"\"\n Converts the Bloomberg date (string) to datetime format\n :param bbg_date: str\n :return: datetime\n \"\"\"\n day = bbg_date[6:8]\n month = bbg_date[4:6]\n year = bbg_date[:4]\n return datetime(int(year), int(month), int(day))\n\n @staticmethod\n def add_bbg_ticker_suffix(tickers: {str, list}, suffix: str = 'EQUITY') -> list:\n \"\"\"\n Adds a suffix to all the tickers in the ticker list if applicable\n :param tickers: list of strings\n :param suffix: sub-string to be added to the original ticker\n :return: list of strings\n \"\"\"\n return BloombergConnection._adjust_bbg_tickers(tickers, suffix, True)\n\n @staticmethod\n def remove_bbg_ticker_suffix(tickers: {str, list}, suffix: str = 'EQUITY')->list:\n \"\"\"\n Removes a suffix from all the tickers in the ticker list if applicable\n :param tickers: list of strings\n :param suffix: sub-string to be removed from the original ticker\n :return: list of strings\n \"\"\"\n return BloombergConnection._adjust_bbg_tickers(tickers, suffix, False)\n\n @staticmethod\n def _adjust_bbg_tickers(tickers: {str, list}, suffix: str, add_suffix: bool) -> list:\n \"\"\"\n Removes (adds) a given suffix from (to) all the tickers in the ticker list if applicable\n :param tickers: list of strings\n :param suffix: sub-string to be 
removed from the original ticker\n :param add_suffix: if true, add suffix else remove\n :return: list of strings\n \"\"\"\n is_string = type(tickers) == str\n if is_string:\n tickers = [tickers]\n adj_ticker_list = []\n suffix = suffix.upper()\n for ticker in tickers:\n ticker = ticker.upper()\n if add_suffix:\n has_suffix = ticker.endswith('EQUITY') or ticker.endswith('INDEX') or ticker.endswith(\n 'COMDTY') or ticker.endswith(suffix)\n if not has_suffix:\n ticker += ' ' + suffix\n else:\n if ticker.endswith(suffix):\n ticker = ticker.replace(' ' + suffix, '')\n adj_ticker_list.append(ticker)\n if is_string:\n return adj_ticker_list[0]\n else:\n return adj_ticker_list\n\n\ndef save_index_membership_df_to_excel():\n # input parameters\n index_name = input(\"Enter a name of an index (e.g. 'SPX'): \")\n if 'index' not in index_name:\n index_name += ' index'\n obs_freq = input(\"Enter the frequency for when you observe the members of '{}' (e.g. '3M')\".format(index_name.upper()))\n obs_calendar = pd.date_range(start='2000', end=datetime.today(), freq=obs_freq)\n\n # connect to Bloomberg and load a DataFrame that has value 1 if the ticker is included in the index at the\n # particular observation date\n bbg_con = BloombergConnection(use_debug=False)\n member_df = bbg_con.get_index_inclusion_df(index_ticker=index_name, observation_calendar=obs_calendar)\n full_path = __TICKER_ELIGIBILITY_FOLDER__ + '\\\\' + index_name.lower().replace(' ', '_') + '_member_' \\\n + obs_freq + '_observation_' + str(datetime.today())[:10] + '.xlsx'\n tickers_df = pd.DataFrame({'tickers': list(member_df)})\n save_df(df_list=[member_df, tickers_df], full_path=full_path, sheet_name_list=['index_members', 'tickers'])\n\n\nif __name__ == '__main__':\n save_index_membership_df_to_excel()\n\n"
] | [
[
"pandas.concat",
"pandas.to_datetime",
"pandas.DataFrame",
"pandas.pivot_table"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
snwnde/yasa | [
"71c0a8245c61b328a82ab1197c34b673333120a3"
] | [
"yasa/spectral.py"
] | [
"\"\"\"\nThis file contains several helper functions to calculate spectral power from\n1D and 2D EEG data.\n\"\"\"\nimport mne\nimport logging\nimport numpy as np\nimport pandas as pd\nfrom scipy import signal\nfrom scipy.integrate import simps\nfrom scipy.interpolate import RectBivariateSpline\nfrom .io import set_log_level\n\nlogger = logging.getLogger('yasa')\n\n__all__ = ['bandpower', 'bandpower_from_psd', 'bandpower_from_psd_ndarray',\n 'irasa', 'stft_power']\n\n\ndef bandpower(data, sf=None, ch_names=None, hypno=None, include=(2, 3),\n win_sec=4, relative=True, bandpass=False,\n bands=[(0.5, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),\n (12, 16, 'Sigma'), (16, 30, 'Beta'), (30, 40, 'Gamma')],\n kwargs_welch=dict(average='median', window='hamming')):\n \"\"\"\n Calculate the Welch bandpower for each channel and, if specified, for each sleep stage.\n\n .. versionadded:: 0.1.6\n\n Parameters\n ----------\n data : np.array_like or :py:class:`mne.io.BaseRaw`\n 1D or 2D EEG data. Can also be a :py:class:`mne.io.BaseRaw`, in which case ``data``,\n ``sf``, and ``ch_names`` will be automatically extracted, and ``data`` will also be\n converted from Volts (MNE default) to micro-Volts (YASA).\n sf : float\n The sampling frequency of data AND the hypnogram. Can be omitted if ``data`` is a\n :py:class:`mne.io.BaseRaw`.\n ch_names : list\n List of channel names, e.g. ['Cz', 'F3', 'F4', ...]. If None, channels will be labelled\n ['CHAN000', 'CHAN001', ...]. Can be omitted if ``data`` is a :py:class:`mne.io.BaseRaw`.\n hypno : array_like\n Sleep stage (hypnogram). If the hypnogram is loaded, the bandpower will be extracted for\n each sleep stage defined in ``include``.\n\n The hypnogram must have the exact same number of samples as ``data``. To upsample your\n hypnogram, please refer to :py:func:`yasa.hypno_upsample_to_data`.\n\n .. note::\n The default hypnogram format in YASA is a 1D integer vector where:\n\n - -2 = Unscored\n - -1 = Artefact / Movement\n - 0 = Wake\n - 1 = N1 sleep\n - 2 = N2 sleep\n - 3 = N3 sleep\n - 4 = REM sleep\n include : tuple, list or int\n Values in ``hypno`` that will be included in the mask. The default is (2, 3), meaning that\n the bandpower are sequentially calculated for N2 and N3 sleep. This has no effect when\n ``hypno`` is None.\n win_sec : int or float\n The length of the sliding window, in seconds, used for the Welch PSD calculation.\n Ideally, this should be at least two times the inverse of the lower frequency of\n interest (e.g. for a lower frequency of interest of 0.5 Hz, the window length should\n be at least 2 * 1 / 0.5 = 4 seconds).\n relative : boolean\n If True, bandpower is divided by the total power between the min and max frequencies\n defined in ``band``.\n bandpass : boolean\n If True, apply a standard FIR bandpass filter using the minimum and maximum frequencies\n in ``bands``. Fore more details, refer to :py:func:`mne.filter.filter_data`.\n bands : list of tuples\n List of frequency bands of interests. Each tuple must contain the lower and upper\n frequencies, as well as the band name (e.g. 
(0.5, 4, 'Delta')).\n kwargs_welch : dict\n Optional keywords arguments that are passed to the :py:func:`scipy.signal.welch` function.\n\n Returns\n -------\n bandpowers : :py:class:`pandas.DataFrame`\n Bandpower dataframe, in which each row is a channel and each column a spectral band.\n\n Notes\n -----\n For an example of how to use this function, please refer to\n https://github.com/raphaelvallat/yasa/blob/master/notebooks/08_bandpower.ipynb\n \"\"\"\n # Type checks\n assert isinstance(bands, list), 'bands must be a list of tuple(s)'\n assert isinstance(relative, bool), 'relative must be a boolean'\n assert isinstance(bandpass, bool), 'bandpass must be a boolean'\n\n # Check if input data is a MNE Raw object\n if isinstance(data, mne.io.BaseRaw):\n sf = data.info['sfreq'] # Extract sampling frequency\n ch_names = data.ch_names # Extract channel names\n data = data.get_data() * 1e6 # Convert from V to uV\n _, npts = data.shape\n else:\n # Safety checks\n assert isinstance(data, np.ndarray), 'Data must be a numpy array.'\n data = np.atleast_2d(data)\n assert data.ndim == 2, 'Data must be of shape (nchan, n_samples).'\n nchan, npts = data.shape\n # assert nchan < npts, 'Data must be of shape (nchan, n_samples).'\n assert sf is not None, 'sf must be specified if passing a numpy array.'\n assert isinstance(sf, (int, float))\n if ch_names is None:\n ch_names = ['CHAN' + str(i).zfill(3) for i in range(nchan)]\n else:\n ch_names = np.atleast_1d(np.asarray(ch_names, dtype=str))\n assert ch_names.ndim == 1, 'ch_names must be 1D.'\n assert len(ch_names) == nchan, 'ch_names must match data.shape[0].'\n\n if bandpass:\n # Apply FIR bandpass filter\n all_freqs = np.hstack([[b[0], b[1]] for b in bands])\n fmin, fmax = min(all_freqs), max(all_freqs)\n data = mne.filter.filter_data(data.astype('float64'), sf, fmin, fmax, verbose=0)\n\n win = int(win_sec * sf) # nperseg\n\n if hypno is None:\n # Calculate the PSD over the whole data\n freqs, psd = signal.welch(data, sf, nperseg=win, **kwargs_welch)\n return bandpower_from_psd(\n psd, freqs, ch_names, bands=bands, relative=relative).set_index('Chan')\n else:\n # Per each sleep stage defined in ``include``.\n hypno = np.asarray(hypno)\n assert include is not None, 'include cannot be None if hypno is given'\n include = np.atleast_1d(np.asarray(include))\n assert hypno.ndim == 1, 'Hypno must be a 1D array.'\n assert hypno.size == npts, 'Hypno must have same size as data.shape[1]'\n assert include.size >= 1, '`include` must have at least one element.'\n assert hypno.dtype.kind == include.dtype.kind, 'hypno and include must have same dtype'\n assert np.in1d(hypno, include).any(), (\n 'None of the stages specified in `include` are present in hypno.')\n # Initialize empty dataframe and loop over stages\n df_bp = pd.DataFrame([])\n for stage in include:\n if stage not in hypno:\n continue\n data_stage = data[:, hypno == stage]\n freqs, psd = signal.welch(data_stage, sf, nperseg=win,\n **kwargs_welch)\n bp_stage = bandpower_from_psd(psd, freqs, ch_names, bands=bands,\n relative=relative)\n bp_stage['Stage'] = stage\n df_bp = pd.concat([df_bp, bp_stage], axis=0)\n return df_bp.set_index(['Stage', 'Chan'])\n\n\ndef bandpower_from_psd(psd, freqs, ch_names=None, bands=[(0.5, 4, 'Delta'),\n (4, 8, 'Theta'), (8, 12, 'Alpha'), (12, 16, 'Sigma'),\n (16, 30, 'Beta'), (30, 40, 'Gamma')], relative=True):\n \"\"\"Compute the average power of the EEG in specified frequency band(s)\n given a pre-computed PSD.\n\n .. 
versionadded:: 0.1.5\n\n Parameters\n ----------\n psd : array_like\n Power spectral density of data, in uV^2/Hz. Must be of shape (n_channels, n_freqs).\n See :py:func:`scipy.signal.welch` for more details.\n freqs : array_like\n Array of frequencies.\n ch_names : list\n List of channel names, e.g. ['Cz', 'F3', 'F4', ...]. If None, channels will be labelled\n ['CHAN000', 'CHAN001', ...].\n bands : list of tuples\n List of frequency bands of interests. Each tuple must contain the lower and upper\n frequencies, as well as the band name (e.g. (0.5, 4, 'Delta')).\n relative : boolean\n If True, bandpower is divided by the total power between the min and\n max frequencies defined in ``band`` (default 0.5 to 40 Hz).\n\n Returns\n -------\n bandpowers : :py:class:`pandas.DataFrame`\n Bandpower dataframe, in which each row is a channel and each column a spectral band.\n \"\"\"\n # Type checks\n assert isinstance(bands, list), 'bands must be a list of tuple(s)'\n assert isinstance(relative, bool), 'relative must be a boolean'\n\n # Safety checks\n freqs = np.asarray(freqs)\n assert freqs.ndim == 1\n psd = np.atleast_2d(psd)\n assert psd.ndim == 2, 'PSD must be of shape (n_channels, n_freqs).'\n all_freqs = np.hstack([[b[0], b[1]] for b in bands])\n fmin, fmax = min(all_freqs), max(all_freqs)\n idx_good_freq = np.logical_and(freqs >= fmin, freqs <= fmax)\n freqs = freqs[idx_good_freq]\n res = freqs[1] - freqs[0]\n nchan = psd.shape[0]\n assert nchan < psd.shape[1], 'PSD must be of shape (n_channels, n_freqs).'\n if ch_names is not None:\n ch_names = np.atleast_1d(np.asarray(ch_names, dtype=str))\n assert ch_names.ndim == 1, 'ch_names must be 1D.'\n assert len(ch_names) == nchan, 'ch_names must match psd.shape[0].'\n else:\n ch_names = ['CHAN' + str(i).zfill(3) for i in range(nchan)]\n bp = np.zeros((nchan, len(bands)), dtype=np.float64)\n psd = psd[:, idx_good_freq]\n total_power = simps(psd, dx=res)\n total_power = total_power[..., np.newaxis]\n\n # Check if there are negative values in PSD\n if (psd < 0).any():\n msg = (\n \"There are negative values in PSD. This will result in incorrect \"\n \"bandpower values. We highly recommend working with an \"\n \"all-positive PSD. For more details, please refer to: \"\n \"https://github.com/raphaelvallat/yasa/issues/29\")\n logger.warning(msg)\n\n # Enumerate over the frequency bands\n labels = []\n for i, band in enumerate(bands):\n b0, b1, la = band\n labels.append(la)\n idx_band = np.logical_and(freqs >= b0, freqs <= b1)\n bp[:, i] = simps(psd[:, idx_band], dx=res)\n\n if relative:\n bp /= total_power\n\n # Convert to DataFrame\n bp = pd.DataFrame(bp, columns=labels)\n bp['TotalAbsPow'] = np.squeeze(total_power)\n bp['FreqRes'] = res\n # bp['WindowSec'] = 1 / res\n bp['Relative'] = relative\n bp['Chan'] = ch_names\n bp = bp.set_index('Chan').reset_index()\n # Add hidden attributes\n bp.bands_ = str(bands)\n return bp\n\n\ndef bandpower_from_psd_ndarray(psd, freqs, bands=[(0.5, 4, 'Delta'),\n (4, 8, 'Theta'), (8, 12, 'Alpha'),\n (12, 16, 'Sigma'), (16, 30, 'Beta'),\n (30, 40, 'Gamma')], relative=True):\n \"\"\"Compute bandpowers in N-dimensional PSD.\n\n This is a NumPy-only implementation of the :py:func:`yasa.bandpower_from_psd` function,\n which supports 1-D arrays of shape (n_freqs), or N-dimensional arays (e.g. 2-D (n_chan,\n n_freqs) or 3-D (n_chan, n_epochs, n_freqs))\n\n .. versionadded:: 0.2.0\n\n Parameters\n ----------\n psd : :py:class:`numpy.ndarray`\n Power spectral density of data, in uV^2/Hz. 
Must be a N-D array of shape (..., n_freqs).\n See :py:func:`scipy.signal.welch` for more details.\n freqs : :py:class:`numpy.ndarray`\n Array of frequencies. Must be a 1-D array of shape (n_freqs,)\n bands : list of tuples\n List of frequency bands of interests. Each tuple must contain the lower and upper\n frequencies, as well as the band name (e.g. (0.5, 4, 'Delta')).\n relative : boolean\n If True, bandpower is divided by the total power between the min and\n max frequencies defined in ``band`` (default 0.5 to 40 Hz).\n\n Returns\n -------\n bandpowers : :py:class:`numpy.ndarray`\n Bandpower array of shape *(n_bands, ...)*.\n \"\"\"\n # Type checks\n assert isinstance(bands, list), 'bands must be a list of tuple(s)'\n assert isinstance(relative, bool), 'relative must be a boolean'\n\n # Safety checks\n freqs = np.asarray(freqs)\n psd = np.asarray(psd)\n assert freqs.ndim == 1, 'freqs must be a 1-D array of shape (n_freqs,)'\n assert psd.shape[-1] == freqs.shape[-1], 'n_freqs must be last axis of psd'\n\n # Extract frequencies of interest\n all_freqs = np.hstack([[b[0], b[1]] for b in bands])\n fmin, fmax = min(all_freqs), max(all_freqs)\n idx_good_freq = np.logical_and(freqs >= fmin, freqs <= fmax)\n freqs = freqs[idx_good_freq]\n res = freqs[1] - freqs[0]\n\n # Trim PSD to frequencies of interest\n psd = psd[..., idx_good_freq]\n\n # Check if there are negative values in PSD\n if (psd < 0).any():\n msg = (\n \"There are negative values in PSD. This will result in incorrect \"\n \"bandpower values. We highly recommend working with an \"\n \"all-positive PSD. For more details, please refer to: \"\n \"https://github.com/raphaelvallat/yasa/issues/29\")\n logger.warning(msg)\n\n # Calculate total power\n total_power = simps(psd, dx=res, axis=-1)\n total_power = total_power[np.newaxis, ...]\n\n # Initialize empty array\n bp = np.zeros((len(bands), *psd.shape[:-1]), dtype=np.float64)\n\n # Enumerate over the frequency bands\n labels = []\n for i, band in enumerate(bands):\n b0, b1, la = band\n labels.append(la)\n idx_band = np.logical_and(freqs >= b0, freqs <= b1)\n bp[i] = simps(psd[..., idx_band], dx=res, axis=-1)\n\n if relative:\n bp /= total_power\n return bp\n\n\ndef irasa(data, sf=None, ch_names=None, band=(1, 30),\n hset=[1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6,\n 1.65, 1.7, 1.75, 1.8, 1.85, 1.9], return_fit=True, win_sec=4,\n kwargs_welch=dict(average='median', window='hamming'),\n verbose=True):\n r\"\"\"\n Separate the aperiodic (= fractal, or 1/f) and oscillatory component\n of the power spectra of EEG data using the IRASA method.\n\n .. versionadded:: 0.1.7\n\n Parameters\n ----------\n data : :py:class:`numpy.ndarray` or :py:class:`mne.io.BaseRaw`\n 1D or 2D EEG data. Can also be a :py:class:`mne.io.BaseRaw`, in which\n case ``data``, ``sf``, and ``ch_names`` will be automatically\n extracted, and ``data`` will also be converted from Volts (MNE default)\n to micro-Volts (YASA).\n sf : float\n The sampling frequency of data AND the hypnogram.\n Can be omitted if ``data`` is a :py:class:`mne.io.BaseRaw`.\n ch_names : list\n List of channel names, e.g. ['Cz', 'F3', 'F4', ...]. If None,\n channels will be labelled ['CHAN000', 'CHAN001', ...].\n Can be omitted if ``data`` is a :py:class:`mne.io.BaseRaw`.\n band : tuple or None\n Broad band frequency range.\n Default is 1 to 30 Hz.\n hset : list or :py:class:`numpy.ndarray`\n Resampling factors used in IRASA calculation. 
Default is to use a range\n of values from 1.1 to 1.9 with an increment of 0.05.\n return_fit : boolean\n If True (default), fit an exponential function to the aperiodic PSD\n and return the fit parameters (intercept, slope) and :math:`R^2` of\n the fit.\n\n The aperiodic signal, :math:`L`, is modeled using an exponential\n function in semilog-power space (linear frequencies and log PSD) as:\n\n .. math:: L = a + \\text{log}(F^b)\n\n where :math:`a` is the intercept, :math:`b` is the slope, and\n :math:`F` the vector of input frequencies.\n win_sec : int or float\n The length of the sliding window, in seconds, used for the Welch PSD\n calculation. Ideally, this should be at least two times the inverse of\n the lower frequency of interest (e.g. for a lower frequency of interest\n of 0.5 Hz, the window length should be at least 2 * 1 / 0.5 =\n 4 seconds).\n kwargs_welch : dict\n Optional keywords arguments that are passed to the\n :py:func:`scipy.signal.welch` function.\n verbose : bool or str\n Verbose level. Default (False) will only print warning and error\n messages. The logging levels are 'debug', 'info', 'warning', 'error',\n and 'critical'. For most users the choice is between 'info'\n (or ``verbose=True``) and warning (``verbose=False``).\n\n Returns\n -------\n freqs : :py:class:`numpy.ndarray`\n Frequency vector.\n psd_aperiodic : :py:class:`numpy.ndarray`\n The fractal (= aperiodic) component of the PSD.\n psd_oscillatory : :py:class:`numpy.ndarray`\n The oscillatory (= periodic) component of the PSD.\n fit_params : :py:class:`pandas.DataFrame` (optional)\n Dataframe of fit parameters. Only if ``return_fit=True``.\n\n Notes\n -----\n The Irregular-Resampling Auto-Spectral Analysis (IRASA) method is\n described in Wen & Liu (2016). In a nutshell, the goal is to separate the\n fractal and oscillatory components in the power spectrum of EEG signals.\n\n The steps are:\n\n 1. Compute the original power spectral density (PSD) using Welch's method.\n 2. Resample the EEG data by multiple non-integer factors and their\n reciprocals (:math:`h` and :math:`1/h`).\n 3. For every pair of resampled signals, calculate the PSD and take the\n geometric mean of both. In the resulting PSD, the power associated with\n the oscillatory component is redistributed away from its original\n (fundamental and harmonic) frequencies by a frequency offset that varies\n with the resampling factor, whereas the power solely attributed to the\n fractal component remains the same power-law statistical distribution\n independent of the resampling factor.\n 4. It follows that taking the median of the PSD of the variously\n resampled signals can extract the power spectrum of the fractal\n component, and the difference between the original power spectrum and\n the extracted fractal spectrum offers an approximate estimate of the\n power spectrum of the oscillatory component.\n\n Note that an estimate of the original PSD can be calculated by simply\n adding ``psd = psd_aperiodic + psd_oscillatory``.\n\n For an example of how to use this function, please refer to\n https://github.com/raphaelvallat/yasa/blob/master/notebooks/09_IRASA.ipynb\n\n For an article discussing the challenges of using IRASA (or fooof) see [5].\n\n References\n ----------\n [1] Wen, H., & Liu, Z. (2016). 
Separating Fractal and Oscillatory\n Components in the Power Spectrum of Neurophysiological Signal.\n Brain Topography, 29(1), 13–26.\n https://doi.org/10.1007/s10548-015-0448-0\n\n [2] https://github.com/fieldtrip/fieldtrip/blob/master/specest/\n\n [3] https://github.com/fooof-tools/fooof\n\n [4] https://www.biorxiv.org/content/10.1101/299859v1\n\n [5] https://doi.org/10.1101/2021.10.15.464483\n \"\"\"\n import fractions\n set_log_level(verbose)\n # Check if input data is a MNE Raw object\n if isinstance(data, mne.io.BaseRaw):\n sf = data.info['sfreq'] # Extract sampling frequency\n ch_names = data.ch_names # Extract channel names\n hp = data.info['highpass'] # Extract highpass filter\n lp = data.info['lowpass'] # Extract lowpass filter\n data = data.get_data() * 1e6 # Convert from V to uV\n else:\n # Safety checks\n assert isinstance(data, np.ndarray), 'Data must be a numpy array.'\n data = np.atleast_2d(data)\n assert data.ndim == 2, 'Data must be of shape (nchan, n_samples).'\n nchan, npts = data.shape\n assert nchan < npts, 'Data must be of shape (nchan, n_samples).'\n assert sf is not None, 'sf must be specified if passing a numpy array.'\n assert isinstance(sf, (int, float))\n if ch_names is None:\n ch_names = ['CHAN' + str(i).zfill(3) for i in range(nchan)]\n else:\n ch_names = np.atleast_1d(np.asarray(ch_names, dtype=str))\n assert ch_names.ndim == 1, 'ch_names must be 1D.'\n assert len(ch_names) == nchan, 'ch_names must match data.shape[0].'\n hp = 0 # Highpass filter unknown -> set to 0 Hz\n lp = sf / 2 # Lowpass filter unknown -> set to Nyquist\n\n # Check the other arguments\n hset = np.asarray(hset)\n assert hset.ndim == 1, 'hset must be 1D.'\n assert hset.size > 1, '2 or more resampling fators are required.'\n hset = np.round(hset, 4) # avoid float precision error with np.arange.\n band = sorted(band)\n assert band[0] > 0, 'first element of band must be > 0.'\n assert band[1] < (sf / 2), 'second element of band must be < (sf / 2).'\n win = int(win_sec * sf) # nperseg\n\n # Inform about maximum resampled fitting range\n h_max = np.max(hset)\n band_evaluated = (band[0] / h_max, band[1] * h_max)\n freq_Nyq = sf / 2 # Nyquist frequency\n freq_Nyq_res = freq_Nyq / h_max # minimum resampled Nyquist frequency\n logging.info(f\"Fitting range: {band[0]:.2f}Hz-{band[1]:.2f}Hz\")\n logging.info(f\"Evaluated frequency range: {band_evaluated[0]:.2f}Hz-{band_evaluated[1]:.2f}Hz\")\n if band_evaluated[0] < hp:\n logging.warning(\"The evaluated frequency range starts below the \"\n f\"highpass filter ({hp:.2f}Hz). Increase the lower band\"\n f\" ({band[0]:.2f}Hz) or decrease the maximum value of \"\n f\"the hset ({h_max:.2f}).\")\n if band_evaluated[1] > lp and lp < freq_Nyq_res:\n logging.warning(\"The evaluated frequency range ends after the \"\n f\"lowpass filter ({lp:.2f}Hz). Decrease the upper band\"\n f\" ({band[1]:.2f}Hz) or decrease the maximum value of \"\n f\"the hset ({h_max:.2f}).\")\n if band_evaluated[1] > freq_Nyq_res:\n logging.warning(\"The evaluated frequency range ends after the \"\n \"resampled Nyquist frequency \"\n f\"({freq_Nyq_res:.2f}Hz). 
Decrease the upper band \"\n f\"({band[1]:.2f}Hz) or decrease the maximum value \"\n f\"of the hset ({h_max:.2f}).\")\n\n # Calculate the original PSD over the whole data\n freqs, psd = signal.welch(data, sf, nperseg=win, **kwargs_welch)\n\n # Start the IRASA procedure\n psds = np.zeros((len(hset), *psd.shape))\n\n for i, h in enumerate(hset):\n # Get the upsampling/downsampling (h, 1/h) factors as integer\n rat = fractions.Fraction(str(h))\n up, down = rat.numerator, rat.denominator\n # Much faster than FFT-based resampling\n data_up = signal.resample_poly(data, up, down, axis=-1)\n data_down = signal.resample_poly(data, down, up, axis=-1)\n # Calculate the PSD using same params as original\n freqs_up, psd_up = signal.welch(data_up, h * sf, nperseg=win, **kwargs_welch)\n freqs_dw, psd_dw = signal.welch(data_down, sf / h, nperseg=win, **kwargs_welch)\n # Geometric mean of h and 1/h\n psds[i, :] = np.sqrt(psd_up * psd_dw)\n\n # Now we take the median PSD of all the resampling factors, which gives\n # a good estimate of the aperiodic component of the PSD.\n psd_aperiodic = np.median(psds, axis=0)\n\n # We can now calculate the oscillations (= periodic) component.\n psd_osc = psd - psd_aperiodic\n\n # Let's crop to the frequencies defined in band\n mask_freqs = np.ma.masked_outside(freqs, *band).mask\n freqs = freqs[~mask_freqs]\n psd_aperiodic = np.compress(~mask_freqs, psd_aperiodic, axis=-1)\n psd_osc = np.compress(~mask_freqs, psd_osc, axis=-1)\n\n if return_fit:\n # Aperiodic fit in semilog space for each channel\n from scipy.optimize import curve_fit\n intercepts, slopes, r_squared = [], [], []\n\n def func(t, a, b):\n # See https://github.com/fooof-tools/fooof\n return a + np.log(t**b)\n\n for y in np.atleast_2d(psd_aperiodic):\n y_log = np.log(y)\n # Note that here we define bounds for the slope but not for the\n # intercept.\n popt, pcov = curve_fit(func, freqs, y_log, p0=(2, -1),\n bounds=((-np.inf, -10), (np.inf, 2)))\n intercepts.append(popt[0])\n slopes.append(popt[1])\n # Calculate R^2: https://stackoverflow.com/q/19189362/10581531\n residuals = y_log - func(freqs, *popt)\n ss_res = np.sum(residuals**2)\n ss_tot = np.sum((y_log - np.mean(y_log))**2)\n r_squared.append(1 - (ss_res / ss_tot))\n\n # Create fit parameters dataframe\n fit_params = {'Chan': ch_names, 'Intercept': intercepts,\n 'Slope': slopes, 'R^2': r_squared,\n 'std(osc)': np.std(psd_osc, axis=-1, ddof=1)}\n return freqs, psd_aperiodic, psd_osc, pd.DataFrame(fit_params)\n else:\n return freqs, psd_aperiodic, psd_osc\n\n\ndef stft_power(data, sf, window=2, step=.2, band=(1, 30), interp=True, norm=False):\n \"\"\"Compute the pointwise power via STFT and interpolation.\n\n Parameters\n ----------\n data : array_like\n Single-channel data.\n sf : float\n Sampling frequency of the data.\n window : int\n Window size in seconds for STFT. 2 or 4 seconds are usually a good default.\n Higher values = higher frequency resolution = lower time resolution.\n step : int\n Step in seconds for the STFT.\n A step of 0.2 second (200 ms) is usually a good default.\n\n * If ``step`` == 0, overlap at every sample (slowest)\n * If ``step`` == nperseg, no overlap (fastest)\n\n Higher values = higher precision = slower computation.\n band : tuple or None\n Broad band frequency range. Default is 1 to 30 Hz.\n interp : boolean\n If True, a cubic interpolation is performed to ensure that the output is the same size as\n the input (= pointwise power).\n norm : bool\n If True, return bandwise normalized band power, i.e. 
for each time point, the sum of power\n in all the frequency bins equals 1.\n\n Returns\n -------\n f : :py:class:`numpy.ndarray`\n Frequency vector\n t : :py:class:`numpy.ndarray`\n Time vector\n Sxx : :py:class:`numpy.ndarray`\n Power in the specified frequency bins of shape (f, t)\n\n Notes\n -----\n 2D Interpolation is done using :py:class:`scipy.interpolate.RectBivariateSpline`\n which is much faster than :py:class:`scipy.interpolate.interp2d` for a rectangular grid.\n The default is to use a bivariate spline with 3 degrees.\n \"\"\"\n # Safety check\n data = np.asarray(data)\n assert step <= window\n step = 1 / sf if step == 0 else step\n\n # Define STFT parameters\n nperseg = int(window * sf)\n noverlap = int(nperseg - (step * sf))\n\n # Compute STFT and remove the last epoch\n f, t, Sxx = signal.stft(\n data, sf, nperseg=nperseg, noverlap=noverlap, detrend=False, padded=True)\n\n # Let's keep only the frequency of interest\n if band is not None:\n idx_band = np.logical_and(f >= band[0], f <= band[1])\n f = f[idx_band]\n Sxx = Sxx[idx_band, :]\n\n # Compute power and interpolate\n Sxx = np.square(np.abs(Sxx))\n if interp:\n func = RectBivariateSpline(f, t, Sxx)\n t = np.arange(data.size) / sf\n Sxx = func(f, t)\n\n # Normalize\n if norm:\n sum_pow = Sxx.sum(0).reshape(1, -1)\n np.divide(Sxx, sum_pow, out=Sxx)\n return f, t, Sxx\n"
] | [
[
"numpy.sqrt",
"numpy.asarray",
"scipy.signal.stft",
"numpy.squeeze",
"numpy.ma.masked_outside",
"numpy.in1d",
"pandas.DataFrame",
"numpy.round",
"numpy.max",
"numpy.mean",
"scipy.optimize.curve_fit",
"numpy.divide",
"scipy.signal.welch",
"numpy.hstack",
"numpy.arange",
"numpy.std",
"pandas.concat",
"numpy.log",
"scipy.interpolate.RectBivariateSpline",
"scipy.signal.resample_poly",
"numpy.median",
"numpy.atleast_2d",
"scipy.integrate.simps",
"numpy.logical_and",
"numpy.sum",
"numpy.abs",
"numpy.compress"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"1.7",
"1.0",
"1.2",
"1.8"
],
"tensorflow": []
}
] |
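The irasa docstring in the row above spells out the core procedure step by step: resample by each factor h and its reciprocal, take the geometric mean of the two Welch PSDs, and take the median across factors to isolate the aperiodic component. As a quick illustration of that idea outside the library, here is a minimal self-contained sketch on synthetic data; the synthetic signal, the hset values, and every variable name are illustrative choices of mine and not part of yasa.

# Minimal sketch of the resample / geometric-mean / median idea documented above.
# Synthetic data only; not the yasa implementation itself.
import fractions
import numpy as np
from scipy import signal

sf = 200.0                                   # sampling frequency (Hz)
t = np.arange(0, 60, 1 / sf)                 # 60 s of data
rng = np.random.default_rng(42)
white = rng.standard_normal(t.size)
aperiodic = np.cumsum(white)                 # brown-ish noise as a crude 1/f^2 background
data = aperiodic / aperiodic.std() + np.sin(2 * np.pi * 10 * t)   # add a 10 Hz oscillation

win = int(4 * sf)                            # 4 s Welch windows
freqs, psd = signal.welch(data, sf, nperseg=win)

hset = np.round(np.arange(1.1, 1.95, 0.05), 4)   # resampling factors, as in the docstring default
psds = np.zeros((hset.size, psd.size))
for i, h in enumerate(hset):
    frac = fractions.Fraction(str(h))
    up, down = frac.numerator, frac.denominator
    data_up = signal.resample_poly(data, up, down)       # resample by h
    data_dw = signal.resample_poly(data, down, up)       # resample by 1/h
    _, psd_up = signal.welch(data_up, h * sf, nperseg=win)
    _, psd_dw = signal.welch(data_dw, sf / h, nperseg=win)
    psds[i] = np.sqrt(psd_up * psd_dw)                   # geometric mean of the (h, 1/h) pair

psd_aperiodic = np.median(psds, axis=0)      # fractal (aperiodic) component
psd_osc = psd - psd_aperiodic                # oscillatory residual

band = (1.0, 30.0)
mask = (freqs >= band[0]) & (freqs <= band[1])
peak = freqs[mask][np.argmax(psd_osc[mask])]
print(f"oscillatory peak near {peak:.2f} Hz")   # expected close to 10 Hz

The oscillation's power survives in psd_osc because resampling shifts it to different frequencies for each h, so the median across factors suppresses it, while the power-law background is unaffected by resampling and stays in the median.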
GHzytp/PyLorentz | [
"dedfc5d2eea231f523028abe4b3ba1c806bbff1f"
] | [
"PyTIE/TIE_helper.py"
] | [
"\"\"\"Helper functions for TIE. \n\nAn assortment of helper functions that load images, pass data, and generally \nare used in the reconstruction. Additionally, a couple of functions used for \ndisplaying images and stacks.\n\nAuthor: Arthur McCray, ANL, Summer 2019.\n\"\"\"\n \nimport matplotlib.pyplot as plt \nimport matplotlib as mpl\nimport numpy as np\nimport hyperspy.api as hs\nimport sys\nfrom cv2 import resize, imwrite\nfrom skimage import io\nfrom scipy.ndimage.filters import median_filter\nfrom scipy import ndimage\nfrom ipywidgets import interact\nimport hyperspy # just for checking type in show_stack. \nfrom copy import deepcopy\nfrom TIE_params import TIE_params\nimport textwrap\nimport os\n\n\n# ============================================================= #\n# Functions used for loading and passing the TIE data # \n# ============================================================= #\n\ndef load_data(path=None, fls_file='', al_file='', flip=None, flip_fls_file=None, filtersize=3): \n \"\"\"Load files in a directory (from a .fls file) using hyperspy. \n\n For more information on how to organize the directory and load the data, as \n well as how to setup the .fls file please refer to the README or the \n TIE_template.ipynb notebook. \n\n Args:\n path (str): Location of data directory. \n fls_file (str): Name of the .fls file which contains the image names and\n defocus values. \n al_file (str): Name of the aligned stack image file. \n flip (Bool): True if using a flip stack, False otherwise. Uniformly \n thick films can be reconstructed without a flip stack. The \n electrostatic phase shift will not be reconstructed.\n flip_fls_file (str): Name of the .fls file for the flip images if they \n are not named the same as the unflip files. Will only be applied to \n the /flip/ directory. \n filtersize (int): (`optional`) The images are processed with a median \n filter to remove hot pixels which occur in experimental data. This \n should be set to 0 for simulated data, though generally one would \n only use this function for experimental data. \n \n Returns:\n list: List of length 3, containing the following items: \n\n - imstack: array of hyperspy signal2D objects (one per image)\n - flipstack: array of hyperspy signal2D objects, empty array if \n flip == False \n - ptie: TIE_params object holding a reference to the imstack and many\n other parameters. 
\n\n \"\"\"\n\n unflip_files = []\n flip_files = []\n\n # Finding the unflip fls file\n path = os.path.abspath(path)\n if not fls_file.endswith('.fls'):\n fls_file += '.fls'\n if os.path.isfile(os.path.join(path, fls_file)):\n fls_full = os.path.join(path, fls_file)\n elif os.path.isfile(os.path.join(path, 'unflip', fls_file)):\n fls_full = os.path.join(path, 'unflip', fls_file)\n elif os.path.isfile(os.path.join(path, 'tfs', fls_file)) and not flip:\n fls_full = os.path.join(path, 'tfs', fls_file)\n else:\n print(\"fls file could not be found.\")\n sys.exit(1)\n\n if flip_fls_file is None: # one fls file given\n fls = []\n with open(fls_full) as file:\n for line in file:\n fls.append(line.strip())\n\n num_files = int(fls[0])\n if flip: \n for line in fls[1:num_files+1]:\n unflip_files.append(os.path.join(path, 'unflip', line))\n for line in fls[1:num_files+1]:\n flip_files.append(os.path.join(path, 'flip', line))\n else:\n if os.path.isfile(os.path.join(path, 'tfs', fls[2])):\n tfs_dir = 'tfs'\n else:\n tfs_dir = 'unflip'\n for line in fls[1:num_files+1]:\n unflip_files.append(os.path.join(path, tfs_dir, line))\n \n else: # there are 2 fls files given\n if not flip: \n print(textwrap.dedent(\"\"\"\n You probably made a mistake.\n You're defining both unflip and flip fls files but have flip=False.\n Proceeding anyways, will only load unflip stack (if it doesnt break).\\n\"\"\"))\n # find the flip fls file\n if not flip_fls_file.endswith('.fls'):\n flip_fls_file += '.fls'\n if os.path.isfile(os.path.join(path, flip_fls_file)):\n flip_fls_full = os.path.join(path, flip_fls_file)\n elif os.path.isfile(os.path.join(path, 'flip', flip_fls_file)):\n flip_fls_full = os.path.join(path, 'flip', flip_fls_file)\n\n fls = []\n flip_fls = []\n with open(fls_full) as file:\n for line in file:\n fls.append(line.strip())\n\n with open(flip_fls_full) as file:\n for line in file:\n flip_fls.append(line.strip())\n\n assert int(fls[0]) == int(flip_fls[0])\n num_files = int(fls[0])\n for line in fls[1:num_files+1]:\n unflip_files.append(os.path.join(path, \"unflip\", line))\n for line in flip_fls[1:num_files+1]:\n flip_files.append(os.path.join(path, \"flip\", line))\n\n # Actually load the data using hyperspy\n imstack = hs.load(unflip_files)\n if flip:\n flipstack = hs.load(flip_files)\n else:\n flipstack = []\n\n # convert scale dimensions to nm\n for sig in imstack + flipstack: \n sig.axes_manager.convert_units(units=['nm', 'nm'])\n\n if unflip_files[0][-4:] != '.dm3' and unflip_files[0][-4:] != '.dm4': \n # if not dm3's then they generally don't have the title metadata. \n for sig in imstack + flipstack: \n sig.metadata.General.title = sig.metadata.General.original_filename\n\n # load the aligned tifs and update the dm3 data to match\n # The data from the dm3's will be replaced with the aligned image data. 
\n try:\n al_tifs = io.imread(os.path.join(path, al_file))\n except FileNotFoundError as e:\n print('Incorrect aligned stack filename given.')\n raise e\n\n if flip:\n tot_files = 2*num_files\n else:\n tot_files = num_files \n\n for i in range(tot_files):\n # pull slices from correct axis, assumes fewer slices than images are tall\n if al_tifs.shape[0] < al_tifs.shape[2]:\n im = al_tifs[i]\n elif al_tifs.shape[0] > al_tifs.shape[2]:\n im = al_tifs[:,:,i]\n else:\n print(\"Bad stack\\n Or maybe the second axis is slice axis?\")\n print('Loading failed.\\n')\n sys.exit(1)\n \n # then median filter to remove \"hot pixels\"\n im = median_filter(im, size= filtersize)\n\n # and assign to appropriate stack \n if i < num_files:\n print('loading unflip:', unflip_files[i])\n imstack[i].data = im\n else: \n j = i - num_files\n print('loading flip:', flip_files[j])\n flipstack[j].data = im\n\n # read the defocus values\n defvals = fls[-(num_files//2):]\n assert num_files == 2*len(defvals) + 1\n defvals = [float(i) for i in defvals] # defocus values +/-\n\n # Create a TIE_params object\n ptie = TIE_params(imstack, flipstack, defvals, flip, path)\n print('Data loaded successfully.')\n return (imstack, flipstack, ptie)\n\n\ndef load_data_GUI(path, fls_file1, fls_file2, al_file='', single=False, filtersize=3):\n \"\"\"Load files in a directory (from a .fls file) using hyperspy.\n\n For more information on how to organize the directory and load the data, as\n well as how to setup the .fls file please refer to the README or the\n TIE_template.ipynb notebook.\n\n Args:\n path (str): Location of data directory.\n fls_file1 (str): Name of the .fls file which contains the image names and\n defocus values.\n fls_file2 (str): Name of the .fls file for the flip images if they\n are not named the same as the unflip files. Will only be applied to\n the /flip/ directory.\n al_file (str): Name of the aligned stack image file.\n single (Bool): True if using a single stack, False otherwise. Uniformly\n thick films can be reconstructed with a single stack. The\n electrostatic phase shift will not be reconstructed.\n filtersize (int): (`optional`) The images are processed with a median\n filter to remove hot pixels which occur in experimental data. 
This\n should be set to 0 for simulated data, though generally one would\n only use this function for experimental data.\n\n Returns:\n list: List of length 3, containing the following items:\n\n - imstack: array of hyperspy signal2D objects (one per image)\n - flipstack: array of hyperspy signal2D objects, empty array if\n flip == False\n - ptie: TIE_params object holding a reference to the imstack and many\n other parameters.\n\n \"\"\"\n\n unflip_files = []\n flip_files = []\n\n if fls_file2 is None: # one fls file given\n u_files = []\n with open(fls_file1) as file:\n for line in file:\n u_files.append(line.strip())\n\n num_files = int(u_files[0])\n if not single:\n for line in u_files[1:num_files + 1]:\n unflip_files.append(os.path.join(path, 'unflip', line))\n for line in u_files[1:num_files + 1]:\n flip_files.append(os.path.join(path, 'flip', line))\n else:\n if os.path.exists(os.path.join(path, 'tfs')):\n sub_dir = 'tfs'\n else:\n sub_dir = 'unflip'\n for line in u_files[1:num_files + 1]:\n unflip_files.append(os.path.join(path, sub_dir, line))\n\n else: # there are 2 fls files given\n if single:\n print(textwrap.dedent(\"\"\"\n You probably made a mistake.\n You're defining both unflip and flip fls files but have flip=False.\n Proceeding anyways, will only load unflip stack (if it doesnt break).\\n\"\"\"))\n\n u_files = []\n f_files = []\n with open(fls_file1) as file:\n for line in file:\n u_files.append(line.strip())\n\n with open(fls_file2) as file:\n for line in file:\n f_files.append(line.strip())\n\n assert int(u_files[0]) == int(f_files[0])\n num_files = int(u_files[0])\n for line in u_files[1:num_files + 1]:\n unflip_files.append(os.path.join(path, \"unflip\", line))\n for line in f_files[1:num_files + 1]:\n flip_files.append(os.path.join(path, \"flip\", line))\n\n # Actually load the data using hyperspy\n imstack = hs.load(unflip_files)\n if not single:\n flipstack = hs.load(flip_files)\n else:\n flipstack = []\n\n # convert scale dimensions to nm\n for sig in imstack + flipstack:\n sig.axes_manager.convert_units(units=['nm', 'nm'])\n\n if unflip_files[0][-4:] != '.dm3' and unflip_files[0][-4:] != '.dm4':\n # if not dm3's then they generally don't have the title metadata.\n for sig in imstack + flipstack:\n sig.metadata.General.title = sig.metadata.General.original_filename\n\n # load the aligned tifs and update the dm3 data to match\n # The data from the dm3's will be replaced with the aligned image data.\n try:\n al_tifs = io.imread(al_file)\n except FileNotFoundError as e:\n print('Incorrect aligned stack filename given.')\n raise e\n\n if not single:\n tot_files = 2 * num_files\n else:\n tot_files = num_files\n\n for i in range(tot_files):\n # pull slices from correct axis, assumes fewer slices than images are tall\n if al_tifs.shape[0] < al_tifs.shape[2]:\n im = al_tifs[i]\n elif al_tifs.shape[0] > al_tifs.shape[2]:\n im = al_tifs[:, :, i]\n else:\n print(\"Bad stack\\n Or maybe the second axis is slice axis?\")\n print('Loading failed.\\n')\n sys.exit(1)\n\n # then median filter to remove \"hot pixels\"\n im = median_filter(im, size=filtersize)\n\n # and assign to appropriate stack\n if i < num_files:\n print('loading unflip:', unflip_files[i])\n imstack[i].data = im\n else:\n j = i - num_files\n print('loading flip:', flip_files[j])\n flipstack[j].data = im\n\n # read the defocus values\n defvals = u_files[-(num_files // 2):]\n assert num_files == 2 * len(defvals) + 1\n defvals = [float(i) for i in defvals] # defocus values +/-\n\n # Create a TIE_params object\n if 
single:\n single = None\n else:\n single = True\n ptie = TIE_params(imstack, flipstack, defvals, single, path)\n print('Data loaded successfully.')\n return (imstack, flipstack, ptie)\n\n\ndef select_tifs(i, ptie, long_deriv = False):\n \"\"\"Returns a list of the images which will be used in TIE() or SITIE().\n\n Uses copy.deepcopy() as the data will be modified in the reconstruction \n process, and we don't want to change the original data. This method is \n likely not best practice. In the future this might get moved to the \n TIE_params class. \n\n Args: \n i (int): Index of defvals for which to select the tifs. \n ptie (``TIE_params`` object): Parameters for reconstruction, holds the \n images. \n\n Returns:\n list: List of np arrays, return depends on parameters: \n\n - if long_deriv == False: \n\n - if ptie.flip == True: returns [ +- , -- , 0 , ++ , -+ ] \n - elif ptie.flip == False: returns [+-, 0, ++]\n - where first +/- is unflip/flip, second +/- is over/underfocus.\n E.g. -+ is the flipped overfocused image. 0 is the averaged \n infocus image.\n\n - elif long_deriv == True: returns all images in imstack followed by\n all images in flipstack. \n\n \"\"\"\n if long_deriv:\n recon_tifs = []\n for sig in ptie.imstack:\n recon_tifs.append(sig.data)\n if ptie.flip:\n for sig in ptie.flipstack:\n recon_tifs.append(sig.data)\n\n else:\n if i < 0:\n i = len(ptie.defvals)+i\n num_files = ptie.num_files\n under = num_files//2 - (i+1)\n over = num_files//2 + (i+1)\n imstack = ptie.imstack\n flipstack = ptie.flipstack\n if ptie.flip:\n recon_tifs = [\n imstack[under].data, # +-\n flipstack[under].data, # --\n (imstack[num_files//2].data + \n flipstack[num_files//2].data)/2, # infocus\n imstack[over].data, # ++\n flipstack[over].data # -+\n ]\n else:\n recon_tifs = [\n imstack[under].data, # +-\n imstack[num_files//2].data, # 0\n imstack[over].data # ++\n ]\n try:\n recon_tifs = deepcopy(recon_tifs) \n except TypeError:\n print(\"TypeError in select_tifs deepcopy. Proceeding with originals.\")\n return recon_tifs\n\n\ndef dist(ny, nx, shift=False):\n \"\"\"Creates a frequency array for Fourier processing. \n\n Args: \n ny (int): Height of array \n nx (int): Width of array\n shift (bool): Whether to center the frequency spectrum. \n\n - False: (default) smallest values are at the corners. \n - True: smallest values at center of array. \n\n Returns: \n ``ndarray``: Numpy array of shape (ny, nx). \n \"\"\"\n ly = (np.arange(ny)-ny/2)/ny\n lx = (np.arange(nx)-nx/2)/nx\n [X,Y] = np.meshgrid(lx, ly)\n q = np.sqrt(X**2 + Y**2)\n if not shift:\n q = np.fft.ifftshift(q)\n return q\n\n\ndef scale_stack(imstack):\n \"\"\"Scale a stack of images so all have the same total intensity. \n\n Args: \n imstack (list): List of 2D arrays. \n\n Returns:\n list: List of same shape as imstack\n \"\"\"\n imstack = deepcopy(imstack)\n tots = np.sum(imstack, axis = (1,2))\n t = max(tots) / tots\n for i in range(len(tots)):\n imstack[i] *= t[i]\n return imstack/np.max(imstack)\n\n\n# =============================================== #\n# Various display functions # \n# =============================================== #\n\"\"\" Not all of these are used in TIE_reconstruct, but I often find them useful\nto have handy when working in Jupyter notebooks.\"\"\"\n\n\ndef show_im(image, title=None, simple=False, origin='upper', cbar=True,\n cbar_title='', scale=None, **kwargs):\n \"\"\"Display an image on a new axis.\n \n Takes a 2D array and displays the image in grayscale with optional title on \n a new axis. 
In general it's nice to have things on their own axes, but if \n too many are open it's a good idea to close with plt.close('all'). \n\n Args: \n image (2D array): Image to be displayed.\n title (str): (`optional`) Title of plot. \n simple (bool): (`optional`) Default output or additional labels. \n\n - True, will just show image. \n - False, (default) will show a colorbar with axes labels, and will adjust the \n contrast range for images with a very small range of values (<1e-12). \n\n origin (str): (`optional`) Control image orientation. \n\n - 'upper': (default) (0,0) in upper left corner, y-axis goes down. \n - 'lower': (0,0) in lower left corner, y-axis goes up. \n\n cbar (bool): (`optional`) Choose to display the colorbar or not. Only matters when\n simple = False. \n cbar_title (str): (`optional`) Title attached to the colorbar (indicating the \n units or significance of the values). \n scale (float): Scale of image in nm/pixel. Axis markers will be given in\n units of nanometers. \n\n Returns:\n None\n \"\"\"\n fig, ax = plt.subplots()\n if not simple and np.max(image) - np.min(image) < 1e-12:\n # adjust coontrast range\n vmin = np.min(image) - 1e-12\n vmax = np.max(image) + 1e-12\n im = ax.matshow(image, cmap = 'gray', origin=origin, vmin=vmin, vmax=vmax)\n else:\n im = ax.matshow(image, cmap = 'gray', origin=origin, **kwargs)\n\n if title is not None: \n ax.set_title(str(title), pad=0)\n\n if simple:\n plt.axis('off')\n else:\n plt.tick_params(axis='x',top=False)\n ax.xaxis.tick_bottom()\n ax.tick_params(direction='in')\n if scale is None:\n ticks_label = 'pixels'\n else:\n def mjrFormatter(x, pos):\n return f\"{scale*x:.3g}\"\n\n fov = scale * max(image.shape[0], image.shape[1])\n\n if fov < 4e3: # if fov < 4um use nm scale\n ticks_label = ' nm '\n elif fov > 4e6: # if fov > 4mm use m scale\n ticks_label = \" m \"\n scale /= 1e9\n else: # if fov between the two, use um\n ticks_label = \" $\\mu$m \"\n scale /= 1e3\n\n ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(mjrFormatter))\n ax.xaxis.set_major_formatter(mpl.ticker.FuncFormatter(mjrFormatter))\n\n if origin == 'lower': \n ax.text(y=0,x=0,s=ticks_label, rotation=-45, va='top', ha='right')\n elif origin =='upper': # keep label in lower left corner\n ax.text(y=image.shape[0],x=0,s=ticks_label, rotation=-45, va='top', ha='right')\n\n if cbar: \n plt.colorbar(im, ax=ax, pad=0.02, format=\"%.2g\", label=str(cbar_title))\n\n plt.show()\n return\n\n\ndef show_stack(images, ptie=None, origin='upper', title=False):\n \"\"\"Shows a stack of dm3s or np images with a slider to navigate slice axis. \n \n Uses ipywidgets.interact to allow user to view multiple images on the same\n axis using a slider. There is likely a better way to do this, but this was \n the first one I found that works... \n\n If a TIE_params object is given, only the regions corresponding to ptie.crop\n will be shown. \n\n Args:\n images (list): List of 2D arrays. Stack of images to be shown. \n ptie (``TIE_params`` object): Will use ptie.crop to show only the region\n that will remain after being cropped. \n origin (str): (`optional`) Control image orientation. \n title (bool): (`optional`) Try and pull a title from the signal objects. 
\n Returns:\n None \n \"\"\"\n sig = False\n if type(images[0]) == hyperspy._signals.signal2d.Signal2D:\n sig = True\n imstack = []\n titles = []\n for signal2D in images:\n imstack.append(signal2D.data)\n titles.append(signal2D.metadata.General.title)\n images = np.array(imstack)\n else:\n images = np.array(images)\n\n if ptie is None:\n t , b = 0, images[0].shape[0]\n l , r = 0, images[0].shape[1]\n else:\n if ptie.rotation != 0 or ptie.x_transl != 0 or ptie.y_transl != 0:\n rotate, x_shift, y_shift = ptie.rotation, ptie.x_transl, ptie.y_transl\n for i in range(len(images)):\n images[i] = ndimage.rotate(images[i], rotate, reshape=False)\n images[i] = ndimage.shift(images[i], (-y_shift, x_shift))\n t = ptie.crop['top']\n b = ptie.crop['bottom']\n l = ptie.crop['left']\n r = ptie.crop['right']\n\n images = images[:,t:b,l:r]\n\n fig, ax = plt.subplots()\n plt.axis('off')\n N = images.shape[0]\n\n def view_image(i=0):\n im = plt.imshow(images[i], cmap='gray', interpolation='nearest', origin=origin)\n if title: \n if sig:\n plt.title('Image title: {:}'.format(titles[i]))\n else:\n plt.title('Stack[{:}]'.format(i))\n\n interact(view_image, i=(0, N-1))\n return \n\n \ndef show_2D(mag_x, mag_y, mag_z=None, a=15, l=None, w=None, title=None, color=False, hsv=True,\n origin='upper', save=None, GUI_handle=False, GUI_color_array=None):\n \"\"\" Display a 2D vector arrow plot. \n\n Displays an an arrow plot of a vector field, with arrow length scaling with \n vector magnitude. If color=True, a colormap will be displayed under the \n arrow plot. \n\n If mag_z is included and color=True, a spherical colormap will be used with \n color corresponding to in-plane and white/black to out-of-plane vector \n orientation. \n\n Args: \n mag_x (2D array): x-component of magnetization. \n mag_y (2D array): y-component of magnetization. \n mag_z (2D array): optional z-component of magnetization. \n a (int): Number of arrows to plot along the x and y axes. Default 15. \n l (float): Scale factor of arrows. Larger l -> shorter arrows. Default None\n guesses at a good value. None uses matplotlib default. \n w (float): Width scaling of arrows. None uses matplotlib default.\n title (str): (`optional`) Title for plot. Default None. \n color (bool): (`optional`) Whether or not to show a colormap underneath \n the arrow plot. Color image is made from colorwheel.color_im(). \n hsv (bool): (`optional`) Only relevant if color == True. Whether to use \n an hsv or 4-fold color-wheel in the color image. \n origin (str): (`optional`) Control image orientation. 
\n save (str): (`optional`) Path to save the figure.\n GUI_handle (bool): ('optional') Handle for indicating if using GUI.\n Default is False.\n GUI_color_array (2D array): ('optional') The colored image array passed from the GUI,\n it is for creating the overlaying the arrows without using color_im().\n\n Returns: \n fig: Returns the figure handle.\n \"\"\" \n assert mag_x.ndim == mag_y.ndim\n if mag_x.ndim == 3: \n print(\"Summing along first axis\")\n mag_x = np.sum(mag_x, axis=0)\n mag_y = np.sum(mag_y, axis=0)\n if mag_z is not None:\n mag_z = np.sum(mag_z, axis=0)\n\n a = ((mag_x.shape[0] - 1)//a)+1\n\n dimy, dimx = mag_x.shape\n X = np.arange(0, dimx, 1)\n Y = np.arange(0, dimy, 1)\n U = mag_x \n V = mag_y \n \n sz_inches = 8\n if not GUI_handle or save is not None:\n if color:\n rad = mag_x.shape[0]//16\n rad = max(rad, 16)\n pad = 10 # pixels\n width = np.shape(mag_y)[1] + 2*rad + pad\n aspect = dimy/width\n else:\n aspect = dimy/dimx\n\n if GUI_handle and save is None:\n fig, ax = plt.subplots(figsize=(10, 10))\n plt.ioff()\n ax.set_aspect('equal', adjustable='box')\n else:\n fig, ax = plt.subplots()\n ax.set_aspect(aspect)\n\n if color:\n if not GUI_handle or save is not None:\n from colorwheel import color_im\n im = ax.matshow(color_im(mag_x, mag_y, mag_z, hsvwheel=hsv, rad=rad), cmap='gray',\n origin=origin)\n else:\n im = ax.matshow(GUI_color_array, cmap='gray', origin=origin, aspect='equal')\n arrow_color = 'white'\n plt.axis('off')\n else:\n arrow_color = 'black'\n if GUI_handle and save is None:\n white_array = np.zeros([dimy, dimx, 3], dtype=np.uint8)\n white_array.fill(255)\n im = ax.matshow(white_array, cmap='gray', origin=origin, aspect='equal')\n plt.axis('off')\n elif GUI_handle and save:\n white_array = np.zeros([dimy, dimx, 3], dtype=np.uint8)\n white_array.fill(255)\n im = ax.matshow(white_array, cmap='gray', origin=origin)\n fig.tight_layout(pad=0)\n ax.xaxis.set_major_locator(mpl.ticker.NullLocator())\n ax.yaxis.set_major_locator(mpl.ticker.NullLocator())\n plt.axis('off')\n\n ashift = (dimx-1) % a//2\n q = ax.quiver(X[ashift::a], Y[ashift::a], U[ashift::a,ashift::a], V[ashift::a,ashift::a],\n units='xy',\n scale=l,\n scale_units='xy',\n width=w,\n angles='xy',\n pivot='mid',\n color=arrow_color)\n\n if not color:\n if not GUI_handle:\n qk = ax.quiverkey(q, X=0.95, Y=0.98, U=1, label=r'$Msat$', labelpos='S',\n coordinates='axes')\n qk.text.set_backgroundcolor('w')\n if origin == 'upper':\n ax.invert_yaxis()\n\n if title is not None:\n tr = False\n ax.set_title(title)\n else:\n tr = True\n\n plt.tick_params(axis='x', labelbottom=False, bottom=False, top=False)\n plt.tick_params(axis='y', labelleft=False, left=False, right=False)\n # ax.set_aspect(aspect)\n if not GUI_handle:\n plt.show()\n\n if save is not None: \n if not color:\n tr = False\n fig.set_size_inches(8, 8/aspect)\n print(f'Saving: {save}')\n plt.axis('off')\n # sets dpi to 5 times original image dpi so arrows are reasonably sharp\n dpi2 = max(dimy, dimx) * 5 / sz_inches\n plt.savefig(save, dpi=dpi2, bbox_inches='tight', transparent=tr)\n\n if GUI_handle:\n return fig, ax\n else:\n return "
] | [
[
"matplotlib.pyplot.imshow",
"numpy.sqrt",
"numpy.max",
"numpy.arange",
"numpy.fft.ifftshift",
"matplotlib.pyplot.axis",
"numpy.zeros",
"numpy.min",
"matplotlib.pyplot.savefig",
"scipy.ndimage.rotate",
"scipy.ndimage.filters.median_filter",
"numpy.array",
"numpy.meshgrid",
"numpy.sum",
"matplotlib.pyplot.show",
"matplotlib.ticker.NullLocator",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.ioff",
"numpy.shape",
"matplotlib.ticker.FuncFormatter",
"scipy.ndimage.shift",
"matplotlib.pyplot.tick_params"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
}
] |
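The dist() helper in the TIE_helper.py row above builds its frequency grid by centering arange(n) and then applying ifftshift. As a small sanity check of my own (not part of PyLorentz), the same grid can be assembled directly from np.fft.fftfreq when the array dimensions are even; the sketch below compares the two constructions.

# Compare dist() against a grid built from np.fft.fftfreq (illustrative check only).
# (arange(n) - n/2)/n equals fftshift(fftfreq(n)) for even n, so the unshifted dist()
# output is sqrt(fx**2 + fy**2) on the standard FFT grid, in cycles per pixel.
import numpy as np

def dist_reference(ny, nx):
    """Radial frequency magnitude on the unshifted FFT grid."""
    fy = np.fft.fftfreq(ny)
    fx = np.fft.fftfreq(nx)
    FX, FY = np.meshgrid(fx, fy)
    return np.sqrt(FX**2 + FY**2)

def dist(ny, nx, shift=False):
    # verbatim copy of the helper above, included so the comparison is self-contained
    ly = (np.arange(ny) - ny / 2) / ny
    lx = (np.arange(nx) - nx / 2) / nx
    X, Y = np.meshgrid(lx, ly)
    q = np.sqrt(X**2 + Y**2)
    if not shift:
        q = np.fft.ifftshift(q)
    return q

ny, nx = 64, 128   # even sizes; odd sizes differ by a half-bin offset
print(np.allclose(dist(ny, nx), dist_reference(ny, nx)))   # True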
bayc/floris-1 | [
"00fc1600e42fe8e92728e74ae47d8ba9da2e7360"
] | [
"examples/example_0005_adjust_floris.py"
] | [
"# Copyright 2019 NREL\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use\n# this file except in compliance with the License. You may obtain a copy of the\n# License at http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations under the License.\n\n# See read the https://floris.readthedocs.io for documentation\n\nimport matplotlib.pyplot as plt\nimport floris.tools as wfct\nimport floris.tools.visualization as vis\nimport floris.tools.cut_plane as cp\nfrom floris.utilities import Vec3\nimport numpy as np\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size\n\n# Initialize FLORIS model\nfi = wfct.floris_utilities.FlorisInterface(\"example_input.json\")\n\n# set turbine locations to 4 turbines in a row - demonstrate how to change coordinates\nD = fi.floris.farm.flow_field.turbine_map.turbines[0].rotor_diameter\nlayout_x = [0, 7*D, 0, 7*D]\nlayout_y = [0, 0, 5*D, 5*D]\nfi.reinitialize_flow_field(layout_array=(layout_x, layout_y))\n\n# Calculate wake\nfi.calculate_wake()\n\n\n# ================================================================================\nprint('Plotting the FLORIS flowfield...')\n# ================================================================================\n\n# Initialize the horizontal cut\nhor_plane = wfct.cut_plane.HorPlane(\n fi.get_flow_data(),\n fi.floris.farm.turbines[0].hub_height\n)\n\n# Plot and show\nfig, ax = plt.subplots()\nwfct.visualization.visualize_cut_plane(hor_plane, ax=ax)\n\n# ================================================================================\nprint('Changing wind direction and wind speed...')\n# ================================================================================\n\nws = np.linspace(6, 8, 3)\nwd = [45.0, 170.0, 270.]\n\n# Plot and show\nfig, ax = plt.subplots(3, 3, figsize=(15, 15))\npower = np.zeros((len(ws), len(wd)))\nfor i, speed in enumerate(ws):\n for j, wdir in enumerate(wd):\n print('Calculating wake: wind direction = ',\n wdir, 'and wind speed = ', speed)\n\n fi.reinitialize_flow_field(wind_speed=speed, wind_direction=wdir)\n\n # recalculate the wake\n fi.calculate_wake()\n\n # record powers\n power[i, j] = np.sum(fi.get_turbine_power())\n\n # ============================================\n # not necessary if you only want the powers\n # ============================================\n # Visualize the changes\n # Initialize the horizontal cut\n hor_plane = wfct.cut_plane.HorPlane(\n fi.get_flow_data(),\n fi.floris.farm.turbines[0].hub_height\n )\n im = wfct.visualization.visualize_cut_plane(hor_plane, ax=ax[i, j])\n strTitle = 'Wind Dir = ' + \\\n str(wdir) + 'deg' + ' Speed = ' + str(speed) + 'm/s'\n ax[i, j].set_title(strTitle)\n fig.colorbar(im, ax=ax[i, j], fraction=0.025, pad=0.04)\n\n# ================================================================================\n# print('Set yaw angles...')\n# ================================================================================\n\n# assign yaw angles to turbines and calculate wake at 270\n# initial power output\nfi.calculate_wake()\npower_initial = np.sum(fi.get_turbine_power())\n\n# Set the yaw angles\nyaw_angles = [25.0, 0, 25.0, 0]\nfi.calculate_wake(yaw_angles=yaw_angles)\n\n# Check the new power\npower_yaw = 
np.sum(fi.get_turbine_power())\nprint('Power aligned: %.1f' % power_initial)\nprint('Power yawed: %.1f' % power_yaw)\n\n# ================================================================================\nprint('Plotting the FLORIS flowfield with yaw...')\n# ================================================================================\n\n# Initialize the horizontal cut\nhor_plane = wfct.cut_plane.HorPlane(\n fi.get_flow_data(),\n fi.floris.farm.turbines[0].hub_height\n)\n\n# Plot and show\nfig, ax = plt.subplots()\nwfct.visualization.visualize_cut_plane(hor_plane, ax=ax)\nax.set_title('Flow with yawed front turbines')\nplt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
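The FLORIS example above compares the power of an aligned and a yawed front row at fixed yaw angles. A natural extension, sketched below, is to sweep the front-row yaw angle and record total farm power. This is my own sketch, not part of the example: it reuses only the calls already shown in the script (FlorisInterface, reinitialize_flow_field, calculate_wake(yaw_angles=...), get_turbine_power) and assumes the same example_input.json, the same 4-turbine layout, and the FLORIS API of the version pinned by this repo snapshot.

# Sketch: sweep the yaw of the two front turbines and plot total farm power,
# mirroring the pattern of the example above (assumptions noted in the lead-in).
import numpy as np
import matplotlib.pyplot as plt
import floris.tools as wfct

fi = wfct.floris_utilities.FlorisInterface("example_input.json")
D = fi.floris.farm.flow_field.turbine_map.turbines[0].rotor_diameter
fi.reinitialize_flow_field(layout_array=([0, 7 * D, 0, 7 * D], [0, 0, 5 * D, 5 * D]))

yaw_sweep = np.arange(0, 35, 5)   # front-row yaw angles in degrees
powers = []
for yaw in yaw_sweep:
    # yaw only the two front turbines, as in the example above
    fi.calculate_wake(yaw_angles=[yaw, 0.0, yaw, 0.0])
    powers.append(np.sum(fi.get_turbine_power()))

fig, ax = plt.subplots()
ax.plot(yaw_sweep, np.array(powers) / powers[0])
ax.set_xlabel("Front-row yaw angle (deg)")
ax.set_ylabel("Total power relative to aligned case")
plt.show()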
gbegkas/Detectron | [
"8d53dcdc2d1282938636f8dd45859101214730ff"
] | [
"misc/max_auc.py"
] | [
"import json\r\nimport argparse\r\nimport sys\r\nimport os\r\nfrom matplotlib import pyplot as plt\r\n\r\ndef parse_args():\r\n parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')\r\n parser.add_argument(\r\n '--path',\r\n dest='auc_file',\r\n help='json with auc',\r\n default=None,\r\n type=str\r\n )\r\n if len(sys.argv) == 1:\r\n parser.print_help()\r\n sys.exit(1)\r\n return parser.parse_args()\r\n\r\n\r\nif __name__ == '__main__':\r\n args = parse_args()\r\n with open(args.auc_file) as fp:\r\n data = json.load(fp)\r\n\r\n max_auc = 0\r\n for key in sorted(data.keys()):\r\n if max_auc < data[key]['INbreast']['box']['AUC']:\r\n max_auc = data[key]['INbreast']['box']['AUC']\r\n max_key = key\r\n # if int(key) > 32000:\r\n # print(data[key])\r\n # with open(os.path.join(os.path.dirname(args.auc_file), 'auc.json'), 'w') as fp:\r\n # json.dump(data, fp, indent=1, separators=(',', ':'), sort_keys=True)\r\n\r\n # print(data[max_key]['INbreast']['box'])\r\n\r\n print('Iteration: {iter}'.format(iter=max_key))\r\n for metric in data[max_key]['INbreast']['box'].keys():\r\n print('{metric}: {data}'.format(metric=metric, data=data[max_key]['INbreast']['box'][metric]))\r\n\r\n # print('Auc: {auc}'.format(auc=max_auc))\r\n\r\n keys = map(int, data.keys())\r\n keys = sorted(keys)\r\n auc = []\r\n ap50 = []\r\n ap = []\r\n ap75 = []\r\n apl = []\r\n apm = []\r\n aps = []\r\n for key in keys:\r\n auc.append(data[str(key)]['INbreast']['box']['AUC'])\r\n ap50.append(data[str(key)]['INbreast']['box']['AP50'])\r\n ap.append(data[str(key)]['INbreast']['box']['AP'])\r\n ap75.append(data[str(key)]['INbreast']['box']['AP75'])\r\n apl.append(data[str(key)]['INbreast']['box']['APl'])\r\n apm.append(data[str(key)]['INbreast']['box']['APm'])\r\n aps.append(data[str(key)]['INbreast']['box']['APs'])\r\n\r\n fig = plt.figure()\r\n plt.plot(keys, auc)\r\n # plt.plot(keys, ap, c='brown')\r\n # plt.plot(keys, ap50, c='black')\r\n # plt.plot(keys, ap75, c='cyan')\r\n # plt.plot(keys, apl, c='magenta')\r\n # plt.plot(keys, apm, c='yellow')\r\n # plt.plot(keys, aps, c='green')\r\n # plt.title('AUC Model {model} {dataset}'.format(model=backbone, dataset=cfg.TEST.DATASETS[0]))\r\n plt.ylabel('Auc')\r\n plt.xlabel('Iterations')\r\n plt.axis([min(keys) - 1000, max(keys) + 1000, min(auc) - 0.05, max(auc) + 0.05])\r\n plt.plot([20000, 50000], [0.94, 0.94], c='red')\r\n # plt.xlim([min(keys) - 1000, max(keys) + 1000])\r\n # plt.ylim([min(values) - 0.05, max(values) + 0.05])\r\n # fig.savefig(os.path.join(cfg.OUTPUT_DIR,\r\n # '{model}_{dataset}_{iter}.eps'.format(model=backbone,\r\n # dataset=cfg.TEST.DATASETS[0],\r\n # iter=cfg.SOLVER['MAX_ITER'])), format='eps')\r\n # fig.savefig(os.path.join(cfg.OUTPUT_DIR,\r\n # '{model}_{dataset}_{iter}.png'.format(model=backbone,\r\n # dataset=cfg.TEST.DATASETS[0],\r\n # iter=cfg.SOLVER['MAX_ITER'])))\r\n plt.show()\r\n fig = plt.figure()\r\n plt.plot(keys, ap50, c='black')\r\n plt.ylabel('AP50')\r\n plt.xlabel('Iterations')\r\n plt.show()\r\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ElDeveloper/pandas | [
"dc430c58654b9d0b943fb78dc493ba600511591f"
] | [
"pandas/core/common.py"
] | [
"\"\"\"\nMisc tools for implementing data structures\n\"\"\"\n\nimport re\nimport collections\nimport numbers\nimport codecs\nimport csv\nimport types\nfrom datetime import datetime, timedelta\nfrom functools import partial\n\nfrom numpy.lib.format import read_array, write_array\nimport numpy as np\n\nimport pandas as pd\nimport pandas.algos as algos\nimport pandas.lib as lib\nimport pandas.tslib as tslib\nfrom pandas import compat\nfrom pandas.compat import StringIO, BytesIO, range, long, u, zip, map, string_types\n\nfrom pandas.core.config import get_option\n\nclass PandasError(Exception):\n pass\n\n\nclass SettingWithCopyError(ValueError):\n pass\n\n\nclass SettingWithCopyWarning(Warning):\n pass\n\n\nclass AmbiguousIndexError(PandasError, KeyError):\n pass\n\n\nclass AbstractMethodError(NotImplementedError):\n \"\"\"Raise this error instead of NotImplementedError for abstract methods\n while keeping compatibility with Python 2 and Python 3.\n \"\"\"\n def __init__(self, class_instance):\n self.class_instance = class_instance\n\n def __str__(self):\n return \"This method must be defined on the concrete class of \" \\\n + self.class_instance.__class__.__name__\n\n_POSSIBLY_CAST_DTYPES = set([np.dtype(t).name\n for t in ['O', 'int8',\n 'uint8', 'int16', 'uint16', 'int32',\n 'uint32', 'int64', 'uint64']])\n\n_NS_DTYPE = np.dtype('M8[ns]')\n_TD_DTYPE = np.dtype('m8[ns]')\n_INT64_DTYPE = np.dtype(np.int64)\n_DATELIKE_DTYPES = set([np.dtype(t) for t in ['M8[ns]', '<M8[ns]', '>M8[ns]',\n 'm8[ns]', '<m8[ns]', '>m8[ns]']])\n_int8_max = np.iinfo(np.int8).max\n_int16_max = np.iinfo(np.int16).max\n_int32_max = np.iinfo(np.int32).max\n\n# define abstract base classes to enable isinstance type checking on our\n# objects\ndef create_pandas_abc_type(name, attr, comp):\n @classmethod\n def _check(cls, inst):\n return getattr(inst, attr, '_typ') in comp\n dct = dict(__instancecheck__=_check,\n __subclasscheck__=_check)\n meta = type(\"ABCBase\", (type,), dct)\n return meta(name, tuple(), dct)\n\n\nABCIndex = create_pandas_abc_type(\"ABCIndex\", \"_typ\", (\"index\",))\nABCInt64Index = create_pandas_abc_type(\"ABCInt64Index\", \"_typ\", (\"int64index\",))\nABCFloat64Index = create_pandas_abc_type(\"ABCFloat64Index\", \"_typ\", (\"float64index\",))\nABCMultiIndex = create_pandas_abc_type(\"ABCMultiIndex\", \"_typ\", (\"multiindex\",))\nABCDatetimeIndex = create_pandas_abc_type(\"ABCDatetimeIndex\", \"_typ\", (\"datetimeindex\",))\nABCTimedeltaIndex = create_pandas_abc_type(\"ABCTimedeltaIndex\", \"_typ\", (\"timedeltaindex\",))\nABCPeriodIndex = create_pandas_abc_type(\"ABCPeriodIndex\", \"_typ\", (\"periodindex\",))\nABCCategoricalIndex = create_pandas_abc_type(\"ABCCategoricalIndex\", \"_typ\", (\"categoricalindex\",))\nABCIndexClass = create_pandas_abc_type(\"ABCIndexClass\", \"_typ\", (\"index\",\n \"int64index\",\n \"float64index\",\n \"multiindex\",\n \"datetimeindex\",\n \"timedeltaindex\",\n \"periodindex\",\n \"categoricalindex\"))\n\nABCSeries = create_pandas_abc_type(\"ABCSeries\", \"_typ\", (\"series\",))\nABCDataFrame = create_pandas_abc_type(\"ABCDataFrame\", \"_typ\", (\"dataframe\",))\nABCPanel = create_pandas_abc_type(\"ABCPanel\", \"_typ\", (\"panel\",))\nABCSparseSeries = create_pandas_abc_type(\"ABCSparseSeries\", \"_subtyp\",\n ('sparse_series',\n 'sparse_time_series'))\nABCSparseArray = create_pandas_abc_type(\"ABCSparseArray\", \"_subtyp\",\n ('sparse_array', 'sparse_series'))\nABCCategorical = create_pandas_abc_type(\"ABCCategorical\",\"_typ\",(\"categorical\"))\nABCPeriod = 
create_pandas_abc_type(\"ABCPeriod\", \"_typ\", (\"period\",))\n\nclass _ABCGeneric(type):\n\n def __instancecheck__(cls, inst):\n return hasattr(inst, \"_data\")\n\n\nABCGeneric = _ABCGeneric(\"ABCGeneric\", tuple(), {})\n\n\ndef bind_method(cls, name, func):\n \"\"\"Bind a method to class, python 2 and python 3 compatible.\n\n Parameters\n ----------\n\n cls : type\n class to receive bound method\n name : basestring\n name of method on class instance\n func : function\n function to be bound as method\n\n\n Returns\n -------\n None\n \"\"\"\n # only python 2 has bound/unbound method issue\n if not compat.PY3:\n setattr(cls, name, types.MethodType(func, None, cls))\n else:\n setattr(cls, name, func)\n\nclass CategoricalDtypeType(type):\n \"\"\"\n the type of CategoricalDtype, this metaclass determines subclass ability\n \"\"\"\n def __init__(cls, name, bases, attrs):\n pass\n\nclass CategoricalDtype(object):\n __meta__ = CategoricalDtypeType\n \"\"\"\n A np.dtype duck-typed class, suitable for holding a custom categorical dtype.\n\n THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.object\n \"\"\"\n name = 'category'\n names = None\n type = CategoricalDtypeType\n subdtype = None\n kind = 'O'\n str = '|O08'\n num = 100\n shape = tuple()\n itemsize = 8\n base = np.dtype('O')\n isbuiltin = 0\n isnative = 0\n\n def __unicode__(self):\n return self.name\n\n def __str__(self):\n \"\"\"\n Return a string representation for a particular Object\n\n Invoked by str(df) in both py2/py3.\n Yields Bytestring in Py2, Unicode String in py3.\n \"\"\"\n\n if compat.PY3:\n return self.__unicode__()\n return self.__bytes__()\n\n def __bytes__(self):\n \"\"\"\n Return a string representation for a particular object.\n\n Invoked by bytes(obj) in py3 only.\n Yields a bytestring in both py2/py3.\n \"\"\"\n from pandas.core.config import get_option\n\n encoding = get_option(\"display.encoding\")\n return self.__unicode__().encode(encoding, 'replace')\n\n def __repr__(self):\n \"\"\"\n Return a string representation for a particular object.\n\n Yields Bytestring in Py2, Unicode String in py3.\n \"\"\"\n return str(self)\n\n def __hash__(self):\n # make myself hashable\n return hash(str(self))\n\n def __eq__(self, other):\n if isinstance(other, compat.string_types):\n return other == self.name\n\n return isinstance(other, CategoricalDtype)\n\ndef isnull(obj):\n \"\"\"Detect missing values (NaN in numeric arrays, None/NaN in object arrays)\n\n Parameters\n ----------\n arr : ndarray or object value\n Object to check for null-ness\n\n Returns\n -------\n isnulled : array-like of bool or bool\n Array or bool indicating whether an object is null or if an array is\n given which of the element is null.\n\n See also\n --------\n pandas.notnull: boolean inverse of pandas.isnull\n \"\"\"\n return _isnull(obj)\n\n\ndef _isnull_new(obj):\n if lib.isscalar(obj):\n return lib.checknull(obj)\n # hack (for now) because MI registers as ndarray\n elif isinstance(obj, pd.MultiIndex):\n raise NotImplementedError(\"isnull is not defined for MultiIndex\")\n elif isinstance(obj, (ABCSeries, np.ndarray, pd.Index)):\n return _isnull_ndarraylike(obj)\n elif isinstance(obj, ABCGeneric):\n return obj._constructor(obj._data.isnull(func=isnull))\n elif isinstance(obj, list) or hasattr(obj, '__array__'):\n return _isnull_ndarraylike(np.asarray(obj))\n else:\n return obj is None\n\n\ndef _isnull_old(obj):\n \"\"\"Detect missing values. 
Treat None, NaN, INF, -INF as null.\n\n Parameters\n ----------\n arr: ndarray or object value\n\n Returns\n -------\n boolean ndarray or boolean\n \"\"\"\n if lib.isscalar(obj):\n return lib.checknull_old(obj)\n # hack (for now) because MI registers as ndarray\n elif isinstance(obj, pd.MultiIndex):\n raise NotImplementedError(\"isnull is not defined for MultiIndex\")\n elif isinstance(obj, (ABCSeries, np.ndarray, pd.Index)):\n return _isnull_ndarraylike_old(obj)\n elif isinstance(obj, ABCGeneric):\n return obj._constructor(obj._data.isnull(func=_isnull_old))\n elif isinstance(obj, list) or hasattr(obj, '__array__'):\n return _isnull_ndarraylike_old(np.asarray(obj))\n else:\n return obj is None\n\n_isnull = _isnull_new\n\n\ndef _use_inf_as_null(key):\n \"\"\"Option change callback for null/inf behaviour\n Choose which replacement for numpy.isnan / -numpy.isfinite is used.\n\n Parameters\n ----------\n flag: bool\n True means treat None, NaN, INF, -INF as null (old way),\n False means None and NaN are null, but INF, -INF are not null\n (new way).\n\n Notes\n -----\n This approach to setting global module values is discussed and\n approved here:\n\n * http://stackoverflow.com/questions/4859217/\n programmatically-creating-variables-in-python/4859312#4859312\n \"\"\"\n flag = get_option(key)\n if flag:\n globals()['_isnull'] = _isnull_old\n else:\n globals()['_isnull'] = _isnull_new\n\n\ndef _isnull_ndarraylike(obj):\n\n values = getattr(obj, 'values', obj)\n dtype = values.dtype\n\n if dtype.kind in ('O', 'S', 'U'):\n if is_categorical_dtype(values):\n from pandas import Categorical\n if not isinstance(values, Categorical):\n values = values.values\n result = values.isnull()\n else:\n\n # Working around NumPy ticket 1542\n shape = values.shape\n\n if dtype.kind in ('S', 'U'):\n result = np.zeros(values.shape, dtype=bool)\n else:\n result = np.empty(shape, dtype=bool)\n vec = lib.isnullobj(values.ravel())\n result[...] 
= vec.reshape(shape)\n\n elif is_datetimelike(obj):\n # this is the NaT pattern\n result = values.view('i8') == tslib.iNaT\n else:\n result = np.isnan(values)\n\n # box\n if isinstance(obj, ABCSeries):\n from pandas import Series\n result = Series(result, index=obj.index, name=obj.name, copy=False)\n\n return result\n\ndef _isnull_ndarraylike_old(obj):\n values = getattr(obj, 'values', obj)\n dtype = values.dtype\n\n if dtype.kind in ('O', 'S', 'U'):\n # Working around NumPy ticket 1542\n shape = values.shape\n\n if values.dtype.kind in ('S', 'U'):\n result = np.zeros(values.shape, dtype=bool)\n else:\n result = np.empty(shape, dtype=bool)\n vec = lib.isnullobj_old(values.ravel())\n result[:] = vec.reshape(shape)\n\n elif dtype in _DATELIKE_DTYPES:\n # this is the NaT pattern\n result = values.view('i8') == tslib.iNaT\n else:\n result = ~np.isfinite(values)\n\n # box\n if isinstance(obj, ABCSeries):\n from pandas import Series\n result = Series(result, index=obj.index, name=obj.name, copy=False)\n\n return result\n\n\ndef notnull(obj):\n \"\"\"Replacement for numpy.isfinite / -numpy.isnan which is suitable for use\n on object arrays.\n\n Parameters\n ----------\n arr : ndarray or object value\n Object to check for *not*-null-ness\n\n Returns\n -------\n isnulled : array-like of bool or bool\n Array or bool indicating whether an object is *not* null or if an array\n is given which of the element is *not* null.\n\n See also\n --------\n pandas.isnull : boolean inverse of pandas.notnull\n \"\"\"\n res = isnull(obj)\n if np.isscalar(res):\n return not res\n return ~res\n\ndef is_null_datelike_scalar(other):\n \"\"\" test whether the object is a null datelike, e.g. Nat\n but guard against passing a non-scalar \"\"\"\n if other is pd.NaT or other is None:\n return True\n elif np.isscalar(other):\n\n # a timedelta\n if hasattr(other,'dtype'):\n return other.view('i8') == tslib.iNaT\n elif is_integer(other) and other == tslib.iNaT:\n return True\n return isnull(other)\n return False\n\ndef array_equivalent(left, right, strict_nan=False):\n \"\"\"\n True if two arrays, left and right, have equal non-NaN elements, and NaNs in\n corresponding locations. False otherwise. It is assumed that left and right\n are NumPy arrays of the same dtype. The behavior of this function\n (particularly with respect to NaNs) is not defined if the dtypes are\n different.\n\n Parameters\n ----------\n left, right : ndarrays\n strict_nan : bool, default False\n If True, consider NaN and None to be different.\n\n Returns\n -------\n b : bool\n Returns True if the arrays are equivalent.\n\n Examples\n --------\n >>> array_equivalent(\n ... np.array([1, 2, np.nan]),\n ... np.array([1, 2, np.nan]))\n True\n >>> array_equivalent(\n ... np.array([1, np.nan, 2]),\n ... 
np.array([1, 2, np.nan]))\n False\n \"\"\"\n\n left, right = np.asarray(left), np.asarray(right)\n if left.shape != right.shape: return False\n\n # Object arrays can contain None, NaN and NaT.\n if issubclass(left.dtype.type, np.object_) or issubclass(right.dtype.type, np.object_):\n\n if not strict_nan:\n # pd.isnull considers NaN and None to be equivalent.\n return lib.array_equivalent_object(_ensure_object(left.ravel()),\n _ensure_object(right.ravel()))\n\n for left_value, right_value in zip(left, right):\n if left_value is tslib.NaT and right_value is not tslib.NaT:\n return False\n\n elif isinstance(left_value, float) and np.isnan(left_value):\n if not isinstance(right_value, float) or not np.isnan(right_value):\n return False\n else:\n if left_value != right_value:\n return False\n return True\n\n # NaNs can occur in float and complex arrays.\n if issubclass(left.dtype.type, (np.floating, np.complexfloating)):\n return ((left == right) | (np.isnan(left) & np.isnan(right))).all()\n\n # NaNs cannot occur otherwise.\n return np.array_equal(left, right)\n\ndef _iterable_not_string(x):\n return (isinstance(x, collections.Iterable) and\n not isinstance(x, compat.string_types))\n\n\ndef flatten(l):\n \"\"\"Flatten an arbitrarily nested sequence.\n\n Parameters\n ----------\n l : sequence\n The non string sequence to flatten\n\n Notes\n -----\n This doesn't consider strings sequences.\n\n Returns\n -------\n flattened : generator\n \"\"\"\n for el in l:\n if _iterable_not_string(el):\n for s in flatten(el):\n yield s\n else:\n yield el\n\n\ndef mask_missing(arr, values_to_mask):\n \"\"\"\n Return a masking array of same size/shape as arr\n with entries equaling any member of values_to_mask set to True\n \"\"\"\n if not isinstance(values_to_mask, (list, np.ndarray)):\n values_to_mask = [values_to_mask]\n\n try:\n values_to_mask = np.array(values_to_mask, dtype=arr.dtype)\n except Exception:\n values_to_mask = np.array(values_to_mask, dtype=object)\n\n na_mask = isnull(values_to_mask)\n nonna = values_to_mask[~na_mask]\n\n mask = None\n for x in nonna:\n if mask is None:\n mask = arr == x\n\n # if x is a string and arr is not, then we get False and we must\n # expand the mask to size arr.shape\n if np.isscalar(mask):\n mask = np.zeros(arr.shape, dtype=bool)\n else:\n mask |= arr == x\n\n if na_mask.any():\n if mask is None:\n mask = isnull(arr)\n else:\n mask |= isnull(arr)\n\n return mask\n\n\ndef _pickle_array(arr):\n arr = arr.view(np.ndarray)\n\n buf = BytesIO()\n write_array(buf, arr)\n\n return buf.getvalue()\n\n\ndef _unpickle_array(bytes):\n arr = read_array(BytesIO(bytes))\n\n # All datetimes should be stored as M8[ns]. When unpickling with\n # numpy1.6, it will read these as M8[us]. 
So this ensures all\n # datetime64 types are read as MS[ns]\n if is_datetime64_dtype(arr):\n arr = arr.view(_NS_DTYPE)\n\n return arr\n\n\ndef _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):\n def wrapper(arr, indexer, out, fill_value=np.nan):\n if arr_dtype is not None:\n arr = arr.view(arr_dtype)\n if out_dtype is not None:\n out = out.view(out_dtype)\n if fill_wrap is not None:\n fill_value = fill_wrap(fill_value)\n f(arr, indexer, out, fill_value=fill_value)\n return wrapper\n\n\ndef _convert_wrapper(f, conv_dtype):\n def wrapper(arr, indexer, out, fill_value=np.nan):\n arr = arr.astype(conv_dtype)\n f(arr, indexer, out, fill_value=fill_value)\n return wrapper\n\n\ndef _take_2d_multi_generic(arr, indexer, out, fill_value, mask_info):\n # this is not ideal, performance-wise, but it's better than raising\n # an exception (best to optimize in Cython to avoid getting here)\n row_idx, col_idx = indexer\n if mask_info is not None:\n (row_mask, col_mask), (row_needs, col_needs) = mask_info\n else:\n row_mask = row_idx == -1\n col_mask = col_idx == -1\n row_needs = row_mask.any()\n col_needs = col_mask.any()\n if fill_value is not None:\n if row_needs:\n out[row_mask, :] = fill_value\n if col_needs:\n out[:, col_mask] = fill_value\n for i in range(len(row_idx)):\n u_ = row_idx[i]\n for j in range(len(col_idx)):\n v = col_idx[j]\n out[i, j] = arr[u_, v]\n\n\ndef _take_nd_generic(arr, indexer, out, axis, fill_value, mask_info):\n if mask_info is not None:\n mask, needs_masking = mask_info\n else:\n mask = indexer == -1\n needs_masking = mask.any()\n if arr.dtype != out.dtype:\n arr = arr.astype(out.dtype)\n if arr.shape[axis] > 0:\n arr.take(_ensure_platform_int(indexer), axis=axis, out=out)\n if needs_masking:\n outindexer = [slice(None)] * arr.ndim\n outindexer[axis] = mask\n out[tuple(outindexer)] = fill_value\n\n\n_take_1d_dict = {\n ('int8', 'int8'): algos.take_1d_int8_int8,\n ('int8', 'int32'): algos.take_1d_int8_int32,\n ('int8', 'int64'): algos.take_1d_int8_int64,\n ('int8', 'float64'): algos.take_1d_int8_float64,\n ('int16', 'int16'): algos.take_1d_int16_int16,\n ('int16', 'int32'): algos.take_1d_int16_int32,\n ('int16', 'int64'): algos.take_1d_int16_int64,\n ('int16', 'float64'): algos.take_1d_int16_float64,\n ('int32', 'int32'): algos.take_1d_int32_int32,\n ('int32', 'int64'): algos.take_1d_int32_int64,\n ('int32', 'float64'): algos.take_1d_int32_float64,\n ('int64', 'int64'): algos.take_1d_int64_int64,\n ('int64', 'float64'): algos.take_1d_int64_float64,\n ('float32', 'float32'): algos.take_1d_float32_float32,\n ('float32', 'float64'): algos.take_1d_float32_float64,\n ('float64', 'float64'): algos.take_1d_float64_float64,\n ('object', 'object'): algos.take_1d_object_object,\n ('bool', 'bool'):\n _view_wrapper(algos.take_1d_bool_bool, np.uint8, np.uint8),\n ('bool', 'object'):\n _view_wrapper(algos.take_1d_bool_object, np.uint8, None),\n ('datetime64[ns]', 'datetime64[ns]'):\n _view_wrapper(algos.take_1d_int64_int64, np.int64, np.int64, np.int64)\n}\n\n\n_take_2d_axis0_dict = {\n ('int8', 'int8'): algos.take_2d_axis0_int8_int8,\n ('int8', 'int32'): algos.take_2d_axis0_int8_int32,\n ('int8', 'int64'): algos.take_2d_axis0_int8_int64,\n ('int8', 'float64'): algos.take_2d_axis0_int8_float64,\n ('int16', 'int16'): algos.take_2d_axis0_int16_int16,\n ('int16', 'int32'): algos.take_2d_axis0_int16_int32,\n ('int16', 'int64'): algos.take_2d_axis0_int16_int64,\n ('int16', 'float64'): algos.take_2d_axis0_int16_float64,\n ('int32', 'int32'): algos.take_2d_axis0_int32_int32,\n 
('int32', 'int64'): algos.take_2d_axis0_int32_int64,\n ('int32', 'float64'): algos.take_2d_axis0_int32_float64,\n ('int64', 'int64'): algos.take_2d_axis0_int64_int64,\n ('int64', 'float64'): algos.take_2d_axis0_int64_float64,\n ('float32', 'float32'): algos.take_2d_axis0_float32_float32,\n ('float32', 'float64'): algos.take_2d_axis0_float32_float64,\n ('float64', 'float64'): algos.take_2d_axis0_float64_float64,\n ('object', 'object'): algos.take_2d_axis0_object_object,\n ('bool', 'bool'):\n _view_wrapper(algos.take_2d_axis0_bool_bool, np.uint8, np.uint8),\n ('bool', 'object'):\n _view_wrapper(algos.take_2d_axis0_bool_object, np.uint8, None),\n ('datetime64[ns]', 'datetime64[ns]'):\n _view_wrapper(algos.take_2d_axis0_int64_int64, np.int64, np.int64,\n fill_wrap=np.int64)\n}\n\n\n_take_2d_axis1_dict = {\n ('int8', 'int8'): algos.take_2d_axis1_int8_int8,\n ('int8', 'int32'): algos.take_2d_axis1_int8_int32,\n ('int8', 'int64'): algos.take_2d_axis1_int8_int64,\n ('int8', 'float64'): algos.take_2d_axis1_int8_float64,\n ('int16', 'int16'): algos.take_2d_axis1_int16_int16,\n ('int16', 'int32'): algos.take_2d_axis1_int16_int32,\n ('int16', 'int64'): algos.take_2d_axis1_int16_int64,\n ('int16', 'float64'): algos.take_2d_axis1_int16_float64,\n ('int32', 'int32'): algos.take_2d_axis1_int32_int32,\n ('int32', 'int64'): algos.take_2d_axis1_int32_int64,\n ('int32', 'float64'): algos.take_2d_axis1_int32_float64,\n ('int64', 'int64'): algos.take_2d_axis1_int64_int64,\n ('int64', 'float64'): algos.take_2d_axis1_int64_float64,\n ('float32', 'float32'): algos.take_2d_axis1_float32_float32,\n ('float32', 'float64'): algos.take_2d_axis1_float32_float64,\n ('float64', 'float64'): algos.take_2d_axis1_float64_float64,\n ('object', 'object'): algos.take_2d_axis1_object_object,\n ('bool', 'bool'):\n _view_wrapper(algos.take_2d_axis1_bool_bool, np.uint8, np.uint8),\n ('bool', 'object'):\n _view_wrapper(algos.take_2d_axis1_bool_object, np.uint8, None),\n ('datetime64[ns]', 'datetime64[ns]'):\n _view_wrapper(algos.take_2d_axis1_int64_int64, np.int64, np.int64,\n fill_wrap=np.int64)\n}\n\n\n_take_2d_multi_dict = {\n ('int8', 'int8'): algos.take_2d_multi_int8_int8,\n ('int8', 'int32'): algos.take_2d_multi_int8_int32,\n ('int8', 'int64'): algos.take_2d_multi_int8_int64,\n ('int8', 'float64'): algos.take_2d_multi_int8_float64,\n ('int16', 'int16'): algos.take_2d_multi_int16_int16,\n ('int16', 'int32'): algos.take_2d_multi_int16_int32,\n ('int16', 'int64'): algos.take_2d_multi_int16_int64,\n ('int16', 'float64'): algos.take_2d_multi_int16_float64,\n ('int32', 'int32'): algos.take_2d_multi_int32_int32,\n ('int32', 'int64'): algos.take_2d_multi_int32_int64,\n ('int32', 'float64'): algos.take_2d_multi_int32_float64,\n ('int64', 'int64'): algos.take_2d_multi_int64_int64,\n ('int64', 'float64'): algos.take_2d_multi_int64_float64,\n ('float32', 'float32'): algos.take_2d_multi_float32_float32,\n ('float32', 'float64'): algos.take_2d_multi_float32_float64,\n ('float64', 'float64'): algos.take_2d_multi_float64_float64,\n ('object', 'object'): algos.take_2d_multi_object_object,\n ('bool', 'bool'):\n _view_wrapper(algos.take_2d_multi_bool_bool, np.uint8, np.uint8),\n ('bool', 'object'):\n _view_wrapper(algos.take_2d_multi_bool_object, np.uint8, None),\n ('datetime64[ns]', 'datetime64[ns]'):\n _view_wrapper(algos.take_2d_multi_int64_int64, np.int64, np.int64,\n fill_wrap=np.int64)\n}\n\n\ndef _get_take_nd_function(ndim, arr_dtype, out_dtype, axis=0, mask_info=None):\n if ndim <= 2:\n tup = (arr_dtype.name, out_dtype.name)\n if ndim == 
1:\n func = _take_1d_dict.get(tup, None)\n elif ndim == 2:\n if axis == 0:\n func = _take_2d_axis0_dict.get(tup, None)\n else:\n func = _take_2d_axis1_dict.get(tup, None)\n if func is not None:\n return func\n\n tup = (out_dtype.name, out_dtype.name)\n if ndim == 1:\n func = _take_1d_dict.get(tup, None)\n elif ndim == 2:\n if axis == 0:\n func = _take_2d_axis0_dict.get(tup, None)\n else:\n func = _take_2d_axis1_dict.get(tup, None)\n if func is not None:\n func = _convert_wrapper(func, out_dtype)\n return func\n\n def func(arr, indexer, out, fill_value=np.nan):\n indexer = _ensure_int64(indexer)\n _take_nd_generic(arr, indexer, out, axis=axis,\n fill_value=fill_value, mask_info=mask_info)\n return func\n\n\ndef take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan,\n mask_info=None, allow_fill=True):\n \"\"\"\n Specialized Cython take which sets NaN values in one pass\n\n Parameters\n ----------\n arr : ndarray\n Input array\n indexer : ndarray\n 1-D array of indices to take, subarrays corresponding to -1 value\n indicies are filed with fill_value\n axis : int, default 0\n Axis to take from\n out : ndarray or None, default None\n Optional output array, must be appropriate type to hold input and\n fill_value together, if indexer has any -1 value entries; call\n common._maybe_promote to determine this type for any fill_value\n fill_value : any, default np.nan\n Fill value to replace -1 values with\n mask_info : tuple of (ndarray, boolean)\n If provided, value should correspond to:\n (indexer != -1, (indexer != -1).any())\n If not provided, it will be computed internally if necessary\n allow_fill : boolean, default True\n If False, indexer is assumed to contain no -1 values so no filling\n will be done. This short-circuits computation of a mask. Result is\n undefined if allow_fill == False and -1 is present in indexer.\n \"\"\"\n if indexer is None:\n indexer = np.arange(arr.shape[axis], dtype=np.int64)\n dtype, fill_value = arr.dtype, arr.dtype.type()\n else:\n indexer = _ensure_int64(indexer)\n if not allow_fill:\n dtype, fill_value = arr.dtype, arr.dtype.type()\n mask_info = None, False\n else:\n # check for promotion based on types only (do this first because\n # it's faster than computing a mask)\n dtype, fill_value = _maybe_promote(arr.dtype, fill_value)\n if dtype != arr.dtype and (out is None or out.dtype != dtype):\n # check if promotion is actually required based on indexer\n if mask_info is not None:\n mask, needs_masking = mask_info\n else:\n mask = indexer == -1\n needs_masking = mask.any()\n mask_info = mask, needs_masking\n if needs_masking:\n if out is not None and out.dtype != dtype:\n raise TypeError('Incompatible type for fill_value')\n else:\n # if not, then depromote, set fill_value to dummy\n # (it won't be used but we don't want the cython code\n # to crash when trying to cast it to dtype)\n dtype, fill_value = arr.dtype, arr.dtype.type()\n\n flip_order = False\n if arr.ndim == 2:\n if arr.flags.f_contiguous:\n flip_order = True\n\n if flip_order:\n arr = arr.T\n axis = arr.ndim - axis - 1\n if out is not None:\n out = out.T\n\n # at this point, it's guaranteed that dtype can hold both the arr values\n # and the fill_value\n if out is None:\n out_shape = list(arr.shape)\n out_shape[axis] = len(indexer)\n out_shape = tuple(out_shape)\n if arr.flags.f_contiguous and axis == arr.ndim - 1:\n # minor tweak that can make an order-of-magnitude difference\n # for dataframes initialized directly from 2-d ndarrays\n # (s.t. 
df.values is c-contiguous and df._data.blocks[0] is its\n # f-contiguous transpose)\n out = np.empty(out_shape, dtype=dtype, order='F')\n else:\n out = np.empty(out_shape, dtype=dtype)\n\n func = _get_take_nd_function(arr.ndim, arr.dtype, out.dtype,\n axis=axis, mask_info=mask_info)\n\n indexer = _ensure_int64(indexer)\n func(arr, indexer, out, fill_value)\n\n if flip_order:\n out = out.T\n return out\n\n\ntake_1d = take_nd\n\n\ndef take_2d_multi(arr, indexer, out=None, fill_value=np.nan,\n mask_info=None, allow_fill=True):\n \"\"\"\n Specialized Cython take which sets NaN values in one pass\n \"\"\"\n if indexer is None or (indexer[0] is None and indexer[1] is None):\n row_idx = np.arange(arr.shape[0], dtype=np.int64)\n col_idx = np.arange(arr.shape[1], dtype=np.int64)\n indexer = row_idx, col_idx\n dtype, fill_value = arr.dtype, arr.dtype.type()\n else:\n row_idx, col_idx = indexer\n if row_idx is None:\n row_idx = np.arange(arr.shape[0], dtype=np.int64)\n else:\n row_idx = _ensure_int64(row_idx)\n if col_idx is None:\n col_idx = np.arange(arr.shape[1], dtype=np.int64)\n else:\n col_idx = _ensure_int64(col_idx)\n indexer = row_idx, col_idx\n if not allow_fill:\n dtype, fill_value = arr.dtype, arr.dtype.type()\n mask_info = None, False\n else:\n # check for promotion based on types only (do this first because\n # it's faster than computing a mask)\n dtype, fill_value = _maybe_promote(arr.dtype, fill_value)\n if dtype != arr.dtype and (out is None or out.dtype != dtype):\n # check if promotion is actually required based on indexer\n if mask_info is not None:\n (row_mask, col_mask), (row_needs, col_needs) = mask_info\n else:\n row_mask = row_idx == -1\n col_mask = col_idx == -1\n row_needs = row_mask.any()\n col_needs = col_mask.any()\n mask_info = (row_mask, col_mask), (row_needs, col_needs)\n if row_needs or col_needs:\n if out is not None and out.dtype != dtype:\n raise TypeError('Incompatible type for fill_value')\n else:\n # if not, then depromote, set fill_value to dummy\n # (it won't be used but we don't want the cython code\n # to crash when trying to cast it to dtype)\n dtype, fill_value = arr.dtype, arr.dtype.type()\n\n # at this point, it's guaranteed that dtype can hold both the arr values\n # and the fill_value\n if out is None:\n out_shape = len(row_idx), len(col_idx)\n out = np.empty(out_shape, dtype=dtype)\n\n func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)\n if func is None and arr.dtype != out.dtype:\n func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)\n if func is not None:\n func = _convert_wrapper(func, out.dtype)\n if func is None:\n def func(arr, indexer, out, fill_value=np.nan):\n _take_2d_multi_generic(arr, indexer, out,\n fill_value=fill_value, mask_info=mask_info)\n func(arr, indexer, out=out, fill_value=fill_value)\n return out\n\n_diff_special = {\n 'float64': algos.diff_2d_float64,\n 'float32': algos.diff_2d_float32,\n 'int64': algos.diff_2d_int64,\n 'int32': algos.diff_2d_int32,\n 'int16': algos.diff_2d_int16,\n 'int8': algos.diff_2d_int8,\n}\n\ndef diff(arr, n, axis=0):\n \"\"\" difference of n between self,\n analagoust to s-s.shift(n) \"\"\"\n\n n = int(n)\n na = np.nan\n dtype = arr.dtype\n is_timedelta = False\n if needs_i8_conversion(arr):\n dtype = np.float64\n arr = arr.view('i8')\n na = tslib.iNaT\n is_timedelta = True\n elif issubclass(dtype.type, np.integer):\n dtype = np.float64\n elif issubclass(dtype.type, np.bool_):\n dtype = np.object_\n\n dtype = np.dtype(dtype)\n out_arr = np.empty(arr.shape, 
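# Illustrative sketch of the semantics take_nd provides in one Cython pass:
# indexer entries equal to -1 receive fill_value, and the dtype is promoted
# when the original dtype cannot hold the fill (e.g. int64 plus NaN).
# take_nd_reference is a hypothetical name used only for this illustration.
import numpy as np

def take_nd_reference(arr, indexer, fill_value=np.nan):
    indexer = np.asarray(indexer, dtype=np.int64)
    mask = indexer == -1
    out = arr.take(indexer, axis=0)
    if mask.any():
        if not np.issubdtype(out.dtype, np.floating):
            out = out.astype(np.float64)   # naive promotion, sketch only
        out[mask] = fill_value
    return out

# take_nd_reference(np.array([10, 20, 30]), [0, -1, 2]) -> array([10., nan, 30.])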
dtype=dtype)\n\n na_indexer = [slice(None)] * arr.ndim\n na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)\n out_arr[tuple(na_indexer)] = na\n\n if arr.ndim == 2 and arr.dtype.name in _diff_special:\n f = _diff_special[arr.dtype.name]\n f(arr, out_arr, n, axis)\n else:\n res_indexer = [slice(None)] * arr.ndim\n res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)\n res_indexer = tuple(res_indexer)\n\n lag_indexer = [slice(None)] * arr.ndim\n lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)\n lag_indexer = tuple(lag_indexer)\n\n # need to make sure that we account for na for datelike/timedelta\n # we don't actually want to subtract these i8 numbers\n if is_timedelta:\n res = arr[res_indexer]\n lag = arr[lag_indexer]\n\n mask = (arr[res_indexer] == na) | (arr[lag_indexer] == na)\n if mask.any():\n res = res.copy()\n res[mask] = 0\n lag = lag.copy()\n lag[mask] = 0\n\n result = res - lag\n result[mask] = na\n out_arr[res_indexer] = result\n else:\n out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]\n\n if is_timedelta:\n from pandas import TimedeltaIndex\n out_arr = TimedeltaIndex(out_arr.ravel().astype('int64')).asi8.reshape(out_arr.shape).astype('timedelta64[ns]')\n\n return out_arr\n\ndef _coerce_indexer_dtype(indexer, categories):\n \"\"\" coerce the indexer input array to the smallest dtype possible \"\"\"\n l = len(categories)\n if l < _int8_max:\n return _ensure_int8(indexer)\n elif l < _int16_max:\n return _ensure_int16(indexer)\n elif l < _int32_max:\n return _ensure_int32(indexer)\n return _ensure_int64(indexer)\n\ndef _coerce_to_dtypes(result, dtypes):\n \"\"\" given a dtypes and a result set, coerce the result elements to the\n dtypes\n \"\"\"\n if len(result) != len(dtypes):\n raise AssertionError(\"_coerce_to_dtypes requires equal len arrays\")\n\n from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type\n\n def conv(r, dtype):\n try:\n if isnull(r):\n pass\n elif dtype == _NS_DTYPE:\n r = lib.Timestamp(r)\n elif dtype == _TD_DTYPE:\n r = _coerce_scalar_to_timedelta_type(r)\n elif dtype == np.bool_:\n # messy. 
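# Illustrative usage of the behaviour diff() provides, shown through the
# public pandas API: an n-period difference, analogous to s - s.shift(n),
# with the first n positions left as NaN.
import pandas as pd

_s = pd.Series([1.0, 3.0, 6.0, 10.0])
assert _s.diff(1).tolist()[1:] == [2.0, 3.0, 4.0]   # element 0 is NaN
assert _s.diff(2).isnull().sum() == 2               # two leading NaNs for n=2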
non 0/1 integers do not get converted.\n if is_integer(r) and r not in [0,1]:\n return int(r)\n r = bool(r)\n elif dtype.kind == 'f':\n r = float(r)\n elif dtype.kind == 'i':\n r = int(r)\n except:\n pass\n\n return r\n\n return [conv(r, dtype) for r, dtype in zip(result, dtypes)]\n\n\ndef _infer_fill_value(val):\n \"\"\"\n infer the fill value for the nan/NaT from the provided scalar/ndarray/list-like\n if we are a NaT, return the correct dtyped element to provide proper block construction\n\n \"\"\"\n\n if not is_list_like(val):\n val = [val]\n val = np.array(val,copy=False)\n if is_datetimelike(val):\n return np.array('NaT',dtype=val.dtype)\n elif is_object_dtype(val.dtype):\n dtype = lib.infer_dtype(_ensure_object(val))\n if dtype in ['datetime','datetime64']:\n return np.array('NaT',dtype=_NS_DTYPE)\n elif dtype in ['timedelta','timedelta64']:\n return np.array('NaT',dtype=_TD_DTYPE)\n return np.nan\n\n\ndef _infer_dtype_from_scalar(val):\n \"\"\" interpret the dtype from a scalar, upcast floats and ints\n return the new value and the dtype \"\"\"\n\n dtype = np.object_\n\n # a 1-element ndarray\n if isinstance(val, np.ndarray):\n if val.ndim != 0:\n raise ValueError(\n \"invalid ndarray passed to _infer_dtype_from_scalar\")\n\n dtype = val.dtype\n val = val.item()\n\n elif isinstance(val, compat.string_types):\n\n # If we create an empty array using a string to infer\n # the dtype, NumPy will only allocate one character per entry\n # so this is kind of bad. Alternately we could use np.repeat\n # instead of np.empty (but then you still don't want things\n # coming out as np.str_!\n\n dtype = np.object_\n\n elif isinstance(val, (np.datetime64, datetime)) and getattr(val,'tz',None) is None:\n val = lib.Timestamp(val).value\n dtype = np.dtype('M8[ns]')\n\n elif isinstance(val, (np.timedelta64, timedelta)):\n val = tslib.convert_to_timedelta(val,'ns')\n dtype = np.dtype('m8[ns]')\n\n elif is_bool(val):\n dtype = np.bool_\n\n # provide implicity upcast on scalars\n elif is_integer(val):\n dtype = np.int64\n\n elif is_float(val):\n dtype = np.float64\n\n elif is_complex(val):\n dtype = np.complex_\n\n return dtype, val\n\n\ndef _maybe_promote(dtype, fill_value=np.nan):\n\n # if we passed an array here, determine the fill value by dtype\n if isinstance(fill_value, np.ndarray):\n if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):\n fill_value = tslib.iNaT\n else:\n\n # we need to change to object type as our\n # fill_value is of object type\n if fill_value.dtype == np.object_:\n dtype = np.dtype(np.object_)\n fill_value = np.nan\n\n # returns tuple of (dtype, fill_value)\n if issubclass(dtype.type, (np.datetime64, np.timedelta64)):\n # for now: refuse to upcast datetime64\n # (this is because datetime64 will not implicitly upconvert\n # to object correctly as of numpy 1.6.1)\n if isnull(fill_value):\n fill_value = tslib.iNaT\n else:\n if issubclass(dtype.type, np.datetime64):\n try:\n fill_value = lib.Timestamp(fill_value).value\n except:\n # the proper thing to do here would probably be to upcast\n # to object (but numpy 1.6.1 doesn't do this properly)\n fill_value = tslib.iNaT\n else:\n fill_value = tslib.iNaT\n elif is_float(fill_value):\n if issubclass(dtype.type, np.bool_):\n dtype = np.object_\n elif issubclass(dtype.type, np.integer):\n dtype = np.float64\n elif is_bool(fill_value):\n if not issubclass(dtype.type, np.bool_):\n dtype = np.object_\n elif is_integer(fill_value):\n if issubclass(dtype.type, np.bool_):\n dtype = np.object_\n elif issubclass(dtype.type, 
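# What the scalar upcasting in _infer_dtype_from_scalar means in practice:
# Python scalars map to the wide fixed-size dtypes (int64/float64) rather than
# platform-dependent ones. Shown through the public constructor, which applies
# equivalent inference (default options of this pandas vintage assumed).
import numpy as np
import pandas as pd

assert pd.Series([3]).dtype == np.int64
assert pd.Series([3.0]).dtype == np.float64
assert pd.Series([True]).dtype == np.bool_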
np.integer):\n # upcast to prevent overflow\n arr = np.asarray(fill_value)\n if arr != arr.astype(dtype):\n dtype = arr.dtype\n elif is_complex(fill_value):\n if issubclass(dtype.type, np.bool_):\n dtype = np.object_\n elif issubclass(dtype.type, (np.integer, np.floating)):\n dtype = np.complex128\n else:\n dtype = np.object_\n\n # in case we have a string that looked like a number\n if is_categorical_dtype(dtype):\n dtype = dtype\n elif issubclass(np.dtype(dtype).type, compat.string_types):\n dtype = np.object_\n\n return dtype, fill_value\n\n\ndef _maybe_upcast_putmask(result, mask, other):\n \"\"\"\n A safe version of putmask that potentially upcasts the result\n\n Parameters\n ----------\n result : ndarray\n The destination array. This will be mutated in-place if no upcasting is\n necessary.\n mask : boolean ndarray\n other : ndarray or scalar\n The source array or value\n\n Returns\n -------\n result : ndarray\n changed : boolean\n Set to true if the result array was upcasted\n \"\"\"\n\n if mask.any():\n # Two conversions for date-like dtypes that can't be done automatically\n # in np.place:\n # NaN -> NaT\n # integer or integer array -> date-like array\n if result.dtype in _DATELIKE_DTYPES:\n if lib.isscalar(other):\n if isnull(other):\n other = tslib.iNaT\n elif is_integer(other):\n other = np.array(other, dtype=result.dtype)\n elif is_integer_dtype(other):\n other = np.array(other, dtype=result.dtype)\n\n def changeit():\n\n # try to directly set by expanding our array to full\n # length of the boolean\n try:\n om = other[mask]\n om_at = om.astype(result.dtype)\n if (om == om_at).all():\n new_result = result.values.copy()\n new_result[mask] = om_at\n result[:] = new_result\n return result, False\n except:\n pass\n\n # we are forced to change the dtype of the result as the input\n # isn't compatible\n r, _ = _maybe_upcast(result, fill_value=other, copy=True)\n np.place(r, mask, other)\n\n return r, True\n\n # we want to decide whether place will work\n # if we have nans in the False portion of our mask then we need to\n # upcast (possibly), otherwise we DON't want to upcast (e.g. 
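# Condensed sketch of the promotion rule _maybe_promote applies most often:
# a NaN fill cannot live in an integer or boolean array, so the target dtype is
# widened. maybe_promote_sketch is a hypothetical name for this illustration
# only and covers just the NaN case.
import numpy as np

def maybe_promote_sketch(dtype, fill_value=np.nan):
    dtype = np.dtype(dtype)
    if isinstance(fill_value, float) and np.isnan(fill_value):
        if np.issubdtype(dtype, np.integer):
            return np.dtype(np.float64), fill_value   # int cannot hold NaN
        if dtype == np.bool_:
            return np.dtype(np.object_), fill_value   # bool cannot hold NaN
    return dtype, fill_value

assert maybe_promote_sketch('int64')[0] == np.float64
assert maybe_promote_sketch('float32')[0] == np.float32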
if we\n # have values, say integers, in the success portion then it's ok to not\n # upcast)\n new_dtype, _ = _maybe_promote(result.dtype, other)\n if new_dtype != result.dtype:\n\n # we have a scalar or len 0 ndarray\n # and its nan and we are changing some values\n if (np.isscalar(other) or\n (isinstance(other, np.ndarray) and other.ndim < 1)):\n if isnull(other):\n return changeit()\n\n # we have an ndarray and the masking has nans in it\n else:\n\n if isnull(other[mask]).any():\n return changeit()\n\n try:\n np.place(result, mask, other)\n except:\n return changeit()\n\n return result, False\n\n\ndef _maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):\n \"\"\" provide explict type promotion and coercion\n\n Parameters\n ----------\n values : the ndarray that we want to maybe upcast\n fill_value : what we want to fill with\n dtype : if None, then use the dtype of the values, else coerce to this type\n copy : if True always make a copy even if no upcast is required\n \"\"\"\n\n if dtype is None:\n dtype = values.dtype\n new_dtype, fill_value = _maybe_promote(dtype, fill_value)\n if new_dtype != values.dtype:\n values = values.astype(new_dtype)\n elif copy:\n values = values.copy()\n return values, fill_value\n\n\ndef _possibly_cast_item(obj, item, dtype):\n chunk = obj[item]\n\n if chunk.values.dtype != dtype:\n if dtype in (np.object_, np.bool_):\n obj[item] = chunk.astype(np.object_)\n elif not issubclass(dtype, (np.integer, np.bool_)): # pragma: no cover\n raise ValueError(\"Unexpected dtype encountered: %s\" % dtype)\n\n\ndef _possibly_downcast_to_dtype(result, dtype):\n \"\"\" try to cast to the specified dtype (e.g. convert back to bool/int\n or could be an astype of float64->float32\n \"\"\"\n\n if np.isscalar(result):\n return result\n\n trans = lambda x: x\n if isinstance(dtype, compat.string_types):\n if dtype == 'infer':\n inferred_type = lib.infer_dtype(_ensure_object(result.ravel()))\n if inferred_type == 'boolean':\n dtype = 'bool'\n elif inferred_type == 'integer':\n dtype = 'int64'\n elif inferred_type == 'datetime64':\n dtype = 'datetime64[ns]'\n elif inferred_type == 'timedelta64':\n dtype = 'timedelta64[ns]'\n\n # try to upcast here\n elif inferred_type == 'floating':\n dtype = 'int64'\n if issubclass(result.dtype.type, np.number):\n trans = lambda x: x.round()\n\n else:\n dtype = 'object'\n\n if isinstance(dtype, compat.string_types):\n dtype = np.dtype(dtype)\n\n try:\n\n # don't allow upcasts here (except if empty)\n if dtype.kind == result.dtype.kind:\n if result.dtype.itemsize <= dtype.itemsize and np.prod(result.shape):\n return result\n\n if issubclass(dtype.type, np.floating):\n return result.astype(dtype)\n elif dtype == np.bool_ or issubclass(dtype.type, np.integer):\n\n # if we don't have any elements, just astype it\n if not np.prod(result.shape):\n return trans(result).astype(dtype)\n\n # do a test on the first element, if it fails then we are done\n r = result.ravel()\n arr = np.array([r[0]])\n if not np.allclose(arr, trans(arr).astype(dtype)):\n return result\n\n # a comparable, e.g. 
a Decimal may slip in here\n elif not isinstance(r[0], (np.integer, np.floating, np.bool, int,\n float, bool)):\n return result\n\n if (issubclass(result.dtype.type, (np.object_, np.number)) and\n notnull(result).all()):\n new_result = trans(result).astype(dtype)\n try:\n if np.allclose(new_result, result):\n return new_result\n except:\n\n # comparison of an object dtype with a number type could\n # hit here\n if (new_result == result).all():\n return new_result\n\n # a datetimelike\n elif dtype.kind in ['M','m'] and result.dtype.kind in ['i']:\n try:\n result = result.astype(dtype)\n except:\n pass\n\n except:\n pass\n\n return result\n\n\ndef _maybe_convert_string_to_object(values):\n \"\"\"\n Convert string-like and string-like array to convert object dtype.\n This is to avoid numpy to handle the array as str dtype.\n \"\"\"\n if isinstance(values, string_types):\n values = np.array([values], dtype=object)\n elif (isinstance(values, np.ndarray) and\n issubclass(values.dtype.type, (np.string_, np.unicode_))):\n values = values.astype(object)\n return values\n\n\ndef _lcd_dtypes(a_dtype, b_dtype):\n \"\"\" return the lcd dtype to hold these types \"\"\"\n\n if is_datetime64_dtype(a_dtype) or is_datetime64_dtype(b_dtype):\n return _NS_DTYPE\n elif is_timedelta64_dtype(a_dtype) or is_timedelta64_dtype(b_dtype):\n return _TD_DTYPE\n elif is_complex_dtype(a_dtype):\n if is_complex_dtype(b_dtype):\n return a_dtype\n return np.float64\n elif is_integer_dtype(a_dtype):\n if is_integer_dtype(b_dtype):\n if a_dtype.itemsize == b_dtype.itemsize:\n return a_dtype\n return np.int64\n return np.float64\n elif is_float_dtype(a_dtype):\n if is_float_dtype(b_dtype):\n if a_dtype.itemsize == b_dtype.itemsize:\n return a_dtype\n else:\n return np.float64\n elif is_integer(b_dtype):\n return np.float64\n return np.object\n\n\ndef _fill_zeros(result, x, y, name, fill):\n \"\"\"\n if this is a reversed op, then flip x,y\n\n if we have an integer value (or array in y)\n and we have 0's, fill them with the fill,\n return the result\n\n mask the nan's from x\n \"\"\"\n if fill is None or is_float_dtype(result):\n return result\n\n if name.startswith(('r', '__r')):\n x,y = y,x\n\n is_typed_variable = (hasattr(y, 'dtype') or hasattr(y,'type'))\n is_scalar = lib.isscalar(y)\n\n if not is_typed_variable and not is_scalar:\n return result\n\n if is_scalar:\n y = np.array(y)\n\n if is_integer_dtype(y):\n\n if (y == 0).any():\n\n # GH 7325, mask and nans must be broadcastable (also: PR 9308)\n # Raveling and then reshaping makes np.putmask faster\n mask = ((y == 0) & ~np.isnan(result)).ravel()\n\n shape = result.shape\n result = result.astype('float64', copy=False).ravel()\n\n np.putmask(result, mask, fill)\n\n # if we have a fill of inf, then sign it correctly\n # (GH 6178 and PR 9308)\n if np.isinf(fill):\n signs = np.sign(y if name.startswith(('r', '__r')) else x)\n negative_inf_mask = (signs.ravel() < 0) & mask\n np.putmask(result, negative_inf_mask, -fill)\n\n if \"floordiv\" in name: # (PR 9308)\n nan_mask = ((y == 0) & (x == 0)).ravel()\n np.putmask(result, nan_mask, np.nan)\n\n result = result.reshape(shape)\n\n return result\n\n\ndef _interp_wrapper(f, wrap_dtype, na_override=None):\n def wrapper(arr, mask, limit=None):\n view = arr.view(wrap_dtype)\n f(view, mask, limit=limit)\n return wrapper\n\n\n_pad_1d_datetime = _interp_wrapper(algos.pad_inplace_int64, np.int64)\n_pad_2d_datetime = _interp_wrapper(algos.pad_2d_inplace_int64, np.int64)\n_backfill_1d_datetime = 
_interp_wrapper(algos.backfill_inplace_int64,\n np.int64)\n_backfill_2d_datetime = _interp_wrapper(algos.backfill_2d_inplace_int64,\n np.int64)\n\n\ndef pad_1d(values, limit=None, mask=None, dtype=None):\n\n if dtype is None:\n dtype = values.dtype\n _method = None\n if is_float_dtype(values):\n _method = getattr(algos, 'pad_inplace_%s' % dtype.name, None)\n elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):\n _method = _pad_1d_datetime\n elif is_integer_dtype(values):\n values = _ensure_float64(values)\n _method = algos.pad_inplace_float64\n elif values.dtype == np.object_:\n _method = algos.pad_inplace_object\n\n if _method is None:\n raise ValueError('Invalid dtype for pad_1d [%s]' % dtype.name)\n\n if mask is None:\n mask = isnull(values)\n mask = mask.view(np.uint8)\n _method(values, mask, limit=limit)\n return values\n\n\ndef backfill_1d(values, limit=None, mask=None, dtype=None):\n\n if dtype is None:\n dtype = values.dtype\n _method = None\n if is_float_dtype(values):\n _method = getattr(algos, 'backfill_inplace_%s' % dtype.name, None)\n elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):\n _method = _backfill_1d_datetime\n elif is_integer_dtype(values):\n values = _ensure_float64(values)\n _method = algos.backfill_inplace_float64\n elif values.dtype == np.object_:\n _method = algos.backfill_inplace_object\n\n if _method is None:\n raise ValueError('Invalid dtype for backfill_1d [%s]' % dtype.name)\n\n if mask is None:\n mask = isnull(values)\n mask = mask.view(np.uint8)\n\n _method(values, mask, limit=limit)\n return values\n\n\ndef pad_2d(values, limit=None, mask=None, dtype=None):\n\n if dtype is None:\n dtype = values.dtype\n _method = None\n if is_float_dtype(values):\n _method = getattr(algos, 'pad_2d_inplace_%s' % dtype.name, None)\n elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):\n _method = _pad_2d_datetime\n elif is_integer_dtype(values):\n values = _ensure_float64(values)\n _method = algos.pad_2d_inplace_float64\n elif values.dtype == np.object_:\n _method = algos.pad_2d_inplace_object\n\n if _method is None:\n raise ValueError('Invalid dtype for pad_2d [%s]' % dtype.name)\n\n if mask is None:\n mask = isnull(values)\n mask = mask.view(np.uint8)\n\n if np.all(values.shape):\n _method(values, mask, limit=limit)\n else:\n # for test coverage\n pass\n return values\n\n\ndef backfill_2d(values, limit=None, mask=None, dtype=None):\n\n if dtype is None:\n dtype = values.dtype\n _method = None\n if is_float_dtype(values):\n _method = getattr(algos, 'backfill_2d_inplace_%s' % dtype.name, None)\n elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):\n _method = _backfill_2d_datetime\n elif is_integer_dtype(values):\n values = _ensure_float64(values)\n _method = algos.backfill_2d_inplace_float64\n elif values.dtype == np.object_:\n _method = algos.backfill_2d_inplace_object\n\n if _method is None:\n raise ValueError('Invalid dtype for backfill_2d [%s]' % dtype.name)\n\n if mask is None:\n mask = isnull(values)\n mask = mask.view(np.uint8)\n\n if np.all(values.shape):\n _method(values, mask, limit=limit)\n else:\n # for test coverage\n pass\n return values\n\n\ndef _clean_interp_method(method, order=None):\n valid = ['linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear',\n 'quadratic', 'cubic', 'barycentric', 'polynomial',\n 'krogh', 'piecewise_polynomial',\n 'pchip', 'spline']\n if method in ('spline', 'polynomial') and order is None:\n raise ValueError(\"You must specify the order of the spline or \"\n \"polynomial.\")\n 
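# Public-API illustration of the forward/backward filling that the pad_* and
# backfill_* helpers above perform, including the limit argument that caps how
# many consecutive missing values are filled.
import numpy as np
import pandas as pd

_s = pd.Series([1.0, np.nan, np.nan, 4.0, np.nan])
assert _s.ffill().tolist() == [1.0, 1.0, 1.0, 4.0, 4.0]
assert _s.bfill(limit=1).isnull().sum() == 2   # only one NaN per gap is filled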
if method not in valid:\n raise ValueError(\"method must be one of {0}.\"\n \"Got '{1}' instead.\".format(valid, method))\n return method\n\n\ndef interpolate_1d(xvalues, yvalues, method='linear', limit=None,\n fill_value=None, bounds_error=False, order=None):\n \"\"\"\n Logic for the 1-d interpolation. The result should be 1-d, inputs\n xvalues and yvalues will each be 1-d arrays of the same length.\n\n Bounds_error is currently hardcoded to False since non-scipy ones don't\n take it as an argumnet.\n \"\"\"\n # Treat the original, non-scipy methods first.\n\n invalid = isnull(yvalues)\n valid = ~invalid\n\n valid_y = yvalues[valid]\n valid_x = xvalues[valid]\n new_x = xvalues[invalid]\n\n if method == 'time':\n if not getattr(xvalues, 'is_all_dates', None):\n # if not issubclass(xvalues.dtype.type, np.datetime64):\n raise ValueError('time-weighted interpolation only works '\n 'on Series or DataFrames with a '\n 'DatetimeIndex')\n method = 'values'\n\n def _interp_limit(invalid, limit):\n \"\"\"mask off values that won't be filled since they exceed the limit\"\"\"\n all_nans = np.where(invalid)[0]\n if all_nans.size == 0: # no nans anyway\n return []\n violate = [invalid[x:x + limit + 1] for x in all_nans]\n violate = np.array([x.all() & (x.size > limit) for x in violate])\n return all_nans[violate] + limit\n\n xvalues = getattr(xvalues, 'values', xvalues)\n yvalues = getattr(yvalues, 'values', yvalues)\n\n if limit:\n violate_limit = _interp_limit(invalid, limit)\n if valid.any():\n firstIndex = valid.argmax()\n valid = valid[firstIndex:]\n invalid = invalid[firstIndex:]\n result = yvalues.copy()\n if valid.all():\n return yvalues\n else:\n # have to call np.array(xvalues) since xvalues could be an Index\n # which cant be mutated\n result = np.empty_like(np.array(xvalues), dtype=np.float64)\n result.fill(np.nan)\n return result\n\n if method in ['linear', 'time', 'index', 'values']:\n if method in ('values', 'index'):\n inds = np.asarray(xvalues)\n # hack for DatetimeIndex, #1646\n if issubclass(inds.dtype.type, np.datetime64):\n inds = inds.view(np.int64)\n\n if inds.dtype == np.object_:\n inds = lib.maybe_convert_objects(inds)\n else:\n inds = xvalues\n\n inds = inds[firstIndex:]\n\n result[firstIndex:][invalid] = np.interp(inds[invalid], inds[valid],\n yvalues[firstIndex:][valid])\n\n if limit:\n result[violate_limit] = np.nan\n return result\n\n sp_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'barycentric', 'krogh', 'spline', 'polynomial',\n 'piecewise_polynomial', 'pchip']\n if method in sp_methods:\n new_x = new_x[firstIndex:]\n xvalues = xvalues[firstIndex:]\n\n result[firstIndex:][invalid] = _interpolate_scipy_wrapper(\n valid_x, valid_y, new_x, method=method, fill_value=fill_value,\n bounds_error=bounds_error, order=order)\n if limit:\n result[violate_limit] = np.nan\n return result\n\n\ndef _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,\n bounds_error=False, order=None):\n \"\"\"\n passed off to scipy.interpolate.interp1d. method is scipy's kind.\n Returns an array interpolated at new_x. 
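# Public-API illustration of the 'linear' path of interpolate_1d: interior NaNs
# are filled from the surrounding valid points, and limit caps how many
# consecutive NaNs are filled.
import numpy as np
import pandas as pd

_s = pd.Series([0.0, np.nan, np.nan, 3.0])
assert _s.interpolate(method='linear').tolist() == [0.0, 1.0, 2.0, 3.0]
assert _s.interpolate(method='linear', limit=1).isnull().sum() == 1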
Add any new methods to\n the list in _clean_interp_method\n \"\"\"\n try:\n from scipy import interpolate\n from pandas import DatetimeIndex\n except ImportError:\n raise ImportError('{0} interpolation requires Scipy'.format(method))\n\n new_x = np.asarray(new_x)\n\n # ignores some kwargs that could be passed along.\n alt_methods = {\n 'barycentric': interpolate.barycentric_interpolate,\n 'krogh': interpolate.krogh_interpolate,\n 'piecewise_polynomial': interpolate.piecewise_polynomial_interpolate,\n }\n\n if getattr(x, 'is_all_dates', False):\n # GH 5975, scipy.interp1d can't hande datetime64s\n x, new_x = x.values.astype('i8'), new_x.astype('i8')\n\n try:\n alt_methods['pchip'] = interpolate.pchip_interpolate\n except AttributeError:\n if method == 'pchip':\n raise ImportError(\"Your version of scipy does not support \"\n \"PCHIP interpolation.\")\n\n interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'polynomial']\n if method in interp1d_methods:\n if method == 'polynomial':\n method = order\n terp = interpolate.interp1d(x, y, kind=method, fill_value=fill_value,\n bounds_error=bounds_error)\n new_y = terp(new_x)\n elif method == 'spline':\n terp = interpolate.UnivariateSpline(x, y, k=order)\n new_y = terp(new_x)\n else:\n # GH 7295: need to be able to write for some reason\n # in some circumstances: check all three\n if not x.flags.writeable:\n x = x.copy()\n if not y.flags.writeable:\n y = y.copy()\n if not new_x.flags.writeable:\n new_x = new_x.copy()\n method = alt_methods[method]\n new_y = method(x, y, new_x)\n return new_y\n\n\ndef interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None, dtype=None):\n \"\"\" perform an actual interpolation of values, values will be make 2-d if\n needed fills inplace, returns the result\n \"\"\"\n\n transf = (lambda x: x) if axis == 0 else (lambda x: x.T)\n\n # reshape a 1 dim if needed\n ndim = values.ndim\n if values.ndim == 1:\n if axis != 0: # pragma: no cover\n raise AssertionError(\"cannot interpolate on a ndim == 1 with \"\n \"axis != 0\")\n values = values.reshape(tuple((1,) + values.shape))\n\n if fill_value is None:\n mask = None\n else: # todo create faster fill func without masking\n mask = mask_missing(transf(values), fill_value)\n\n method = _clean_fill_method(method)\n if method == 'pad':\n values = transf(pad_2d(transf(values), limit=limit, mask=mask, dtype=dtype))\n else:\n values = transf(backfill_2d(transf(values), limit=limit, mask=mask, dtype=dtype))\n\n # reshape back\n if ndim == 1:\n values = values[0]\n\n return values\n\n\ndef _consensus_name_attr(objs):\n name = objs[0].name\n for obj in objs[1:]:\n if obj.name != name:\n return None\n return name\n\n\n_fill_methods = {'pad': pad_1d, 'backfill': backfill_1d}\n\n\ndef _get_fill_func(method):\n method = _clean_fill_method(method)\n return _fill_methods[method]\n\n\n#----------------------------------------------------------------------\n# Lots of little utilities\n\ndef _validate_date_like_dtype(dtype):\n try:\n typ = np.datetime_data(dtype)[0]\n except ValueError as e:\n raise TypeError('%s' % e)\n if typ != 'generic' and typ != 'ns':\n raise ValueError('%r is too specific of a frequency, try passing %r'\n % (dtype.name, dtype.type.__name__))\n\n\ndef _invalidate_string_dtypes(dtype_set):\n \"\"\"Change string like dtypes to object for ``DataFrame.select_dtypes()``.\"\"\"\n non_string_dtypes = dtype_set - _string_dtypes\n if non_string_dtypes != dtype_set:\n raise TypeError(\"string dtypes are not allowed, use 'object' 
instead\")\n\n\ndef _get_dtype_from_object(dtype):\n \"\"\"Get a numpy dtype.type-style object.\n\n Notes\n -----\n If nothing can be found, returns ``object``.\n \"\"\"\n # type object from a dtype\n if isinstance(dtype, type) and issubclass(dtype, np.generic):\n return dtype\n elif isinstance(dtype, np.dtype): # dtype object\n try:\n _validate_date_like_dtype(dtype)\n except TypeError:\n # should still pass if we don't have a datelike\n pass\n return dtype.type\n elif isinstance(dtype, compat.string_types):\n if dtype == 'datetime' or dtype == 'timedelta':\n dtype += '64'\n elif dtype == 'category':\n return CategoricalDtypeType\n try:\n return _get_dtype_from_object(getattr(np, dtype))\n except AttributeError:\n # handles cases like _get_dtype(int)\n # i.e., python objects that are valid dtypes (unlike user-defined\n # types, in general)\n pass\n return _get_dtype_from_object(np.dtype(dtype))\n\n\ndef _get_info_slice(obj, indexer):\n \"\"\"Slice the info axis of `obj` with `indexer`.\"\"\"\n if not hasattr(obj, '_info_axis_number'):\n raise TypeError('object of type %r has no info axis' %\n type(obj).__name__)\n slices = [slice(None)] * obj.ndim\n slices[obj._info_axis_number] = indexer\n return tuple(slices)\n\n\ndef _maybe_box(indexer, values, obj, key):\n\n # if we have multiples coming back, box em\n if isinstance(values, np.ndarray):\n return obj[indexer.get_loc(key)]\n\n # return the value\n return values\n\n\ndef _maybe_box_datetimelike(value):\n # turn a datetime like into a Timestamp/timedelta as needed\n\n if isinstance(value, np.datetime64):\n value = tslib.Timestamp(value)\n elif isinstance(value, np.timedelta64):\n value = tslib.Timedelta(value)\n\n return value\n\n_values_from_object = lib.values_from_object\n\ndef _possibly_convert_objects(values, convert_dates=True,\n convert_numeric=True,\n convert_timedeltas=True):\n \"\"\" if we have an object dtype, try to coerce dates and/or numbers \"\"\"\n\n # if we have passed in a list or scalar\n if isinstance(values, (list, tuple)):\n values = np.array(values, dtype=np.object_)\n if not hasattr(values, 'dtype'):\n values = np.array([values], dtype=np.object_)\n\n # convert dates\n if convert_dates and values.dtype == np.object_:\n\n # we take an aggressive stance and convert to datetime64[ns]\n if convert_dates == 'coerce':\n new_values = _possibly_cast_to_datetime(\n values, 'M8[ns]', coerce=True)\n\n # if we are all nans then leave me alone\n if not isnull(new_values).all():\n values = new_values\n\n else:\n values = lib.maybe_convert_objects(\n values, convert_datetime=convert_dates)\n\n # convert timedeltas\n if convert_timedeltas and values.dtype == np.object_:\n\n if convert_timedeltas == 'coerce':\n from pandas.tseries.timedeltas import to_timedelta\n values = to_timedelta(values, coerce=True)\n\n # if we are all nans then leave me alone\n if not isnull(new_values).all():\n values = new_values\n\n else:\n values = lib.maybe_convert_objects(\n values, convert_timedelta=convert_timedeltas)\n\n # convert to numeric\n if values.dtype == np.object_:\n if convert_numeric:\n try:\n new_values = lib.maybe_convert_numeric(\n values, set(), coerce_numeric=True)\n\n # if we are all nans then leave me alone\n if not isnull(new_values).all():\n values = new_values\n\n except:\n pass\n else:\n\n # soft-conversion\n values = lib.maybe_convert_objects(values)\n\n return values\n\n\ndef _possibly_castable(arr):\n # return False to force a non-fastpath\n\n # check datetime64[ns]/timedelta64[ns] are valid\n # otherwise try to coerce\n 
kind = arr.dtype.kind\n if kind == 'M' or kind == 'm':\n return arr.dtype in _DATELIKE_DTYPES\n\n return arr.dtype.name not in _POSSIBLY_CAST_DTYPES\n\n\ndef _possibly_convert_platform(values):\n \"\"\" try to do platform conversion, allow ndarray or list here \"\"\"\n\n if isinstance(values, (list, tuple)):\n values = lib.list_to_object_array(values)\n if getattr(values, 'dtype', None) == np.object_:\n if hasattr(values, 'values'):\n values = values.values\n values = lib.maybe_convert_objects(values)\n\n return values\n\n\ndef _possibly_cast_to_datetime(value, dtype, coerce=False):\n \"\"\" try to cast the array/value to a datetimelike dtype, converting float\n nan to iNaT\n \"\"\"\n from pandas.tseries.timedeltas import to_timedelta\n from pandas.tseries.tools import to_datetime\n\n if dtype is not None:\n if isinstance(dtype, compat.string_types):\n dtype = np.dtype(dtype)\n\n is_datetime64 = is_datetime64_dtype(dtype)\n is_timedelta64 = is_timedelta64_dtype(dtype)\n\n if is_datetime64 or is_timedelta64:\n\n # force the dtype if needed\n if is_datetime64 and dtype != _NS_DTYPE:\n if dtype.name == 'datetime64[ns]':\n dtype = _NS_DTYPE\n else:\n raise TypeError(\n \"cannot convert datetimelike to dtype [%s]\" % dtype)\n elif is_timedelta64 and dtype != _TD_DTYPE:\n if dtype.name == 'timedelta64[ns]':\n dtype = _TD_DTYPE\n else:\n raise TypeError(\n \"cannot convert timedeltalike to dtype [%s]\" % dtype)\n\n if np.isscalar(value):\n if value == tslib.iNaT or isnull(value):\n value = tslib.iNaT\n else:\n value = np.array(value,copy=False)\n\n # have a scalar array-like (e.g. NaT)\n if value.ndim == 0:\n value = tslib.iNaT\n\n # we have an array of datetime or timedeltas & nulls\n elif np.prod(value.shape) and value.dtype != dtype:\n try:\n if is_datetime64:\n value = to_datetime(value, coerce=coerce).values\n elif is_timedelta64:\n value = to_timedelta(value, coerce=coerce).values\n except (AttributeError, ValueError):\n pass\n\n else:\n\n is_array = isinstance(value, np.ndarray)\n\n # catch a datetime/timedelta that is not of ns variety\n # and no coercion specified\n if is_array and value.dtype.kind in ['M', 'm']:\n dtype = value.dtype\n\n if dtype.kind == 'M' and dtype != _NS_DTYPE:\n value = value.astype(_NS_DTYPE)\n\n elif dtype.kind == 'm' and dtype != _TD_DTYPE:\n value = to_timedelta(value)\n\n # only do this if we have an array and the dtype of the array is not\n # setup already we are not an integer/object, so don't bother with this\n # conversion\n elif not (is_array and not (issubclass(value.dtype.type, np.integer) or\n value.dtype == np.object_)):\n value = _possibly_infer_to_datetimelike(value)\n\n return value\n\n\ndef _possibly_infer_to_datetimelike(value, convert_dates=False):\n \"\"\"\n we might have a array (or single object) that is datetime like,\n and no dtype is passed don't change the value unless we find a\n datetime/timedelta set\n\n this is pretty strict in that a datetime/timedelta is REQUIRED\n in addition to possible nulls/string likes\n\n ONLY strings are NOT datetimelike\n\n Parameters\n ----------\n value : np.array\n convert_dates : boolean, default False\n if True try really hard to convert dates (such as datetime.date), other\n leave inferred dtype 'date' alone\n\n \"\"\"\n\n v = value\n if not is_list_like(v):\n v = [v]\n v = np.array(v,copy=False)\n shape = v.shape\n if not v.ndim == 1:\n v = v.ravel()\n\n if len(v):\n\n def _try_datetime(v):\n # safe coerce to datetime64\n try:\n return tslib.array_to_datetime(v, raise_=True).reshape(shape)\n except:\n 
return v\n\n def _try_timedelta(v):\n # safe coerce to timedelta64\n\n # will try first with a string & object conversion\n from pandas.tseries.timedeltas import to_timedelta\n try:\n return to_timedelta(v).values.reshape(shape)\n except:\n return v\n\n # do a quick inference for perf\n sample = v[:min(3,len(v))]\n inferred_type = lib.infer_dtype(sample)\n\n if inferred_type in ['datetime', 'datetime64'] or (convert_dates and inferred_type in ['date']):\n value = _try_datetime(v).reshape(shape)\n elif inferred_type in ['timedelta', 'timedelta64']:\n value = _try_timedelta(v).reshape(shape)\n\n # its possible to have nulls intermixed within the datetime or timedelta\n # these will in general have an inferred_type of 'mixed', so have to try\n # both datetime and timedelta\n\n # try timedelta first to avoid spurious datetime conversions\n # e.g. '00:00:01' is a timedelta but technically is also a datetime\n elif inferred_type in ['mixed']:\n\n if lib.is_possible_datetimelike_array(_ensure_object(v)):\n value = _try_timedelta(v).reshape(shape)\n if lib.infer_dtype(value) in ['mixed']:\n value = _try_datetime(v).reshape(shape)\n\n return value\n\n\ndef is_bool_indexer(key):\n if isinstance(key, (ABCSeries, np.ndarray)):\n if key.dtype == np.object_:\n key = np.asarray(_values_from_object(key))\n\n if not lib.is_bool_array(key):\n if isnull(key).any():\n raise ValueError('cannot index with vector containing '\n 'NA / NaN values')\n return False\n return True\n elif key.dtype == np.bool_:\n return True\n elif isinstance(key, list):\n try:\n arr = np.asarray(key)\n return arr.dtype == np.bool_ and len(arr) == len(key)\n except TypeError: # pragma: no cover\n return False\n\n return False\n\n\ndef _default_index(n):\n from pandas.core.index import Int64Index\n values = np.arange(n, dtype=np.int64)\n result = Int64Index(values,name=None)\n result.is_unique = True\n return result\n\n\ndef ensure_float(arr):\n if issubclass(arr.dtype.type, (np.integer, np.bool_)):\n arr = arr.astype(float)\n return arr\n\n\ndef _mut_exclusive(**kwargs):\n item1, item2 = kwargs.items()\n label1, val1 = item1\n label2, val2 = item2\n if val1 is not None and val2 is not None:\n raise TypeError('mutually exclusive arguments: %r and %r' %\n (label1, label2))\n elif val1 is not None:\n return val1\n else:\n return val2\n\n\ndef _any_none(*args):\n for arg in args:\n if arg is None:\n return True\n return False\n\n\ndef _all_not_none(*args):\n for arg in args:\n if arg is None:\n return False\n return True\n\n\ndef _try_sort(iterable):\n listed = list(iterable)\n try:\n return sorted(listed)\n except Exception:\n return listed\n\n\ndef _count_not_none(*args):\n return sum(x is not None for x in args)\n\n#------------------------------------------------------------------------------\n# miscellaneous python tools\n\n\n\n\ndef adjoin(space, *lists):\n \"\"\"\n Glues together two sets of strings using the amount of space requested.\n The idea is to prettify.\n \"\"\"\n out_lines = []\n newLists = []\n lengths = [max(map(len, x)) + space for x in lists[:-1]]\n\n # not the last one\n lengths.append(max(map(len, lists[-1])))\n\n maxLen = max(map(len, lists))\n for i, lst in enumerate(lists):\n nl = [x.ljust(lengths[i]) for x in lst]\n nl.extend([' ' * lengths[i]] * (maxLen - len(lst)))\n newLists.append(nl)\n toJoin = zip(*newLists)\n for lines in toJoin:\n out_lines.append(_join_unicode(lines))\n return _join_unicode(out_lines, sep='\\n')\n\n\ndef _join_unicode(lines, sep=''):\n try:\n return sep.join(lines)\n except 
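# Public-API illustration of the inference _possibly_infer_to_datetimelike
# describes: datetime-like values mixed with nulls become datetime64[ns]
# (nulls turning into NaT), while plain strings are left as object dtype
# (default options of this pandas vintage assumed).
import pandas as pd

assert pd.Series([pd.Timestamp('2015-01-01'), None]).dtype == 'datetime64[ns]'
assert pd.Series(['a', 'b']).dtype == object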
UnicodeDecodeError:\n sep = compat.text_type(sep)\n return sep.join([x.decode('utf-8') if isinstance(x, str) else x\n for x in lines])\n\n\ndef iterpairs(seq):\n \"\"\"\n Parameters\n ----------\n seq: sequence\n\n Returns\n -------\n iterator returning overlapping pairs of elements\n\n Examples\n --------\n >>> list(iterpairs([1, 2, 3, 4]))\n [(1, 2), (2, 3), (3, 4)]\n \"\"\"\n # input may not be sliceable\n seq_it = iter(seq)\n seq_it_next = iter(seq)\n next(seq_it_next)\n\n return zip(seq_it, seq_it_next)\n\n\ndef split_ranges(mask):\n \"\"\" Generates tuples of ranges which cover all True value in mask\n\n >>> list(split_ranges([1,0,0,1,0]))\n [(0, 1), (3, 4)]\n \"\"\"\n ranges = [(0, len(mask))]\n\n for pos, val in enumerate(mask):\n if not val: # this pos should be ommited, split off the prefix range\n r = ranges.pop()\n if pos > r[0]: # yield non-zero range\n yield (r[0], pos)\n if pos + 1 < len(mask): # save the rest for processing\n ranges.append((pos + 1, len(mask)))\n if ranges:\n yield ranges[-1]\n\n\ndef indent(string, spaces=4):\n dent = ' ' * spaces\n return '\\n'.join([dent + x for x in string.split('\\n')])\n\n\ndef banner(message):\n \"\"\"\n Return 80-char width message declaration with = bars on top and bottom.\n \"\"\"\n bar = '=' * 80\n return '%s\\n%s\\n%s' % (bar, message, bar)\n\n\ndef _long_prod(vals):\n result = long(1)\n for x in vals:\n result *= x\n return result\n\n\nclass groupby(dict):\n\n \"\"\"\n A simple groupby different from the one in itertools.\n\n Does not require the sequence elements to be sorted by keys,\n however it is slower.\n \"\"\"\n\n def __init__(self, seq, key=lambda x: x):\n for value in seq:\n k = key(value)\n self.setdefault(k, []).append(value)\n try:\n __iter__ = dict.iteritems\n except AttributeError: # pragma: no cover\n # Python 3\n def __iter__(self):\n return iter(dict.items(self))\n\n\ndef map_indices_py(arr):\n \"\"\"\n Returns a dictionary with (element, index) pairs for each element in the\n given array/list\n \"\"\"\n return dict([(x, i) for i, x in enumerate(arr)])\n\n\ndef union(*seqs):\n result = set([])\n for seq in seqs:\n if not isinstance(seq, set):\n seq = set(seq)\n result |= seq\n return type(seqs[0])(list(result))\n\n\ndef difference(a, b):\n return type(a)(list(set(a) - set(b)))\n\n\ndef intersection(*seqs):\n result = set(seqs[0])\n for seq in seqs:\n if not isinstance(seq, set):\n seq = set(seq)\n result &= seq\n return type(seqs[0])(list(result))\n\n\ndef _asarray_tuplesafe(values, dtype=None):\n from pandas.core.index import Index\n\n if not (isinstance(values, (list, tuple))\n or hasattr(values, '__array__')):\n values = list(values)\n elif isinstance(values, Index):\n return values.values\n\n if isinstance(values, list) and dtype in [np.object_, object]:\n return lib.list_to_object_array(values)\n\n result = np.asarray(values, dtype=dtype)\n\n if issubclass(result.dtype.type, compat.string_types):\n result = np.asarray(values, dtype=object)\n\n if result.ndim == 2:\n if isinstance(values, list):\n return lib.list_to_object_array(values)\n else:\n # Making a 1D array that safely contains tuples is a bit tricky\n # in numpy, leading to the following\n try:\n result = np.empty(len(values), dtype=object)\n result[:] = values\n except ValueError:\n # we have a list-of-list\n result[:] = [tuple(x) for x in values]\n\n return result\n\n\ndef _index_labels_to_array(labels):\n if isinstance(labels, (compat.string_types, tuple)):\n labels = [labels]\n\n if not isinstance(labels, (list, np.ndarray)):\n try:\n labels 
= list(labels)\n except TypeError: # non-iterable\n labels = [labels]\n\n labels = _asarray_tuplesafe(labels)\n\n return labels\n\n\ndef _maybe_make_list(obj):\n if obj is not None and not isinstance(obj, (tuple, list)):\n return [obj]\n return obj\n\n########################\n##### TYPE TESTING #####\n########################\n\nis_bool = lib.is_bool\n\n\nis_integer = lib.is_integer\n\n\nis_float = lib.is_float\n\n\nis_complex = lib.is_complex\n\n\ndef is_iterator(obj):\n # python 3 generators have __next__ instead of next\n return hasattr(obj, 'next') or hasattr(obj, '__next__')\n\n\ndef is_number(obj):\n return isinstance(obj, (numbers.Number, np.number))\n\ndef is_period_arraylike(arr):\n \"\"\" return if we are period arraylike / PeriodIndex \"\"\"\n if isinstance(arr, pd.PeriodIndex):\n return True\n elif isinstance(arr, (np.ndarray, ABCSeries)):\n return arr.dtype == object and lib.infer_dtype(arr) == 'period'\n return getattr(arr, 'inferred_type', None) == 'period'\n\ndef is_datetime_arraylike(arr):\n \"\"\" return if we are datetime arraylike / DatetimeIndex \"\"\"\n if isinstance(arr, pd.DatetimeIndex):\n return True\n elif isinstance(arr, (np.ndarray, ABCSeries)):\n return arr.dtype == object and lib.infer_dtype(arr) == 'datetime'\n return getattr(arr, 'inferred_type', None) == 'datetime'\n\ndef is_datetimelike(arr):\n return arr.dtype in _DATELIKE_DTYPES or isinstance(arr, ABCPeriodIndex)\n\ndef _coerce_to_dtype(dtype):\n \"\"\" coerce a string / np.dtype to a dtype \"\"\"\n if is_categorical_dtype(dtype):\n dtype = CategoricalDtype()\n else:\n dtype = np.dtype(dtype)\n return dtype\n\ndef _get_dtype(arr_or_dtype):\n if isinstance(arr_or_dtype, np.dtype):\n return arr_or_dtype\n elif isinstance(arr_or_dtype, type):\n return np.dtype(arr_or_dtype)\n elif isinstance(arr_or_dtype, CategoricalDtype):\n return CategoricalDtype()\n return arr_or_dtype.dtype\n\n\ndef _get_dtype_type(arr_or_dtype):\n if isinstance(arr_or_dtype, np.dtype):\n return arr_or_dtype.type\n elif isinstance(arr_or_dtype, type):\n return np.dtype(arr_or_dtype).type\n elif isinstance(arr_or_dtype, CategoricalDtype):\n return CategoricalDtypeType\n elif isinstance(arr_or_dtype, compat.string_types):\n if is_categorical_dtype(arr_or_dtype):\n return CategoricalDtypeType\n return _get_dtype_type(np.dtype(arr_or_dtype))\n try:\n return arr_or_dtype.dtype.type\n except AttributeError:\n raise ValueError('%r is not a dtype' % arr_or_dtype)\n\ndef is_dtype_equal(source, target):\n \"\"\" return a boolean if the dtypes are equal \"\"\"\n source = _get_dtype_type(source)\n target = _get_dtype_type(target)\n\n try:\n return source == target\n except TypeError:\n\n # invalid comparison\n # object == category will hit this\n return False\n\ndef is_any_int_dtype(arr_or_dtype):\n tipo = _get_dtype_type(arr_or_dtype)\n return issubclass(tipo, np.integer)\n\n\ndef is_integer_dtype(arr_or_dtype):\n tipo = _get_dtype_type(arr_or_dtype)\n return (issubclass(tipo, np.integer) and\n not issubclass(tipo, (np.datetime64, np.timedelta64)))\n\n\ndef is_int_or_datetime_dtype(arr_or_dtype):\n tipo = _get_dtype_type(arr_or_dtype)\n return (issubclass(tipo, np.integer) or\n issubclass(tipo, (np.datetime64, np.timedelta64)))\n\n\ndef is_datetime64_dtype(arr_or_dtype):\n tipo = _get_dtype_type(arr_or_dtype)\n return issubclass(tipo, np.datetime64)\n\n\ndef is_datetime64_ns_dtype(arr_or_dtype):\n tipo = _get_dtype(arr_or_dtype)\n return tipo == _NS_DTYPE\n\ndef is_timedelta64_dtype(arr_or_dtype):\n tipo = _get_dtype_type(arr_or_dtype)\n return 
issubclass(tipo, np.timedelta64)\n\n\ndef is_timedelta64_ns_dtype(arr_or_dtype):\n tipo = _get_dtype_type(arr_or_dtype)\n return tipo == _TD_DTYPE\n\n\ndef is_datetime_or_timedelta_dtype(arr_or_dtype):\n tipo = _get_dtype_type(arr_or_dtype)\n return issubclass(tipo, (np.datetime64, np.timedelta64))\n\n\nneeds_i8_conversion = is_datetime_or_timedelta_dtype\n\ndef i8_boxer(arr_or_dtype):\n \"\"\" return the scalar boxer for the dtype \"\"\"\n if is_datetime64_dtype(arr_or_dtype):\n return lib.Timestamp\n elif is_timedelta64_dtype(arr_or_dtype):\n return lambda x: lib.Timedelta(x,unit='ns')\n raise ValueError(\"cannot find a scalar boxer for {0}\".format(arr_or_dtype))\n\ndef is_numeric_dtype(arr_or_dtype):\n tipo = _get_dtype_type(arr_or_dtype)\n return (issubclass(tipo, (np.number, np.bool_))\n and not issubclass(tipo, (np.datetime64, np.timedelta64)))\n\n\ndef is_float_dtype(arr_or_dtype):\n tipo = _get_dtype_type(arr_or_dtype)\n return issubclass(tipo, np.floating)\n\n\ndef is_floating_dtype(arr_or_dtype):\n tipo = _get_dtype_type(arr_or_dtype)\n return isinstance(tipo, np.floating)\n\n\ndef is_bool_dtype(arr_or_dtype):\n try:\n tipo = _get_dtype_type(arr_or_dtype)\n except ValueError:\n # this isn't even a dtype\n return False\n return issubclass(tipo, np.bool_)\n\ndef is_categorical(array):\n \"\"\" return if we are a categorical possibility \"\"\"\n return isinstance(array, ABCCategorical) or isinstance(array.dtype, CategoricalDtype)\n\ndef is_categorical_dtype(arr_or_dtype):\n if hasattr(arr_or_dtype,'dtype'):\n arr_or_dtype = arr_or_dtype.dtype\n\n if isinstance(arr_or_dtype, CategoricalDtype):\n return True\n try:\n return arr_or_dtype == 'category'\n except:\n return False\n\ndef is_complex_dtype(arr_or_dtype):\n tipo = _get_dtype_type(arr_or_dtype)\n return issubclass(tipo, np.complexfloating)\n\n\ndef is_object_dtype(arr_or_dtype):\n tipo = _get_dtype_type(arr_or_dtype)\n return issubclass(tipo, np.object_)\n\n\ndef is_re(obj):\n return isinstance(obj, re._pattern_type)\n\n\ndef is_re_compilable(obj):\n try:\n re.compile(obj)\n except TypeError:\n return False\n else:\n return True\n\n\ndef is_list_like(arg):\n return (hasattr(arg, '__iter__') and\n not isinstance(arg, compat.string_and_binary_types))\n\ndef is_null_slice(obj):\n return (isinstance(obj, slice) and obj.start is None and\n obj.stop is None and obj.step is None)\n\n\ndef is_hashable(arg):\n \"\"\"Return True if hash(arg) will succeed, False otherwise.\n\n Some types will pass a test against collections.Hashable but fail when they\n are actually hashed with hash().\n\n Distinguish between these and other types by trying the call to hash() and\n seeing if they raise TypeError.\n\n Examples\n --------\n >>> a = ([],)\n >>> isinstance(a, collections.Hashable)\n True\n >>> is_hashable(a)\n False\n \"\"\"\n # unfortunately, we can't use isinstance(arg, collections.Hashable), which\n # can be faster than calling hash, because numpy scalars on Python 3 fail\n # this test\n\n # reconsider this decision once this numpy bug is fixed:\n # https://github.com/numpy/numpy/issues/5562\n\n try:\n hash(arg)\n except TypeError:\n return False\n else:\n return True\n\n\ndef is_sequence(x):\n try:\n iter(x)\n len(x) # it has a length\n return not isinstance(x, compat.string_and_binary_types)\n except (TypeError, AttributeError):\n return False\n\n\ndef _get_callable_name(obj):\n # typical case has name\n if hasattr(obj, '__name__'):\n return getattr(obj, '__name__')\n # some objects don't; could recurse\n if isinstance(obj, partial):\n 
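# Why is_integer_dtype above must exclude datetime64/timedelta64 explicitly:
# in NumPy's scalar hierarchy, timedelta64 subclasses the signed integer
# scalar type, so a bare issubclass check would misclassify it. Quick check:
import numpy as np

assert issubclass(np.timedelta64, np.integer)        # the surprising case
assert not issubclass(np.datetime64, np.integer)
assert not issubclass(np.float64, np.integer)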
return _get_callable_name(obj.func)\n # fall back to class name\n if hasattr(obj, '__call__'):\n return obj.__class__.__name__\n # everything failed (probably because the argument\n # wasn't actually callable); we return None\n # instead of the empty string in this case to allow\n # distinguishing between no name and a name of ''\n return None\n\n_string_dtypes = frozenset(map(_get_dtype_from_object, (compat.binary_type,\n compat.text_type)))\n\n\n_ensure_float64 = algos.ensure_float64\n_ensure_float32 = algos.ensure_float32\n_ensure_int64 = algos.ensure_int64\n_ensure_int32 = algos.ensure_int32\n_ensure_int16 = algos.ensure_int16\n_ensure_int8 = algos.ensure_int8\n_ensure_platform_int = algos.ensure_platform_int\n_ensure_object = algos.ensure_object\n\n\ndef _astype_nansafe(arr, dtype, copy=True):\n \"\"\" return a view if copy is False, but\n need to be very careful as the result shape could change! \"\"\"\n if not isinstance(dtype, np.dtype):\n dtype = _coerce_to_dtype(dtype)\n\n if issubclass(dtype.type, compat.text_type):\n # in Py3 that's str, in Py2 that's unicode\n return lib.astype_unicode(arr.ravel()).reshape(arr.shape)\n elif issubclass(dtype.type, compat.string_types):\n return lib.astype_str(arr.ravel()).reshape(arr.shape)\n elif is_datetime64_dtype(arr):\n if dtype == object:\n return tslib.ints_to_pydatetime(arr.view(np.int64))\n elif dtype == np.int64:\n return arr.view(dtype)\n elif dtype != _NS_DTYPE:\n raise TypeError(\"cannot astype a datetimelike from [%s] to [%s]\" %\n (arr.dtype, dtype))\n return arr.astype(_NS_DTYPE)\n elif is_timedelta64_dtype(arr):\n if dtype == np.int64:\n return arr.view(dtype)\n elif dtype == object:\n return tslib.ints_to_pytimedelta(arr.view(np.int64))\n\n # in py3, timedelta64[ns] are int64\n elif ((compat.PY3 and dtype not in [_INT64_DTYPE, _TD_DTYPE]) or\n (not compat.PY3 and dtype != _TD_DTYPE)):\n\n # allow frequency conversions\n if dtype.kind == 'm':\n mask = isnull(arr)\n result = arr.astype(dtype).astype(np.float64)\n result[mask] = np.nan\n return result\n\n raise TypeError(\"cannot astype a timedelta from [%s] to [%s]\" %\n (arr.dtype, dtype))\n\n return arr.astype(_TD_DTYPE)\n elif (np.issubdtype(arr.dtype, np.floating) and\n np.issubdtype(dtype, np.integer)):\n\n if np.isnan(arr).any():\n raise ValueError('Cannot convert NA to integer')\n elif arr.dtype == np.object_ and np.issubdtype(dtype.type, np.integer):\n # work around NumPy brokenness, #1987\n return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)\n\n if copy:\n return arr.astype(dtype)\n return arr.view(dtype)\n\n\ndef _clean_fill_method(method, allow_nearest=False):\n if method is None:\n return None\n method = method.lower()\n if method == 'ffill':\n method = 'pad'\n if method == 'bfill':\n method = 'backfill'\n\n valid_methods = ['pad', 'backfill']\n expecting = 'pad (ffill) or backfill (bfill)'\n if allow_nearest:\n valid_methods.append('nearest')\n expecting = 'pad (ffill), backfill (bfill) or nearest'\n if method not in valid_methods:\n msg = ('Invalid fill method. Expecting %s. 
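# The float -> integer rule _astype_nansafe enforces, seen through the public
# .astype API: NaN has no integer representation, so the cast is refused
# rather than silently producing garbage values.
import numpy as np
import pandas as pd

try:
    pd.Series([1.5, np.nan]).astype('int64')
except ValueError as err:   # 'Cannot convert NA to integer' in this vintage
    print('refused:', err)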
Got %s'\n % (expecting, method))\n raise ValueError(msg)\n return method\n\n\ndef _clean_reindex_fill_method(method):\n return _clean_fill_method(method, allow_nearest=True)\n\n\ndef _all_none(*args):\n for arg in args:\n if arg is not None:\n return False\n return True\n\n\nclass UTF8Recoder:\n\n \"\"\"\n Iterator that reads an encoded stream and reencodes the input to UTF-8\n \"\"\"\n\n def __init__(self, f, encoding):\n self.reader = codecs.getreader(encoding)(f)\n\n def __iter__(self):\n return self\n\n def read(self, bytes=-1):\n return self.reader.read(bytes).encode('utf-8')\n\n def readline(self):\n return self.reader.readline().encode('utf-8')\n\n def next(self):\n return next(self.reader).encode(\"utf-8\")\n\n # Python 3 iterator\n __next__ = next\n\n\ndef _get_handle(path, mode, encoding=None, compression=None):\n \"\"\"Gets file handle for given path and mode.\n NOTE: Under Python 3.2, getting a compressed file handle means reading in\n the entire file, decompressing it and decoding it to ``str`` all at once\n and then wrapping it in a StringIO.\n \"\"\"\n if compression is not None:\n if encoding is not None and not compat.PY3:\n msg = 'encoding + compression not yet supported in Python 2'\n raise ValueError(msg)\n\n if compression == 'gzip':\n import gzip\n f = gzip.GzipFile(path, 'rb')\n elif compression == 'bz2':\n import bz2\n\n f = bz2.BZ2File(path, 'rb')\n else:\n raise ValueError('Unrecognized compression type: %s' %\n compression)\n if compat.PY3_2:\n # gzip and bz2 don't work with TextIOWrapper in 3.2\n encoding = encoding or get_option('display.encoding')\n f = StringIO(f.read().decode(encoding))\n elif compat.PY3:\n from io import TextIOWrapper\n f = TextIOWrapper(f, encoding=encoding)\n return f\n else:\n if compat.PY3:\n if encoding:\n f = open(path, mode, encoding=encoding)\n else:\n f = open(path, mode, errors='replace')\n else:\n f = open(path, mode)\n\n return f\n\n\nif compat.PY3: # pragma: no cover\n def UnicodeReader(f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n # ignore encoding\n return csv.reader(f, dialect=dialect, **kwds)\n\n def UnicodeWriter(f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n return csv.writer(f, dialect=dialect, **kwds)\nelse:\n class UnicodeReader:\n\n \"\"\"\n A CSV reader which will iterate over lines in the CSV file \"f\",\n which is encoded in the given encoding.\n\n On Python 3, this is replaced (below) by csv.reader, which handles\n unicode.\n \"\"\"\n\n def __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n f = UTF8Recoder(f, encoding)\n self.reader = csv.reader(f, dialect=dialect, **kwds)\n\n def next(self):\n row = next(self.reader)\n return [compat.text_type(s, \"utf-8\") for s in row]\n\n # python 3 iterator\n __next__ = next\n\n def __iter__(self): # pragma: no cover\n return self\n\n class UnicodeWriter:\n\n \"\"\"\n A CSV writer which will write rows to CSV file \"f\",\n which is encoded in the given encoding.\n \"\"\"\n\n def __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n # Redirect output to a queue\n self.queue = StringIO()\n self.writer = csv.writer(self.queue, dialect=dialect, **kwds)\n self.stream = f\n self.encoder = codecs.getincrementalencoder(encoding)()\n self.quoting = kwds.get(\"quoting\", None)\n\n def writerow(self, row):\n def _check_as_is(x):\n return (self.quoting == csv.QUOTE_NONNUMERIC and\n is_number(x)) or isinstance(x, str)\n\n row = [x if _check_as_is(x)\n else pprint_thing(x).encode('utf-8') for x in row]\n\n self.writer.writerow([s for s in row])\n 
# Fetch UTF-8 output from the queue ...\n data = self.queue.getvalue()\n data = data.decode(\"utf-8\")\n # ... and reencode it into the target encoding\n data = self.encoder.encode(data)\n # write to the target stream\n self.stream.write(data)\n # empty queue\n self.queue.truncate(0)\n\n def writerows(self, rows):\n def _check_as_is(x):\n return (self.quoting == csv.QUOTE_NONNUMERIC and\n is_number(x)) or isinstance(x, str)\n\n for i, row in enumerate(rows):\n rows[i] = [x if _check_as_is(x)\n else pprint_thing(x).encode('utf-8') for x in row]\n\n self.writer.writerows([[s for s in row] for row in rows])\n # Fetch UTF-8 output from the queue ...\n data = self.queue.getvalue()\n data = data.decode(\"utf-8\")\n # ... and reencode it into the target encoding\n data = self.encoder.encode(data)\n # write to the target stream\n self.stream.write(data)\n # empty queue\n self.queue.truncate(0)\n\n\ndef get_dtype_kinds(l):\n \"\"\"\n Parameters\n ----------\n l : list of arrays\n\n Returns\n -------\n a set of kinds that exist in this list of arrays\n \"\"\"\n\n typs = set()\n for arr in l:\n\n dtype = arr.dtype\n if is_categorical_dtype(dtype):\n typ = 'category'\n elif isinstance(arr, ABCSparseArray):\n typ = 'sparse'\n elif is_datetime64_dtype(dtype):\n typ = 'datetime'\n elif is_timedelta64_dtype(dtype):\n typ = 'timedelta'\n elif is_object_dtype(dtype):\n typ = 'object'\n elif is_bool_dtype(dtype):\n typ = 'bool'\n else:\n typ = dtype.kind\n typs.add(typ)\n return typs\n\ndef _concat_compat(to_concat, axis=0):\n \"\"\"\n provide concatenation of an array of arrays each of which is a single\n 'normalized' dtypes (in that for example, if its object, then it is a non-datetimelike\n provde a combined dtype for the resulting array the preserves the overall dtype if possible)\n\n Parameters\n ----------\n to_concat : array of arrays\n axis : axis to provide concatenation\n\n Returns\n -------\n a single array, preserving the combined dtypes\n \"\"\"\n\n # filter empty arrays\n # 1-d dtypes always are included here\n def is_nonempty(x):\n try:\n return x.shape[axis] > 0\n except Exception:\n return True\n nonempty = [x for x in to_concat if is_nonempty(x)]\n\n # If all arrays are empty, there's nothing to convert, just short-cut to\n # the concatenation, #3121.\n #\n # Creating an empty array directly is tempting, but the winnings would be\n # marginal given that it would still require shape & dtype calculation and\n # np.concatenate which has them both implemented is compiled.\n\n typs = get_dtype_kinds(to_concat)\n\n # these are mandated to handle empties as well\n if 'datetime' in typs or 'timedelta' in typs:\n from pandas.tseries.common import _concat_compat\n return _concat_compat(to_concat, axis=axis)\n\n elif 'sparse' in typs:\n from pandas.sparse.array import _concat_compat\n return _concat_compat(to_concat, axis=axis)\n\n elif 'category' in typs:\n from pandas.core.categorical import _concat_compat\n return _concat_compat(to_concat, axis=axis)\n\n if not nonempty:\n\n # we have all empties, but may need to coerce the result dtype to object if we\n # have non-numeric type operands (numpy would otherwise cast this to float)\n typs = get_dtype_kinds(to_concat)\n if len(typs) != 1:\n\n if not len(typs-set(['i','u','f'])) or not len(typs-set(['bool','i','u'])):\n # let numpy coerce\n pass\n else:\n # coerce to object\n to_concat = [ x.astype('object') for x in to_concat ]\n\n return np.concatenate(to_concat,axis=axis)\n\ndef _where_compat(mask, arr1, arr2):\n if arr1.dtype == _NS_DTYPE and 
arr2.dtype == _NS_DTYPE:\n new_vals = np.where(mask, arr1.view('i8'), arr2.view('i8'))\n return new_vals.view(_NS_DTYPE)\n\n import pandas.tslib as tslib\n if arr1.dtype == _NS_DTYPE:\n arr1 = tslib.ints_to_pydatetime(arr1.view('i8'))\n if arr2.dtype == _NS_DTYPE:\n arr2 = tslib.ints_to_pydatetime(arr2.view('i8'))\n\n return np.where(mask, arr1, arr2)\n\n\ndef sentinel_factory():\n class Sentinel(object):\n pass\n\n return Sentinel()\n\n\ndef in_interactive_session():\n \"\"\" check if we're running in an interactive shell\n\n returns True if running under python/ipython interactive shell\n \"\"\"\n def check_main():\n import __main__ as main\n return (not hasattr(main, '__file__') or\n get_option('mode.sim_interactive'))\n\n try:\n return __IPYTHON__ or check_main()\n except:\n return check_main()\n\n\ndef in_qtconsole():\n \"\"\"\n check if we're inside an IPython qtconsole\n\n DEPRECATED: This is no longer needed, or working, in IPython 3 and above.\n \"\"\"\n try:\n ip = get_ipython()\n front_end = (\n ip.config.get('KernelApp', {}).get('parent_appname', \"\") or\n ip.config.get('IPKernelApp', {}).get('parent_appname', \"\")\n )\n if 'qtconsole' in front_end.lower():\n return True\n except:\n return False\n return False\n\n\ndef in_ipnb():\n \"\"\"\n check if we're inside an IPython Notebook\n\n DEPRECATED: This is no longer used in pandas, and won't work in IPython 3\n and above.\n \"\"\"\n try:\n ip = get_ipython()\n front_end = (\n ip.config.get('KernelApp', {}).get('parent_appname', \"\") or\n ip.config.get('IPKernelApp', {}).get('parent_appname', \"\")\n )\n if 'notebook' in front_end.lower():\n return True\n except:\n return False\n return False\n\n\ndef in_ipython_frontend():\n \"\"\"\n check if we're inside an an IPython zmq frontend\n \"\"\"\n try:\n ip = get_ipython()\n return 'zmq' in str(type(ip)).lower()\n except:\n pass\n\n return False\n\n# Unicode consolidation\n# ---------------------\n#\n# pprinting utility functions for generating Unicode text or\n# bytes(3.x)/str(2.x) representations of objects.\n# Try to use these as much as possible rather then rolling your own.\n#\n# When to use\n# -----------\n#\n# 1) If you're writing code internal to pandas (no I/O directly involved),\n# use pprint_thing().\n#\n# It will always return unicode text which can handled by other\n# parts of the package without breakage.\n#\n# 2) If you need to send something to the console, use console_encode().\n#\n# console_encode() should (hopefully) choose the right encoding for you\n# based on the encoding set in option \"display.encoding\"\n#\n# 3) if you need to write something out to file, use\n# pprint_thing_encoded(encoding).\n#\n# If no encoding is specified, it defaults to utf-8. Since encoding pure\n# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're\n# working with straight ascii.\n\n\ndef _pprint_seq(seq, _nest_lvl=0, max_seq_items=None, **kwds):\n \"\"\"\n internal. pprinter for iterables. 
you should probably use pprint_thing()\n rather then calling this directly.\n\n bounds length of printed sequence, depending on options\n \"\"\"\n if isinstance(seq, set):\n fmt = u(\"set([%s])\")\n else:\n fmt = u(\"[%s]\") if hasattr(seq, '__setitem__') else u(\"(%s)\")\n\n if max_seq_items is False:\n nitems = len(seq)\n else:\n nitems = max_seq_items or get_option(\"max_seq_items\") or len(seq)\n\n s = iter(seq)\n r = []\n for i in range(min(nitems, len(seq))): # handle sets, no slicing\n r.append(pprint_thing(next(s), _nest_lvl + 1, max_seq_items=max_seq_items, **kwds))\n body = \", \".join(r)\n\n if nitems < len(seq):\n body += \", ...\"\n elif isinstance(seq, tuple) and len(seq) == 1:\n body += ','\n\n return fmt % body\n\n\ndef _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds):\n \"\"\"\n internal. pprinter for iterables. you should probably use pprint_thing()\n rather then calling this directly.\n \"\"\"\n fmt = u(\"{%s}\")\n pairs = []\n\n pfmt = u(\"%s: %s\")\n\n if max_seq_items is False:\n nitems = len(seq)\n else:\n nitems = max_seq_items or get_option(\"max_seq_items\") or len(seq)\n\n for k, v in list(seq.items())[:nitems]:\n pairs.append(pfmt % (pprint_thing(k, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds),\n pprint_thing(v, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds)))\n\n if nitems < len(seq):\n return fmt % (\", \".join(pairs) + \", ...\")\n else:\n return fmt % \", \".join(pairs)\n\n\ndef pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False,\n quote_strings=False, max_seq_items=None):\n \"\"\"\n This function is the sanctioned way of converting objects\n to a unicode representation.\n\n properly handles nested sequences containing unicode strings\n (unicode(object) does not)\n\n Parameters\n ----------\n thing : anything to be formatted\n _nest_lvl : internal use only. pprint_thing() is mutually-recursive\n with pprint_sequence, this argument is used to keep track of the\n current nesting level, and limit it.\n escape_chars : list or dict, optional\n Characters to escape. If a dict is passed the values are the\n replacements\n default_escapes : bool, default False\n Whether the input escape characters replaces or adds to the defaults\n max_seq_items : False, int, default None\n Pass thru to other pretty printers to limit sequence printing\n\n Returns\n -------\n result - unicode object on py2, str on py3. 
Always Unicode.\n\n \"\"\"\n def as_escaped_unicode(thing, escape_chars=escape_chars):\n # Unicode is fine, else we try to decode using utf-8 and 'replace'\n # if that's not it either, we have no way of knowing and the user\n # should deal with it himself.\n\n try:\n result = compat.text_type(thing) # we should try this first\n except UnicodeDecodeError:\n # either utf-8 or we replace errors\n result = str(thing).decode('utf-8', \"replace\")\n\n translate = {'\\t': r'\\t',\n '\\n': r'\\n',\n '\\r': r'\\r',\n }\n if isinstance(escape_chars, dict):\n if default_escapes:\n translate.update(escape_chars)\n else:\n translate = escape_chars\n escape_chars = list(escape_chars.keys())\n else:\n escape_chars = escape_chars or tuple()\n for c in escape_chars:\n result = result.replace(c, translate[c])\n\n return compat.text_type(result)\n\n if (compat.PY3 and hasattr(thing, '__next__')) or hasattr(thing, 'next'):\n return compat.text_type(thing)\n elif (isinstance(thing, dict) and\n _nest_lvl < get_option(\"display.pprint_nest_depth\")):\n result = _pprint_dict(thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items)\n elif is_sequence(thing) and _nest_lvl < \\\n get_option(\"display.pprint_nest_depth\"):\n result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars,\n quote_strings=quote_strings, max_seq_items=max_seq_items)\n elif isinstance(thing, compat.string_types) and quote_strings:\n if compat.PY3:\n fmt = \"'%s'\"\n else:\n fmt = \"u'%s'\"\n result = fmt % as_escaped_unicode(thing)\n else:\n result = as_escaped_unicode(thing)\n\n return compat.text_type(result) # always unicode\n\n\ndef pprint_thing_encoded(object, encoding='utf-8', errors='replace', **kwds):\n value = pprint_thing(object) # get unicode representation of object\n return value.encode(encoding, errors, **kwds)\n\n\ndef console_encode(object, **kwds):\n \"\"\"\n this is the sanctioned way to prepare something for\n sending *to the console*, it delegates to pprint_thing() to get\n a unicode representation of the object relies on the global encoding\n set in display.encoding. Use this everywhere\n where you output to the console.\n \"\"\"\n return pprint_thing_encoded(object,\n get_option(\"display.encoding\"))\n\n\ndef load(path): # TODO remove in 0.13\n \"\"\"\n Load pickled pandas object (or any other pickled object) from the specified\n file path\n\n Warning: Loading pickled data received from untrusted sources can be\n unsafe. 
See: http://docs.python.org/2.7/library/pickle.html\n\n Parameters\n ----------\n path : string\n File path\n\n Returns\n -------\n unpickled : type of object stored in file\n \"\"\"\n import warnings\n warnings.warn(\"load is deprecated, use read_pickle\", FutureWarning)\n from pandas.io.pickle import read_pickle\n return read_pickle(path)\n\n\ndef save(obj, path): # TODO remove in 0.13\n \"\"\"\n Pickle (serialize) object to input file path\n\n Parameters\n ----------\n obj : any object\n path : string\n File path\n \"\"\"\n import warnings\n warnings.warn(\"save is deprecated, use obj.to_pickle\", FutureWarning)\n from pandas.io.pickle import to_pickle\n return to_pickle(obj, path)\n\n\ndef _maybe_match_name(a, b):\n a_has = hasattr(a, 'name')\n b_has = hasattr(b, 'name')\n if a_has and b_has:\n if a.name == b.name:\n return a.name\n else:\n return None\n elif a_has:\n return a.name\n elif b_has:\n return b.name\n return None\n\ndef _random_state(state=None):\n \"\"\"\n Helper function for processing random_state arguments.\n\n Parameters\n ----------\n state : int, np.random.RandomState, None.\n If receives an int, passes to np.random.RandomState() as seed.\n If receives an np.random.RandomState object, just returns object.\n If receives `None`, returns an np.random.RandomState object.\n If receives anything else, raises an informative ValueError.\n Default None.\n\n Returns\n -------\n np.random.RandomState\n \"\"\"\n\n if is_integer(state):\n return np.random.RandomState(state)\n elif isinstance(state, np.random.RandomState):\n return state\n elif state is None:\n return np.random.RandomState()\n else:\n raise ValueError(\"random_state must be an integer, a numpy RandomState, or None\")\n"
] | [
[
"pandas.compat.BytesIO",
"pandas.Series",
"pandas.tslib.array_to_datetime",
"numpy.asarray",
"numpy.issubdtype",
"pandas.core.index.Int64Index",
"numpy.dtype",
"numpy.all",
"numpy.concatenate",
"pandas.compat.map",
"pandas.lib.maybe_convert_objects",
"numpy.iinfo",
"pandas.core.categorical._concat_compat",
"numpy.where",
"numpy.place",
"scipy.interpolate.UnivariateSpline",
"numpy.datetime_data",
"pandas.core.config.get_option",
"pandas.tseries.timedeltas.to_timedelta",
"numpy.allclose",
"pandas.lib.is_bool_array",
"numpy.arange",
"pandas.compat.text_type",
"pandas.lib.checknull_old",
"pandas.compat.StringIO",
"pandas.tslib.convert_to_timedelta",
"pandas.lib.Timedelta",
"scipy.interpolate.interp1d",
"pandas.lib.isscalar",
"numpy.interp",
"pandas.tseries.tools.to_datetime",
"pandas.compat.long",
"pandas.io.pickle.read_pickle",
"numpy.zeros",
"numpy.putmask",
"pandas.compat.u",
"numpy.isnan",
"pandas.tslib.Timestamp",
"pandas.tseries.timedeltas._coerce_scalar_to_timedelta_type",
"pandas.lib.list_to_object_array",
"pandas.io.pickle.to_pickle",
"numpy.array",
"numpy.random.RandomState",
"pandas.tslib.Timedelta",
"numpy.array_equal",
"numpy.isfinite",
"pandas.lib.infer_dtype",
"pandas.lib.Timestamp",
"pandas.compat.zip",
"numpy.isscalar",
"numpy.prod",
"pandas.lib.checknull",
"numpy.lib.format.write_array",
"numpy.isinf",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
sumantu-powale/team-4K | [
"b72c6cffdc16202c29b8acc6f31611c80e157fec"
] | [
"Team-4K-Bayesian_Matting/getKernel.py"
] | [
"import numpy as np\n\n\"\"\"Function to get kernel area\"\"\"\n\ndef getKernel(kernel_size, x_pos, y_pos, min_pix, Trimap):\n\n foreground_tri = (Trimap == 1) # foreground alpha\n background_tri = (Trimap == 0) # background alpha\n num_fore = 0\n num_back = 0\n M, N, Ch = np.shape(Trimap)\n\n while num_fore < min_pix or num_back < min_pix:\n half_size = np.fix(kernel_size / 2)\n ymin = int(max(0, y_pos - half_size))\n ymax = int(min(N, y_pos + half_size + 1))\n xmin = int(max(0, x_pos - half_size))\n xmax = int(min(M, x_pos + half_size + 1))\n num_fore = (foreground_tri[xmin:xmax, ymin:ymax, 1] == 1).sum()\n num_back = (background_tri[xmin:xmax, ymin:ymax, 1] == 1).sum()\n kernel_size = kernel_size + 4\n\n new_kernel_size = kernel_size - 4\n return xmin, xmax, ymin, ymax, new_kernel_size"
] | [
[
"numpy.fix",
"numpy.shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ajlee21/core-accessory-interactome | [
"e2d8344e8c8abb1d0bda845ce2292b08ae590c51"
] | [
"archive/sra_experiment/nbconverted/2_correlation_genes.py"
] | [
"\n# coding: utf-8\n\n# # Correlation analysis\n# Explore the relationships between core and accessory genes\n\n# In[1]:\n\n\nget_ipython().run_line_magic('load_ext', 'autoreload')\nget_ipython().run_line_magic('autoreload', '2')\n\nimport pandas as pd\nimport os\nimport seaborn as sns\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom textwrap import fill\nfrom core_acc_modules import utils, paths\n\nnp.random.seed(123)\n\n\n# In[2]:\n\n\n# Read in data (grouped by reference used)\ngene_expression_ref_pao1 = pd.read_csv(paths.PAO1_GE, sep=\"\\t\", header=0, index_col=0)\ngene_expression_ref_pa14 = pd.read_csv(paths.PA14_GE, sep=\"\\t\", header=0, index_col=0)\n\n\n# In[3]:\n\n\nprint(gene_expression_ref_pao1.shape)\nprint(gene_expression_ref_pa14.shape)\n\n\n# ## Get shuffled dataset\n# The shuffled gene expression data is generated by starting with the real gene expression data. For each sample, gene values within a given sample are shuffled independently in order to disrupt any existing relationship between genes. This will serve as a negative control when examining the relationship between genes later.\n\n# In[4]:\n\n\n# Shuffled gene expression datasets\nshuffled_all_ref_pao1 = utils.permute_expression_data(gene_expression_ref_pao1)\nshuffled_all_ref_pa14 = utils.permute_expression_data(gene_expression_ref_pa14)\n\n\n# ## Get gene mapping\n\n# In[5]:\n\n\n# Get mapping between PAO1 and PA14 genes using PAO1 reference\ngene_annot_file = paths.GENE_PAO1_ANNOT\ngene_mapping_pao1 = utils.get_pao1_pa14_gene_map(gene_annot_file, 'pao1')\ngene_mapping_pao1.head()\n\n\n# In[6]:\n\n\n# Get mapping between PAO1 and PA14 genes using PA14 reference\ngene_annot_file = paths.GENE_PA14_ANNOT\ngene_mapping_pa14 = utils.get_pao1_pa14_gene_map(gene_annot_file, 'pa14')\ngene_mapping_pa14.head()\n\n\n# In[7]:\n\n\n# Check that the number of non-unique mapped genes is consistent with our manual findings\nassert gene_mapping_pao1[gene_mapping_pao1[\"num_mapped_genes\"] > 1].shape[0] == 5\nassert gene_mapping_pa14[gene_mapping_pa14[\"num_mapped_genes\"] > 1].shape[0] == 10\n\n\n# ## Get core genes\n# Core genes homologous between PAO1 and PA14. **PAO1 core genes** are PAO1 reference genes that have a PA14 homolog found. Similarly, **PA14 core genes** are PA14 refrence genes that have a PAO1 homolog.\n# \n# * PAO1 core and PA14 core mostly overlap but not completely. 
\n# * 5355 genes overlap when compare PAO1 core genes map to PA14 ids vs PA14 core\n# * 5351 genes overlap when compare PA14 core genes map to PAO1 ids vs PA14 core\n# \n# Here we will define **core genes** = union(PAO1 core genes, PA14 core genes)\n# \n# The annotations for what genes are homoglous between PAO1 and PA14 were obtained from [BACTOME website](https://pseudomonas-annotator.shinyapps.io/pa_annotator/)\n\n# In[8]:\n\n\ncore_pao1_genes, core_pa14_genes = utils.get_core_genes(gene_mapping_pao1,\n gene_mapping_pa14,\n False)\nprint(f\"Number of PAO1 core genes: {len(core_pao1_genes)}\")\nprint(f\"Number of PA14 core genes: {len(core_pa14_genes)}\")\n\n\n# In[9]:\n\n\n## Check\n# Using annotation files with extension _mod.tsv\n#pao1_ref_core_df = gene_mapping_pao1[\n# (gene_mapping_pao1[\"annotation\"] == \"core\")\n# ]\n#pa14_ref_core_df = gene_mapping_pa14[\n# (gene_mapping_pa14[\"annotation\"] == \"core\")\n#]\n#pao1_mapped_genes = pa14_ref_core_df.loc[core_pa14_genes, \"PAO1_ID\"]\n#print(len(set(pao1_mapped_genes).intersection(core_pao1_genes)))\n\n#pa14_mapped_genes = pao1_ref_core_df.loc[core_pao1_genes, \"PA14_ID\"]\n#print(len(set(pa14_mapped_genes).intersection(core_pa14_genes)))\n\n\n# **Note:** \n# I have checked that the `core_pao1_genes` completely intersects with the core genes mapped from `core_pa14_genes` and vice versa. The 3 extra genes must be due to some many to one mapping between the two strains. Based on our check we do not believe these 3 genes are due to a issue taking the union \n\n# ## Get accessory genes\n# \n# **PAO1 accessory** = All PAO1 genes - core genes (PAO1-specific genes)\n# \n# **PA14 accessory** = All PA14 genes - core genes (PA14-specific genes)\n\n# In[10]:\n\n\n# Get PAO1-specific genes\npao1_ref_genes = gene_expression_ref_pao1.columns\npao1_acc = list(set(pao1_ref_genes) - set(core_pao1_genes))\nprint(f\"Number of PAO1-specific genes: {len(pao1_acc)}\")\n\n\n# In[11]:\n\n\n# Check that `get_pao1_pa14_gene_map` function is working as expected\nassert(\"PA0053\" not in core_pao1_genes and \"PA0053\" in pao1_acc)\n\n\n# In[12]:\n\n\n# Get PA14-specific genes\npa14_ref_genes = gene_expression_ref_pa14.columns\npa14_acc = list(set(pa14_ref_genes) - set(core_pa14_genes))\nprint(f\"Number of PA14-specific genes: {len(pa14_acc)}\")\n\n\n# In[13]:\n\n\n# Check that `get_pao1_pa14_gene_map` function is working as expected\nassert(\"PA14_00410\" not in core_pa14_genes and \"PA14_00410\" in pa14_acc)\n\n\n# ## Group samples by genotype\n\n# In[14]:\n\n\n# Group samples as PAO1 or PA14 based on experiment metadata\nsample_annot_file = paths.SAMPLE_ANNOT\n\npao1_ids, pa14_ids = utils.get_sample_grps(sample_annot_file)\n\n\n# In[15]:\n\n\n# PAO1 samples aligned to PAO1 reference\ndata_core_pao1_samples_pao1_ref = gene_expression_ref_pao1.reindex(index=pao1_ids, columns=core_pao1_genes)\ndata_acc_pao1_samples_pao1_ref = gene_expression_ref_pao1.loc[pao1_ids, pao1_acc]\nprint(data_core_pao1_samples_pao1_ref.shape)\nprint(data_acc_pao1_samples_pao1_ref.shape)\n\n\n# In[16]:\n\n\n# PA14 samples aligned to PA14 reference\ndata_core_pa14_samples_pa14_ref = gene_expression_ref_pa14.loc[pa14_ids, core_pa14_genes]\ndata_acc_pa14_samples_pa14_ref = gene_expression_ref_pa14.loc[pa14_ids, pa14_acc]\nprint(data_core_pa14_samples_pa14_ref.shape)\nprint(data_acc_pa14_samples_pa14_ref.shape)\n\n\n# In[17]:\n\n\n# PA14 samples aligned to PAO1 reference\ndata_core_pa14_samples_pao1_ref = gene_expression_ref_pao1.loc[pa14_ids, 
core_pao1_genes]\ndata_acc_pa14_samples_pao1_ref = gene_expression_ref_pao1.loc[pa14_ids, pao1_acc]\nprint(data_core_pa14_samples_pao1_ref.shape)\nprint(data_acc_pa14_samples_pao1_ref.shape)\n\n\n# In[18]:\n\n\n# PAO1 samples aligned to PA14 reference\ndata_core_pao1_samples_pa14_ref = gene_expression_ref_pa14.loc[pao1_ids, core_pa14_genes]\ndata_acc_pao1_samples_pa14_ref = gene_expression_ref_pa14.loc[pao1_ids, pa14_acc]\nprint(data_core_pao1_samples_pa14_ref.shape)\nprint(data_acc_pao1_samples_pa14_ref.shape)\n\n\n# ## Distribution of gene expression\n# Examine the distribution of mean gene expression for core and accessory genes in each of these 4 groups of samples (total of 8 plots)\n\n# In[19]:\n\n\n# Examine PAO1 samples in PAO1 reference\npao1_samples_pao1_ref_core = gene_expression_ref_pao1.loc[pao1_ids,core_pao1_genes].mean()\npao1_samples_pao1_ref_acc = gene_expression_ref_pao1.loc[pao1_ids,pao1_acc].mean()\n\n# Examine PA14 samples in PAO1 reference\npa14_samples_pao1_ref_core = gene_expression_ref_pao1.loc[pa14_ids,core_pao1_genes].mean()\npa14_samples_pao1_ref_acc = gene_expression_ref_pao1.loc[pa14_ids,pao1_acc].mean()\n\n# Examine PAO1 samples in PA14 reference\npao1_samples_pa14_ref_core = gene_expression_ref_pa14.loc[pao1_ids,core_pa14_genes].mean()\npao1_samples_pa14_ref_acc = gene_expression_ref_pa14.loc[pao1_ids,pa14_acc].mean()\n\n# Examine PA14 samples in PA14 reference\npa14_samples_pa14_ref_core = gene_expression_ref_pa14.loc[pa14_ids,core_pa14_genes].mean()\npa14_samples_pa14_ref_acc = gene_expression_ref_pa14.loc[pa14_ids,pa14_acc].mean()\n\n\n# In[20]:\n\n\n# Check if any core genes have 0 expression in PAO1 samples using PAO1 reference (or PA14 samples in PA14 reference)\n# We would expect core genes to have nonzero expression in these cases\n# This might call for an adjustment to how we are processing the data using Salmon quant\nprint(any(pao1_samples_pao1_ref_core < 0))\nprint(any(pa14_samples_pa14_ref_core < 0))\n\n\n# In[21]:\n\n\n# Save nonzero accessory genes in cross comparison\n# These are PAO1-specific genes that are nonzero in PA14 samples\n# or PA14-specific genes that are nonzero in PAO1 samples\n\npd.DataFrame(pao1_samples_pa14_ref_acc[pao1_samples_pa14_ref_acc>0]).to_csv(paths.PAO1_SAMPLE_PA14_REF, sep=\"\\t\")\npd.DataFrame(pa14_samples_pao1_ref_acc[pa14_samples_pao1_ref_acc>0]).to_csv(paths.PA14_SAMPLE_PAO1_REF, sep=\"\\t\")\n\n\n# In[22]:\n\n\n# Plot\nsns.set_style(\"darkgrid\")\n\n# Set up the matplotlib figure\nfig, axes = plt.subplots(ncols=2, nrows=1, figsize=(6,4))\n\n# Distribution plot for core genes\nsns.distplot(pao1_samples_pao1_ref_core.values, \n label='PAO1 samples PAO1 core genes', \n color='red',\n kde=False,\n ax=axes[0]\n )\n\nsns.distplot(pao1_samples_pao1_ref_acc.values,\n label='PAO1 samples PAO1 accessory genes',\n color='blue',\n kde=False,\n ax=axes[1]\n )\n\nfig.xlim=(0,10)\nplt.suptitle(fill('Histogram of mean gene expression for PAO1 samples (PAO1 reference)', width=40),\n x=0.5,\n y=1.2,\n fontsize=16)\naxes[0].set_title(fill('Expression of core genes in PAO1 samples', width=20))\naxes[1].set_title(fill('Expression of PAO1-specific genes in PAO1 samples', width=20))\nfig.text(0.5, 0.01, 'Mean gene expression', ha='center', fontsize=14)\nfig.text(0.01, 0.5, 'Count', ha='center', rotation=90, fontsize=14)\n\n\n# In[23]:\n\n\n# Set up the matplotlib figure\nfig, axes = plt.subplots(ncols=2, nrows=1, figsize=(6,4))\n\n# Distribution plot for core genes\nsns.distplot(pa14_samples_pao1_ref_core.values, \n label='PA14 
samples PAO1 core genes', \n color='red',\n kde=False,\n ax=axes[0]\n )\n\nsns.distplot(pa14_samples_pao1_ref_acc.values,\n label='PA14 samples PAO1 accessory genes',\n color='blue',\n kde=False,\n ax=axes[1]\n )\n\nfig.xlim=(0,10)\nplt.suptitle(fill('Histogram of mean gene expression for PA14 samples (PAO1 reference)', width=40),\n x=0.5,\n y=1.2,\n fontsize=16)\naxes[0].set_title(fill('Expression of core genes in PA14 samples', width=20))\naxes[1].set_title(fill('Expression of PAO1-specific genes in PA14 samples', width=20))\nfig.text(0.5, 0.01, 'Mean gene expression', ha='center', fontsize=14)\nfig.text(0.01, 0.5, 'Count', ha='center', rotation=90, fontsize=14)\n\n\n# In[24]:\n\n\n# Set up the matplotlib figure\nfig, axes = plt.subplots(ncols=2, nrows=1, figsize=(6,4))\n\n# Distribution plot for core genes\nsns.distplot(pao1_samples_pa14_ref_core.values, \n label='PAO1 samples PA14 core genes', \n color='red',\n kde=False,\n ax=axes[0]\n )\n\nsns.distplot(pao1_samples_pa14_ref_acc.values,\n label='PAO1 samples PA14 accessory genes',\n color='blue',\n kde=False,\n ax=axes[1]\n )\n\nfig.xlim=(0,10)\nplt.suptitle(fill('Histogram of mean gene expression for PAO1 samples (PA14 reference)', width=40),\n x=0.5,\n y=1.2,\n fontsize=16)\naxes[0].set_title(fill('Expression of core genes in PAO1 samples', width=20))\naxes[1].set_title(fill('Expression of PA14-specific genes in PAO1 samples', width=20))\nfig.text(0.5, 0.01, 'Mean gene expression', ha='center', fontsize=14)\nfig.text(0.01, 0.5, 'Count', ha='center', rotation=90, fontsize=14)\n\n\n# In[25]:\n\n\n# Set up the matplotlib figure\nfig, axes = plt.subplots(ncols=2, nrows=1, figsize=(6,4))\n\n# Distribution plot for core genes\nsns.distplot(pa14_samples_pa14_ref_core.values, \n label='PA14 samples PA14 core genes', \n color='red',\n kde=False,\n ax=axes[0]\n )\n\nsns.distplot(pa14_samples_pa14_ref_acc.values,\n label='PA14 samples PA14 accessory genes',\n color='blue',\n kde=False,\n ax=axes[1]\n )\n\nfig.xlim=(0,10)\nplt.suptitle(fill('Histogram of mean gene expression for PA14 samples (PA14 reference)', width=40),\n x=0.5,\n y=1.2,\n fontsize=16)\naxes[0].set_title(fill('Expression of core genes in PA14 samples', width=20))\naxes[1].set_title(fill('Expression of PA14-specific genes in PA14 samples', width=20))\nfig.text(0.5, 0.01, 'Mean gene expression', ha='center', fontsize=14)\nfig.text(0.01, 0.5, 'Count', ha='center', rotation=90, fontsize=14)\n\n\n# **Takeaway:**\n# * PAO1-specific genes have mainly 0 mean gene expression in PA14 samples, as expected. A similar trend is seen in PA14-specific genes in PAO1 samples.\n# * In general, many accessory genes are 0 expressed in cases where the samples and reference match (i.e. PA14 samples in PA14 reference or PAO1 samples in PAO1 reference). This is consistent with the hypothesis that accessory genes are context specific and so perhaps in this experiment, these accessory genes are not expressed.\n# * There are a small number of PAO1-specific genes that have nonzero expression in PA14 samples. What are these nonzero accessory genes? Genes with some low level homology? 
Something else to be discussed with collaborators.\n\n# ## Correlation analysis\n# PAO1 samples using PAO1 reference:\n# * **corr**(core, PAO1 core)\n# * **corr**(core, PAO1 accessory)\n# * **corr**(PAO1 accessory, PAO1 accessory) \n# \n# PA14 samples using PA14 reference:\n# * **corr**(core, PA14 core)\n# * **corr**(core, PA14 accessory)\n# * **corr**(PA14 accessory, PA14 accessory)\n# \n# cross-correlation analysis:\n# \n# PA14 samples using PAO1 reference:\n# * **corr**(core, PAO1 core)\n# * **corr**(core, PAO1 accessory)\n# * **corr**(PAO1 accessory, PAO1 accessory) \n# \n# PAO1 samples using PA14 reference:\n# * **corr**(core, PA14 core)\n# * **corr**(core, PA14 accessory)\n# * **corr**(PA14 accessory, PA14 accessory)\n\n# In[26]:\n\n\n# Get correlation of core-core genes\npao1_core_corr = data_core_pao1_samples_pao1_ref.corr(method='pearson')\npao1_core_corr = pao1_core_corr.values[np.triu_indices(n=len(pao1_core_corr), k=1)]\n\npa14_core_corr = data_core_pa14_samples_pa14_ref.corr(method='pearson')\npa14_core_corr = pa14_core_corr.values[np.triu_indices(n=len(pa14_core_corr), k=1)]\n\npa14_samples_pao1_core_corr = data_core_pa14_samples_pao1_ref.corr(method='pearson')\npa14_samples_pao1_core_corr = pa14_samples_pao1_core_corr.values[\n np.triu_indices(n=len(pa14_samples_pao1_core_corr), k=1)]\n\npao1_samples_pa14_core_corr = data_core_pao1_samples_pa14_ref.corr(method='pearson')\npao1_samples_pa14_core_corr = pao1_samples_pa14_core_corr.values[\n np.triu_indices(n=len(pao1_samples_pa14_core_corr), k=1)]\n\n\n# In[27]:\n\n\n# Get correlation of accessory-accessory genes\npao1_acc_corr = data_acc_pao1_samples_pao1_ref.corr(method='pearson')\npao1_acc_corr = pao1_acc_corr.values[np.triu_indices(n=len(pao1_acc_corr), k=1)]\n\npa14_acc_corr = data_acc_pa14_samples_pa14_ref.corr(method='pearson')\npa14_acc_corr = pa14_acc_corr.values[np.triu_indices(n=len(pa14_acc_corr), k=1)]\n\npa14_samples_pao1_acc_corr = data_acc_pa14_samples_pao1_ref.corr(method='pearson')\npa14_samples_pao1_acc_corr = pa14_samples_pao1_acc_corr.values[\n np.triu_indices(n=len(pa14_samples_pao1_acc_corr), k=1)]\n\npao1_samples_pa14_acc_corr = data_acc_pao1_samples_pa14_ref.corr(method='pearson')\npao1_samples_pa14_acc_corr = pao1_samples_pa14_acc_corr.values[\n np.triu_indices(n=len(pao1_samples_pa14_acc_corr), k=1)]\n\n\n# In[28]:\n\n\n# Get correlation of core-accessory genes\npao1_all_corr = gene_expression_ref_pao1.loc[pao1_ids].corr(method='pearson')\npa14_all_corr = gene_expression_ref_pa14.loc[pa14_ids].corr(method='pearson')\n\npao1_core_acc_corr = pao1_all_corr.loc[core_pao1_genes, pao1_acc]\npao1_core_acc_corr = pao1_core_acc_corr.values.flatten().tolist()\n\npa14_core_acc_corr = pa14_all_corr.loc[core_pa14_genes, pa14_acc]\npa14_core_acc_corr = pa14_core_acc_corr.values.flatten().tolist()\n\npa14_samples_pao1_all_corr = gene_expression_ref_pao1.loc[pa14_ids].corr(method='pearson')\npa14_samples_pao1_core_acc_corr = pa14_samples_pao1_all_corr.loc[core_pao1_genes, pao1_acc]\npa14_samples_pao1_core_acc_corr = pa14_samples_pao1_core_acc_corr.values.flatten().tolist()\n\npao1_samples_pa14_all_corr = gene_expression_ref_pa14.loc[pao1_ids].corr(method='pearson')\npao1_samples_pa14_core_acc_corr = pao1_samples_pa14_all_corr.loc[core_pa14_genes, pa14_acc]\npao1_samples_pa14_core_acc_corr = pao1_samples_pa14_core_acc_corr.values.flatten().tolist()\n\n\n# In[29]:\n\n\n# Get correlation of control dataset\nshuffled_pao1_ref_pao1_corr = 
shuffled_all_ref_pao1.loc[pao1_ids].corr(method='pearson')\nshuffled_pao1_ref_pao1_corr = shuffled_pao1_ref_pao1_corr.values[\n np.triu_indices(n=len(shuffled_pao1_ref_pao1_corr), k=1)]\n\nshuffled_pa14_ref_pa14_corr = shuffled_all_ref_pa14.loc[pa14_ids].corr(method='pearson')\nshuffled_pa14_ref_pa14_corr = shuffled_pa14_ref_pa14_corr.values[\n np.triu_indices(n=len(shuffled_pa14_ref_pa14_corr), k=1)]\n\nshuffled_pa14_ref_pao1_corr = shuffled_all_ref_pao1.loc[pa14_ids].corr(method='pearson')\nshuffled_pa14_ref_pao1_corr = shuffled_pa14_ref_pao1_corr.values[\n np.triu_indices(n=len(shuffled_pa14_ref_pao1_corr), k=1)]\n\nshuffled_pao1_ref_pa14_corr = shuffled_all_ref_pa14.loc[pao1_ids].corr(method='pearson')\nshuffled_pao1_ref_pa14_corr = shuffled_pao1_ref_pa14_corr.values[\n np.triu_indices(n=len(shuffled_pao1_ref_pa14_corr), k=1)]\n\n\n# In[30]:\n\n\nsns.set_style(\"white\")\n\nsns.distplot(pao1_core_corr, label='core', color='red', hist_kws={\"linewidth\": 0})\nsns.distplot(pao1_acc_corr, label='accessory', color='blue', hist_kws={\"linewidth\": 0})\nsns.distplot(pao1_core_acc_corr, label='core-accessory', color='purple', hist_kws={\"linewidth\": 0})\nsns.distplot(shuffled_pao1_ref_pao1_corr, label='shuffled', color='grey', hist_kws={\"linewidth\": 0})\n\nplt.legend(prop={'size': 12})\nplt.title(fill('Density of correlation scores per group for PAO1 samples (PAO1 reference)', width=40),\n fontsize=14)\nplt.ylabel('Density')\n\n\n# In[31]:\n\n\nsns.distplot(pa14_core_corr, label='core', color='red', hist_kws={\"linewidth\": 0})\nsns.distplot(pa14_acc_corr, label='accessory', color='blue', hist_kws={\"linewidth\": 0})\nsns.distplot(pa14_core_acc_corr, label='core-accessory', color='purple', hist_kws={\"linewidth\": 0})\nsns.distplot(shuffled_pa14_ref_pa14_corr, label='shuffled', color='grey', hist_kws={\"linewidth\": 0})\n\nplt.legend(prop={'size': 12})\nplt.title(fill('Density of correlation scores per group for PA14 samples (PA14 reference)', width=40),\n fontsize=14)\nplt.ylabel('Density')\n\n\n# In[32]:\n\n\nsns.distplot(pa14_samples_pao1_core_corr, label='core', color='red', hist_kws={\"linewidth\": 0})\nsns.distplot(pa14_samples_pao1_acc_corr, label='accessory', color='blue', hist_kws={\"linewidth\": 0})\nsns.distplot(pa14_samples_pao1_core_acc_corr, label='core-accessory', color='purple', hist_kws={\"linewidth\": 0})\nsns.distplot(shuffled_pa14_ref_pao1_corr, label='shuffled', color='grey', hist_kws={\"linewidth\": 0})\n\nplt.legend(prop={'size': 12})\nplt.title(fill('Density of correlation scores per group for PA14 samples (PAO1 reference)', width=40),\n fontsize=14)\nplt.ylabel('Density')\n\n\n# In[33]:\n\n\nsns.distplot(pao1_samples_pa14_core_corr, label='core', color='red', hist_kws={\"linewidth\": 0})\nsns.distplot(pao1_samples_pa14_acc_corr, label='accessory', color='blue', hist_kws={\"linewidth\": 0})\nsns.distplot(pao1_samples_pa14_core_acc_corr, label='core-accessory', color='purple', hist_kws={\"linewidth\": 0})\nsns.distplot(shuffled_pao1_ref_pa14_corr, label='shuffled', color='grey', hist_kws={\"linewidth\": 0})\n\nplt.legend(prop={'size': 12})\nplt.title(fill('Density of correlation scores per group for PAO1 samples (PA14 reference)', width=40),\n fontsize=14)\nplt.ylabel('Density')\n\n\n# **Takeaway:**\n# * For PAO1 samples using PAO1 reference there does not seem to be a significant skewing the correlation between core-core, core-accessory or accessory-accessory\n# * For PA14 samples using PA14 reference there is a very slight bump in the accessory-accessory genes. 
We can try to look into what these genes are. But why was this trend not found in PAO1 samples using PAO1 reference?\n# * For PAO1 samples using PA14 reference and PA14 samples using PAO1 reference, there is the same slight bump. In general, we'd expect most accessory genes to be 0 expressed, is this the reason for the bump? \n\n# ### What are the accessory genes with high correlation?\n# There is a bump in the correlation between accessory-accessory genes using PA14 samples in PA14 reference, PAO1 samples in PA14 reference, and PA14 samples in PAO1 reference. We want to explore what these genes are and if their high correlation is due to having a 0 mean expression.\n# \n# \n# According to the above distribution plots, there are PAO1 accessory genes that are highly correlated in PA14 samples. There are also PA14 accessory genes that are high correlated in PAO1 and PA14 samples. We'd expect that the highly correlated PAO1 genes in PA14 samples (and PA14 genes in PAO1 samples) have a high correlation due to mean 0 expression. But we hope there is some biology behind the highly correlated PA14 genes in PA14 samples.\n\n# In[34]:\n\n\n# PA14 accessory genes in PA14 samples\npa14_acc_corr = data_acc_pa14_samples_pa14_ref.corr(method='pearson')\n\n# Reshape correlation data\npa14_acc_corr = pa14_acc_corr.where(np.triu(np.ones(pa14_acc_corr.shape), k=1).astype(np.bool))\npa14_acc_corr_df = pa14_acc_corr.stack().reset_index()\npa14_acc_corr_df.columns = ['in gene','out gene','corr score']\n\n# Select those genes in the \"bump\"\nselected_pa14_genes = pa14_acc_corr_df[pa14_acc_corr_df['corr score']>0.75]\nprint(selected_pa14_genes.shape)\nselected_pa14_genes.head()\n\n# Save genes to review with collaborators\nselected_pa14_genes.to_csv(paths.HIGH_PA14_SAMPLE_PA14_REF, sep=\"\\t\")\n\n\n# In[35]:\n\n\n# Are the accessory genes with high correlation those with 0 expression?\nhigh_corr_genes = np.concatenate((selected_pa14_genes['in gene'].values, \n selected_pa14_genes['out gene'].values)\n )\nany(pa14_samples_pa14_ref_acc[high_corr_genes] <1.0)\n\n\n# In[36]:\n\n\n# PA14 accessory genes in PAO1 samples\npa14_acc_pao1_samples_corr = data_acc_pao1_samples_pa14_ref.corr(method='pearson')\n\n# Reshape correlation data\npa14_acc_pao1_samples_corr = pa14_acc_pao1_samples_corr.where(\n np.triu(np.ones(pa14_acc_pao1_samples_corr.shape), k=1).astype(np.bool))\npa14_acc_pao1_samples_corr_df = pa14_acc_pao1_samples_corr.stack().reset_index()\npa14_acc_pao1_samples_corr_df.columns = ['in gene','out gene','corr score']\n\n# Select those genes in the \"bump\"\nselected_pa14_pao1_samples_genes = pa14_acc_pao1_samples_corr_df[pa14_acc_pao1_samples_corr_df['corr score']>0.75]\nprint(selected_pa14_pao1_samples_genes.shape)\nselected_pa14_pao1_samples_genes.head()\n\n# Are the accessory genes with high correlation those with 0 expression?\nhigh_corr_genes = np.concatenate((selected_pa14_pao1_samples_genes['in gene'].values, \n selected_pa14_pao1_samples_genes['out gene'].values)\n )\n(pao1_samples_pa14_ref_acc[high_corr_genes] <1.0).sum()/len(pao1_samples_pa14_ref_acc[high_corr_genes])\n\n\n# In[37]:\n\n\n# PAO1 accessory genes in PA14 samples\npao1_acc_pa14_samples_corr = data_acc_pa14_samples_pao1_ref.corr(method='pearson')\n\n# Reshape correlation data\npao1_acc_pa14_samples_corr = pao1_acc_pa14_samples_corr.where(\n np.triu(np.ones(pao1_acc_pa14_samples_corr.shape), k=1).astype(np.bool))\npao1_acc_pa14_samples_corr_df = pao1_acc_pa14_samples_corr.stack().reset_index()\npao1_acc_pa14_samples_corr_df.columns = ['in 
gene','out gene','corr score']\n\n# Select those genes in the \"bump\"\nselected_pao1_pa14_samples_genes = pao1_acc_pa14_samples_corr_df[pao1_acc_pa14_samples_corr_df['corr score']>0.75]\nprint(selected_pao1_pa14_samples_genes.shape)\nselected_pao1_pa14_samples_genes.head()\n\n# Are the accessory genes with high correlation those with 0 expression?\nhigh_corr_genes = np.concatenate((selected_pao1_pa14_samples_genes['in gene'].values, \n selected_pao1_pa14_samples_genes['out gene'].values)\n )\n(pa14_samples_pao1_ref_acc[high_corr_genes] <1.0).sum()/len(pa14_samples_pao1_ref_acc[high_corr_genes])\n\n\n# **Takeaway:**\n# * Looks like the highly correlated accessory-accessory genes in PA14 samples using PA14 reference are highly expressed (> 1.0 TPM). So there seems to be some real biological trend here\n# * Whereas, the highly correlated accessory-accessory genes in PA14 samples usng PAO1 reference and PAO1 samples using PA14 reference are mostly lowly expressed (<1.0 TPM) as we expected\n\n# **Conclusions:**\n# * Accessory genes have a different distribution of expression compared to core genes -- accessory genes tend to be more lowly expressed which is consistent with the idea that they are niche-specific\n# * There is a small group of accessory-accessory genes that are highly correlated in PA14 samples\n# \n# **Next steps:**\n# * Try to increase the number of samples using Discovery(Dartmouth server) and re-run analysis to see if there is a something *there*\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.ones",
"numpy.concatenate",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
nickp60/annofilt | [
"bb199309d697616fbfa5426052d197a7659b7342"
] | [
"annofilt/get_complete_genomes.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Nick Waters\n\"\"\"\n\nimport os\nimport sys\nimport subprocess\nimport argparse\nimport shutil\nimport pandas as pd\nimport urllib.request\n\n\nfrom . import shared_methods as sm\n\n\ndef get_args(): # pragma nocover\n parser = argparse.ArgumentParser(\n description=\"Given a genus and species, download complete \" +\n \"genomes from NCBI\",\n add_help=False)\n parser.add_argument(\n \"-g\", \"--genus\",\n help=\"genus\", required=True)\n parser.add_argument(\n \"-s\", \"--species\",\n help=\"genus\", required=True)\n parser.add_argument(\n \"-n\", \"--number_of_strains\",\n type=int,\n help=\"genus\")\n parser.add_argument(\n \"-o\",\n \"--output\",\n help=\"output dir\", required=True)\n optional = parser.add_argument_group('optional arguments')\n optional.add_argument(\n \"--assembly_summary\",\n dest=\"assembly_summary\",\n help=\"Path to assembly_summary.txt from NCBI; if not present, \" +\n \"will be downloaded\")\n optional.add_argument(\n \"--max_contigs\",\n dest=\"max_contigs\",\n default=10,\n help=\"Maximum number of contigs tolerated. Many fragmented \" +\n \" genome assemblies are categorized as 'complete' by ncbi; \" +\n \"so if you expect at most 1 chromosome and 10 plasmids, \" +\n \"set --max_contigs to 11. default: %(default)s\")\n optional.add_argument(\n \"--max_strains\",\n dest=\"max_strains\",\n help=\"how many genomes to try to download to \" +\n \" reach the --number_of_strains\")\n optional.add_argument(\n \"-v\",\n \"--verbosity\",\n dest='verbosity', action=\"store\",\n default=2, type=int,\n help=\"1 = debug(), 2 = info(), 3 = warning(), \" +\n \"4 = error() and 5 = critical(); \" +\n \"default: %(default)s\")\n optional.add_argument(\n \"-h\", \"--help\",\n action=\"help\", default=argparse.SUPPRESS,\n help=\"Displays this help message\")\n\n args = parser.parse_args()\n return args\n\n\ndef check_exes():\n for exe in [\"wget\"]:\n if shutil.which(exe) is None:\n raise ValueError(\"%s executable not found\" % exe)\n\n\ndef get_or_check_assembly_metadata(args, logger):\n if args.assembly_summary is not None:\n if not os.path.isfile(args.assembly_summary):\n raise ValueError(\"Metadata file %s invalid\" %\n args.assembly_summary)\n else:\n return args.assembly_summary\n else:\n new_path = os.path.join(\".\", \"assembly_summary.txt\")\n if not os.path.isfile(new_path):\n logger.info(\"downloading assembly_summary.txt\")\n urllib.request.urlretrieve(\"ftp://ftp.ncbi.nlm.nih.gov/\" +\n \"genomes/genbank/bacteria/\" +\n \"assembly_summary.txt\", new_path)\n return new_path\n\n\ndef filter_assembly_metadata(args, logger):\n names = \"assembly_accession\tbioproject\tbiosample\twgs_master\trefseq_category\ttaxid\tspecies_taxid\torganism_name\tinfraspecific_name\tisolate\tversion_status\tassembly_level\trelease_type\tgenome_rep\tseq_rel_date\tasm_name\tsubmitter\tgbrs_paired_asm\tpaired_asm_comp\tftp_path\texcluded_from_refseq\trelation_to_type_material\".split(\"\\t\")\n d = pd.read_csv(args.assembly_summary, sep=\"\\t\",\n skiprows=(0, 1), header=(0), names=names)\n # print(d.shape)\n d = d[d.genome_rep == \"Full\"]\n # print(d.shape)\n d = d[(d.assembly_level == \"Complete Genome\") |\n (d.assembly_level == \"Chromosome\")]\n # print(d.shape)\n d = d[(d.excluded_from_refseq.notnull())]\n # print(d.shape)\n qname = args.genus + \" \" + args.species\n # print(qname)\n d = d[d.organism_name.str.startswith(qname)]\n # print(d.shape)\n # print(d.genome_rep.head(3))\n return d\n\n\ndef get_genome(path, 
genomes_dir, logger):\n outpath = os.path.join(genomes_dir, os.path.basename(path) + \".fna.gz\")\n if not os.path.isfile(outpath):\n cmd = str(\"wget {0}/{1}_genomic.fna.gz -O {2}\").format(\n path, os.path.basename(path), outpath)\n logger.debug(\"Getting assembly \" + path)\n subprocess.run(cmd, shell=sys.platform != \"win32\",\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, check=True)\n return outpath\n\n\ndef main(args=None, logger=None):\n # get args\n if args is None:\n args = get_args()\n if args.max_strains is None:\n args.max_strains = args.number_of_strains + 10\n else:\n if args.max_strains < args.number_of_strains:\n raise ValueError(\"Maximum number of strains to try cannot be \" +\n \"less than the number of genomes\")\n output_root = os.path.abspath(os.path.expanduser(args.output))\n if not os.path.isdir(output_root):\n sys.stderr.write(\"creating output directory %s\\n\" % output_root)\n os.makedirs(output_root)\n else:\n sys.stderr.write(\"WARNING! Using existing output directory!\\n\")\n check_exes()\n if logger is None:\n logger = sm.set_up_logging(\n outfile=os.path.join(output_root, \"make_annofilt_pangenome.log\"),\n name=\"annofilt\",\n verbosity=args.verbosity)\n logger.debug(\"All settings used:\")\n for k, v in sorted(vars(args).items()):\n logger.debug(\"{0}: {1}\".format(k, v))\n logger.debug(\"sorting out the assembly_metadata.txt file\")\n args.assembly_summary = get_or_check_assembly_metadata(args, logger)\n logger.debug(\"filtering assembly metadata\")\n species_df = filter_assembly_metadata(args, logger)\n genomes_dir = os.path.join(output_root, \"\")\n not_enough_strains = True\n not_reached_max_tries = True\n counter = 0\n n_successful = 0\n # shuffle the order of the strains we have left after filtering\n species_df = species_df.sample(\n frac=1, random_state=12345).reset_index(drop=True)\n logger.debug(\"Attempting to download strains\")\n if species_df.shape[0] < args.number_of_strains:\n logger.warning(\"Only %i elegible strains available!\" %\n species_df.shape[0])\n args.number_of_strains = species_df.shape[0]\n while not_enough_strains and not_reached_max_tries:\n logger.debug(\"getting strain %i of %i\" %\n (n_successful+1, args.number_of_strains))\n this_path = get_genome(\n path=species_df.ftp_path[counter], genomes_dir=genomes_dir,\n logger=logger)\n logger.debug(\" unzipping %s\" % this_path)\n # -f force overwrite (on osx, -o overwrites like with `unzip`, but not linux?)\n unzip_cmd = \"gunzip -f %s\" % this_path\n this_path = this_path.replace(\".gz\", \"\")\n\n subprocess.run(unzip_cmd, shell=sys.platform != \"win32\",\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, check=True)\n\n logger.debug(\" checking number of contigs\")\n grep_cmd = \"grep '>' %s\" % this_path\n result = subprocess.run(grep_cmd, shell=sys.platform != \"win32\",\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, check=True)\n n_contigs = len(result.stdout.decode(\"utf-8\").split(\"\\n\"))\n if n_contigs > args.max_contigs:\n logger.info(\"rejecting %s: %i contigs\" % (this_path, n_contigs))\n os.remove(this_path)\n else:\n n_successful = n_successful + 1\n # status update\n if n_successful == args.number_of_strains:\n not_enough_strains = False\n if counter == args.max_strains:\n not_reached_max_tries = False\n counter = counter + 1\n logger.debug(\"Saved {} strains!\".format(n_successful))\n logger.debug(\"Done!\")\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
NuclearEngideer/openmc | [
"ffe0f0283a81d32759e4f877909bbb64d5ad0d3d"
] | [
"openmc/mesh.py"
] | [
"from abc import ABC\nfrom collections.abc import Iterable\nfrom numbers import Real, Integral\nimport warnings\nfrom xml.etree import ElementTree as ET\n\nimport numpy as np\n\nimport openmc.checkvalue as cv\nimport openmc\nfrom ._xml import get_text\nfrom .mixin import IDManagerMixin\nfrom .surface import _BOUNDARY_TYPES\n\n\nclass MeshBase(IDManagerMixin, ABC):\n \"\"\"A mesh that partitions geometry for tallying purposes.\n\n Parameters\n ----------\n mesh_id : int\n Unique identifier for the mesh\n name : str\n Name of the mesh\n\n Attributes\n ----------\n id : int\n Unique identifier for the mesh\n name : str\n Name of the mesh\n\n \"\"\"\n\n next_id = 1\n used_ids = set()\n\n def __init__(self, mesh_id=None, name=''):\n # Initialize Mesh class attributes\n self.id = mesh_id\n self.name = name\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, name):\n if name is not None:\n cv.check_type('name for mesh ID=\"{0}\"'.format(self._id),\n name, str)\n self._name = name\n else:\n self._name = ''\n\n def __repr__(self):\n string = type(self).__name__ + '\\n'\n string += '{0: <16}{1}{2}\\n'.format('\\tID', '=\\t', self._id)\n string += '{0: <16}{1}{2}\\n'.format('\\tName', '=\\t', self._name)\n return string\n\n @classmethod\n def from_hdf5(cls, group):\n \"\"\"Create mesh from HDF5 group\n\n Parameters\n ----------\n group : h5py.Group\n Group in HDF5 file\n\n Returns\n -------\n openmc.MeshBase\n Instance of a MeshBase subclass\n\n \"\"\"\n\n mesh_type = group['type'][()].decode()\n if mesh_type == 'regular':\n return RegularMesh.from_hdf5(group)\n elif mesh_type == 'rectilinear':\n return RectilinearMesh.from_hdf5(group)\n elif mesh_type == 'unstructured':\n return UnstructuredMesh.from_hdf5(group)\n else:\n raise ValueError('Unrecognized mesh type: \"' + mesh_type + '\"')\n\n\nclass RegularMesh(MeshBase):\n \"\"\"A regular Cartesian mesh in one, two, or three dimensions\n\n Parameters\n ----------\n mesh_id : int\n Unique identifier for the mesh\n name : str\n Name of the mesh\n\n Attributes\n ----------\n id : int\n Unique identifier for the mesh\n name : str\n Name of the mesh\n dimension : Iterable of int\n The number of mesh cells in each direction.\n n_dimension : int\n Number of mesh dimensions.\n lower_left : Iterable of float\n The lower-left corner of the structured mesh. If only two coordinate\n are given, it is assumed that the mesh is an x-y mesh.\n upper_right : Iterable of float\n The upper-right corner of the structrued mesh. If only two coordinate\n are given, it is assumed that the mesh is an x-y mesh.\n width : Iterable of float\n The width of mesh cells in each direction.\n indices : Iterable of tuple\n An iterable of mesh indices for each mesh element, e.g. 
[(1, 1, 1),\n (2, 1, 1), ...]\n\n \"\"\"\n\n def __init__(self, mesh_id=None, name=''):\n super().__init__(mesh_id, name)\n\n self._dimension = None\n self._lower_left = None\n self._upper_right = None\n self._width = None\n\n @property\n def dimension(self):\n return self._dimension\n\n @property\n def n_dimension(self):\n if self._dimension is not None:\n return len(self._dimension)\n else:\n return None\n\n @property\n def lower_left(self):\n return self._lower_left\n\n @property\n def upper_right(self):\n return self._upper_right\n\n @property\n def width(self):\n return self._width\n\n @property\n def num_mesh_cells(self):\n return np.prod(self._dimension)\n\n @property\n def indices(self):\n ndim = len(self._dimension)\n if ndim == 3:\n nx, ny, nz = self.dimension\n return ((x, y, z)\n for z in range(1, nz + 1)\n for y in range(1, ny + 1)\n for x in range(1, nx + 1))\n elif ndim == 2:\n nx, ny = self.dimension\n return ((x, y)\n for y in range(1, ny + 1)\n for x in range(1, nx + 1))\n else:\n nx, = self.dimension\n return ((x,) for x in range(1, nx + 1))\n\n @dimension.setter\n def dimension(self, dimension):\n cv.check_type('mesh dimension', dimension, Iterable, Integral)\n cv.check_length('mesh dimension', dimension, 1, 3)\n self._dimension = dimension\n\n @lower_left.setter\n def lower_left(self, lower_left):\n cv.check_type('mesh lower_left', lower_left, Iterable, Real)\n cv.check_length('mesh lower_left', lower_left, 1, 3)\n self._lower_left = lower_left\n\n @upper_right.setter\n def upper_right(self, upper_right):\n cv.check_type('mesh upper_right', upper_right, Iterable, Real)\n cv.check_length('mesh upper_right', upper_right, 1, 3)\n self._upper_right = upper_right\n\n @width.setter\n def width(self, width):\n cv.check_type('mesh width', width, Iterable, Real)\n cv.check_length('mesh width', width, 1, 3)\n self._width = width\n\n def __repr__(self):\n string = super().__repr__()\n string += '{0: <16}{1}{2}\\n'.format('\\tDimensions', '=\\t', self.n_dimension)\n string += '{0: <16}{1}{2}\\n'.format('\\tMesh Cells', '=\\t', self._dimension)\n string += '{0: <16}{1}{2}\\n'.format('\\tWidth', '=\\t', self._lower_left)\n string += '{0: <16}{1}{2}\\n'.format('\\tOrigin', '=\\t', self._upper_right)\n string += '{0: <16}{1}{2}\\n'.format('\\tPixels', '=\\t', self._width)\n return string\n\n @classmethod\n def from_hdf5(cls, group):\n mesh_id = int(group.name.split('/')[-1].lstrip('mesh '))\n\n # Read and assign mesh properties\n mesh = cls(mesh_id)\n mesh.dimension = group['dimension'][()]\n mesh.lower_left = group['lower_left'][()]\n mesh.upper_right = group['upper_right'][()]\n mesh.width = group['width'][()]\n\n return mesh\n\n @classmethod\n def from_rect_lattice(cls, lattice, division=1, mesh_id=None, name=''):\n \"\"\"Create mesh from an existing rectangular lattice\n\n Parameters\n ----------\n lattice : openmc.RectLattice\n Rectangular lattice used as a template for this mesh\n division : int\n Number of mesh cells per lattice cell.\n If not specified, there will be 1 mesh cell per lattice cell.\n mesh_id : int\n Unique identifier for the mesh\n name : str\n Name of the mesh\n\n Returns\n -------\n openmc.RegularMesh\n RegularMesh instance\n\n \"\"\"\n cv.check_type('rectangular lattice', lattice, openmc.RectLattice)\n\n shape = np.array(lattice.shape)\n width = lattice.pitch*shape\n\n mesh = cls(mesh_id, name)\n mesh.lower_left = lattice.lower_left\n mesh.upper_right = lattice.lower_left + width\n mesh.dimension = shape*division\n\n return mesh\n\n def 
to_xml_element(self):\n \"\"\"Return XML representation of the mesh\n\n Returns\n -------\n element : xml.etree.ElementTree.Element\n XML element containing mesh data\n\n \"\"\"\n\n element = ET.Element(\"mesh\")\n element.set(\"id\", str(self._id))\n\n if self._dimension is not None:\n subelement = ET.SubElement(element, \"dimension\")\n subelement.text = ' '.join(map(str, self._dimension))\n\n subelement = ET.SubElement(element, \"lower_left\")\n subelement.text = ' '.join(map(str, self._lower_left))\n\n if self._upper_right is not None:\n subelement = ET.SubElement(element, \"upper_right\")\n subelement.text = ' '.join(map(str, self._upper_right))\n\n if self._width is not None:\n subelement = ET.SubElement(element, \"width\")\n subelement.text = ' '.join(map(str, self._width))\n\n return element\n\n @classmethod\n def from_xml_element(cls, elem):\n \"\"\"Generate mesh from an XML element\n\n Parameters\n ----------\n elem : xml.etree.ElementTree.Element\n XML element\n\n Returns\n -------\n openmc.Mesh\n Mesh generated from XML element\n\n \"\"\"\n mesh_id = int(get_text(elem, 'id'))\n mesh = cls(mesh_id)\n\n mesh_type = get_text(elem, 'type')\n if mesh_type is not None:\n mesh.type = mesh_type\n\n dimension = get_text(elem, 'dimension')\n if dimension is not None:\n mesh.dimension = [int(x) for x in dimension.split()]\n\n lower_left = get_text(elem, 'lower_left')\n if lower_left is not None:\n mesh.lower_left = [float(x) for x in lower_left.split()]\n\n upper_right = get_text(elem, 'upper_right')\n if upper_right is not None:\n mesh.upper_right = [float(x) for x in upper_right.split()]\n\n width = get_text(elem, 'width')\n if width is not None:\n mesh.width = [float(x) for x in width.split()]\n\n return mesh\n\n def build_cells(self, bc=None):\n \"\"\"Generates a lattice of universes with the same dimensionality\n as the mesh object. The individual cells/universes produced\n will not have material definitions applied and so downstream code\n will have to apply that information.\n\n Parameters\n ----------\n bc : iterable of {'reflective', 'periodic', 'transmission', 'vacuum', or 'white'}\n Boundary conditions for each of the four faces of a rectangle\n (if applying to a 2D mesh) or six faces of a parallelepiped\n (if applying to a 3D mesh) provided in the following order:\n [x min, x max, y min, y max, z min, z max]. 2-D cells do not\n contain the z min and z max entries. 
Defaults to 'reflective' for\n all faces.\n\n Returns\n -------\n root_cell : openmc.Cell\n The cell containing the lattice representing the mesh geometry;\n this cell is a single parallelepiped with boundaries matching\n the outermost mesh boundary with the boundary conditions from bc\n applied.\n cells : iterable of openmc.Cell\n The list of cells within each lattice position mimicking the mesh\n geometry.\n\n \"\"\"\n if bc is None:\n bc = ['reflective'] * 6\n if len(bc) not in (4, 6):\n raise ValueError('Boundary condition must be of length 4 or 6')\n for entry in bc:\n cv.check_value('bc', entry, _BOUNDARY_TYPES)\n\n n_dim = len(self.dimension)\n\n # Build the cell which will contain the lattice\n xplanes = [openmc.XPlane(self.lower_left[0], bc[0]),\n openmc.XPlane(self.upper_right[0], bc[1])]\n if n_dim == 1:\n yplanes = [openmc.YPlane(-1e10, 'reflective'),\n openmc.YPlane(1e10, 'reflective')]\n else:\n yplanes = [openmc.YPlane(self.lower_left[1], bc[2]),\n openmc.YPlane(self.upper_right[1], bc[3])]\n\n if n_dim <= 2:\n # Would prefer to have the z ranges be the max supported float, but\n # these values are apparently different between python and Fortran.\n # Choosing a safe and sane default.\n # Values of +/-1e10 are used here as there seems to be an\n # inconsistency between what numpy uses as the max float and what\n # Fortran expects for a real(8), so this avoids code complication\n # and achieves the same goal.\n zplanes = [openmc.ZPlane(-1e10, 'reflective'),\n openmc.ZPlane(1e10, 'reflective')]\n else:\n zplanes = [openmc.ZPlane(self.lower_left[2], bc[4]),\n openmc.ZPlane(self.upper_right[2], bc[5])]\n root_cell = openmc.Cell()\n root_cell.region = ((+xplanes[0] & -xplanes[1]) &\n (+yplanes[0] & -yplanes[1]) &\n (+zplanes[0] & -zplanes[1]))\n\n # Build the universes which will be used for each of the (i,j,k)\n # locations within the mesh.\n # We will concurrently build cells to assign to these universes\n cells = []\n universes = []\n for _ in self.indices:\n cells.append(openmc.Cell())\n universes.append(openmc.Universe())\n universes[-1].add_cell(cells[-1])\n\n lattice = openmc.RectLattice()\n lattice.lower_left = self.lower_left\n\n # Assign the universe and rotate to match the indexing expected for\n # the lattice\n if n_dim == 1:\n universe_array = np.array([universes])\n elif n_dim == 2:\n universe_array = np.empty(self.dimension[::-1],\n dtype=openmc.Universe)\n i = 0\n for y in range(self.dimension[1] - 1, -1, -1):\n for x in range(self.dimension[0]):\n universe_array[y][x] = universes[i]\n i += 1\n else:\n universe_array = np.empty(self.dimension[::-1],\n dtype=openmc.Universe)\n i = 0\n for z in range(self.dimension[2]):\n for y in range(self.dimension[1] - 1, -1, -1):\n for x in range(self.dimension[0]):\n universe_array[z][y][x] = universes[i]\n i += 1\n lattice.universes = universe_array\n\n if self.width is not None:\n lattice.pitch = self.width\n else:\n dx = ((self.upper_right[0] - self.lower_left[0]) /\n self.dimension[0])\n\n if n_dim == 1:\n lattice.pitch = [dx]\n elif n_dim == 2:\n dy = ((self.upper_right[1] - self.lower_left[1]) /\n self.dimension[1])\n lattice.pitch = [dx, dy]\n else:\n dy = ((self.upper_right[1] - self.lower_left[1]) /\n self.dimension[1])\n dz = ((self.upper_right[2] - self.lower_left[2]) /\n self.dimension[2])\n lattice.pitch = [dx, dy, dz]\n\n # Fill Cell with the Lattice\n root_cell.fill = lattice\n\n return root_cell, cells\n\n\ndef Mesh(*args, **kwargs):\n warnings.warn(\"Mesh has been renamed RegularMesh. 
Future versions of \"\n \"OpenMC will not accept the name Mesh.\")\n return RegularMesh(*args, **kwargs)\n\n\nclass RectilinearMesh(MeshBase):\n \"\"\"A 3D rectilinear Cartesian mesh\n\n Parameters\n ----------\n mesh_id : int\n Unique identifier for the mesh\n name : str\n Name of the mesh\n\n Attributes\n ----------\n id : int\n Unique identifier for the mesh\n name : str\n Name of the mesh\n n_dimension : int\n Number of mesh dimensions (always 3 for a RectilinearMesh).\n x_grid : Iterable of float\n Mesh boundary points along the x-axis.\n y_grid : Iterable of float\n Mesh boundary points along the y-axis.\n z_grid : Iterable of float\n Mesh boundary points along the z-axis.\n indices : Iterable of tuple\n An iterable of mesh indices for each mesh element, e.g. [(1, 1, 1),\n (2, 1, 1), ...]\n\n \"\"\"\n\n def __init__(self, mesh_id=None, name=''):\n super().__init__(mesh_id, name)\n\n self._x_grid = None\n self._y_grid = None\n self._z_grid = None\n\n @property\n def n_dimension(self):\n return 3\n\n @property\n def x_grid(self):\n return self._x_grid\n\n @property\n def y_grid(self):\n return self._y_grid\n\n @property\n def z_grid(self):\n return self._z_grid\n\n @property\n def indices(self):\n nx = len(self.x_grid) - 1\n ny = len(self.y_grid) - 1\n nz = len(self.z_grid) - 1\n return ((x, y, z)\n for z in range(1, nz + 1)\n for y in range(1, ny + 1)\n for x in range(1, nx + 1))\n\n @x_grid.setter\n def x_grid(self, grid):\n cv.check_type('mesh x_grid', grid, Iterable, Real)\n self._x_grid = grid\n\n @y_grid.setter\n def y_grid(self, grid):\n cv.check_type('mesh y_grid', grid, Iterable, Real)\n self._y_grid = grid\n\n @z_grid.setter\n def z_grid(self, grid):\n cv.check_type('mesh z_grid', grid, Iterable, Real)\n self._z_grid = grid\n\n def __repr__(self):\n fmt = '{0: <16}{1}{2}\\n'\n string = super().__repr__()\n string += fmt.format('\\tDimensions', '=\\t', self.n_dimension)\n x_grid_str = str(self._x_grid) if not self._x_grid else len(self._x_grid)\n string += fmt.format('\\tN X pnts:', '=\\t', x_grid_str)\n if self._x_grid:\n string += fmt.format('\\tX Min:', '=\\t', self._x_grid[0])\n string += fmt.format('\\tX Max:', '=\\t', self._x_grid[-1])\n y_grid_str = str(self._y_grid) if not self._y_grid else len(self._y_grid)\n string += fmt.format('\\tN Y pnts:', '=\\t', y_grid_str)\n if self._y_grid:\n string += fmt.format('\\tY Min:', '=\\t', self._y_grid[0])\n string += fmt.format('\\tY Max:', '=\\t', self._y_grid[-1])\n z_grid_str = str(self._z_grid) if not self._z_grid else len(self._z_grid)\n string += fmt.format('\\tN Z pnts:', '=\\t', z_grid_str)\n if self._z_grid:\n string += fmt.format('\\tZ Min:', '=\\t', self._z_grid[0])\n string += fmt.format('\\tZ Max:', '=\\t', self._z_grid[-1])\n return string\n\n @classmethod\n def from_hdf5(cls, group):\n mesh_id = int(group.name.split('/')[-1].lstrip('mesh '))\n\n # Read and assign mesh properties\n mesh = cls(mesh_id)\n mesh.x_grid = group['x_grid'][()]\n mesh.y_grid = group['y_grid'][()]\n mesh.z_grid = group['z_grid'][()]\n\n return mesh\n\n def to_xml_element(self):\n \"\"\"Return XML representation of the mesh\n\n Returns\n -------\n element : xml.etree.ElementTree.Element\n XML element containing mesh data\n\n \"\"\"\n\n element = ET.Element(\"mesh\")\n element.set(\"id\", str(self._id))\n element.set(\"type\", \"rectilinear\")\n\n subelement = ET.SubElement(element, \"x_grid\")\n subelement.text = ' '.join(map(str, self.x_grid))\n\n subelement = ET.SubElement(element, \"y_grid\")\n subelement.text = ' '.join(map(str, 
self.y_grid))\n\n subelement = ET.SubElement(element, \"z_grid\")\n subelement.text = ' '.join(map(str, self.z_grid))\n\n return element\n\n\nclass UnstructuredMesh(MeshBase):\n \"\"\"A 3D unstructured mesh\n\n .. versionadded:: 0.12\n\n Parameters\n ----------\n filename : str\n Location of the unstructured mesh file\n mesh_id : int\n Unique identifier for the mesh\n name : str\n Name of the mesh\n\n Attributes\n ----------\n id : int\n Unique identifier for the mesh\n name : str\n Name of the mesh\n filename : str\n Name of the file containing the unstructured mesh\n volumes : Iterable of float\n Volumes of the unstructured mesh elements\n total_volume : float\n Volume of the unstructured mesh in total\n centroids : Iterable of tuple\n An iterable of element centroid coordinates, e.g. [(0.0, 0.0, 0.0),\n (1.0, 1.0, 1.0), ...]\n \"\"\"\n\n def __init__(self, filename, mesh_id=None, name=''):\n super().__init__(mesh_id, name)\n self.filename = filename\n self._volumes = None\n self._centroids = None\n\n @property\n def filename(self):\n return self._filename\n\n @filename.setter\n def filename(self, filename):\n cv.check_type('Unstructured Mesh filename', filename, str)\n self._filename = filename\n\n @property\n def volumes(self):\n return self._volumes\n\n @volumes.setter\n def volumes(self, volumes):\n cv.check_type(\"Unstructured mesh volumes\", volumes, Iterable, Real)\n self._volumes = volumes\n\n @property\n def total_volume(self):\n return np.sum(self.volumes)\n\n @property\n def centroids(self):\n return self._centroids\n\n @property\n def n_elements(self):\n if self._centroids is None:\n raise RuntimeError(\"No information about this mesh has \"\n \"been loaded from a statepoint file.\")\n return len(self._centroids)\n\n @centroids.setter\n def centroids(self, centroids):\n cv.check_type(\"Unstructured mesh centroids\", centroids,\n Iterable, Real)\n self._centroids = centroids\n\n def __repr__(self):\n string = super().__repr__()\n return string + '{: <16}=\\t{}\\n'.format('\\tFilename', self.filename)\n\n def write_data_to_vtk(self, filename, datasets, volume_normalization=True):\n \"\"\"Map data to the unstructured mesh element centroids\n to create a VTK point-cloud dataset.\n\n Parameters\n ----------\n filename : str\n Name of the VTK file to write.\n datasets : dict\n Dictionary whose keys are the data labels\n and values are the data sets.\n volume_normalization : bool\n Whether or not to normalize the data by the\n volume of the mesh elements\n \"\"\"\n\n import vtk\n from vtk.util import numpy_support as vtk_npsup\n\n if self.centroids is None:\n raise RuntimeError(\"No centroid information is present on this \"\n \"unstructured mesh. Please load this \"\n \"information from a relevant statepoint file.\")\n\n if self.volumes is None and volume_normalization:\n raise RuntimeError(\"No volume data is present on this \"\n \"unstructured mesh. 
Please load the \"\n \" mesh information from a statepoint file.\")\n\n # check that the data sets are appropriately sized\n for label, dataset in datasets.items():\n if isinstance(dataset, np.ndarray):\n assert dataset.size == self.n_elements\n else:\n assert len(dataset) == self.n_elements\n cv.check_type('label', label, str)\n\n # create data arrays for the cells/points\n cell_dim = 1\n vertices = vtk.vtkCellArray()\n points = vtk.vtkPoints()\n\n for centroid in self.centroids:\n # create a point for each centroid\n point_id = points.InsertNextPoint(centroid)\n # create a cell of type \"Vertex\" for each point\n cell_id = vertices.InsertNextCell(cell_dim, (point_id,))\n\n # create a VTK data object\n poly_data = vtk.vtkPolyData()\n poly_data.SetPoints(points)\n poly_data.SetVerts(vertices)\n\n # strange VTK nuance:\n # data must be held in some container\n # until the vtk file is written\n data_holder = []\n\n # create VTK arrays for each of\n # the data sets\n for label, dataset in datasets.items():\n dataset = np.asarray(dataset).flatten()\n\n if volume_normalization:\n dataset /= self.volumes.flatten()\n\n array = vtk.vtkDoubleArray()\n array.SetName(label)\n array.SetNumberOfComponents(1)\n array.SetArray(vtk_npsup.numpy_to_vtk(dataset),\n dataset.size,\n True)\n\n data_holder.append(dataset)\n poly_data.GetPointData().AddArray(array)\n\n # set filename\n if not filename.endswith(\".vtk\"):\n filename += \".vtk\"\n\n writer = vtk.vtkGenericDataObjectWriter()\n writer.SetFileName(filename)\n writer.SetInputData(poly_data)\n writer.Write()\n\n @classmethod\n def from_hdf5(cls, group):\n mesh_id = int(group.name.split('/')[-1].lstrip('mesh '))\n filename = group['filename'][()].decode()\n\n mesh = cls(filename, mesh_id=mesh_id)\n vol_data = group['volumes'][()]\n centroids = group['centroids'][()]\n mesh.volumes = np.reshape(vol_data, (vol_data.shape[0],))\n mesh.centroids = np.reshape(centroids, (vol_data.shape[0], 3))\n\n return mesh\n\n def to_xml_element(self):\n \"\"\"Return XML representation of the mesh\n\n Returns\n -------\n element : xml.etree.ElementTree.Element\n XML element containing mesh data\n\n \"\"\"\n\n element = ET.Element(\"mesh\")\n element.set(\"id\", str(self._id))\n element.set(\"type\", \"unstructured\")\n\n subelement = ET.SubElement(element, \"filename\")\n subelement.text = self.filename\n\n return element\n\n @classmethod\n def from_xml_element(cls, elem):\n \"\"\"Generate unstructured mesh object from XML element\n\n Parameters\n ----------\n elem : xml.etree.ElementTree.Element\n XML element\n\n Returns\n -------\n openmc.UnstructuredMesh\n UnstructuredMesh generated from an XML element\n \"\"\"\n mesh_id = int(get_text(elem, 'id'))\n filename = get_text(elem, 'filename')\n\n mesh = cls(filename, mesh_id)\n\n return mesh\n"
] | [
[
"numpy.reshape",
"numpy.asarray",
"numpy.prod",
"numpy.array",
"numpy.sum",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
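A minimal, self-contained sketch of the numpy calls this entry lists (np.prod for the mesh cell count, np.empty for the universe array, and the reshape/sum pattern from from_hdf5); the dimension and volume values below are hypothetical, not taken from the record.

import numpy as np

# Hypothetical 3 x 2 x 1 mesh dimension, mirroring the RegularMesh bookkeeping.
dimension = np.array([3, 2, 1])
num_mesh_cells = np.prod(dimension)                   # 6, as in the num_mesh_cells property
universes = np.empty(dimension[::-1], dtype=object)   # reversed shape, as in build_cells
vol_data = np.asarray([1.0] * num_mesh_cells)
volumes = np.reshape(vol_data, (vol_data.shape[0],))  # same reshape as UnstructuredMesh.from_hdf5
total_volume = np.sum(volumes)
print(num_mesh_cells, total_volume)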
NOAA-ORR-ERD/post_gnome | [
"0d4ac24c0afd64d1044c50d56090081537ce2928"
] | [
"src/post_gnome/plotting/examples/contour_particles_at_time.py"
] | [
"from post_gnome.plotting import geo_plots\nimport matplotlib.pyplot as plt\nimport datetime\n\n\nplt.clf()\n\nax = geo_plots.add_map(bbox=(-125.2,-124.2,47.7,48.3), bna='coast_wa.bna') \n\n#add particles at one time\nt = datetime.datetime(2016,8,18,12)\n\nfilename = 'WA_particles.nc'\nax = geo_plots.contour_particles(ax,filename,t,levels=[0.1, 0.4, 0.6, 1])\n\n\nplt.show()"
] | [
[
"matplotlib.pyplot.clf",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DavidNemeskey/pytorch_lm | [
"5ef28d863db5da5d88f3d7a6860b75065894c96a"
] | [
"pytorch_lm/lr_schedule.py"
] | [
"#!/usr/bin/env python3\n# vim: set fileencoding=utf-8 :\n\n\"\"\"LR schedules.\"\"\"\n\nfrom bisect import bisect_right\n\nfrom torch.optim.lr_scheduler import _LRScheduler, ExponentialLR, ReduceLROnPlateau\n\n\nclass ConstantLR(_LRScheduler):\n \"\"\"Keeps the learning rate constant.\"\"\"\n def get_lr(self):\n return [base_lr for base_lr in self.base_lrs]\n\n\nclass MultiScheduleLR(_LRScheduler):\n \"\"\"\n Sets a different learning rate scheduler once the number of epochs reaches\n one of the milestones. When last_epoch=-1, sets initial lr as lr.\n\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n milestones (dict): A dictionary of {milestone: scheduler}. All scheduler\n objects must be associated with the same optimizer.\n The first milestone should be 0.\n last_epoch (int): The index of the last (previous) epoch. Default: -1.\n \"\"\"\n def __init__(self, optimizer, milestones, last_epoch=-1):\n if not (isinstance(milestones, dict) and dict):\n raise ValueError('Milestones should be non-empty dict of '\n '{milestone: scheduler}. Got {}', milestones)\n for scheduler in milestones.values():\n if not isinstance(scheduler, _LRScheduler):\n raise TypeError('{} is not an _LRScheduler'.format(\n type(scheduler).__name__))\n if scheduler.optimizer != optimizer:\n raise ValueError('All schedulers must be associated with the '\n 'same Optimizer.')\n self.milestones, self.schedulers = zip(*sorted(milestones.items()))\n if self.milestones[0] > last_epoch + 1:\n raise ValueError('The first milestone must be less or equal '\n 'than last_epoch.')\n self.scheduler = None\n super(MultiScheduleLR, self).__init__(optimizer, last_epoch)\n\n def step(self, epoch=None):\n super(MultiScheduleLR, self).step(epoch)\n scheduler = self.schedulers[\n bisect_right(self.milestones, self.last_epoch) - 1\n ]\n if scheduler != self.scheduler:\n self.scheduler = scheduler\n self.scheduler.step(epoch)\n\n def get_lr(self):\n milestone = bisect_right(self.milestones, self.last_epoch) - 1\n scheduler = self.schedulers[milestone]\n return scheduler.get_lr()\n\n\nclass ZarembaScheduleLR(MultiScheduleLR):\n \"\"\"The Zaremba schedule.\"\"\"\n def __init__(self, optimizer, lr_decay, decay_delay, last_epoch=-1):\n super(ZarembaScheduleLR, self).__init__(\n optimizer,\n {0: ConstantLR(optimizer),\n decay_delay: ExponentialLR(optimizer, lr_decay, last_epoch=0)}\n )\n\n\ndef lr_step_at_epoch_start(lr_scheduler):\n \"\"\"\n Tells if the step() method should be invoked at the beginning of the\n epoch. True for all LR schedulers, with the only exception being\n ReduceLROnPlateau.\n \"\"\"\n if isinstance(lr_scheduler, ReduceLROnPlateau):\n return False\n else:\n return True\n"
] | [
[
"torch.optim.lr_scheduler.ExponentialLR"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
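A short usage sketch for the one API this record lists, torch.optim.lr_scheduler.ExponentialLR, which ZarembaScheduleLR wraps behind a ConstantLR warm-up; the optimizer, parameter, and decay rate below are hypothetical.

import torch
from torch.optim.lr_scheduler import ExponentialLR

params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.SGD(params, lr=1.0)
scheduler = ExponentialLR(optimizer, gamma=0.5)   # lr is multiplied by gamma at each step()
for _ in range(3):
    optimizer.step()
    scheduler.step()
print(optimizer.param_groups[0]['lr'])            # 0.125 after three decays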
caseybackes/mathcharcnn | [
"c66a2e609f2cc1ac2550b890fbf14f9e3de7f95f"
] | [
"src/my_cnn.py"
] | [
"import tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import (\n Dense,\n Dropout,\n Activation,\n Flatten,\n Conv2D,\n MaxPooling2D,\n)\nimport pickle\nimport numpy as np\n\n\ndef build_model(num_categories, filter_size=(3, 3)):\n # - - - THE SIMPLE CNN MODEL\n model = Sequential()\n\n # - - - ADD LAYERS TO THE MODEL\n model.add(Conv2D(45, filter_size, input_shape=X.shape[1:]))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(20, filter_size))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Flatten())\n model.add(Dense(20))\n\n model.add(Dense(num_categories))\n model.add(Activation(\"sigmoid\"))\n\n # - - - COMPILE THE MODEL\n model.compile(\n optimizer=\"adam\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"]\n )\n return model\n\n\nif __name__ == \"__main__\":\n\n X = pickle.load(open(\"X.pickle\", \"rb\"))\n y = pickle.load(open(\"y.pickle\", \"rb\"))\n\n # - - - Scale the X data\n X = X / 255\n filter_size = (3, 3)\n\n # - - - BUILD THE MODEL FROM FUNCTION ABOVE\n model = build_model(filter_size=filter_size)\n\n # - - - FIT THE MODEL\n model.fit(X, y, batch_size=10, epochs=3, validation_split=0.1)\n"
] | [
[
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
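In this entry the __main__ block calls build_model(filter_size=...) without num_categories and the function reads a global X for its input shape; a self-contained variant that passes the input shape and class count explicitly is sketched below, trained on random data with made-up sizes.

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Flatten, Conv2D, MaxPooling2D

def build_model(num_categories, input_shape, filter_size=(3, 3)):
    model = Sequential()
    model.add(Conv2D(45, filter_size, input_shape=input_shape))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(num_categories))
    model.add(Activation("sigmoid"))
    model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
    return model

X = np.random.rand(20, 45, 45, 1)                 # 20 fake 45x45 grayscale images
y = np.eye(5)[np.random.randint(0, 5, size=20)]   # one-hot labels for 5 classes
model = build_model(num_categories=5, input_shape=X.shape[1:])
model.fit(X, y, batch_size=10, epochs=1, validation_split=0.1)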
sergiobat/ppuda | [
"9b2a4b3dd6cced4d3db79a0a3262b69c49ab0781"
] | [
"ppuda/deepnets1m/genotypes.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nSome functionality in this script is based on the DARTS code: https://github.com/quark0/darts (Apache License 2.0)\n\n\"\"\"\n\n\nfrom collections import namedtuple\nimport torch.nn.functional as F\nimport torch\nfrom torch.autograd import Variable\n\n\nGenotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')\n\n\nPRIMITIVES_DEEPNETS1M = [\n 'max_pool',\n 'avg_pool',\n 'sep_conv',\n 'dil_conv',\n 'conv',\n 'msa',\n 'cse',\n 'sum',\n 'concat',\n 'input',\n 'bias',\n 'bn',\n 'ln',\n 'pos_enc',\n 'glob_avg',\n]\n\n\nDARTS = Genotype(normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5],\n reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5])\n\nPDARTS = Genotype(normal=[('skip_connect', 0), ('dil_conv_3x3', 1), ('skip_connect', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('sep_conv_3x3', 3), ('sep_conv_3x3', 0), ('dil_conv_5x5', 4)], normal_concat=range(2, 6),\n reduce=[('avg_pool_3x3', 0), ('sep_conv_5x5', 1), ('sep_conv_3x3', 0), ('dil_conv_5x5', 2), ('max_pool_3x3', 0), ('dil_conv_3x3', 1), ('dil_conv_3x3', 1), ('dil_conv_5x5', 3)], reduce_concat=range(2, 6))\n\nViT = Genotype(normal=[('none', 0), ('msa', 1)], normal_concat=[2],\n reduce=[('none', 0), ('avg_pool_3x3', 1)], reduce_concat=[2])\n\n\n\ndef from_dict(genotype):\n return Genotype(normal=genotype['normal'],\n normal_concat=genotype['normal_concat'],\n reduce=genotype['reduce'],\n reduce_concat=genotype['reduce_concat'])\n\n\ndef to_dict(genotype):\n return {'normal': list(genotype.normal),\n 'normal_concat': list(genotype.normal_concat),\n 'reduce': list(genotype.reduce),\n 'reduce_concat': list(genotype.reduce_concat)}\n\n\n\ndef sample_genotype(steps=1, only_pool=False, allow_none=True, drop_concat=True, allow_transformer=False):\n\n # Extended set of primitives based on https://github.com/quark0/darts/blob/master/cnn/genotypes.py\n PRIMITIVES_DARTS_EXT = [\n 'none',\n 'max_pool_3x3',\n 'avg_pool_3x3',\n 'skip_connect',\n 'sep_conv_3x3',\n 'sep_conv_5x5',\n 'dil_conv_3x3',\n 'dil_conv_5x5',\n 'conv_1x1',\n 'conv_7x1_1x7',\n 'conv_3x3',\n 'conv_5x5',\n 'conv_7x7',\n 'msa',\n 'cse'\n ]\n\n multiplier = steps\n k = sum(1 for i in range(steps) for n in range(2 + i))\n num_ops = len(PRIMITIVES_DARTS_EXT)\n alphas_normal = Variable(1e-3 * torch.randn(k, num_ops))\n alphas_reduce = Variable(1e-3 * torch.randn(k, num_ops))\n\n if only_pool:\n assert PRIMITIVES_DARTS_EXT[3] == 'skip_connect', PRIMITIVES_DARTS_EXT\n assert PRIMITIVES_DARTS_EXT[4] == 'sep_conv_3x3', PRIMITIVES_DARTS_EXT\n alphas_reduce[:, 4:] = -1000 # prevent sampling operators with learnable params to sample the architectures similar to the best DARTS cell\n\n if not allow_transformer:\n ind = PRIMITIVES_DARTS_EXT.index('msa')\n assert ind == len(PRIMITIVES_DARTS_EXT) - 2, (ind, PRIMITIVES_DARTS_EXT)\n alphas_normal[:, ind] = -1000\n alphas_reduce[:, ind] = -1000\n\n def _parse(weights):\n # Based on https://github.com/quark0/darts/blob/master/cnn/model_search.py#L135\n gene = []\n n = 2\n start = 0\n for i in range(steps):\n end = start + n\n 
W = weights[start:end].copy()\n edges = sorted(range(i + 2),\n key=lambda x: -max(W[x][k] for k in range(len(W[x])) if (k != PRIMITIVES_DARTS_EXT.index('none') or allow_none)))[:2]\n for j in edges:\n k_best = None\n for k in range(len(W[j])):\n if k != PRIMITIVES_DARTS_EXT.index('none') or allow_none:\n if k_best is None or W[j][k] > W[j][k_best]:\n k_best = k\n gene.append((PRIMITIVES_DARTS_EXT[k_best], j))\n start = end\n n += 1\n return gene\n\n gene_normal = _parse(F.softmax(alphas_normal, dim=-1).data.numpy())\n gene_reduce = _parse(F.softmax(alphas_reduce, dim=-1).data.numpy())\n\n if drop_concat:\n concat = []\n for i in range(2 + steps - multiplier, steps + 2):\n if i == steps + 1 or torch.rand(1).item() > 0.5: # always add the last otherwise the features from the previous sum nodes will be lost\n concat.append(i)\n else:\n concat = range(2 + steps - multiplier, steps + 2)\n\n genotype = Genotype(\n normal=gene_normal, normal_concat=concat,\n reduce=gene_reduce, reduce_concat=concat\n )\n\n return genotype\n"
] | [
[
"torch.randn",
"torch.nn.functional.softmax",
"torch.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
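A tiny sketch of the sampling pattern used in sample_genotype above, softmax over small random logits followed by an argmax-style pick, exercising only the three torch calls this record lists; the primitive list is a toy subset, not the full PRIMITIVES_DARTS_EXT.

import torch
import torch.nn.functional as F

primitives = ['max_pool_3x3', 'skip_connect', 'sep_conv_3x3']   # toy subset
alphas = 1e-3 * torch.randn(4, len(primitives))                 # 4 edges, 3 candidate ops
weights = F.softmax(alphas, dim=-1)
for edge in weights:
    op = primitives[int(edge.argmax())]
    keep = torch.rand(1).item() > 0.5                           # coin flip, as in drop_concat
    print(op, keep)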
Kamlesh-Prajapati/Deep-Surveillance-System | [
"beb19225c9822aa831f29bc8bad7e4d814cb50c0"
] | [
"settings/TrainSettings.py"
] | [
"import tensorflow as tf\nimport settings.DataSettings as dataSettings\n\n'''\n Following two variables control the shape of input\n data as the shape: [BATCH_SIZE*UNROLLED_SIZE, w, h, c].\n BATCH_SIZE: number of Videos in a batch.\n UNROLLED_SIZE: number of Frames in a Video.\n For the ConvNet part, the input will be the shape:\n [BATCH_SIZE*UNROLLED_SIZE, w, h, c].\n For the RNN part, the input will be the shape:\n [BATCH_SIZE, UNROLLED_SIZE, w, h, c] so that the\n tf.nn.rnn_cell.dynamic_rnn() can unroll the RNN.\n The output of the total network will be the shape:\n [BATCH_SIZE, UNROLLED_SIZE, NUMBER_OF_CATEGORIES]\n'''\nBATCH_SIZE = 3\nUNROLLED_SIZE = 40\n#BATCH_SIZE = 4\n#UNROLLED_SIZE = 40\n\nPRETRAIN_MODEL_PATH_NAME = \"\"\n#PRETRAIN_MODEL_PATH_NAME = \"temp/G2D19_P2OF_ResHB_1LSTM_dataAug_expLR/save_epoch_14/ViolenceNet.ckpt\"\n\n'''\n If one want to finetune, insert the LastLayer to the following list.\n ex: NAME_SCOPES_NOT_TO_RECOVER_FROM_CHECKPOINT = ['Conv4', 'Conv5']\n'''\nNAME_SCOPES_NOT_TO_RECOVER_FROM_CHECKPOINT = []\n\nMAX_TRAINING_EPOCH = 1\n\nEPOCHS_TO_START_SAVE_MODEL = 1\nPATH_TO_SAVE_MODEL = \"tempkrinal/G2D19_P2OF_ResHB_1LSTM_dataAug_expLR\"\nMAX_TRAINING_SAVE_MODEL = MAX_TRAINING_EPOCH\nPERFORM_DATA_AUGMENTATION = True\n\ndef GetOptimizer(learningRate_):\n\treturn tf.train.AdamOptimizer(learning_rate=learningRate_)\n\n'''\n Following list three different LearningRate decay methods:\n\t1. _stairLearningRate(),\n\t2. _exponentialDecayLearningRate()\n\t3. _polynomialDecayLearningRate()\n'''\ndef _stairLearningRate(currentEpoch_, currentStep_):\n\t#LIST_OF_EPOCH_LEARNING_RATE_PAIRS = [ (0, 1e-4), (5, 1e-5) ]\n\tLIST_OF_EPOCH_LEARNING_RATE_PAIRS = [ (0, 1e-6), (15, 5e-7), (25, 1e-7) ]\n\t#LIST_OF_EPOCH_LEARNING_RATE_PAIRS = [ (0, 3e-6), (5, 2.5e-6), (10, 2e-6), (15, 1.5e-6), (20, 1e-6) ]\n\n\tfor eachPair in reversed(LIST_OF_EPOCH_LEARNING_RATE_PAIRS):\n\t\tif currentEpoch_ >= eachPair[0]:\n\t\t\treturn eachPair[1]\n\n\t# If nothing matched, return the first pair.learningRate as default\n\treturn LIST_OF_EPOCH_LEARNING_RATE_PAIRS[0][1] \n\n\ndef _exponentialDecayLearningRate(currentEpoch_, currentStep_):\n\t'''\n\t Exponential Decay:\n\t\tlearningRate = INITIAL_LEARNING_RATE * DECAY_RATE ^ (currentStep_ / DECAY_STEP) + END_LEARNING_RATE\n\t'''\n\tINITIAL_LEARNING_RATE = 1e-5\n\tDECAY_RATE = 0.9\n\tNUMBER_OF_BATCHES_PER_EPOCH = 250\n\tNUMBER_OF_EPOCHS_PER_DECAY = 1\n\tDECAY_STEP = int(NUMBER_OF_BATCHES_PER_EPOCH * NUMBER_OF_EPOCHS_PER_DECAY)\n\tEND_LEARNING_RATE = 0.0\n\n\tlearningRate = INITIAL_LEARNING_RATE * DECAY_RATE ** (currentStep_ / DECAY_STEP) + END_LEARNING_RATE\n\n\treturn learningRate\n\ndef _polynomialDecayLearningRate(currentEpoch_, currentStep_):\n\t'''\n\t Polynomial Decay:\n\t\tstep = min(currentStep_, MAX_STEPS)\n\t\tlearningRate = (START_LEARNING_RATE - END_LEARNING_RATE) * (1 - step/MAX_STEPS)^(POWER) + END_LEARNING_RATE\n\t'''\n\tSTART_LEARNING_RATE = 2e-6\n\tEND_LEARNING_RATE = 1e-7\n\tMAX_STEPS = MAX_TRAINING_EPOCH * 125\n\tPOWER = 4\n\n\n\ndef GetLearningRate(currentEpoch_, currentStep_):\n#\treturn _stairLearningRate(currentEpoch_, currentStep_)\n\treturn _exponentialDecayLearningRate(currentEpoch_, currentStep_=currentStep_)\n\n\n\n#####################\n# Advenced Settings #\n#####################\n'''\n Following settings depend on (BATCH_SIZE, UNROLLED_SIZE, PERFORM_DATA_AUGMENTATION):\n if (4, 40, False), Recommend values:\n\tWAITING_QUEUE_MAX_SIZE = 60\n\tLOADED_QUEUE_MAX_SIZE = 30\n\tNUMBER_OF_LOAD_DATA_THREADS=2\n\n if (4, 40, True), 
Recommend values:\n\tWAITING_QUEUE_MAX_SIZE = 180\n\tLOADED_QUEUE_MAX_SIZE = 80\n\tNUMBER_OF_LOAD_DATA_THREADS=2\n\n if (40, 1, False), Recommend values:\n\tWAITING_QUEUE_MAX_SIZE = 180\n\tLOADED_QUEUE_MAX_SIZE = 80\n\tNUMBER_OF_LOAD_DATA_THREADS=4\n\n if (40, 1, True), Recommend values:\n\tWAITING_QUEUE_MAX_SIZE = 180\n\tLOADED_QUEUE_MAX_SIZE = 80\n\tNUMBER_OF_LOAD_DATA_THREADS=4\n\n Note: The \"Averaged GetBatch Time\" that printed while you train an epoch, should be\n\t smaller than 0.001(s). Otherwise, increase NUMBER_OF_LOAD_DATA_THREADS.\n'''\nWAITING_QUEUE_MAX_SIZE = 180\nLOADED_QUEUE_MAX_SIZE = 80\nNUMBER_OF_LOAD_DATA_THREADS=3\n\nMAX_GRADIENT_VALUE = 5.0\nMIN_GRADIENT_VALUE = -5.0\n"
] | [
[
"tensorflow.train.AdamOptimizer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
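In this entry _polynomialDecayLearningRate documents the decay formula but never computes or returns a value; a hedged completion following that docstring (same constants, my own function name) would look like:

def polynomial_decay_lr(current_step, max_training_epoch=1):
    # Formula quoted in the record's docstring; the copied source stops after the constants.
    START_LEARNING_RATE = 2e-6
    END_LEARNING_RATE = 1e-7
    MAX_STEPS = max_training_epoch * 125
    POWER = 4
    step = min(current_step, MAX_STEPS)
    return (START_LEARNING_RATE - END_LEARNING_RATE) * (1 - step / MAX_STEPS) ** POWER + END_LEARNING_RATE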
sankhesh/tomviz | [
"7116f4eb75b30534a24462f4ddfb1694fe41c308"
] | [
"tomviz/python/AutoTiltAxisShiftAlignment.py"
] | [
"import numpy as np\nfrom scipy.interpolate import interp1d\nimport tomviz.operators\n\n\nclass AutoTiltAxisShiftAlignmentOperator(tomviz.operators.CancelableOperator):\n\n def transform(self, dataset):\n \"\"\"Automatic align the tilt axis to the center of images\"\"\"\n self.progress.maximum = 1\n\n # Get Tilt angles\n tilt_angles = dataset.tilt_angles\n\n tiltSeries = dataset.active_scalars\n if tiltSeries is None:\n raise RuntimeError(\"No scalars found!\")\n\n Nx, Ny, Nz = tiltSeries.shape\n\n shifts = (np.linspace(-20, 20, 41)).astype('int')\n numberOfSlices = 5 # number of slices used for recon\n\n # randomly choose slices with top 50% total intensities\n tiltSeriesSum = np.sum(tiltSeries, axis=(1, 2))\n temp = tiltSeriesSum.argsort()[Nx // 2:]\n slices = temp[np.random.permutation(temp.size)[:numberOfSlices]]\n print('Reconstruction slices:')\n print(slices)\n\n I = np.zeros(shifts.size)\n\n self.progress.maximum = shifts.size - 1\n step = 1\n\n for i in range(shifts.size):\n if self.canceled:\n return\n shiftedTiltSeries = np.roll(\n tiltSeries[slices, :, :, ], shifts[i], axis=1)\n for s in range(numberOfSlices):\n self.progress.message = ('Reconstructing slice No.%d/%d with a '\n 'shift of %d pixels' %\n (s, numberOfSlices, shifts[i]))\n\n recon = wbp2(shiftedTiltSeries[s, :, :],\n tilt_angles, Ny, 'ramp', 'linear')\n I[i] = I[i] + np.amax(recon)\n\n step += 1\n self.progress.value = step\n\n print('shift: %d' % shifts[np.argmax(I)])\n\n result = np.roll(tiltSeries, shifts[np.argmax(I)], axis=1)\n result = np.asfortranarray(result)\n\n # Set the result as the new scalars.\n dataset.active_scalars = result\n\n\ndef wbp2(sinogram, angles, N=None, filter=\"ramp\", interp=\"linear\"):\n if sinogram.ndim != 2:\n raise ValueError('Sinogram must be 2D')\n (Nray, Nproj) = sinogram.shape\n if Nproj != angles.size:\n raise ValueError('Sinogram does not match angles!')\n\n interpolation_methods = ('linear', 'nearest', 'spline', 'cubic')\n if interp not in interpolation_methods:\n raise ValueError(\"Unknown interpolation: %s\" % interp)\n if not N: # if ouput size is not given\n N = int(np.floor(np.sqrt(Nray**2 / 2.0)))\n\n ang = np.double(angles) * np.pi / 180.0\n # Create Fourier filter\n F = makeFilter(Nray, filter)\n # Pad sinogram for filtering\n s = np.lib.pad(sinogram, ((0, F.size - Nray), (0, 0)),\n 'constant', constant_values=(0, 0))\n # Apply Fourier filter\n s = np.fft.fft(s, axis=0) * F\n s = np.real(np.fft.ifft(s, axis=0))\n # Change back to original\n s = s[:Nray, :]\n\n # Back projection\n recon = np.zeros((N, N))\n center_proj = Nray // 2 # Index of center of projection\n [X, Y] = np.mgrid[0:N, 0:N]\n xpr = X - int(N) // 2\n ypr = Y - int(N) // 2\n\n for j in range(Nproj):\n t = ypr * np.cos(ang[j]) - xpr * np.sin(ang[j])\n x = np.arange(Nray) - center_proj\n if interp == 'linear':\n bp = np.interp(t, x, s[:, j], left=0, right=0)\n elif interp == 'spline':\n interpolant = interp1d(\n x, s[:, j], kind='slinear', bounds_error=False, fill_value=0)\n bp = interpolant(t)\n else:\n interpolant = interp1d(\n x, s[:, j], kind=interp, bounds_error=False, fill_value=0)\n bp = interpolant(t)\n recon = recon + bp\n\n # Normalize\n recon = recon * np.pi / 2 / Nproj\n return recon\n\n# Filter (1D) projections.\n\n\ndef makeFilter(Nray, filterMethod=\"ramp\"):\n # Calculate next power of 2\n N2 = 2**np.ceil(np.log2(Nray))\n # Make a ramp filter.\n freq = np.fft.fftfreq(int(N2)).reshape(-1, 1)\n omega = 2 * np.pi * freq\n filter = 2 * np.abs(freq)\n\n if filterMethod == \"ramp\":\n pass\n 
elif filterMethod == \"shepp-logan\":\n filter[1:] = filter[1:] * np.sin(omega[1:]) / omega[1:]\n elif filterMethod == \"cosine\":\n filter[1:] = filter[1:] * np.cos(filter[1:])\n elif filterMethod == \"hamming\":\n filter[1:] = filter[1:] * (0.54 + 0.46 * np.cos(omega[1:] / 2))\n elif filterMethod == \"hann\":\n filter[1:] = filter[1:] * (1 + np.cos(omega[1:] / 2)) / 2\n elif filterMethod == \"none\":\n filter[:] = 1\n else:\n raise ValueError(\"Unknown filter: %s\" % filterMethod)\n\n return filter\n"
] | [
[
"numpy.amax",
"numpy.sqrt",
"numpy.linspace",
"numpy.double",
"numpy.roll",
"numpy.arange",
"numpy.sin",
"scipy.interpolate.interp1d",
"numpy.argmax",
"numpy.interp",
"numpy.zeros",
"numpy.lib.pad",
"numpy.asfortranarray",
"numpy.fft.ifft",
"numpy.sum",
"numpy.log2",
"numpy.abs",
"numpy.fft.fft",
"numpy.cos",
"numpy.random.permutation"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
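A small sketch of the back-projection interpolation step from wbp2 above, comparing plain np.interp with scipy.interpolate.interp1d using bounds_error=False and fill_value=0; the one-dimensional projection values are made up.

import numpy as np
from scipy.interpolate import interp1d

s = np.array([0.0, 1.0, 4.0, 9.0, 4.0, 1.0, 0.0])   # one filtered projection (toy values)
x = np.arange(s.size) - s.size // 2                  # ray coordinates centered on the detector
t = np.linspace(-4, 4, 9)                            # rotated pixel coordinates
bp_linear = np.interp(t, x, s, left=0, right=0)
bp_cubic = interp1d(x, s, kind='cubic', bounds_error=False, fill_value=0)(t)
print(bp_linear)
print(bp_cubic)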
numba/roc-examples | [
"752391b1f014df8e8f6919279ffa382d278f3b4b"
] | [
"numba_roc_examples/pandas_eval/test_eval_engine.py"
] | [
"from __future__ import print_function, division, absolute_import\n\nfrom numba import unittest_support as unittest\n\nimport pandas as pd\nimport numpy as np\nfrom timeit import default_timer as timer\nfrom numba_roc_examples.pandas_eval import eval_engine\n\neval_engine.register()\n\n\ndef _best_time(fn):\n ts = timer()\n fn()\n te = timer()\n return te - ts\n\n\ndef best_time(fn, repeat):\n return min(_best_time(fn) for _ in range(repeat))\n\n\ndef eval_template(expr, engine, nelem=4, repeat=3):\n print(\"Eval:\", expr, \"Nelem:\", nelem)\n a = pd.DataFrame(dict(x=np.arange(nelem, dtype=np.float64),\n y=np.arange(1, 1 + nelem, dtype=np.float64)))\n\n # print('Input:', type(a), '\\n', a)\n\n b = a.eval(expr)\n # print('Output:', type(b), '\\n', b)\n\n c = a.eval(expr, engine=engine)\n # print('Output:', type(c), '\\n', c)\n\n np.testing.assert_allclose(b, c) # , rtol=1e-5)\n\n runtime = best_time(lambda: a.eval(expr), repeat=repeat)\n # print('Output:', type(b), '\\n', b)\n print('numexpr time', runtime)\n\n runtime = best_time(lambda: a.eval(expr, engine=engine), repeat=repeat)\n # print('Output:', type(c), '\\n', c)\n print('{0} time'.format(engine), runtime)\n\n\ndef query_template(expr, engine, nelem=4, repeat=3):\n print(\"Query:\", expr, \"Nelem:\", nelem)\n a = pd.DataFrame(dict(x=np.arange(nelem, dtype=np.float64),\n y=np.arange(1, 1 + nelem, dtype=np.float64)))\n\n # print('Input:', type(a), '\\n', a)\n\n b = a.query(expr)\n # print('Output:', type(b), '\\n', b)\n\n c = a.query(expr, engine=engine)\n # print('Output:', type(c), '\\n', c)\n\n pd.util.testing.assert_frame_equal(b, c)\n\n runtime = best_time(lambda: a.query(expr), repeat=repeat)\n # print('Output:', type(b), '\\n', b)\n print('numexpr time', runtime)\n\n runtime = best_time(lambda: a.query(expr, engine=engine), repeat=repeat)\n # print('Output:', type(c), '\\n', c)\n print('{0} time'.format(engine), runtime)\n\n\ndef driver_test_template(driver, expr, nelem=4, repeat=10):\n print(\"test cpu\")\n driver(expr, engine='numba.cpu', nelem=nelem, repeat=repeat)\n\n print(\"test roc\")\n driver(expr, engine='numba.roc', nelem=nelem, repeat=repeat)\n\n\nclass TestEvalEngine(unittest.TestCase):\n def test_simple_query(self):\n driver_test_template(query_template, \"x > 2 or y > 1\")\n\n def test_simple_eval(self):\n driver_test_template(eval_template, \"x + y\")\n\n def test_special_case_eval_sqrt(self):\n driver_test_template(eval_template, \"x + y ** 0.5\")\n\n def test_special_case_eval_square(self):\n driver_test_template(eval_template, \"x + y ** 2\")\n\n def test_special_case_boundaries(self):\n driver_test_template(eval_template, \"x + y ** 1.9\")\n driver_test_template(eval_template, \"x + y ** 0.49\")\n\n def test_math_calls(self):\n driver_test_template(eval_template, \"sin(x) + cos(y)\")\n\n def test_all_unary_math_calls(self):\n from pandas.computation.ops import _unary_math_ops\n\n for op in _unary_math_ops:\n expr = \"{0}(x)\".format(op)\n driver_test_template(eval_template, expr)\n\n def test_all_binary_math_calls(self):\n from pandas.computation.ops import _binary_math_ops\n\n for op in _binary_math_ops:\n expr = \"{0}(x, y)\".format(op)\n driver_test_template(eval_template, expr)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"pandas.util.testing.assert_frame_equal",
"numpy.arange",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
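A minimal illustration of the DataFrame.eval/query pattern the tests above time, on a tiny frame with the default engine only, plus the np.testing.assert_allclose check they use; the numba engines registered in the record are not exercised here.

import numpy as np
import pandas as pd

df = pd.DataFrame(dict(x=np.arange(4, dtype=np.float64),
                       y=np.arange(1, 5, dtype=np.float64)))
summed = df.eval("x + y")                  # Series [1, 3, 5, 7]
subset = df.query("x > 2 or y > 1")        # rows where the condition holds
np.testing.assert_allclose(summed, df.x + df.y)
print(subset)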
agschweingruber/icp | [
"b12996146cfb8cbce9da8d16f77169d428b5af98"
] | [
"training/src/models/loss.py"
] | [
"import torch\nfrom src.utils import pop_feature\n\n\nclass SequentialLoss:\n def __init__(self, categorical_feature_idcs, class_weights, target_names,\n norm_seq_len):\n self.categorical_feature_idcs = categorical_feature_idcs\n self.target_names = target_names\n self.norm_seq_len = norm_seq_len\n self.class_weights = class_weights\n self.cat_loss_fn = torch.nn.BCEWithLogitsLoss(reduction='none')\n self.regression_loss_fcn = torch.nn.MSELoss(reduction='none')\n\n def __call__(self, pred, target, mask):\n \"\"\"Calculates the loss and considers missing values in the loss given by mask\"\"\"\n # Apply mask:\n target[mask] = 0.0\n step_mask = mask.sum(-1).bool()\n\n # Pop out the categoricals:\n if self.categorical_feature_idcs:\n categorical_loss = 0\n pred_categorical, pred = pop_feature(pred, self.categorical_feature_idcs)\n target_categorical, target = pop_feature(target, self.categorical_feature_idcs)\n for idx, cat_idx in enumerate(self.categorical_feature_idcs):\n cat_preds = pred_categorical[:, :, idx]\n cat_targets = target_categorical[:, :, idx]\n cat_loss = self.cat_loss_fn(cat_preds, cat_targets)\n if self.class_weights:\n cat_loss[cat_targets == 1] *= self.class_weights[cat_idx]\n categorical_loss += cat_loss\n categorical_loss[step_mask] = 0\n\n # Calculate the loss of the regression on all other features:\n rest_loss = self.regression_loss_fcn(pred, target)\n rest_loss[step_mask] = 0\n\n sum_rest_loss = rest_loss.sum(dim=-1).sum(dim=-1)\n if self.categorical_feature_idcs:\n sum_categorical_loss = categorical_loss.sum(-1)#.sum(-1)\n else:\n sum_categorical_loss = 0.0\n\n # Norm loss per seq len and per non NAN targets.\n # Basically, this reduces the weight of longer sequences and of sequences with more NaNs.\n # Add 1 to avoid zero division:\n if self.norm_seq_len:\n count_per_pat = (~step_mask).sum(dim=-1) + 1\n else:\n count_per_pat = 1\n\n # Aggregate losses:\n loss = ((sum_rest_loss + sum_categorical_loss) /\n count_per_pat).mean()\n return loss\n"
] | [
[
"torch.nn.BCEWithLogitsLoss",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
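A compact sketch of the masked reduction='none' pattern SequentialLoss uses, elementwise losses zeroed where the target is missing before summing; random tensors stand in for predictions, targets, and the NaN mask.

import torch

pred = torch.randn(2, 5, 3)                       # batch x steps x features
target = torch.randn(2, 5, 3)
mask = torch.rand(2, 5, 3) > 0.8                  # True where the target is missing
target[mask] = 0.0
step_mask = mask.sum(-1).bool()                   # steps with any missing feature

loss_fn = torch.nn.MSELoss(reduction='none')      # keep per-element losses
loss = loss_fn(pred, target)
loss[step_mask] = 0                               # drop masked steps, as in SequentialLoss
print(loss.sum(dim=-1).sum(dim=-1).mean())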
robinlin1973/vnpy | [
"ab036f6bb48b4f788eec496deac377a8bce2ae83"
] | [
"docker/dockerTrader/ctaStrategy/strategy/strategyKingKeltner.py"
] | [
"# encoding: UTF-8\n\n\"\"\"\n基于King Keltner通道的交易策略,适合用在股指上,\n展示了OCO委托和5分钟K线聚合的方法。\n\n注意事项:\n1. 作者不对交易盈利做任何保证,策略代码仅供参考\n2. 本策略需要用到talib,没有安装的用户请先参考www.vnpy.org上的教程安装\n3. 将IF0000_1min.csv用ctaHistoryData.py导入MongoDB后,直接运行本文件即可回测策略\n\"\"\"\n\nfrom __future__ import division\n\nfrom ..ctaBase import *\nfrom ..ctaTemplate import CtaTemplate\n\nimport talib\nimport numpy as np\n\n\n########################################################################\nclass KkStrategy(CtaTemplate):\n \"\"\"基于King Keltner通道的交易策略\"\"\"\n className = 'KkStrategy'\n author = u'用Python的交易员'\n\n # 策略参数\n kkLength = 11 # 计算通道中值的窗口数\n kkDev = 1.6 # 计算通道宽度的偏差\n trailingPrcnt = 0.8 # 移动止损\n initDays = 10 # 初始化数据所用的天数\n fixedSize = 1 # 每次交易的数量\n\n # 策略变量\n bar = None # 1分钟K线对象\n barMinute = EMPTY_STRING # K线当前的分钟\n fiveBar = None # 1分钟K线对象\n\n bufferSize = 100 # 需要缓存的数据的大小\n bufferCount = 0 # 目前已经缓存了的数据的计数\n highArray = np.zeros(bufferSize) # K线最高价的数组\n lowArray = np.zeros(bufferSize) # K线最低价的数组\n closeArray = np.zeros(bufferSize) # K线收盘价的数组\n \n atrValue = 0 # 最新的ATR指标数值\n kkMid = 0 # KK通道中轨\n kkUp = 0 # KK通道上轨\n kkDown = 0 # KK通道下轨\n intraTradeHigh = 0 # 持仓期内的最高点\n intraTradeLow = 0 # 持仓期内的最低点\n\n buyOrderID = None # OCO委托买入开仓的委托号\n shortOrderID = None # OCO委托卖出开仓的委托号\n orderList = [] # 保存委托代码的列表\n\n # 参数列表,保存了参数的名称\n paramList = ['name',\n 'className',\n 'author',\n 'vtSymbol',\n 'kkLength',\n 'kkDev'] \n\n # 变量列表,保存了变量的名称\n varList = ['inited',\n 'trading',\n 'pos',\n 'atrValue',\n 'kkMid',\n 'kkUp',\n 'kkDown'] \n\n #----------------------------------------------------------------------\n def __init__(self, ctaEngine, setting):\n \"\"\"Constructor\"\"\"\n super(KkStrategy, self).__init__(ctaEngine, setting)\n \n #----------------------------------------------------------------------\n def onInit(self):\n \"\"\"初始化策略(必须由用户继承实现)\"\"\"\n self.writeCtaLog(u'%s策略初始化' %self.name)\n \n # 载入历史数据,并采用回放计算的方式初始化策略数值\n initData = self.loadBar(self.initDays)\n for bar in initData:\n self.onBar(bar)\n\n self.putEvent()\n\n #----------------------------------------------------------------------\n def onStart(self):\n \"\"\"启动策略(必须由用户继承实现)\"\"\"\n self.writeCtaLog(u'%s策略启动' %self.name)\n self.putEvent()\n\n #----------------------------------------------------------------------\n def onStop(self):\n \"\"\"停止策略(必须由用户继承实现)\"\"\"\n self.writeCtaLog(u'%s策略停止' %self.name)\n self.putEvent()\n\n #----------------------------------------------------------------------\n def onTick(self, tick):\n \"\"\"收到行情TICK推送(必须由用户继承实现)\"\"\"\n # 聚合为1分钟K线\n tickMinute = tick.datetime.minute\n\n if tickMinute != self.barMinute: \n if self.bar:\n self.onBar(self.bar)\n\n bar = CtaBarData() \n bar.vtSymbol = tick.vtSymbol\n bar.symbol = tick.symbol\n bar.exchange = tick.exchange\n\n bar.open = tick.lastPrice\n bar.high = tick.lastPrice\n bar.low = tick.lastPrice\n bar.close = tick.lastPrice\n\n bar.date = tick.date\n bar.time = tick.time\n bar.datetime = tick.datetime # K线的时间设为第一个Tick的时间\n\n self.bar = bar # 这种写法为了减少一层访问,加快速度\n self.barMinute = tickMinute # 更新当前的分钟\n else: # 否则继续累加新的K线\n bar = self.bar # 写法同样为了加快速度\n\n bar.high = max(bar.high, tick.lastPrice)\n bar.low = min(bar.low, tick.lastPrice)\n bar.close = tick.lastPrice\n\n #----------------------------------------------------------------------\n def onBar(self, bar):\n \"\"\"收到Bar推送(必须由用户继承实现)\"\"\"\n # 如果当前是一个5分钟走完\n if bar.datetime.minute % 5 == 0:\n # 如果已经有聚合5分钟K线\n if self.fiveBar:\n # 将最新分钟的数据更新到目前5分钟线中\n fiveBar = self.fiveBar\n fiveBar.high = max(fiveBar.high, bar.high)\n fiveBar.low = 
min(fiveBar.low, bar.low)\n fiveBar.close = bar.close\n \n # 推送5分钟线数据\n self.onFiveBar(fiveBar)\n \n # 清空5分钟线数据缓存\n self.fiveBar = None\n else:\n # 如果没有缓存则新建\n if not self.fiveBar:\n fiveBar = CtaBarData()\n \n fiveBar.vtSymbol = bar.vtSymbol\n fiveBar.symbol = bar.symbol\n fiveBar.exchange = bar.exchange\n \n fiveBar.open = bar.open\n fiveBar.high = bar.high\n fiveBar.low = bar.low\n fiveBar.close = bar.close\n \n fiveBar.date = bar.date\n fiveBar.time = bar.time\n fiveBar.datetime = bar.datetime \n \n self.fiveBar = fiveBar\n else:\n fiveBar = self.fiveBar\n fiveBar.high = max(fiveBar.high, bar.high)\n fiveBar.low = min(fiveBar.low, bar.low)\n fiveBar.close = bar.close\n \n #----------------------------------------------------------------------\n def onFiveBar(self, bar):\n \"\"\"收到5分钟K线\"\"\"\n # 撤销之前发出的尚未成交的委托(包括限价单和停止单)\n for orderID in self.orderList:\n self.cancelOrder(orderID)\n self.orderList = []\n \n # 保存K线数据\n self.closeArray[0:self.bufferSize-1] = self.closeArray[1:self.bufferSize]\n self.highArray[0:self.bufferSize-1] = self.highArray[1:self.bufferSize]\n self.lowArray[0:self.bufferSize-1] = self.lowArray[1:self.bufferSize]\n \n self.closeArray[-1] = bar.close\n self.highArray[-1] = bar.high\n self.lowArray[-1] = bar.low\n \n self.bufferCount += 1\n if self.bufferCount < self.bufferSize:\n return\n \n # 计算指标数值\n self.atrValue = talib.ATR(self.highArray, \n self.lowArray, \n self.closeArray,\n self.kkLength)[-1]\n self.kkMid = talib.MA(self.closeArray, self.kkLength)[-1]\n self.kkUp = self.kkMid + self.atrValue * self.kkDev\n self.kkDown = self.kkMid - self.atrValue * self.kkDev\n \n # 判断是否要进行交易\n \n # 当前无仓位,发送OCO开仓委托\n if self.pos == 0:\n self.intraTradeHigh = bar.high\n self.intraTradeLow = bar.low \n self.sendOcoOrder(self.kkUp, self.kkDown, self.fixedSize)\n \n # 持有多头仓位\n elif self.pos > 0:\n self.intraTradeHigh = max(self.intraTradeHigh, bar.high)\n self.intraTradeLow = bar.low\n \n orderID = self.sell(self.intraTradeHigh*(1-self.trailingPrcnt/100), \n abs(self.pos), True)\n self.orderList.append(orderID)\n \n # 持有空头仓位\n elif self.pos < 0:\n self.intraTradeHigh = bar.high\n self.intraTradeLow = min(self.intraTradeLow, bar.low)\n \n orderID = self.cover(self.intraTradeLow*(1+self.trailingPrcnt/100),\n abs(self.pos), True)\n self.orderList.append(orderID)\n \n # 发出状态更新事件\n self.putEvent() \n\n #----------------------------------------------------------------------\n def onOrder(self, order):\n \"\"\"收到委托变化推送(必须由用户继承实现)\"\"\"\n pass\n\n #----------------------------------------------------------------------\n def onTrade(self, trade):\n # 多头开仓成交后,撤消空头委托\n if self.pos > 0:\n self.cancelOrder(self.shortOrderID)\n if self.buyOrderID in self.orderList:\n self.orderList.remove(self.buyOrderID)\n if self.shortOrderID in self.orderList:\n self.orderList.remove(self.shortOrderID)\n # 反之同样\n elif self.pos < 0:\n self.cancelOrder(self.buyOrderID)\n if self.buyOrderID in self.orderList:\n self.orderList.remove(self.buyOrderID)\n if self.shortOrderID in self.orderList:\n self.orderList.remove(self.shortOrderID)\n \n # 发出状态更新事件\n self.putEvent()\n \n #----------------------------------------------------------------------\n def sendOcoOrder(self, buyPrice, shortPrice, volume):\n \"\"\"\n 发送OCO委托\n \n OCO(One Cancel Other)委托:\n 1. 主要用于实现区间突破入场\n 2. 包含两个方向相反的停止单\n 3. 
一个方向的停止单成交后会立即撤消另一个方向的\n \"\"\"\n # 发送双边的停止单委托,并记录委托号\n self.buyOrderID = self.buy(buyPrice, volume, True)\n self.shortOrderID = self.short(shortPrice, volume, True)\n \n # 将委托号记录到列表中\n self.orderList.append(self.buyOrderID)\n self.orderList.append(self.shortOrderID)\n\n\nif __name__ == '__main__':\n # 提供直接双击回测的功能\n # 导入PyQt5的包是为了保证matplotlib使用PyQt5而不是PySide,防止初始化出错\n from ctaBacktesting import *\n from PyQt5 import QtCore, QtWidgets\n \n # 创建回测引擎\n engine = BacktestingEngine()\n \n # 设置引擎的回测模式为K线\n engine.setBacktestingMode(engine.BAR_MODE)\n\n # 设置回测用的数据起始日期\n engine.setStartDate('20130101')\n \n # 设置产品相关参数\n engine.setSlippage(0.2) # 股指1跳\n engine.setRate(0.3/10000) # 万0.3\n engine.setSize(300) # 股指合约大小 \n engine.setPriceTick(0.2) # 股指最小价格变动 \n \n # 设置使用的历史数据库\n engine.setDatabase(MINUTE_DB_NAME, 'IF0000')\n \n # 在引擎中创建策略对象\n d = {}\n engine.initStrategy(KkStrategy, d)\n \n # 开始跑回测\n engine.runBacktesting()\n \n # 显示回测结果\n engine.showBacktestingResult()"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
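The only API this record lists is numpy.zeros, used for the fixed-size K-line buffers; a toy version of that rolling-buffer update (shift left, append the newest close) is sketched below with invented prices.

import numpy as np

buffer_size = 5
close_array = np.zeros(buffer_size)
for new_close in [10.0, 10.5, 11.0, 10.8, 11.2, 11.5]:
    close_array[0:buffer_size - 1] = close_array[1:buffer_size]  # shift left by one bar
    close_array[-1] = new_close                                  # newest close at the end
print(close_array)   # keeps only the last 5 closes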
ericlearning/generative-cyclegan | [
"15f0c331ec2a5b69ca77c872d08b821a710e82db"
] | [
"trainers/trainer_lsgan_cyclegan.py"
] | [
"import os\nimport copy\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport pandas as pd\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, StepLR\nfrom utils import set_lr, get_lr, generate_noise, plot_multiple_images, save_fig, save, get_sample_images_list\nfrom itertools import chain\n\nclass Trainer_LSGAN_Cyclegan():\n\tdef __init__(self, netD_A, netD_B, netG_A2B, netG_B2A, device, train_dl, val_dl, lr_D = 0.0002, lr_G = 0.0002, loss_interval = 50, image_interval = 50, snapshot_interval = None, save_img_dir = 'saved_images/', save_snapshot_dir = 'saved_snapshots', resample = None):\n\t\tself.netD_A = netD_A\n\t\tself.netD_B = netD_B\n\t\tself.netG_A2B = netG_A2B\n\t\tself.netG_B2A = netG_B2A\n\t\tself.train_dl = train_dl\n\t\tself.val_dl = val_dl\n\t\tself.lr_D = lr_D\n\t\tself.lr_G = lr_G\n\t\tself.train_iteration_per_epoch = len(self.train_dl)\n\t\tself.device = device\n\t\tself.resample = resample\n\n\t\tself.optimizerD_A = optim.Adam(self.netD_A.parameters(), lr = self.lr_D, betas = (0.5, 0.999))\n\t\tself.optimizerD_B = optim.Adam(self.netD_B.parameters(), lr = self.lr_D, betas = (0.5, 0.999))\n\t\tself.optimizerG = optim.Adam(chain(self.netG_A2B.parameters(), self.netG_B2A.parameters()), lr = self.lr_G, betas = (0.5, 0.999))\n\n\t\tself.real_label = 1\n\t\tself.fake_label = 0\n\n\t\tself.loss_interval = loss_interval\n\t\tself.image_interval = image_interval\n\t\tself.snapshot_interval = snapshot_interval\n\n\t\tself.errD_A_records = []\n\t\tself.errD_B_records = []\n\t\tself.errG_records = []\n\n\t\tself.save_cnt = 0\n\t\tself.save_img_dir = save_img_dir\n\t\tself.save_snapshot_dir = save_snapshot_dir\n\t\tif(not os.path.exists(self.save_img_dir)):\n\t\t\tos.makedirs(self.save_img_dir)\n\t\tif(not os.path.exists(self.save_snapshot_dir)):\n\t\t\tos.makedirs(self.save_snapshot_dir)\n\n\tdef train(self, num_epoch):\n\t\tl1 = nn.L1Loss()\n\t\tfor epoch in range(num_epoch):\n\t\t\tif(self.resample):\n\t\t\t\ttrain_dl_iter = iter(self.train_dl)\n\t\t\tfor i, (a, b) in enumerate(tqdm(self.train_dl)):\n\t\t\t\ta = a.to(self.device)\n\t\t\t\tb = b.to(self.device)\n\n\t\t\t\t# calculate the generator results\n\t\t\t\tfake_a = self.netG_B2A(b)\n\t\t\t\tfake_b = self.netG_A2B(a)\n\n\t\t\t\tself.optimizerD_A.zero_grad()\n\t\t\t\t# calculate the discriminator results\n\t\t\t\tc_a_fake_a = self.netD_A(fake_a.detach())\n\t\t\t\tc_a_real_a = self.netD_A(a)\n\t\t\t\t# calculate the generator loss\n\t\t\t\tc_a_fake_a_loss = torch.mean((c_a_fake_a - torch.zeros(c_a_fake_a.size()).to(self.device)) ** 2)\n\t\t\t\tc_a_real_a_loss = torch.mean((c_a_real_a - torch.ones(c_a_real_a.size()).to(self.device)) ** 2)\n\t\t\t\tc_a_loss = (c_a_fake_a_loss + c_a_real_a_loss)\n\t\t\t\tc_a_loss = c_a_loss / 2.0\t\t# (discriminator updates slower)\n\t\t\t\tc_a_loss.backward()\n\t\t\t\t# update G using the gradients calculated previously\n\t\t\t\tself.optimizerD_A.step()\n\n\n\t\t\t\tself.optimizerD_B.zero_grad()\n\t\t\t\t# calculate the discriminator results\n\t\t\t\tc_b_fake_b = self.netD_B(fake_b.detach())\n\t\t\t\tc_b_real_b = self.netD_B(b)\n\t\t\t\t# calculate the generator loss\n\t\t\t\tc_b_fake_b_loss = torch.mean((c_b_fake_b - torch.zeros(c_b_fake_b.size()).to(self.device)) ** 2)\n\t\t\t\tc_b_real_b_loss = torch.mean((c_b_real_b - torch.ones(c_b_real_b.size()).to(self.device)) ** 2)\n\t\t\t\tc_b_loss = (c_b_fake_b_loss + c_b_real_b_loss)\n\t\t\t\tc_b_loss = c_b_loss / 2.0\t\t# (discriminator updates 
slower)\n\t\t\t\tc_b_loss.backward()\n\t\t\t\t# update G using the gradients calculated previously\n\t\t\t\tself.optimizerD_B.step()\n\n\n\t\t\t\tself.optimizerG.zero_grad()\n\t\t\t\tif(self.resample):\n\t\t\t\t\ta, b = next(train_dl_iter)\n\t\t\t\t\ta = a.to(self.device)\n\t\t\t\t\tb = b.to(self.device)\n\t\t\t\t\tfake_a = self.netG_B2A(b)\n\t\t\t\t\tfake_b = self.netG_A2B(a)\n\n\t\t\t\tcycle_a = self.netG_B2A(fake_b)\n\t\t\t\tcycle_b = self.netG_A2B(fake_a)\n\t\t\t\tidentity_a = self.netG_B2A(a)\n\t\t\t\tidentity_b = self.netG_A2B(b)\n\t\t\t\t\n\t\t\t\t# calculate the discriminator results for both real & fake\n\t\t\t\tc_a_fake_a = self.netD_A(fake_a)\n\t\t\t\tc_b_fake_b = self.netD_B(fake_b)\n\n\t\t\t\t# calculate the generator loss\n\t\t\t\tc_a_loss = torch.mean((c_a_fake_a - torch.ones(c_a_fake_a.size()).to(self.device)) ** 2)\n\t\t\t\tc_b_loss = torch.mean((c_b_fake_b - torch.ones(c_b_fake_b.size()).to(self.device)) ** 2)\n\t\t\t\tcycle_a_loss = l1(cycle_a, a)\n\t\t\t\tcycle_b_loss = l1(cycle_b, b)\n\t\t\t\tidentity_a_loss = l1(identity_a, a)\n\t\t\t\tidentity_b_loss = l1(identity_b, b)\n\n\t\t\t\terrG = c_a_loss + c_b_loss + (cycle_a_loss + cycle_b_loss) * 10.0 + (identity_a_loss + identity_b_loss) * 5.0\n\t\t\t\terrG.backward()\n\t\t\t\t# update G using the gradients calculated previously\n\t\t\t\tself.optimizerG.step()\n\n\t\t\t\tself.errD_A_records.append(float(c_a_loss))\n\t\t\t\tself.errD_B_records.append(float(c_b_loss))\n\t\t\t\tself.errG_records.append(float(errG))\n\n\t\t\t\tif(i % self.loss_interval == 0):\n\t\t\t\t\tprint('[%d/%d] [%d/%d] errD_A : %.4f, errD_B : %.4f, errG : %.4f'\n\t\t\t\t\t\t %(epoch+1, num_epoch, i+1, self.train_iteration_per_epoch, c_a_loss, c_b_loss, errG))\n\n\t\t\t\tif(i % self.image_interval == 0):\n\t\t\t\t\tsample_images_list = get_sample_images_list('Cyclegan', (self.val_dl,self.netG_A2B, self.netG_B2A, self.device))\n\t\t\t\t\tplot_fig = plot_multiple_images(sample_images_list, 3, 6)\n\t\t\t\t\tcur_file_name = os.path.join(self.save_img_dir, str(self.save_cnt)+' : '+str(epoch)+'-'+str(i)+'.jpg')\n\t\t\t\t\tself.save_cnt += 1\n\t\t\t\t\tsave_fig(cur_file_name, plot_fig)\n\t\t\t\t\tplot_fig.clf()\n\n\t\t\t\tif(self.snapshot_interval is not None):\n\t\t\t\t\tif(i % self.snapshot_interval == 0):\n\t\t\t\t\t\tsave(os.path.join(self.save_snapshot_dir, 'Epoch' + str(epoch) + '_' + str(i) + '.state'), self.netD, self.netG, self.optimizerD, self.optimizerG)\n\t\t\t\t\t\t"
] | [
[
"torch.nn.L1Loss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
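A few lines showing the two loss ingredients the trainer above combines, the LSGAN squared-error terms against ones/zeros targets and the nn.L1Loss cycle term; random tensors replace the discriminator outputs and images, and the 10.0 weight mirrors the record.

import torch
import torch.nn as nn

d_real = torch.rand(4, 1)                  # stand-ins for discriminator outputs
d_fake = torch.rand(4, 1)
d_loss = (torch.mean((d_fake - torch.zeros(d_fake.size())) ** 2) +
          torch.mean((d_real - torch.ones(d_real.size())) ** 2)) / 2.0

l1 = nn.L1Loss()
a, cycle_a = torch.rand(4, 3, 8, 8), torch.rand(4, 3, 8, 8)
g_loss = torch.mean((d_fake - torch.ones(d_fake.size())) ** 2) + 10.0 * l1(cycle_a, a)
print(d_loss.item(), g_loss.item())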
SantiagoMille/micom | [
"8f3488a0d4261aacfa5707cfb5fcffa322485b47"
] | [
"micom/elasticity.py"
] | [
"\"\"\"Calculate elasticity coefficients.\n\nFunctions to calculate elasticity coefficients for various community\nquantities.\n\"\"\"\n\nfrom functools import partial\nimport pandas as pd\nimport numpy as np\nfrom tqdm.auto import tqdm\nfrom cobra.util import get_context\nfrom micom.util import reset_min_community_growth\nfrom micom.problems import regularize_l2_norm\nfrom micom.solution import optimize_with_fraction\n\n\nSTEP = 0.1\n\n\ndef _get_fluxes(sol, reactions):\n \"\"\"Get the primal values for a set of variables.\"\"\"\n fluxes = {\n r.id: sol.fluxes.loc[r.community_id, r.global_id] for r in reactions\n }\n return pd.Series(fluxes)\n\n\ndef _derivatives(before, after):\n \"\"\"Get the elasticities for fluxes.\"\"\"\n before_signs = np.sign(before)\n after_signs = np.sign(after)\n if any(np.abs(before_signs - after_signs) > 2):\n ValueError(\n \"Some of the fluxes changed sign. \" \"Can't compute elasticities :(\"\n )\n direction = np.repeat(\"zero\", len(before)).astype(\"<U8\")\n direction[(before > 1e-6) | (after > 1e-6)] = \"forward\"\n direction[(before < -1e-6) | (after < -1e-6)] = \"reverse\"\n derivs = (np.log(after.abs() + 1e-6) - np.log(before.abs() + 1e-6)) / STEP\n return derivs, direction\n\n\ndef elasticities_by_medium(com, reactions, fraction, growth_rate, progress):\n \"\"\"Get the elasticity coefficients for a set of variables.\n\n Arguments\n ---------\n com : micom.Community\n The community for wrhich to calculate elasticities.\n variables : list of optlang.Variable\n The variables for which to calculate the elasticities. All of these\n must have non-zero primal vaues in the previous solution.\n\n Returns\n -------\n pandas.Dataframe\n The long/tidy version of the elasticities. Contains columns variable,\n effector, and elasticity.\n \"\"\"\n regularize_l2_norm(com, 0.0)\n sol = optimize_with_fraction(com, fraction, growth_rate, True)\n before = _get_fluxes(sol, reactions)\n import_fluxes = pd.Series()\n dfs = []\n\n for ex in com.exchanges:\n export = len(ex.reactants) == 1\n flux = sol.fluxes.loc[ex.community_id, ex.global_id]\n if export and (flux < -1e-6):\n import_fluxes[ex] = flux\n elif not export and (flux > 1e-6):\n import_fluxes[ex] = -flux\n else:\n continue\n\n fluxes = import_fluxes.index\n if progress:\n fluxes = tqdm(fluxes, unit=\"optimizations\", desc=\"medium\")\n for r in fluxes:\n flux = import_fluxes[r]\n with com:\n if flux < -1e-6:\n r.lower_bound *= np.exp(STEP)\n else:\n r.upper_bound *= np.exp(STEP)\n sol = optimize_with_fraction(com, fraction, growth_rate, True)\n after = _get_fluxes(sol, reactions)\n deriv, dirs = _derivatives(before, after)\n res = pd.DataFrame(\n {\n \"reaction\": [rx.global_id for rx in reactions],\n \"taxon\": [list(r.compartments)[0] for r in reactions],\n \"effector\": r.id,\n \"direction\": dirs,\n \"elasticity\": deriv,\n }\n )\n dfs.append(res)\n\n return pd.concat(dfs)\n\n\ndef elasticities_by_abundance(com, reactions, fraction, growth_rate, progress):\n \"\"\"Get the elasticity coefficients for a set of variables.\n\n Arguments\n ---------\n com : micom.Community\n The community for which to calculate elasticities.\n variables : list of optlang.Variable\n The variables for which to calculate the elasticities. All of these\n must have non-zero primal vaues in the previous solution.\n\n Returns\n -------\n pandas.Dataframe\n The long/tidy version of the elasticities. 
Contains columns variable,\n effector, and elasticity.\n \"\"\"\n regularize_l2_norm(com, 0.0)\n sol = optimize_with_fraction(com, fraction, growth_rate, True)\n before = _get_fluxes(sol, reactions)\n dfs = []\n\n abundance = com.abundances.copy()\n taxa = abundance.index\n\n if progress:\n taxa = tqdm(taxa, unit=\"optimizations\", desc=\"taxa abundances\")\n for sp in taxa:\n old = abundance[sp]\n abundance.loc[sp] *= np.exp(STEP)\n com.set_abundance(abundance, normalize=False)\n sol = optimize_with_fraction(com, fraction, growth_rate, True)\n after = _get_fluxes(sol, reactions)\n abundance.loc[sp] = old\n com.set_abundance(abundance, normalize=False)\n deriv, dirs = _derivatives(before, after)\n res = pd.DataFrame(\n {\n \"reaction\": [r.global_id for r in reactions],\n \"taxon\": [list(r.compartments)[0] for r in reactions],\n \"effector\": sp,\n \"direction\": dirs,\n \"elasticity\": deriv,\n }\n )\n dfs.append(res)\n\n return pd.concat(dfs)\n\n\ndef elasticities(com, fraction=0.5, reactions=None, progress=True):\n \"\"\"Calculate elasticities for reactions.\n\n Calculates elasticity coefficients using the specified reactions as\n response and exchange bounds (diet) and taxa abundances as\n effectors/parameters. Will use an arbitrary flux distribution as base.\n\n Arguments\n ---------\n com : micom.Community\n The community for wrhich to calculate elasticities.\n fraction : double\n The tradeoff to use for the cooperative tradeoff method. Fraction of\n maximal community growth to enforce.\n reactions : iterable\n A list of reactions to get elasticities for. Elements can either be\n reactions from the model, strings specifying the ids of reactions\n or ints specifying the indices of reactions. Defaults to using all\n reactions.\n progress : boolean\n Whether to shwo progress bars. Will show two, one for the diet\n optimizations and another one for the taxa abundances.\n\n Returns\n -------\n pandas.DataFrame\n A data frame with the following columns:\n \"reaction\" - the exchange reaction (response),\n \"taxon\" - the taxon the reaction is from,\n \"effector\" - the parameter that was changed,\n \"direction\" - whether the flux runs in the forward or reverse\n direction,\n \"elasticity\" - the elasticity coefficient,\n \"type\" - the type of effector either \"exchange\" for diet or \"abundance\"\n for taxa abundances.\n \"\"\"\n growth_rate = None\n if reactions is None:\n reactions = com.reactions\n reactions = com.reactions.get_by_any(reactions)\n with com:\n context = get_context(com)\n context(partial(reset_min_community_growth, com))\n by_medium = elasticities_by_medium(\n com, reactions, fraction, growth_rate, progress\n )\n by_medium[\"type\"] = \"exchanges\"\n\n by_abundance = elasticities_by_abundance(\n com, reactions, fraction, growth_rate, progress\n )\n by_abundance[\"type\"] = \"abundance\"\n\n both = pd.concat([by_medium, by_abundance]).reset_index(drop=True)\n both.loc[both.taxon == \"m\", \"taxon\"] = \"medium\"\n return both\n"
] | [
[
"pandas.concat",
"numpy.abs",
"pandas.Series",
"numpy.sign",
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
codema-dev/cso-network-gas | [
"a8036f8a219684992c9932da6bab3440916d90aa"
] | [
"get_average_consumptions.py"
] | [
"# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.10.2\n# kernelspec:\n# display_name: 'Python 3.9.1 64-bit (''cso-network-gas'': conda)'\n# metadata:\n# interpreter:\n# hash: b61c169e19bc7874f8b9dc129b8bf2779b9e66e59b5e90264a492b9ef4b9b65d\n# name: python3\n# ---\n\n# %%\nimport geopandas as gpd\nimport numpy as np\nimport pandas as pd\n\nfrom clean import amalgamate_postal_districts\n\n# %%\nyears = [str(2011 + i) for i in range(0, 9, 1)]\n\n# %% [markdown]\n# # Get Dublin Postcode Boundaries\n\n# %%\ndublin_postcode_boundaries = gpd.read_file(\"data/dublin_postcode_boundaries\").pipe(amalgamate_postal_districts)\n\n# %% [markdown]\n# # Get Residential Average Gas Consumption\n\n# %%\nresi_total_annual_gas_by_postcodes = gpd.read_file(\"data/resi_total_annual_gas_by_postcodes.geojson\", driver=\"GeoJSON\")\n\n# %%\nresi_total_meters_by_postcodes = gpd.read_file(\"data/resi_total_meters_by_postcodes.geojson\", driver=\"GeoJSON\")\n\n# %%\nresi_avg_annual_gas_by_postcodes = resi_total_annual_gas_by_postcodes.copy()\nresi_avg_annual_gas_by_postcodes.loc[:, years] = np.round(\n 1*10**6 * resi_total_annual_gas_by_postcodes[years].astype(np.int64) / resi_total_meters_by_postcodes[years].astype(np.int64),\n 0,\n)\n\n\n# %%\nresi_avg_annual_gas_by_postcodes.to_file(\"data/resi_avg_annual_gas_by_postcodes.geojson\", driver=\"GeoJSON\")\n\n# %% [markdown]\n# # Get Non-Residential Average Gas Consumption\n\n# %%\nnon_resi_total_annual_gas_by_postcodes = gpd.read_file(\"data/non_resi_total_annual_gas_by_postcodes.geojson\", driver=\"GeoJSON\")\n\n# %%\nnon_resi_total_annual_gas_dublin_postal_districts = (\n non_resi_total_annual_gas_by_postcodes\n .query(\"`postcodes` != 'Co. Dublin'\")\n .loc[:, years]\n .sum()\n .to_frame().T\n .assign(postcodes=\"Dublin Postal Districts\")\n .merge(dublin_postcode_boundaries)\n)\n\n# %%\nnon_resi_total_annual_gas_dublin_postal_districts\n\n# %%\nnon_resi_total_annual_gas_by_postcodes = pd.concat(\n [\n non_resi_total_annual_gas_by_postcodes,\n non_resi_total_annual_gas_dublin_postal_districts,\n ]\n).query(\"`postcodes` == ['Co. Dublin', 'Dublin Postal Districts']\").reset_index(drop=True)\n\n# %%\nnon_resi_total_annual_gas_by_postcodes\n\n# %%\nnon_resi_total_annual_gas_by_postcodes.loc[\"Dublin Postal Districts\", years] = non_resi_total_annual_gas_dublin_postal_districts\n\n# %%\nnon_resi_total_meters_by_postcodes = gpd.read_file(\"data/non_resi_total_meters_by_postcodes.geojson\", driver=\"GeoJSON\")\nnon_resi_total_meters_by_postcodes.loc[:, years] = non_resi_total_meters_by_postcodes[years].astype(np.int64)\n\n# %%\nnon_resi_total_annual_gas_by_postcodes\n\n# %%\nnon_resi_avg_annual_gas_by_postcodes = non_resi_total_annual_gas_by_postcodes.copy()\nnon_resi_avg_annual_gas_by_postcodes.loc[:, years] = 1*10**6 * non_resi_total_annual_gas_by_postcodes[years] / non_resi_total_meters_by_postcodes[years]\nnon_resi_avg_annual_gas_by_postcodes.loc[:, years] = non_resi_avg_annual_gas_by_postcodes[years].round(0)\n\n# %%\nnon_resi_avg_annual_gas_by_postcodes\n"
] | [
[
"pandas.concat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
m3at/chainer-mask-rcnn | [
"83366fc77e52aa6a29cfac4caa697d8b45dcffc6"
] | [
"chainer_mask_rcnn/models/utils/proposal_target_creator.py"
] | [
"# Modified works:\n# --------------------------------------------------------\n# Copyright (c) 2017 - 2018 Kentaro Wada.\n# Licensed under The MIT License [see LICENSE for details]\n# --------------------------------------------------------\n\n# Original works:\n# --------------------------------------------------------\n# Copyright (c) 2017 Shingo Kitagawa.\n# Licensed under The MIT License [see LICENSE for details]\n# https://github.com/knorth55/chainer-fcis\n# --------------------------------------------------------\n# Copyright (c) 2017 Preferred Networks, Inc.\n# Licensed under The MIT License [see LICENSE for details]\n# https://github.com/chainer/chainercv\n# --------------------------------------------------------\n\nfrom chainer import cuda\nfrom chainercv.links.model.faster_rcnn.utils.bbox2loc import bbox2loc\nfrom chainercv.utils.bbox.bbox_iou import bbox_iou\nimport cv2\nimport numpy as np\n\n\nclass ProposalTargetCreator(object):\n \"\"\"Assign ground truth bounding boxes to given RoIs.\n\n The :meth:`__call__` of this class generates training targets\n for each object proposal.\n This is used to train Faster RCNN [#]_.\n\n .. [#] Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun. \\\n Faster R-CNN: Towards Real-Time Object Detection with \\\n Region Proposal Networks. NIPS 2015.\n\n Args:\n n_sample (int): The number of sampled regions.\n pos_ratio (float): Fraction of regions that is labeled as a\n foreground.\n pos_iou_thresh (float): IoU threshold for a RoI to be considered as a\n foreground.\n neg_iou_thresh_hi (float): RoI is considered to be the background\n if IoU is in\n [:obj:`neg_iou_thresh_hi`, :obj:`neg_iou_thresh_hi`).\n neg_iou_thresh_lo (float): See above.\n\n \"\"\"\n\n def __init__(self,\n n_sample=512,\n pos_ratio=0.25, pos_iou_thresh=0.5,\n neg_iou_thresh_hi=0.5, neg_iou_thresh_lo=0.0,\n mask_size=14, binary_thresh=0.4,\n ):\n self.n_sample = n_sample\n self.pos_ratio = pos_ratio\n self.pos_iou_thresh = pos_iou_thresh\n self.neg_iou_thresh_hi = neg_iou_thresh_hi\n self.neg_iou_thresh_lo = neg_iou_thresh_lo\n self.mask_size = mask_size\n self.binary_thresh = binary_thresh\n\n def __call__(self, roi, bbox, label, mask,\n loc_normalize_mean=(0., 0., 0., 0.),\n loc_normalize_std=(0.1, 0.1, 0.2, 0.2)):\n \"\"\"Assigns ground truth to sampled proposals.\n\n This function samples total of :obj:`self.n_sample` RoIs\n from the combination of :obj:`roi` and :obj:`bbox`.\n The RoIs are assigned with the ground truth class labels as well as\n bounding box offsets and scales to match the ground truth bounding\n boxes. As many as :obj:`pos_ratio * self.n_sample` RoIs are\n sampled as foregrounds.\n\n Offsets and scales of bounding boxes are calculated using\n :func:`chainercv.links.model.faster_rcnn.bbox2loc`.\n Also, types of input arrays and output arrays are same.\n\n Here are notations.\n\n * :math:`S` is the total number of sampled RoIs, which equals \\\n :obj:`self.n_sample`.\n * :math:`L` is number of object classes possibly including the \\\n background.\n\n Args:\n roi (array): Region of Interests (RoIs) from which we sample.\n Its shape is :math:`(R, 4)`\n bbox (array): The coordinates of ground truth bounding boxes.\n Its shape is :math:`(R', 4)`.\n label (array): Ground truth bounding box labels. Its shape\n is :math:`(R',)`. 
Its range is :math:`[0, L - 1]`, where\n :math:`L` is the number of foreground classes.\n loc_normalize_mean (tuple of four floats): Mean values to normalize\n coordinates of bouding boxes.\n loc_normalize_std (tupler of four floats): Standard deviation of\n the coordinates of bounding boxes.\n\n Returns:\n (array, array, array):\n\n * **sample_roi**: Regions of interests that are sampled. \\\n Its shape is :math:`(S, 4)`.\n * **gt_roi_loc**: Offsets and scales to match \\\n the sampled RoIs to the ground truth bounding boxes. \\\n Its shape is :math:`(S, 4)`.\n * **gt_roi_label**: Labels assigned to sampled RoIs. Its shape is \\\n :math:`(S,)`. Its range is :math:`[0, L]`. The label with \\\n value 0 is the background.\n\n \"\"\"\n xp = cuda.get_array_module(roi)\n roi = cuda.to_cpu(roi)\n bbox = cuda.to_cpu(bbox)\n label = cuda.to_cpu(label)\n\n n_bbox, _ = bbox.shape\n if n_bbox == 0:\n raise ValueError('Empty bbox is not supported.')\n\n roi = np.concatenate((roi, bbox), axis=0)\n\n pos_roi_per_image = np.round(self.n_sample * self.pos_ratio)\n iou = bbox_iou(roi, bbox)\n gt_assignment = iou.argmax(axis=1)\n max_iou = iou.max(axis=1)\n # Offset range of classes from [0, n_fg_class - 1] to [1, n_fg_class].\n # The label with value 0 is the background.\n gt_roi_label = label[gt_assignment] + 1\n\n # Select foreground RoIs as those with >= pos_iou_thresh IoU.\n pos_index = np.where(max_iou >= self.pos_iou_thresh)[0]\n pos_roi_per_this_image = int(min(pos_roi_per_image, pos_index.size))\n if pos_index.size > 0:\n pos_index = np.random.choice(\n pos_index, size=pos_roi_per_this_image, replace=False)\n\n # Select background RoIs as those within\n # [neg_iou_thresh_lo, neg_iou_thresh_hi).\n neg_index = np.where((max_iou < self.neg_iou_thresh_hi) &\n (max_iou >= self.neg_iou_thresh_lo))[0]\n neg_roi_per_this_image = self.n_sample - pos_roi_per_this_image\n neg_roi_per_this_image = int(min(neg_roi_per_this_image,\n neg_index.size))\n if neg_index.size > 0:\n neg_index = np.random.choice(\n neg_index, size=neg_roi_per_this_image, replace=False)\n\n # The indices that we're selecting (both positive and negative).\n keep_index = np.append(pos_index, neg_index)\n gt_roi_label = gt_roi_label[keep_index]\n gt_roi_label[pos_roi_per_this_image:] = 0 # negative labels --> 0\n sample_roi = roi[keep_index]\n\n # Compute offsets and scales to match sampled RoIs to the GTs.\n gt_roi_loc = bbox2loc(sample_roi, bbox[gt_assignment[keep_index]])\n gt_roi_loc = ((gt_roi_loc - np.array(loc_normalize_mean, np.float32)\n ) / np.array(loc_normalize_std, np.float32))\n\n # Compute gt masks\n gt_roi_mask = - np.ones(\n (len(sample_roi), self.mask_size, self.mask_size),\n dtype=np.int32)\n for i, pos_ind in enumerate(pos_index):\n roi = np.round(sample_roi[i]).astype(np.int32)\n gt_mask = mask[gt_assignment[pos_ind]]\n gt_roi_mask_i = gt_mask[roi[0]:roi[2], roi[1]:roi[3]]\n gt_roi_mask_i_score = (\n np.arange(gt_roi_mask_i.max() + 1) ==\n gt_roi_mask_i[..., None]).astype(np.float32) # label -> onehot\n gt_roi_mask_i_score = cv2.resize(\n gt_roi_mask_i_score, (self.mask_size, self.mask_size))\n if gt_roi_mask_i_score.ndim == 2:\n gt_roi_mask_i_score = gt_roi_mask_i_score.reshape(\n gt_roi_mask_i_score.shape[:2] + (1,))\n gt_roi_mask_i = np.argmax(gt_roi_mask_i_score, axis=2)\n gt_roi_mask[i] = gt_roi_mask_i.astype(np.int32)\n\n if xp != np:\n sample_roi = cuda.to_gpu(sample_roi)\n gt_roi_loc = cuda.to_gpu(gt_roi_loc)\n gt_roi_label = cuda.to_gpu(gt_roi_label)\n gt_roi_mask = cuda.to_gpu(gt_roi_mask)\n return sample_roi, 
gt_roi_loc, gt_roi_label, gt_roi_mask\n"
] | [
[
"numpy.random.choice",
"numpy.concatenate",
"numpy.round",
"numpy.append",
"numpy.argmax",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wangbingnan136/Multimodal-Toolkit | [
"568d0f5de1d370914a0f00f230a36d7985ba401f"
] | [
"multimodal_transformers/model/tabular_transformers.py"
] | [
"from torch import nn\nfrom transformers import (\n BertForSequenceClassification,\n RobertaForSequenceClassification,\n DistilBertForSequenceClassification,\n AlbertForSequenceClassification,\n XLNetForSequenceClassification,\n XLMForSequenceClassification\n)\n\nfrom transformers.models.bert.modeling_bert import BERT_INPUTS_DOCSTRING\nfrom transformers.models.roberta.modeling_roberta import ROBERTA_INPUTS_DOCSTRING\nfrom transformers.models.distilbert.modeling_distilbert import DISTILBERT_INPUTS_DOCSTRING\nfrom transformers.models.albert.modeling_albert import ALBERT_INPUTS_DOCSTRING\nfrom transformers.models.xlnet.modeling_xlnet import XLNET_INPUTS_DOCSTRING\nfrom transformers.models.xlm.modeling_xlm import XLM_INPUTS_DOCSTRING\nfrom transformers import XLMRobertaConfig\ndef add_start_docstrings_to_callable(*docstr):\n def docstring_decorator(fn):\n fn.__doc__ = ''.join(docstr) + fn.__doc__\n return fn\n return docstring_decorator\n\n\nfrom .tabular_combiner import TabularFeatCombiner\nfrom .tabular_config import TabularConfig\nfrom .layer_utils import MLP, calc_mlp_dims, hf_loss_func\n\n\nclass BertWithTabular(BertForSequenceClassification):\n \"\"\"\n Bert Model transformer with a sequence classification/regression head as well as\n a TabularFeatCombiner module to combine categorical and numerical features\n with the Bert pooled output\n\n Parameters:\n hf_model_config (:class:`~transformers.BertConfig`):\n Model configuration class with all the parameters of the model.\n This object must also have a tabular_config member variable that is a\n :obj:`TabularConfig` instance specifying the configs for :obj:`TabularFeatCombiner`\n \"\"\"\n\n def __init__(self, hf_model_config):\n super().__init__(hf_model_config)\n tabular_config = hf_model_config.tabular_config\n if type(tabular_config) is dict: # when loading from saved model\n tabular_config = TabularConfig(**tabular_config)\n else:\n self.config.tabular_config = tabular_config.__dict__\n\n tabular_config.text_feat_dim = hf_model_config.hidden_size\n tabular_config.hidden_dropout_prob = hf_model_config.hidden_dropout_prob\n self.tabular_combiner = TabularFeatCombiner(tabular_config)\n self.num_labels = tabular_config.num_labels\n combined_feat_dim = self.tabular_combiner.final_out_dim\n if tabular_config.use_simple_classifier:\n self.tabular_classifier = nn.Linear(combined_feat_dim,\n tabular_config.num_labels)\n else:\n dims = calc_mlp_dims(combined_feat_dim,\n division=tabular_config.mlp_division,\n output_dim=tabular_config.num_labels)\n self.tabular_classifier = MLP(combined_feat_dim,\n tabular_config.num_labels,\n num_hidden_lyr=len(dims),\n dropout_prob=tabular_config.mlp_dropout,\n hidden_channels=dims,\n bn=True)\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n class_weights=None,\n output_attentions=None,\n output_hidden_states=None,\n cat_feats=None,\n numerical_feats=None\n ):\n r\"\"\"\n class_weights (:obj:`torch.FloatTensor` of shape :obj:`(tabular_config.num_labels,)`, `optional`, defaults to :obj:`None`):\n Class weights to be used for cross entropy loss function for classification task\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in :obj:`[0, ..., 
config.num_labels - 1]`.\n If :obj:`tabular_config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`tabular_config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n cat_feats (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.cat_feat_dim)`, `optional`, defaults to :obj:`None`):\n Categorical features to be passed in to the TabularFeatCombiner\n numerical_feats (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.numerical_feat_dim)`, `optional`, defaults to :obj:`None`):\n Numerical features to be passed in to the TabularFeatCombiner\n Returns:\n :obj:`tuple` comprising various elements depending on configuration and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):\n Classification (or regression if tabular_config.num_labels==1) loss.\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.num_labels)`):\n Classification (or regression if tabular_config.num_labels==1) scores (before SoftMax).\n classifier_layer_outputs(:obj:`list` of :obj:`torch.FloatTensor`):\n The outputs of each layer of the final classification layers. The 0th index of this list is the\n combining module's output\n \"\"\"\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n pooled_output = outputs[1]\n pooled_output = self.dropout(pooled_output)\n combined_feats = self.tabular_combiner(pooled_output,\n cat_feats,\n numerical_feats)\n loss, logits, classifier_layer_outputs = hf_loss_func(combined_feats,\n self.tabular_classifier,\n labels,\n self.num_labels,\n class_weights)\n return loss, logits, classifier_layer_outputs\n\n\nclass RobertaWithTabular(RobertaForSequenceClassification):\n \"\"\"\n Roberta Model transformer with a sequence classification/regression head as well as\n a TabularFeatCombiner module to combine categorical and numerical features\n with the Roberta pooled output\n\n Parameters:\n hf_model_config (:class:`~transformers.RobertaConfig`):\n Model configuration class with all the parameters of the model.\n This object must also have a tabular_config member variable that is a\n :obj:`TabularConfig` instance specifying the configs for :obj:`TabularFeatCombiner`\n \"\"\"\n def __init__(self, hf_model_config):\n super().__init__(hf_model_config)\n tabular_config = hf_model_config.tabular_config\n if type(tabular_config) is dict: # when loading from saved model\n tabular_config = TabularConfig(**tabular_config)\n else:\n self.config.tabular_config = tabular_config.__dict__\n\n tabular_config.text_feat_dim = hf_model_config.hidden_size\n tabular_config.hidden_dropout_prob = hf_model_config.hidden_dropout_prob\n self.tabular_combiner = TabularFeatCombiner(tabular_config)\n self.num_labels = tabular_config.num_labels\n combined_feat_dim = self.tabular_combiner.final_out_dim\n self.dropout = nn.Dropout(hf_model_config.hidden_dropout_prob)\n if tabular_config.use_simple_classifier:\n self.tabular_classifier = nn.Linear(combined_feat_dim,\n tabular_config.num_labels)\n else:\n dims = calc_mlp_dims(combined_feat_dim,\n division=tabular_config.mlp_division,\n output_dim=tabular_config.num_labels)\n self.tabular_classifier = MLP(combined_feat_dim,\n tabular_config.num_labels,\n num_hidden_lyr=len(dims),\n 
dropout_prob=tabular_config.mlp_dropout,\n hidden_channels=dims,\n bn=True)\n\n @add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n class_weights=None,\n cat_feats=None,\n numerical_feats=None\n ):\n r\"\"\"\n class_weights (:obj:`torch.FloatTensor` of shape :obj:`(tabular_config.num_labels,)`, `optional`, defaults to :obj:`None`):\n Class weights to be used for cross entropy loss function for classification task\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in :obj:`[0, ..., config.num_labels - 1]`.\n If :obj:`tabular_config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`tabular_config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n cat_feats (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.cat_feat_dim)`, `optional`, defaults to :obj:`None`):\n Categorical features to be passed in to the TabularFeatCombiner\n numerical_feats (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.numerical_feat_dim)`, `optional`, defaults to :obj:`None`):\n Numerical features to be passed in to the TabularFeatCombiner\n\n Returns:\n :obj:`tuple` comprising various elements depending on configuration and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):\n Classification (or regression if tabular_config.num_labels==1) loss.\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.num_labels)`):\n Classification (or regression if tabular_config.num_labels==1) scores (before SoftMax).\n classifier_layer_outputs(:obj:`list` of :obj:`torch.FloatTensor`):\n The outputs of each layer of the final classification layers. The 0th index of this list is the\n combining module's output\n\n \"\"\"\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n sequence_output = outputs[0]\n text_feats = sequence_output[:, 0, :]\n text_feats = self.dropout(text_feats)\n combined_feats = self.tabular_combiner(text_feats,\n cat_feats,\n numerical_feats)\n loss, logits, classifier_layer_outputs = hf_loss_func(combined_feats,\n self.tabular_classifier,\n labels,\n self.num_labels,\n class_weights)\n return loss, logits, classifier_layer_outputs\n\n\nclass XLMRobertaWithTabular(RobertaWithTabular):\n \"\"\"\n This class overrides :class:`~RobertaWithTabular`. 
Please check the\n superclass for the appropriate documentation alongside usage examples.\n \"\"\"\n config_class = XLMRobertaConfig\n\n\nclass DistilBertWithTabular(DistilBertForSequenceClassification):\n \"\"\"\n DistilBert Model transformer with a sequence classification/regression head as well as\n a TabularFeatCombiner module to combine categorical and numerical features\n with the Roberta pooled output\n\n Parameters:\n hf_model_config (:class:`~transformers.DistilBertConfig`):\n Model configuration class with all the parameters of the model.\n This object must also have a tabular_config member variable that is a\n :obj:`TabularConfig` instance specifying the configs for :obj:`TabularFeatCombiner`\n \"\"\"\n def __init__(self, hf_model_config):\n super().__init__(hf_model_config)\n tabular_config = hf_model_config.tabular_config\n if type(tabular_config) is dict: # when loading from saved model\n tabular_config = TabularConfig(**tabular_config)\n else:\n self.config.tabular_config = tabular_config.__dict__\n\n tabular_config.text_feat_dim = hf_model_config.hidden_size\n tabular_config.hidden_dropout_prob = hf_model_config.seq_classif_dropout\n self.tabular_combiner = TabularFeatCombiner(tabular_config)\n self.num_labels = tabular_config.num_labels\n combined_feat_dim = self.tabular_combiner.final_out_dim\n if tabular_config.use_simple_classifier:\n self.tabular_classifier = nn.Linear(combined_feat_dim,\n tabular_config.num_labels)\n else:\n dims = calc_mlp_dims(combined_feat_dim,\n division=tabular_config.mlp_division,\n output_dim=tabular_config.num_labels)\n self.tabular_classifier = MLP(combined_feat_dim,\n tabular_config.num_labels,\n num_hidden_lyr=len(dims),\n dropout_prob=tabular_config.mlp_dropout,\n hidden_channels=dims,\n bn=True)\n\n @add_start_docstrings_to_callable(DISTILBERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n class_weights=None,\n cat_feats=None,\n numerical_feats=None\n ):\n r\"\"\"\n class_weights (:obj:`torch.FloatTensor` of shape :obj:`(tabular_config.num_labels,)`,`optional`, defaults to :obj:`None`):\n Class weights to be used for cross entropy loss function for classification task\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in :obj:`[0, ..., config.num_labels - 1]`.\n If :obj:`tabular_config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`tabular_config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n cat_feats (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.cat_feat_dim)`,`optional`, defaults to :obj:`None`):\n Categorical features to be passed in to the TabularFeatCombiner\n numerical_feats (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.numerical_feat_dim)`,`optional`, defaults to :obj:`None`):\n Numerical features to be passed in to the TabularFeatCombiner\n Returns:\n :obj:`tuple` comprising various elements depending on configuration and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):\n Classification (or regression if tabular_config.num_labels==1) loss.\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.num_labels)`):\n 
Classification (or regression if tabular_config.num_labels==1) scores (before SoftMax).\n classifier_layer_outputs(:obj:`list` of :obj:`torch.FloatTensor`):\n The outputs of each layer of the final classification layers. The 0th index of this list is the\n combining module's output\n \"\"\"\n\n distilbert_output = self.distilbert(\n input_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n hidden_state = distilbert_output[0] # (bs, seq_len, dim)\n pooled_output = hidden_state[:, 0] # (bs, dim)\n pooled_output = self.pre_classifier(pooled_output) # (bs, dim)\n pooled_output = nn.ReLU()(pooled_output) # (bs, dim)\n text_feats = self.dropout(pooled_output)\n combined_feats = self.tabular_combiner(text_feats,\n cat_feats,\n numerical_feats)\n loss, logits, classifier_layer_outputs = hf_loss_func(combined_feats,\n self.tabular_classifier,\n labels,\n self.num_labels,\n class_weights)\n return loss, logits, classifier_layer_outputs\n\n\nclass AlbertWithTabular(AlbertForSequenceClassification):\n \"\"\"\n ALBERT Model transformer with a sequence classification/regression head as well as\n a TabularFeatCombiner module to combine categorical and numerical features\n with the Roberta pooled output\n\n Parameters:\n hf_model_config (:class:`~transformers.AlbertConfig`):\n Model configuration class with all the parameters of the model.\n This object must also have a tabular_config member variable that is a\n :obj:`TabularConfig` instance specifying the configs for :obj:`TabularFeatCombiner`\n \"\"\"\n\n def __init__(self, hf_model_config):\n super().__init__(hf_model_config)\n tabular_config = hf_model_config.tabular_config\n if type(tabular_config) is dict: # when loading from saved model\n tabular_config = TabularConfig(**tabular_config)\n else:\n self.config.tabular_config = tabular_config.__dict__\n\n tabular_config.text_feat_dim = hf_model_config.hidden_size\n tabular_config.hidden_dropout_prob = hf_model_config.hidden_dropout_prob\n self.tabular_combiner = TabularFeatCombiner(tabular_config)\n self.num_labels = tabular_config.num_labels\n combined_feat_dim = self.tabular_combiner.final_out_dim\n if tabular_config.use_simple_classifier:\n self.tabular_classifier = nn.Linear(combined_feat_dim,\n tabular_config.num_labels)\n else:\n dims = calc_mlp_dims(combined_feat_dim,\n division=tabular_config.mlp_division,\n output_dim=tabular_config.num_labels)\n self.tabular_classifier = MLP(combined_feat_dim,\n tabular_config.num_labels,\n num_hidden_lyr=len(dims),\n dropout_prob=tabular_config.mlp_dropout,\n hidden_channels=dims,\n bn=True)\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n class_weights=None,\n cat_feats=None,\n numerical_feats=None\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),\n If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else 
self.config.use_return_dict\n\n outputs = self.albert(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n combined_feats = self.tabular_combiner(pooled_output,\n cat_feats,\n numerical_feats)\n loss, logits, classifier_layer_outputs = hf_loss_func(combined_feats,\n self.tabular_classifier,\n labels,\n self.num_labels,\n class_weights)\n return loss, logits, classifier_layer_outputs\n\n\nclass XLNetWithTabular(XLNetForSequenceClassification):\n \"\"\"\n XLNet Model transformer with a sequence classification/regression head as well as\n a TabularFeatCombiner module to combine categorical and numerical features\n with the Roberta pooled output\n\n Parameters:\n hf_model_config (:class:`~transformers.XLNetConfig`):\n Model configuration class with all the parameters of the model.\n This object must also have a tabular_config member variable that is a\n :obj:`TabularConfig` instance specifying the configs for :obj:`TabularFeatCombiner`\n \"\"\"\n def __init__(self, hf_model_config):\n super().__init__(hf_model_config)\n tabular_config = hf_model_config.tabular_config\n if type(tabular_config) is dict: # when loading from saved model\n tabular_config = TabularConfig(**tabular_config)\n else:\n self.config.tabular_config = tabular_config.__dict__\n\n tabular_config.text_feat_dim = hf_model_config.hidden_size\n self.tabular_combiner = TabularFeatCombiner(tabular_config)\n self.num_labels = tabular_config.num_labels\n combined_feat_dim = self.tabular_combiner.final_out_dim\n if tabular_config.use_simple_classifier:\n self.tabular_classifier = nn.Linear(combined_feat_dim,\n tabular_config.num_labels)\n else:\n dims = calc_mlp_dims(combined_feat_dim,\n division=tabular_config.mlp_division,\n output_dim=tabular_config.num_labels)\n self.tabular_classifier = MLP(combined_feat_dim,\n tabular_config.num_labels,\n num_hidden_lyr=len(dims),\n dropout_prob=tabular_config.mlp_dropout,\n hidden_channels=dims,\n bn=True)\n\n @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n mems=None,\n perm_mask=None,\n target_mapping=None,\n token_type_ids=None,\n input_mask=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n class_weights=None,\n cat_feats=None,\n numerical_feats=None\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`)\n Labels for computing the sequence classification/regression loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),\n If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n use_cache = self.training or (use_cache if use_cache is not None else self.config.use_cache)\n\n transformer_outputs = self.transformer(\n input_ids,\n attention_mask=attention_mask,\n mems=mems,\n perm_mask=perm_mask,\n target_mapping=target_mapping,\n token_type_ids=token_type_ids,\n input_mask=input_mask,\n 
head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n output = transformer_outputs[0]\n\n output = self.sequence_summary(output)\n combined_feats = self.tabular_combiner(output,\n cat_feats,\n numerical_feats)\n loss, logits, classifier_layer_outputs = hf_loss_func(combined_feats,\n self.tabular_classifier,\n labels,\n self.num_labels,\n class_weights)\n return loss, logits, classifier_layer_outputs\n\n\nclass XLMWithTabular(XLMForSequenceClassification):\n \"\"\"\n XLM Model transformer with a sequence classification/regression head as well as\n a TabularFeatCombiner module to combine categorical and numerical features\n with the Roberta pooled output\n\n Parameters:\n hf_model_config (:class:`~transformers.XLMConfig`):\n Model configuration class with all the parameters of the model.\n This object must also have a tabular_config member variable that is a\n :obj:`TabularConfig` instance specifying the configs for :obj:`TabularFeatCombiner`\n \"\"\"\n def __init__(self, hf_model_config):\n super().__init__(hf_model_config)\n tabular_config = hf_model_config.tabular_config\n if type(tabular_config) is dict: # when loading from saved model\n tabular_config = TabularConfig(**tabular_config)\n else:\n self.config.tabular_config = tabular_config.__dict__\n\n tabular_config.text_feat_dim = hf_model_config.hidden_size\n self.tabular_combiner = TabularFeatCombiner(tabular_config)\n self.num_labels = tabular_config.num_labels\n combined_feat_dim = self.tabular_combiner.final_out_dim\n if tabular_config.use_simple_classifier:\n self.tabular_classifier = nn.Linear(combined_feat_dim,\n tabular_config.num_labels)\n else:\n dims = calc_mlp_dims(combined_feat_dim,\n division=tabular_config.mlp_division,\n output_dim=tabular_config.num_labels)\n self.tabular_classifier = MLP(combined_feat_dim,\n tabular_config.num_labels,\n num_hidden_lyr=len(dims),\n dropout_prob=tabular_config.mlp_dropout,\n hidden_channels=dims,\n bn=True)\n\n @ add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n lengths=None,\n cache=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n class_weights=None,\n cat_feats=None,\n numerical_feats=None\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in :obj:`[0, ..., config.num_labels - 1]`.\n If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n transformer_outputs = self.transformer(\n input_ids,\n attention_mask=attention_mask,\n langs=langs,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n lengths=lengths,\n cache=cache,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n output = transformer_outputs[0]\n output = self.sequence_summary(output)\n combined_feats = self.tabular_combiner(output,\n cat_feats,\n numerical_feats)\n loss, logits, 
classifier_layer_outputs = hf_loss_func(combined_feats,\n self.tabular_classifier,\n labels,\n self.num_labels,\n class_weights)\n return loss, logits, classifier_layer_outputs"
] | [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
marcscho/amllab | [
"147921ec4e42eae9fc4bdd457d2f7f37aefd04e2"
] | [
"batch_scripts/scoring_step.py"
] | [
"import os\nimport pickle\nimport joblib\nimport argparse\nimport pandas as pd\nfrom azureml.core.run import Run\nfrom azureml.core.model import Model\nfrom sklearn.preprocessing import LabelEncoder\n\n# load arguments\nparser = argparse.ArgumentParser(\"preprocess\")\nparser.add_argument(\"--intermediate-data-path\", type=str)\nparser.add_argument(\"--result-data-path\", type=str)\nargs = parser.parse_args()\n\n# load data\nrun = Run.get_context()\nprint(args.intermediate_data_path)\nfeatures = pickle.load(open(f'{args.intermediate_data_path}/preprocessed_features.pkl', \"rb\"))\ndata = pd.read_csv(f'{args.intermediate_data_path}/preprocessed_data.csv')\n\n# load model\nws = run.experiment.workspace\n# model = Model(ws, 'german-credit-local-model').download(exist_ok=True)\npipeline_path = Model.get_model_path('german-credit-local-model')\npipeline = joblib.load(pipeline_path)\n\n# score\nout = pipeline['classifier'].predict(features)\n\n# save result file\n\nif args.result_data_path is not None:\n os.makedirs(args.result_data_path, exist_ok=True)\n print(f\"{args.result_data_path} created\")\nle = LabelEncoder()\nle.fit(data['Risk'])\ndata['prediction'] = le.inverse_transform(out)\ndata.to_csv(f'{args.result_data_path}/result_data.csv', index=False)"
] | [
[
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
gavswe/unimod-mapper | [
"2adb0c55c798d89b04c933a2b12c536462c7e6d2"
] | [
"tests/test_mass_combos.py"
] | [
"#!/usr/bin/env python\n# encoding: utf-8\nfrom pathlib import Path\nimport pandas as pd\n\nimport unimod_mapper\n\n\n# test_dir = Path(unimod_mapper.__file__).parent.parent / \"tests\"\n# package_dir = test_dir.parent\n\n\n# def test_mass_to_combos():\n# # the order of the files shouldn't change the unimodIDs\n# um = unimod_mapper.UnimodMapper(add_default_files=False)\n# um._df = pd.DataFrame([\n# {\"mono_mass\": 0, \"Name\": \"0\"},\n# {\"mono_mass\": 1, \"Name\": \"1\"},\n# {\"mono_mass\": 2, \"Name\": \"2\"},\n# {\"mono_mass\": 3, \"Name\": \"3\"},\n# {\"mono_mass\": 4, \"Name\": \"4\"},\n# {\"mono_mass\": 5, \"Name\": \"5\"},\n# {\"mono_mass\": 6, \"Name\": \"6\"},\n# {\"mono_mass\": 7, \"Name\": \"7\"},\n# ])\n# combo_list = um.mass_to_combos(5, decimals=0)\n# assert len(combo_list) == 27\n\n\ndef test_mass_to_combos_0_decimal():\n # the order of the files shouldn't change the unimodIDs\n um = unimod_mapper.UnimodMapper()\n um._df = pd.DataFrame(\n [\n {\"mono_mass\": 0.4, \"Name\": \"0.4\"},\n {\"mono_mass\": 0.5, \"Name\": \"0.5\"},\n {\"mono_mass\": 0.7, \"Name\": \"0.7\"},\n {\"mono_mass\": 1.1, \"Name\": \"1.1\"},\n {\"mono_mass\": 1.4, \"Name\": \"1.4\"},\n {\"mono_mass\": 1.5, \"Name\": \"1.5\"},\n ]\n )\n combo_list = um.mass_to_combos(1, decimals=0)\n assert len(combo_list) == 6\n # a = [\n # (0.8, [\"0.4\", \"0.4\"]),\n # (0.9, [\"0.4\", \"0.5\"]),\n # (1.0, [\"0.5\", \"0.5\"]),\n # (1.1, [\"0.4\", \"0.7\"]),\n # (1.2, [\"0.5\", \"0.7\"]),\n # (1.4, [\"0.7\", \"0.7\"]),\n # ]\n\n\ndef test_mass_to_combos_1_decimal():\n # the order of the files shouldn't change the unimodIDs\n um = unimod_mapper.UnimodMapper()\n um._df = pd.DataFrame(\n [\n {\"mono_mass\": 0.34, \"Name\": \"0.34\"},\n {\"mono_mass\": 0.45, \"Name\": \"0.45\"},\n {\"mono_mass\": 0.60, \"Name\": \"0.60\"},\n {\"mono_mass\": 0.64, \"Name\": \"0.64\"},\n {\"mono_mass\": 0.45, \"Name\": \"0.45\"},\n {\"mono_mass\": 0.56, \"Name\": \"0.56\"},\n {\"mono_mass\": 0.67, \"Name\": \"0.67\"},\n {\"mono_mass\": 0.78, \"Name\": \"0.78\"},\n ]\n )\n combo_list = um.mass_to_combos(1, decimals=1)\n assert len(combo_list) == 3\n # a = [\n # (0.98, [\"0.34\", \"0.64\"]),\n # (1.01, [\"0.34\", \"0.67\"]),\n # (1.01, [\"0.45\", \"0.56\"]),\n # (1.01, [\"0.45\", \"0.56\"]),\n # (1.04, [\"0.45\", \"0.59\"]),\n # (1.04, [\"0.59\", \"0.45\"]),\n # ]\n\n\ndef test_mass_to_combos_1B_decimal():\n # the order of the files shouldn't change the unimodIDs\n um = unimod_mapper.UnimodMapper()\n um._df = pd.DataFrame(\n [\n {\"mono_mass\": 0.33, \"Name\": \"0.33\"},\n {\"mono_mass\": 0.45, \"Name\": \"0.45\"},\n {\"mono_mass\": 0.60, \"Name\": \"0.60\"},\n {\"mono_mass\": 0.64, \"Name\": \"0.64\"},\n {\"mono_mass\": 0.45, \"Name\": \"0.45\"},\n {\"mono_mass\": 0.56, \"Name\": \"0.56\"},\n {\"mono_mass\": 0.67, \"Name\": \"0.67\"},\n {\"mono_mass\": 0.78, \"Name\": \"0.78\"},\n ]\n )\n combo_list = um.mass_to_combos(1, decimals=2)\n assert len(combo_list) == 1\n # a = [\n # (0.98, [\"0.34\", \"0.64\"]),\n # (1.01, [\"0.34\", \"0.67\"]),\n # (1.01, [\"0.45\", \"0.56\"]),\n # (1.01, [\"0.45\", \"0.56\"]),\n # (1.04, [\"0.45\", \"0.59\"]),\n # (1.04, [\"0.59\", \"0.45\"]),\n # ]\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
djrlj694/nyc-taxi-analysis | [
"0d62cc56594ef9260580c9e6c203e9fbde6fee24"
] | [
"src/python/packages/file/sqlite.py"
] | [
"#!/usr/bin/env python3\n\"\"\"\nsqlite.py - A module defining a class (`SQLiteFile`) for reading a SQLite\ndatabase file (`*.sqlite` or `.db`).\n\"\"\"\nimport sqlite3\nfrom dataclasses import dataclass\nfrom typing import List\n\nimport pandas as pd\n\nfrom .data import DataFile\nfrom .jinja2 import Jinja2File\n\n\n# =========================================================================== #\n# METADATA\n# =========================================================================== #\n\n\n__author__ = 'Robert (Bob) L. Jones'\n__credits__ = ['Robert (Bob) L. Jones']\n\n__created_date__ = 'Dec 28, 2021'\n__modified_date__ = 'Dec 28, 2021'\n\n\n# =========================================================================== #\n# CLASSES\n# =========================================================================== #\n\n\n@dataclass\nclass SQLiteFile(DataFile, Jinja2File):\n \"\"\"\n An object class representing a SQL file.\n\n Parameters\n ----------\n `path` : `str`\n The file's pathname.\n\n Returns\n -------\n `SQLiteFile`\n An instantiated `SQLiteFile` object.\n \"\"\"\n\n # -- Getter Methods -- #\n\n @property\n def columns(self) -> List[str]:\n \"\"\"\n The getter of the `SQLFile` class property `columns`.\n \"\"\"\n\n # Return the query result's columns.\n return [row[0] for row in self.cursor.description]\n\n @property\n def connection(self) -> sqlite3.Connection:\n \"\"\"\n The getter of the `SQLiteFile` class property `connection`.\n \"\"\"\n\n # Return the database connection.\n return sqlite3.connect(str(self))\n\n @property\n def cursor(self) -> sqlite3.Cursor:\n \"\"\"\n The getter of the `SQLiteFile` class property `cursor`.\n \"\"\"\n\n # Return the database cursor.\n return self.connection.cursor()\n\n # -- Output Methods -- #\n\n def to_df(self, *args, **kwargs) -> pd.DataFrame:\n \"\"\"\n Loads a DataFrame representation of the (rendered) SQL file.\n\n Returns\n -------\n `pd.DataFrame`\n The DataFrame.\n \"\"\"\n\n # Define an inner function to pass a query result into a DataFrame.\n def query(sql: str) -> pd.DataFrame:\n df = pd.read_sql_query(sql, self.connection)\n\n self.cursor.execute(sql)\n df = pd.DataFrame(self.cursor.fetchall())\n df.columns = self.columns\n df.reset_index(drop=True, inplace=True)\n return df\n\n # Return the DataFrame.\n return super().load(query, *args, **kwargs)\n"
] | [
[
"pandas.read_sql_query"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
brechtlaperre/Tsyganenko | [
"3122b21fd0476aea5e4fcc4fe9cc520c6468265d"
] | [
"src/DA/domain.py"
] | [
"from math import ceil\nimport os\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport click\nimport sys\n\nsys.path.append('.')\n\nfrom src.data.preprocess import read_and_parse\n\ndef accumulate_values(result, ux, uxx, uxz, x, z):\n\n if ux is None:\n ux = result # The mean E(X)\n uxx = np.multiply(result, result) # The squared mean, or E(X^2)\n uxz = np.zeros((ux.shape[0], ux.shape[1], len(x))) \n for i, locx in enumerate(x):\n uxz[:, :, i] = result*result[locx, z[i]]\n else:\n ux += result\n uxx += np.multiply(result, result)\n for i, locx in enumerate(x):\n uxz[:, :, i] += result*result[locx, z[i]]\n\n return ux.astype(np.float64), uxx.astype(np.float64), uxz.astype(np.float64)\n\n\ndef compute_domain(ux, uxx, uxy, x, z):\n var_x = uxx - np.multiply(ux, ux)\n var_z = np.zeros((len(x)))\n cov = np.zeros((var_x.shape[0], var_x.shape[1], len(x)))\n for i, _ in enumerate(x):\n var_z[i] = var_x[x[i], z[i]]\n cov[:, :, i] = uxy[:, :, i] - ux*(ux[x[i], z[i]])\n dv_ = np.sqrt(var_x*var_z[i])\n \n div = np.where(dv_ > 0, 1/dv_, 0)\n cov[:, :, i] = np.multiply(cov[:, :, i], div)\n return cov\n\n\ndef get_results(folder, x, z):\n total = 0\n ext_ux = [None, None, None]\n ext_uxx = [None, None, None]\n ext_uxz = [None, None, None] \n magn_ux, magn_uxx, magn_uxz = None, None, None\n for root, _, files in os.walk(folder):\n print(root)\n for file_ in files:\n if 'OUT0' in file_:\n total += 1 \n grid, ext_B, _, field, magn = read_and_parse(root+'/' + file_, True) \n for i, comp in enumerate(ext_B):\n ext_ux[i], ext_uxx[i], ext_uxz[i] = accumulate_values(comp, ext_ux[i], ext_uxx[i], ext_uxz[i], x, z)\n magn_ux, magn_uxx, magn_uxz = accumulate_values(magn[0], magn_ux, magn_uxx, magn_uxz, x, z)\n\n # normalize\n for i in range(3):\n ext_ux[i] = ext_ux[i]/total # Mean field result\n ext_uxx[i] = ext_uxx[i]/total \n ext_uxz[i] = ext_uxz[i]/total\n magn_ux, magn_uxx, magn_uxz = magn_ux/total, magn_uxx/total, magn_uxz/total\n\n cor_ext = {'Bx': None, 'By': None, 'Bz': None}\n var_ext = {'Bx': None, 'By': None, 'Bz': None}\n mean_ext = {'Bx': ext_ux[0], 'By': ext_ux[1], 'Bz': ext_ux[2]}\n for i, key in enumerate(cor_ext.keys()):\n var_ext[key] = ext_uxx[i] - np.multiply(ext_ux[i], ext_ux[i])\n cor_ext[key] = compute_domain(ext_ux[i], ext_uxx[i], ext_uxz[i], x, z)\n cor_magn = compute_domain(magn_ux, magn_uxx, magn_uxz, x, z)\n\n return grid, cor_ext, cor_magn, ext_ux, var_ext, mean_ext\n\ndef compute_matrix_coords(folder, x, z):\n xloc = np.zeros(x.shape)\n zloc = np.zeros(z.shape)\n grid, _, _, _, _ = read_and_parse(folder + '/OUT00.DAT')\n\n if (abs(x) > 1).any() or (abs(z) > 1).any():\n # in case the coordinates are absolute\n xmin = round(grid[0][0, 0])\n xmax = round(grid[0][0, -1])\n zmin = round(grid[2][0, 0])\n zmax = round(grid[2][-1, 0])\n if (x > xmin).all() & (x < xmax).all() & (z > zmin).all() & (z < zmax).all():\n x = x - xmin\n z = z - zmin\n x = x/(xmax - xmin)\n z = z/(zmax - zmin)\n else:\n raise ValueError('x and z are out of bounds')\n\n for i in range(len(x)):\n zloc[i] = int(grid[0].shape[1]*x[i]) # Translate x-pos to columnnumber\n xloc[i] = int(grid[0].shape[0]*z[i]) # Translate y-pos to rownumber\n xloc = xloc.astype(np.int)\n zloc = zloc.astype(np.int)\n\n loc = np.zeros((len(xloc), 2))\n for i in range(len(xloc)):\n loc[i, 0] = round(100*grid[0][xloc[i], zloc[i]])/100\n loc[i, 1] = round(100*grid[2][xloc[i], zloc[i]])/100\n\n return xloc, zloc, loc\n\n
@click.command()\n@click.argument('source', type=click.Path(exists=True))\n@click.argument('varying', type=str, nargs=-1)\n@click.argument('coords', type=(float, float))\n@click.option('--extra', type=(float, float), multiple=True)\n@click.option('--identifier', type=str, default='')\n@click.option('--folder', type=str, default='figures/')\n@click.option('--model', type=str, default='T89')\n@click.option('--datafile', type=str, default='model/input/inputVx1.csv')\ndef main(source, varying, coords, extra, identifier, folder, model, datafile):\n sns.set(context='paper', style='white', palette='deep', font_scale=1.5)\n sns.set_style('ticks')\n id = source.split('/')[-1].split('x')[-1]\n\n x_coords = [coords[0]]\n z_coords = [coords[1]]\n if len(extra) > 0:\n for pair in extra:\n x_coords.append(pair[0])\n z_coords.append(pair[1])\n\n x_coords = np.array(x_coords)\n z_coords = np.array(z_coords)\n x, z, pos = compute_matrix_coords(source, x_coords, z_coords)\n\n grid, cor_ext, cor_magn, field, variance, mean = get_results(source, x, z)\n\n base = pd.Timestamp('2004-05-08 09:00:00')\n src = pd.read_csv(datafile, index_col=0)\n ref = src.iloc[0,:]\n td = (ref.DOY - base.dayofyear).astype('timedelta64[m]')*60*60 + (ref.Hour - base.hour).astype('timedelta64[m]')*60 + (ref.Minute - base.minute).astype('timedelta64[m]')\n strTime = str(td).split(' ')[0]\n\n # Do this if you do not need to review the files\n filename=folder + model + '_'\n for w in varying:\n filename = filename + w + '_'\n\n f = filename + 'mean_t+{}'.format(strTime)\n plot_mean(mean, grid, field, f, model, strTime)\n\n f = filename + 'diff_mean_t+{}'.format(strTime)\n plot_mean_vs_ref(mean, model, id, strTime, f)\n\n for ind, (xs, zs) in enumerate(pos):\n f = filename + '{}_{}_t+{}'.format(int(x_coords[ind]),int(z_coords[ind]), strTime)\n plot_DOI(cor_ext, grid, field, xs, zs, ind, f, model, strTime)\n\ndef plot_mean(mean, grid, field, filename, model, time):\n k = list(mean.keys())\n del(k[1])\n\n for i in range(field[0].shape[0]):\n for j in range(field[2].shape[1]):\n if grid[0][i,j]**2 + grid[2][i,j]**2 <= 1.5:\n mean[k[0]][i,j] = 0\n mean[k[1]][i,j] = 0\n\n fig, axes = plt.subplots(1, len(k), figsize=(10,6), squeeze=True)\n labels = ['(a)', '(b)', '(c)']\n for i, key in enumerate(k):\n surf = axes[i].imshow(mean[key], origin='lower', cmap=plt.get_cmap('coolwarm'), extent=(grid[0][0,0], grid[0][0,-1], grid[2][0,0], grid[2][-1, 0]), vmin=-100, vmax=100)\n axes[i].streamplot(grid[0], grid[2], field[0], field[2], density=.85, linewidth=1, color='k', arrowsize=.5)\n axes[i].plot(0, 0, 'ko')\n axes[i].set_title(r'{}, Mean, {}[nT], $t_0$ + {} min'.format(model, key, time))\n axes[i].set_xticks(np.arange(-40, 21, 10.0))\n axes[i].tick_params(direction='out')\n axes[i].set_xlim(np.min(grid[0]), np.max(grid[0]))\n axes[i].set_ylim(np.min(grid[2]),np.max(grid[2]))\n axes[i].set_xlabel(r'x/$R_E$')\n axes[i].set_ylabel(r'z/$R_E$')\n # axes[i].text(0.05, 1.1, labels[i], transform=axes[i].transAxes, fontsize=12, va='top', ha='right')\n divider = make_axes_locatable(axes[i])\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n plt.colorbar(surf, cax=cax)\n \n plt.tight_layout()\n plt.savefig(filename+'.png', dpi=300, format='png', transparent=False, bbox_inches='tight', pad_inches=0)\n plt.close()\n\ndef plot_DOI(doi, grid, field, x, z, ind, filename, model, time):\n k = list(doi.keys())\n del(k[1])\n # k = k[1:] # remove first element of the keys\n fig, axes = plt.subplots(1, len(k), figsize=(8,6), sharey=True)\n 
labels = ['(a)', '(b)', '(c)']\n for i, key in enumerate(k):\n surf = axes[i].imshow(doi[key][:, :, ind], origin='lower', cmap=plt.get_cmap('coolwarm'), extent=(grid[0][0,0], grid[0][0,-1], grid[2][0,0], grid[2][-1, 0]), vmin=-1, vmax=1)\n axes[i].set_title(r'{}, DoI, {}, $t_0$ + {} min'.format(model, key, time))\n #axes[i].text(0.00, 1.1, labels[i], transform=axes[i].transAxes, fontsize=12, va='top', ha='right')\n axes[i].set_xticks(np.arange(-40, 21, 10.0))\n axes[i].tick_params(direction='out')\n #else:\n axes[i].set_xlim(np.min(grid[0]), np.max(grid[0]))\n axes[i].set_ylim(np.min(grid[2]),np.max(grid[2]))\n axes[i].set_xlabel(r'x/$R_E$')\n axes[i].set_ylabel(r'z/$R_E$')\n axes[i].plot(0, 0, 'ko')\n divider = make_axes_locatable(axes[i])\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n\n plt.colorbar(surf, cax=cax)\n axes[i].streamplot(grid[0], grid[2], field[0], field[2], density=1, linewidth=1, color='k', arrowsize=.5)\n\n axes[i].plot(x, z, 'g*', markersize=7)\n\n plt.tight_layout()\n plt.savefig(filename+'.png', dpi=300, format='png', transparent=False, bbox_inches='tight', pad_inches=0)\n plt.close()\n\ndef plot_mean_vs_ref(mean, model, id, time, filename):\n grid, field, _ ,_ ,_ = read_and_parse('model/{}/outputref2/OUT{:02d}.DAT'.format(model, int(id)-1))\n dict_field = {'Bx': field[0], 'By': field[1], 'Bz': field[2]}\n keys = list(dict_field.keys())\n del(keys[1])\n \n for i in range(field[0].shape[0]):\n for j in range(field[2].shape[1]):\n if grid[0][i,j]**2 + grid[2][i,j]**2 <= 1.8:\n for k in keys:\n dict_field[k][i,j] = 0\n mean[k][i,j] = 0\n\n fig, axes = plt.subplots(1, len(keys), figsize=(10,6), squeeze=True)\n labels = ['(a)', '(b)', '(c)']\n\n for i, key in enumerate(keys):\n surf = axes[i].imshow(np.log(np.abs(dict_field[key] - mean[key])+0.001), origin='lower', cmap=plt.get_cmap('coolwarm'), extent=(grid[0][0,0], grid[0][0,-1], grid[2][0,0], grid[2][-1, 0]))\n axes[i].streamplot(grid[0], grid[2], field[0], field[2], density=1.1, linewidth=1, color='k', arrowsize=.5)\n axes[i].plot(0, 0, 'ko')\n axes[i].set_title(r'{}, Difference, {}[nT], $t_0$ + {} min'.format(model, key, time))\n axes[i].set_xticks(np.arange(-40, 21, 10.0))\n axes[i].tick_params(direction='out')\n axes[i].set_xlim(np.min(grid[0]), np.max(grid[0]))\n axes[i].set_ylim(np.min(grid[2]),np.max(grid[2]))\n axes[i].set_xlabel(r'x/$R_E$')\n axes[i].set_ylabel(r'z/$R_E$')\n # axes[i].text(0.05, 1.1, labels[i], transform=axes[i].transAxes, fontsize=12, va='top', ha='right')\n divider = make_axes_locatable(axes[i])\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n if i < 1:\n plt.colorbar(surf, cax=cax)\n else:\n plt.colorbar(surf, cax=cax, label='Log(|Reference - Mean|)')\n \n plt.tight_layout()\n plt.savefig(filename+'.png', dpi=300, format='png', transparent=False, bbox_inches='tight', pad_inches=0)\n plt.close()\n\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"pandas.read_csv",
"numpy.sqrt",
"numpy.multiply",
"pandas.Timestamp",
"numpy.min",
"numpy.arange",
"numpy.abs",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.colorbar",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
suryatmodulus/transformers | [
"eb5bdcdfa51f743887ee1d9c7f230444d7a8b23c"
] | [
"examples/pytorch/summarization/run_summarization_no_trainer.py"
] | [
"#!/usr/bin/env python\n# coding=utf-8\n# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning a 🤗 Transformers model on summarization.\n\"\"\"\n# You can also adapt this script on your own summarization task. Pointers for this are left as comments.\n\nimport argparse\nimport json\nimport logging\nimport math\nimport os\nimport random\nfrom pathlib import Path\n\nimport datasets\nimport nltk\nimport numpy as np\nimport torch\nfrom datasets import load_dataset, load_metric\nfrom torch.utils.data import DataLoader\nfrom tqdm.auto import tqdm\n\nimport transformers\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom filelock import FileLock\nfrom huggingface_hub import Repository\nfrom transformers import (\n CONFIG_MAPPING,\n MODEL_MAPPING,\n AdamW,\n AutoConfig,\n AutoModelForSeq2SeqLM,\n AutoTokenizer,\n DataCollatorForSeq2Seq,\n SchedulerType,\n get_scheduler,\n)\nfrom transformers.utils import get_full_repo_name, is_offline_mode\nfrom transformers.utils.versions import require_version\n\n\nlogger = logging.getLogger(__name__)\nrequire_version(\"datasets>=1.8.0\", \"To fix: pip install -r examples/pytorch/summarization/requirements.txt\")\n\n# You should update this to your particular problem to have better documentation of `model_type`\nMODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())\nMODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n\ntry:\n nltk.data.find(\"tokenizers/punkt\")\nexcept (LookupError, OSError):\n if is_offline_mode():\n raise LookupError(\n \"Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files\"\n )\n with FileLock(\".lock\") as lock:\n nltk.download(\"punkt\", quiet=True)\n\nsummarization_name_mapping = {\n \"amazon_reviews_multi\": (\"review_body\", \"review_title\"),\n \"big_patent\": (\"description\", \"abstract\"),\n \"cnn_dailymail\": (\"article\", \"highlights\"),\n \"orange_sum\": (\"text\", \"summary\"),\n \"pn_summary\": (\"article\", \"summary\"),\n \"psc\": (\"extract_text\", \"summary_text\"),\n \"samsum\": (\"dialogue\", \"summary\"),\n \"thaisum\": (\"body\", \"summary\"),\n \"xglue\": (\"news_body\", \"news_title\"),\n \"xsum\": (\"document\", \"summary\"),\n \"wiki_summary\": (\"article\", \"highlights\"),\n}\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Finetune a transformers model on a summarization task\")\n parser.add_argument(\n \"--dataset_name\",\n type=str,\n default=None,\n help=\"The name of the dataset to use (via the datasets library).\",\n )\n parser.add_argument(\n \"--dataset_config_name\",\n type=str,\n default=None,\n help=\"The configuration name of the dataset to use (via the datasets library).\",\n )\n parser.add_argument(\n \"--train_file\", type=str, default=None, help=\"A csv or a json file containing the training data.\"\n )\n parser.add_argument(\n \"--validation_file\", type=str, default=None, help=\"A csv or a 
json file containing the validation data.\"\n )\n parser.add_argument(\n \"--ignore_pad_token_for_loss\",\n type=bool,\n default=True,\n help=\"Whether to ignore the tokens corresponding to \" \"padded labels in the loss computation or not.\",\n )\n parser.add_argument(\n \"--max_source_length\",\n type=int,\n default=1024,\n help=\"The maximum total input sequence length after \"\n \"tokenization.Sequences longer than this will be truncated, sequences shorter will be padded.\",\n )\n parser.add_argument(\n \"--source_prefix\",\n type=str,\n default=None,\n help=\"A prefix to add before every source text \" \"(useful for T5 models).\",\n )\n parser.add_argument(\n \"--preprocessing_num_workers\",\n type=int,\n default=None,\n help=\"The number of processes to use for the preprocessing.\",\n )\n parser.add_argument(\n \"--overwrite_cache\", type=bool, default=None, help=\"Overwrite the cached training and evaluation sets\"\n )\n parser.add_argument(\n \"--max_target_length\",\n type=int,\n default=128,\n help=\"The maximum total sequence length for target text after \"\n \"tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.\"\n \"during ``evaluate`` and ``predict``.\",\n )\n parser.add_argument(\n \"--val_max_target_length\",\n type=int,\n default=None,\n help=\"The maximum total sequence length for validation \"\n \"target text after tokenization.Sequences longer than this will be truncated, sequences shorter will be \"\n \"padded. Will default to `max_target_length`.This argument is also used to override the ``max_length`` \"\n \"param of ``model.generate``, which is used during ``evaluate`` and ``predict``.\",\n )\n parser.add_argument(\n \"--max_length\",\n type=int,\n default=128,\n help=(\n \"The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,\"\n \" sequences shorter will be padded if `--pad_to_max_lengh` is passed.\"\n ),\n )\n parser.add_argument(\n \"--num_beams\",\n type=int,\n default=None,\n help=\"Number of beams to use for evaluation. This argument will be \"\n \"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.\",\n )\n parser.add_argument(\n \"--pad_to_max_length\",\n action=\"store_true\",\n help=\"If passed, pad all samples to `max_length`. 
Otherwise, dynamic padding is used.\",\n )\n parser.add_argument(\n \"--model_name_or_path\",\n type=str,\n help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n required=True,\n )\n parser.add_argument(\n \"--config_name\",\n type=str,\n default=None,\n help=\"Pretrained config name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n type=str,\n default=None,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--text_column\",\n type=str,\n default=None,\n help=\"The name of the column in the datasets containing the full texts (for summarization).\",\n )\n parser.add_argument(\n \"--summary_column\",\n type=str,\n default=None,\n help=\"The name of the column in the datasets containing the summaries (for summarization).\",\n )\n parser.add_argument(\n \"--use_slow_tokenizer\",\n action=\"store_true\",\n help=\"If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).\",\n )\n parser.add_argument(\n \"--per_device_train_batch_size\",\n type=int,\n default=8,\n help=\"Batch size (per device) for the training dataloader.\",\n )\n parser.add_argument(\n \"--per_device_eval_batch_size\",\n type=int,\n default=8,\n help=\"Batch size (per device) for the evaluation dataloader.\",\n )\n parser.add_argument(\n \"--learning_rate\",\n type=float,\n default=5e-5,\n help=\"Initial learning rate (after the potential warmup period) to use.\",\n )\n parser.add_argument(\"--weight_decay\", type=float, default=0.0, help=\"Weight decay to use.\")\n parser.add_argument(\"--num_train_epochs\", type=int, default=3, help=\"Total number of training epochs to perform.\")\n parser.add_argument(\n \"--max_train_steps\",\n type=int,\n default=None,\n help=\"Total number of training steps to perform. 
If provided, overrides num_train_epochs.\",\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\n \"--lr_scheduler_type\",\n type=SchedulerType,\n default=\"linear\",\n help=\"The scheduler type to use.\",\n choices=[\"linear\", \"cosine\", \"cosine_with_restarts\", \"polynomial\", \"constant\", \"constant_with_warmup\"],\n )\n parser.add_argument(\n \"--num_warmup_steps\", type=int, default=0, help=\"Number of steps for the warmup in the lr scheduler.\"\n )\n parser.add_argument(\"--output_dir\", type=str, default=None, help=\"Where to store the final model.\")\n parser.add_argument(\"--seed\", type=int, default=None, help=\"A seed for reproducible training.\")\n parser.add_argument(\n \"--model_type\",\n type=str,\n default=None,\n help=\"Model type to use if training from scratch.\",\n choices=MODEL_TYPES,\n )\n parser.add_argument(\"--push_to_hub\", action=\"store_true\", help=\"Whether or not to push the model to the Hub.\")\n parser.add_argument(\n \"--hub_model_id\", type=str, help=\"The name of the repository to keep in sync with the local `output_dir`.\"\n )\n parser.add_argument(\"--hub_token\", type=str, help=\"The token to use to push to the Model Hub.\")\n parser.add_argument(\n \"--checkpointing_steps\",\n type=str,\n default=None,\n help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n )\n parser.add_argument(\n \"--resume_from_checkpoint\",\n type=str,\n default=None,\n help=\"If the training should continue from a checkpoint folder.\",\n )\n parser.add_argument(\n \"--with_tracking\",\n required=False,\n help=\"Whether to load in all available experiment trackers from the environment and use them for logging.\",\n )\n args = parser.parse_args()\n\n # Sanity checks\n if args.dataset_name is None and args.train_file is None and args.validation_file is None:\n raise ValueError(\"Need either a dataset name or a training/validation file.\")\n else:\n if args.train_file is not None:\n extension = args.train_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`train_file` should be a csv or a json file.\"\n if args.validation_file is not None:\n extension = args.validation_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`validation_file` should be a csv or a json file.\"\n\n if args.push_to_hub:\n assert args.output_dir is not None, \"Need an `output_dir` to create a repo when `--push_to_hub` is passed.\"\n\n return args\n\n\ndef main():\n args = parse_args()\n\n if args.source_prefix is None and args.model_name_or_path in [\n \"t5-small\",\n \"t5-base\",\n \"t5-large\",\n \"t5-3b\",\n \"t5-11b\",\n ]:\n logger.warning(\n \"You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. with \"\n \"`--source_prefix 'summarize: ' `\"\n )\n # Initialize the accelerator. 
We will let the accelerator handle device placement for us in this example.\n # If we're using tracking, we also need to initialize it here and it will pick up all supported trackers in the environment\n accelerator = Accelerator(log_with=\"all\") if args.with_tracking else Accelerator()\n # Make one log on every process with the configuration for debugging.\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO,\n )\n logger.info(accelerator.state)\n\n # Setup logging, we only want one process per machine to log things on the screen.\n # accelerator.is_local_main_process is only True for one process per machine.\n logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)\n if accelerator.is_local_main_process:\n datasets.utils.logging.set_verbosity_warning()\n transformers.utils.logging.set_verbosity_info()\n else:\n datasets.utils.logging.set_verbosity_error()\n transformers.utils.logging.set_verbosity_error()\n\n # If passed along, set the training seed now.\n if args.seed is not None:\n set_seed(args.seed)\n\n # Handle the repository creation\n if accelerator.is_main_process:\n if args.push_to_hub:\n if args.hub_model_id is None:\n repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)\n else:\n repo_name = args.hub_model_id\n repo = Repository(args.output_dir, clone_from=repo_name)\n\n with open(os.path.join(args.output_dir, \".gitignore\"), \"w+\") as gitignore:\n if \"step_*\" not in gitignore:\n gitignore.write(\"step_*\\n\")\n if \"epoch_*\" not in gitignore:\n gitignore.write(\"epoch_*\\n\")\n elif args.output_dir is not None:\n os.makedirs(args.output_dir, exist_ok=True)\n accelerator.wait_for_everyone()\n\n # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\n # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n # (the dataset will be downloaded automatically from the datasets Hub).\n #\n # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called\n # 'text' is found. 
You can easily tweak this behavior (see below).\n #\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n if args.dataset_name is not None:\n # Downloading and loading a dataset from the hub.\n raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)\n else:\n data_files = {}\n if args.train_file is not None:\n data_files[\"train\"] = args.train_file\n if args.validation_file is not None:\n data_files[\"validation\"] = args.validation_file\n extension = args.train_file.split(\".\")[-1]\n raw_datasets = load_dataset(extension, data_files=data_files)\n # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n # Load pretrained model and tokenizer\n #\n # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n if args.config_name:\n config = AutoConfig.from_pretrained(args.config_name)\n elif args.model_name_or_path:\n config = AutoConfig.from_pretrained(args.model_name_or_path)\n else:\n config = CONFIG_MAPPING[args.model_type]()\n logger.warning(\"You are instantiating a new config instance from scratch.\")\n\n if args.tokenizer_name:\n tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)\n elif args.model_name_or_path:\n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)\n else:\n raise ValueError(\n \"You are instantiating a new tokenizer from scratch. This is not supported by this script.\"\n \"You can do it from another script, save it, and load it from here, using --tokenizer_name.\"\n )\n\n if args.model_name_or_path:\n model = AutoModelForSeq2SeqLM.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n )\n else:\n logger.info(\"Training new model from scratch\")\n model = AutoModelForSeq2SeqLM.from_config(config)\n\n model.resize_token_embeddings(len(tokenizer))\n if model.config.decoder_start_token_id is None:\n raise ValueError(\"Make sure that `config.decoder_start_token_id` is correctly defined\")\n\n prefix = args.source_prefix if args.source_prefix is not None else \"\"\n\n # Preprocessing the datasets.\n # First we tokenize all the texts.\n column_names = raw_datasets[\"train\"].column_names\n\n # Get the column names for input/target.\n dataset_columns = summarization_name_mapping.get(args.dataset_name, None)\n if args.text_column is None:\n text_column = dataset_columns[0] if dataset_columns is not None else column_names[0]\n else:\n text_column = args.text_column\n if text_column not in column_names:\n raise ValueError(\n f\"--text_column' value '{args.text_column}' needs to be one of: {', '.join(column_names)}\"\n )\n if args.summary_column is None:\n summary_column = dataset_columns[1] if dataset_columns is not None else column_names[1]\n else:\n summary_column = args.summary_column\n if summary_column not in column_names:\n raise ValueError(\n f\"--summary_column' value '{args.summary_column}' needs to be one of: {', '.join(column_names)}\"\n )\n\n # Temporarily set max_target_length for training.\n max_target_length = args.max_target_length\n padding = \"max_length\" if args.pad_to_max_length else False\n\n def preprocess_function(examples):\n inputs = examples[text_column]\n targets = 
examples[summary_column]\n inputs = [prefix + inp for inp in inputs]\n model_inputs = tokenizer(inputs, max_length=args.max_source_length, padding=padding, truncation=True)\n\n # Setup the tokenizer for targets\n with tokenizer.as_target_tokenizer():\n labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)\n\n # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore\n # padding in the loss.\n if padding == \"max_length\" and args.ignore_pad_token_for_loss:\n labels[\"input_ids\"] = [\n [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels[\"input_ids\"]\n ]\n\n model_inputs[\"labels\"] = labels[\"input_ids\"]\n return model_inputs\n\n with accelerator.main_process_first():\n processed_datasets = raw_datasets.map(\n preprocess_function,\n batched=True,\n num_proc=args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not args.overwrite_cache,\n desc=\"Running tokenizer on dataset\",\n )\n\n train_dataset = processed_datasets[\"train\"]\n eval_dataset = processed_datasets[\"validation\"]\n\n # Log a few random samples from the training set:\n for index in random.sample(range(len(train_dataset)), 1):\n logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n\n label_pad_token_id = -100 if args.ignore_pad_token_for_loss else tokenizer.pad_token_id\n data_collator = DataCollatorForSeq2Seq(\n tokenizer,\n model=model,\n label_pad_token_id=label_pad_token_id,\n pad_to_multiple_of=8 if accelerator.use_fp16 else None,\n )\n\n def postprocess_text(preds, labels):\n preds = [pred.strip() for pred in preds]\n labels = [label.strip() for label in labels]\n\n # rougeLSum expects newline after each sentence\n preds = [\"\\n\".join(nltk.sent_tokenize(pred)) for pred in preds]\n labels = [\"\\n\".join(nltk.sent_tokenize(label)) for label in labels]\n\n return preds, labels\n\n train_dataloader = DataLoader(\n train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size\n )\n eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)\n\n # Optimizer\n # Split weights in two groups, one with weight decay and the other not.\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)\n\n # Scheduler and math around the number of training steps.\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n if args.max_train_steps is None:\n args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n else:\n args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n lr_scheduler = get_scheduler(\n name=args.lr_scheduler_type,\n optimizer=optimizer,\n num_warmup_steps=args.num_warmup_steps,\n num_training_steps=args.max_train_steps,\n )\n\n # Prepare everything with our `accelerator`.\n model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n )\n\n # Figure out how many steps we should save the Accelerator states\n 
if hasattr(args.checkpointing_steps, \"isdigit\"):\n checkpointing_steps = args.checkpointing_steps\n if args.checkpointing_steps.isdigit():\n checkpointing_steps = int(args.checkpointing_steps)\n else:\n checkpointing_steps = None\n\n # We need to initialize the trackers we use, and also store our configuration\n if args.with_tracking:\n accelerator.init_trackers(\"summarization_no_trainer\", args)\n\n # Metric\n metric = load_metric(\"rouge\")\n\n # Train!\n total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {len(train_dataset)}\")\n logger.info(f\" Num Epochs = {args.num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {args.per_device_train_batch_size}\")\n logger.info(f\" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {args.max_train_steps}\")\n # Only show the progress bar once on each machine.\n progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)\n completed_steps = 0\n # Potentially load in the weights and states from a previous save\n if args.resume_from_checkpoint:\n if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n accelerator.load_state(args.resume_from_checkpoint)\n resume_step = None\n path = args.resume_from_checkpoint\n else:\n # Get the most recent checkpoint\n dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n dirs.sort(key=os.path.getctime)\n path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last\n if \"epoch\" in path:\n args.num_train_epochs -= int(path.replace(\"epoch_\", \"\"))\n else:\n resume_step = int(path.replace(\"step_\", \"\"))\n args.num_train_epochs -= resume_step // len(train_dataloader)\n resume_step = (args.num_train_epochs * len(train_dataloader)) - resume_step\n\n for epoch in range(args.num_train_epochs):\n model.train()\n if args.with_tracking:\n total_loss = 0\n for step, batch in enumerate(train_dataloader):\n # We need to skip steps until we reach the resumed step\n if args.resume_from_checkpoint and epoch == 0 and step < resume_step:\n continue\n outputs = model(**batch)\n loss = outputs.loss\n # We keep track of the loss at each epoch\n if args.with_tracking:\n total_loss += loss.detach().float()\n loss = loss / args.gradient_accumulation_steps\n accelerator.backward(loss)\n if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n progress_bar.update(1)\n completed_steps += 1\n\n if isinstance(checkpointing_steps, int):\n if completed_steps % checkpointing_steps == 0:\n output_dir = f\"step_{completed_steps}\"\n if args.output_dir is not None:\n output_dir = os.path.join(args.output_dir, output_dir)\n accelerator.save_state(output_dir)\n\n if completed_steps >= args.max_train_steps:\n break\n\n model.eval()\n if args.val_max_target_length is None:\n args.val_max_target_length = args.max_target_length\n\n gen_kwargs = {\n \"max_length\": args.val_max_target_length if args is not None else config.max_length,\n \"num_beams\": args.num_beams,\n }\n for step, batch in enumerate(eval_dataloader):\n with 
torch.no_grad():\n generated_tokens = accelerator.unwrap_model(model).generate(\n batch[\"input_ids\"],\n attention_mask=batch[\"attention_mask\"],\n **gen_kwargs,\n )\n\n generated_tokens = accelerator.pad_across_processes(\n generated_tokens, dim=1, pad_index=tokenizer.pad_token_id\n )\n labels = batch[\"labels\"]\n if not args.pad_to_max_length:\n # If we did not pad to max length, we need to pad the labels too\n labels = accelerator.pad_across_processes(batch[\"labels\"], dim=1, pad_index=tokenizer.pad_token_id)\n\n generated_tokens = accelerator.gather(generated_tokens).cpu().numpy()\n labels = accelerator.gather(labels).cpu().numpy()\n\n if args.ignore_pad_token_for_loss:\n # Replace -100 in the labels as we can't decode them.\n labels = np.where(labels != -100, labels, tokenizer.pad_token_id)\n if isinstance(generated_tokens, tuple):\n generated_tokens = generated_tokens[0]\n decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)\n decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)\n\n decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)\n\n metric.add_batch(predictions=decoded_preds, references=decoded_labels)\n result = metric.compute(use_stemmer=True)\n # Extract a few results from ROUGE\n result = {key: value.mid.fmeasure * 100 for key, value in result.items()}\n\n result = {k: round(v, 4) for k, v in result.items()}\n\n logger.info(result)\n\n if args.with_tracking:\n result[\"train_loss\"] = total_loss\n result[\"epoch\"] = epoch\n accelerator.log(result, step=completed_steps)\n\n if args.push_to_hub and epoch < args.num_train_epochs - 1:\n accelerator.wait_for_everyone()\n unwrapped_model = accelerator.unwrap_model(model)\n unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)\n if accelerator.is_main_process:\n tokenizer.save_pretrained(args.output_dir)\n repo.push_to_hub(\n commit_message=f\"Training in progress epoch {epoch}\", blocking=False, auto_lfs_prune=True\n )\n\n if args.checkpointing_steps == \"epoch\":\n output_dir = f\"epoch_{epoch}\"\n if args.output_dir is not None:\n output_dir = os.path.join(args.output_dir, output_dir)\n accelerator.save_state(output_dir)\n\n if args.output_dir is not None:\n accelerator.wait_for_everyone()\n unwrapped_model = accelerator.unwrap_model(model)\n unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)\n if accelerator.is_main_process:\n tokenizer.save_pretrained(args.output_dir)\n if args.push_to_hub:\n repo.push_to_hub(commit_message=\"End of training\", auto_lfs_prune=True)\n with open(os.path.join(args.output_dir, \"all_results.json\"), \"w\") as f:\n json.dump(\n {\n \"eval_rouge1\": result[\"rouge1\"],\n \"eval_rouge2\": result[\"rouge2\"],\n \"eval_rougeL\": result[\"rougeL\"],\n \"eval_rougeLsum\": result[\"rougeLsum\"],\n },\n f,\n )\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.no_grad",
"torch.utils.data.DataLoader",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sanatb97/SokobanAI | [
"209a5efab50373786157827fded5acfee556db64"
] | [
"hungarian.py"
] | [
"#!/usr/bin/python\n\"\"\"\nCopyright (c) 2010 Thom Dedecko\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE\n\nImplementation of the Hungarian (Munkres) Algorithm using Python and NumPy\nReferences: http://www.ams.jhu.edu/~castello/362/Handouts/hungarian.pdf\n http://weber.ucsd.edu/~vcrawfor/hungar.pdf\n http://en.wikipedia.org/wiki/Hungarian_algorithm\n http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html\n http://www.clapper.org/software/python/munkres/\n\"\"\"\n\n# Module Information.\n__version__ = \"1.1.1\"\n__author__ = \"Thom Dedecko\"\n__url__ = \"http://github.com/tdedecko/hungarian-algorithm\"\n__copyright__ = \"(c) 2010 Thom Dedecko\"\n__license__ = \"MIT License\"\n\n\nclass HungarianError(Exception):\n pass\n\n# Import numpy. Error if fails\ntry:\n import numpy as np\nexcept ImportError:\n raise HungarianError(\"NumPy is not installed.\")\n\n\nclass Hungarian:\n \"\"\"\n Implementation of the Hungarian (Munkres) Algorithm using np.\n\n Usage:\n hungarian = Hungarian(cost_matrix)\n hungarian.calculate()\n or\n hungarian = Hungarian()\n hungarian.calculate(cost_matrix)\n\n Handle Profit matrix:\n hungarian = Hungarian(profit_matrix, is_profit_matrix=True)\n or\n cost_matrix = Hungarian.make_cost_matrix(profit_matrix)\n\n The matrix will be automatically padded if it is not square.\n For that numpy's resize function is used, which automatically adds 0's to any row/column that is added\n\n Get results and total potential after calculation:\n hungarian.get_results()\n hungarian.get_total_potential()\n \"\"\"\n\n def __init__(self, input_matrix=None, is_profit_matrix=False):\n \"\"\"\n input_matrix is a List of Lists.\n input_matrix is assumed to be a cost matrix unless is_profit_matrix is True.\n \"\"\"\n if input_matrix is not None:\n # Save input\n my_matrix = np.array(input_matrix)\n self._input_matrix = np.array(input_matrix)\n self._maxColumn = my_matrix.shape[1]\n self._maxRow = my_matrix.shape[0]\n\n # Adds 0s if any columns/rows are added. 
Otherwise stays unaltered\n matrix_size = max(self._maxColumn, self._maxRow)\n my_matrix.resize(matrix_size, matrix_size)\n\n # Convert matrix to profit matrix if necessary\n if is_profit_matrix:\n my_matrix = self.make_cost_matrix(my_matrix)\n\n self._cost_matrix = my_matrix\n self._size = len(my_matrix)\n self._shape = my_matrix.shape\n\n # Results from algorithm.\n self._results = []\n self._totalPotential = 0\n else:\n self._cost_matrix = None\n\n def get_results(self):\n \"\"\"Get results after calculation.\"\"\"\n return self._results\n\n def get_total_potential(self):\n \"\"\"Returns expected value after calculation.\"\"\"\n return self._totalPotential\n\n def calculate(self, input_matrix=None, is_profit_matrix=False):\n \"\"\"\n Implementation of the Hungarian (Munkres) Algorithm.\n\n input_matrix is a List of Lists.\n input_matrix is assumed to be a cost matrix unless is_profit_matrix is True.\n \"\"\"\n # Handle invalid and new matrix inputs.\n if input_matrix is None and self._cost_matrix is None:\n raise HungarianError(\"Invalid input\")\n elif input_matrix is not None:\n self.__init__(input_matrix, is_profit_matrix)\n\n result_matrix = self._cost_matrix.copy()\n\n # Step 1: Subtract row mins from each row.\n for index, row in enumerate(result_matrix):\n result_matrix[index] -= row.min()\n\n # Step 2: Subtract column mins from each column.\n for index, column in enumerate(result_matrix.T):\n result_matrix[:, index] -= column.min()\n\n # Step 3: Use minimum number of lines to cover all zeros in the matrix.\n # If the total covered rows+columns is not equal to the matrix size then adjust matrix and repeat.\n total_covered = 0\n while total_covered < self._size:\n # Find minimum number of lines to cover all zeros in the matrix and find total covered rows and columns.\n cover_zeros = CoverZeros(result_matrix)\n covered_rows = cover_zeros.get_covered_rows()\n covered_columns = cover_zeros.get_covered_columns()\n total_covered = len(covered_rows) + len(covered_columns)\n\n # if the total covered rows+columns is not equal to the matrix size then adjust it by min uncovered num (m).\n if total_covered < self._size:\n result_matrix = self._adjust_matrix_by_min_uncovered_num(result_matrix, covered_rows, covered_columns)\n\n # Step 4: Starting with the top row, work your way downwards as you make assignments.\n # Find single zeros in rows or columns.\n # Add them to final result and remove them and their associated row/column from the matrix.\n expected_results = min(self._maxColumn, self._maxRow)\n zero_locations = (result_matrix == 0)\n while len(self._results) != expected_results:\n\n # If number of zeros in the matrix is zero before finding all the results then an error has occurred.\n if not zero_locations.any():\n raise HungarianError(\"Unable to find results. 
Algorithm has failed.\")\n\n # Find results and mark rows and columns for deletion\n matched_rows, matched_columns = self.__find_matches(zero_locations)\n\n # Make arbitrary selection\n total_matched = len(matched_rows) + len(matched_columns)\n if total_matched == 0:\n matched_rows, matched_columns = self.select_arbitrary_match(zero_locations)\n\n # Delete rows and columns\n for row in matched_rows:\n zero_locations[row] = False\n for column in matched_columns:\n zero_locations[:, column] = False\n\n # Save Results\n self.__set_results(zip(matched_rows, matched_columns))\n\n # Calculate total potential\n value = 0\n for row, column in self._results:\n value += self._input_matrix[row, column]\n self._totalPotential = value\n\n @staticmethod\n def make_cost_matrix(profit_matrix):\n \"\"\"\n Converts a profit matrix into a cost matrix.\n Expects NumPy objects as input.\n \"\"\"\n # subtract profit matrix from a matrix made of the max value of the profit matrix\n matrix_shape = profit_matrix.shape\n offset_matrix = np.ones(matrix_shape) * profit_matrix.max()\n cost_matrix = offset_matrix - profit_matrix\n return cost_matrix\n\n def _adjust_matrix_by_min_uncovered_num(self, result_matrix, covered_rows, covered_columns):\n \"\"\"Subtract m from every uncovered number and add m to every element covered with two lines.\"\"\"\n # Calculate minimum uncovered number (m)\n elements = []\n for row_index, row in enumerate(result_matrix):\n if row_index not in covered_rows:\n for index, element in enumerate(row):\n if index not in covered_columns:\n elements.append(element)\n min_uncovered_num = min(elements)\n\n # Add m to every covered element\n adjusted_matrix = result_matrix\n for row in covered_rows:\n adjusted_matrix[row] += min_uncovered_num\n for column in covered_columns:\n adjusted_matrix[:, column] += min_uncovered_num\n\n # Subtract m from every element\n m_matrix = np.ones(self._shape) * min_uncovered_num\n adjusted_matrix -= m_matrix\n\n return adjusted_matrix\n\n def __find_matches(self, zero_locations):\n \"\"\"Returns rows and columns with matches in them.\"\"\"\n marked_rows = np.array([], dtype=int)\n marked_columns = np.array([], dtype=int)\n\n # Mark rows and columns with matches\n # Iterate over rows\n for index, row in enumerate(zero_locations):\n row_index = np.array([index])\n if np.sum(row) == 1:\n column_index, = np.where(row)\n marked_rows, marked_columns = self.__mark_rows_and_columns(marked_rows, marked_columns, row_index,\n column_index)\n\n # Iterate over columns\n for index, column in enumerate(zero_locations.T):\n column_index = np.array([index])\n if np.sum(column) == 1:\n row_index, = np.where(column)\n marked_rows, marked_columns = self.__mark_rows_and_columns(marked_rows, marked_columns, row_index,\n column_index)\n\n return marked_rows, marked_columns\n\n @staticmethod\n def __mark_rows_and_columns(marked_rows, marked_columns, row_index, column_index):\n \"\"\"Check if column or row is marked. 
If not marked then mark it.\"\"\"\n new_marked_rows = marked_rows\n new_marked_columns = marked_columns\n if not (marked_rows == row_index).any() and not (marked_columns == column_index).any():\n new_marked_rows = np.insert(marked_rows, len(marked_rows), row_index)\n new_marked_columns = np.insert(marked_columns, len(marked_columns), column_index)\n return new_marked_rows, new_marked_columns\n\n @staticmethod\n def select_arbitrary_match(zero_locations):\n \"\"\"Selects row column combination with minimum number of zeros in it.\"\"\"\n # Count number of zeros in row and column combinations\n rows, columns = np.where(zero_locations)\n zero_count = []\n for index, row in enumerate(rows):\n total_zeros = np.sum(zero_locations[row]) + np.sum(zero_locations[:, columns[index]])\n zero_count.append(total_zeros)\n\n # Get the row column combination with the minimum number of zeros.\n indices = zero_count.index(min(zero_count))\n row = np.array([rows[indices]])\n column = np.array([columns[indices]])\n\n return row, column\n\n def __set_results(self, result_lists):\n \"\"\"Set results during calculation.\"\"\"\n # Check if results values are out of bound from input matrix (because of matrix being padded).\n # Add results to results list.\n for result in result_lists:\n row, column = result\n if row < self._maxRow and column < self._maxColumn:\n new_result = (int(row), int(column))\n self._results.append(new_result)\n\n\nclass CoverZeros:\n \"\"\"\n Use minimum number of lines to cover all zeros in the matrix.\n Algorithm based on: http://weber.ucsd.edu/~vcrawfor/hungar.pdf\n \"\"\"\n\n def __init__(self, matrix):\n \"\"\"\n Input a matrix and save it as a boolean matrix to designate zero locations.\n Run calculation procedure to generate results.\n \"\"\"\n # Find zeros in matrix\n self._zero_locations = (matrix == 0)\n self._shape = matrix.shape\n\n # Choices starts without any choices made.\n self._choices = np.zeros(self._shape, dtype=bool)\n\n self._marked_rows = []\n self._marked_columns = []\n\n # marks rows and columns\n self.__calculate()\n\n # Draw lines through all unmarked rows and all marked columns.\n self._covered_rows = list(set(range(self._shape[0])) - set(self._marked_rows))\n self._covered_columns = self._marked_columns\n\n def get_covered_rows(self):\n \"\"\"Return list of covered rows.\"\"\"\n return self._covered_rows\n\n def get_covered_columns(self):\n \"\"\"Return list of covered columns.\"\"\"\n return self._covered_columns\n\n def __calculate(self):\n \"\"\"\n Calculates minimum number of lines necessary to cover all zeros in a matrix.\n Algorithm based on: http://weber.ucsd.edu/~vcrawfor/hungar.pdf\n \"\"\"\n while True:\n # Erase all marks.\n self._marked_rows = []\n self._marked_columns = []\n\n # Mark all rows in which no choice has been made.\n for index, row in enumerate(self._choices):\n if not row.any():\n self._marked_rows.append(index)\n\n # If no marked rows then finish.\n if not self._marked_rows:\n return True\n\n # Mark all columns not already marked which have zeros in marked rows.\n num_marked_columns = self.__mark_new_columns_with_zeros_in_marked_rows()\n\n # If no new marked columns then finish.\n if num_marked_columns == 0:\n return True\n\n # While there is some choice in every marked column.\n while self.__choice_in_all_marked_columns():\n # Some Choice in every marked column.\n\n # Mark all rows not already marked which have choices in marked columns.\n num_marked_rows = self.__mark_new_rows_with_choices_in_marked_columns()\n\n # If no new marks then 
Finish.\n if num_marked_rows == 0:\n return True\n\n # Mark all columns not already marked which have zeros in marked rows.\n num_marked_columns = self.__mark_new_columns_with_zeros_in_marked_rows()\n\n # If no new marked columns then finish.\n if num_marked_columns == 0:\n return True\n\n # No choice in one or more marked columns.\n # Find a marked column that does not have a choice.\n choice_column_index = self.__find_marked_column_without_choice()\n\n while choice_column_index is not None:\n # Find a zero in the column indexed that does not have a row with a choice.\n choice_row_index = self.__find_row_without_choice(choice_column_index)\n\n # Check if an available row was found.\n new_choice_column_index = None\n if choice_row_index is None:\n # Find a good row to accomodate swap. Find its column pair.\n choice_row_index, new_choice_column_index = \\\n self.__find_best_choice_row_and_new_column(choice_column_index)\n\n # Delete old choice.\n self._choices[choice_row_index, new_choice_column_index] = False\n\n # Set zero to choice.\n self._choices[choice_row_index, choice_column_index] = True\n\n # Loop again if choice is added to a row with a choice already in it.\n choice_column_index = new_choice_column_index\n\n def __mark_new_columns_with_zeros_in_marked_rows(self):\n \"\"\"Mark all columns not already marked which have zeros in marked rows.\"\"\"\n num_marked_columns = 0\n for index, column in enumerate(self._zero_locations.T):\n if index not in self._marked_columns:\n if column.any():\n row_indices, = np.where(column)\n zeros_in_marked_rows = (set(self._marked_rows) & set(row_indices)) != set([])\n if zeros_in_marked_rows:\n self._marked_columns.append(index)\n num_marked_columns += 1\n return num_marked_columns\n\n def __mark_new_rows_with_choices_in_marked_columns(self):\n \"\"\"Mark all rows not already marked which have choices in marked columns.\"\"\"\n num_marked_rows = 0\n for index, row in enumerate(self._choices):\n if index not in self._marked_rows:\n if row.any():\n column_index, = np.where(row)\n if column_index in self._marked_columns:\n self._marked_rows.append(index)\n num_marked_rows += 1\n return num_marked_rows\n\n def __choice_in_all_marked_columns(self):\n \"\"\"Return Boolean True if there is a choice in all marked columns. Returns boolean False otherwise.\"\"\"\n for column_index in self._marked_columns:\n if not self._choices[:, column_index].any():\n return False\n return True\n\n def __find_marked_column_without_choice(self):\n \"\"\"Find a marked column that does not have a choice.\"\"\"\n for column_index in self._marked_columns:\n if not self._choices[:, column_index].any():\n return column_index\n\n raise HungarianError(\n \"Could not find a column without a choice. Failed to cover matrix zeros. Algorithm has failed.\")\n\n def __find_row_without_choice(self, choice_column_index):\n \"\"\"Find a row without a choice in it for the column indexed. If a row does not exist then return None.\"\"\"\n row_indices, = np.where(self._zero_locations[:, choice_column_index])\n for row_index in row_indices:\n if not self._choices[row_index].any():\n return row_index\n\n # All rows have choices. 
Return None.\n return None\n\n def __find_best_choice_row_and_new_column(self, choice_column_index):\n \"\"\"\n Find a row index to use for the choice so that the column that needs to be changed is optimal.\n Return a random row and column if unable to find an optimal selection.\n \"\"\"\n row_indices, = np.where(self._zero_locations[:, choice_column_index])\n for row_index in row_indices:\n column_indices, = np.where(self._choices[row_index])\n column_index = column_indices[0]\n if self.__find_row_without_choice(column_index) is not None:\n return row_index, column_index\n\n # Cannot find optimal row and column. Return a random row and column.\n from random import shuffle\n\n shuffle(row_indices)\n column_index, = np.where(self._choices[row_indices[0]])\n return row_indices[0], column_index[0]\n\n\nif __name__ == '__main__':\n profit_matrix = [\n [62, 75, 80, 93, 95, 97],\n [75, 80, 82, 85, 71, 97],\n [80, 75, 81, 98, 90, 97],\n [78, 82, 84, 80, 50, 98],\n [90, 85, 85, 80, 85, 99],\n [65, 75, 80, 75, 68, 96]]\n\n hungarian = Hungarian(profit_matrix, is_profit_matrix=True)\n hungarian.calculate()\n print(\"Expected value:\\t\\t543\")\n print(\"Calculated value:\\t\", hungarian.get_total_potential()) # = 543\n print(\"Expected results:\\n\\t[(0, 4), (2, 3), (5, 5), (4, 0), (1, 1), (3, 2)]\")\n print(\"Results:\\n\\t\", hungarian.get_results())\n print(\"-\" * 80)\n\n cost_matrix = [\n [4, 2, 8],\n [4, 3, 7],\n [3, 1, 6]]\n hungarian = Hungarian(cost_matrix)\n print('calculating...')\n hungarian.calculate()\n print(\"Expected value:\\t\\t12\")\n print(\"Calculated value:\\t\", hungarian.get_total_potential()) # = 12\n print(\"Expected results:\\n\\t[(0, 1), (1, 0), (2, 2)]\")\n print(\"Results:\\n\\t\", hungarian.get_results())\n print(\"-\" * 80)\n\n profit_matrix = [\n [62, 75, 80, 93, 0, 97],\n [75, 0, 82, 85, 71, 97],\n [80, 75, 81, 0, 90, 97],\n [78, 82, 0, 80, 50, 98],\n [0, 85, 85, 80, 85, 99],\n [65, 75, 80, 75, 68, 0]]\n hungarian = Hungarian()\n hungarian.calculate(profit_matrix, is_profit_matrix=True)\n print(\"Expected value:\\t\\t523\")\n print(\"Calculated value:\\t\", hungarian.get_total_potential()) # = 523\n print(\"Expected results:\\n\\t[(0, 3), (2, 4), (3, 0), (5, 2), (1, 5), (4, 1)]\")\n print(\"Results:\\n\\t\", hungarian.get_results())\n print(\"-\" * 80)\n"
] | [
[
"numpy.ones",
"numpy.array",
"numpy.where",
"numpy.sum",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kirarpit/ray | [
"bcc379556b135ee2e472b0e4b388c9e1f8274dc9"
] | [
"python/ray/rllib/tests/test_catalog.py"
] | [
"import gym\nimport numpy as np\nimport unittest\nfrom gym.spaces import Box, Discrete, Tuple\n\nimport ray\n\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.models.model import Model\nfrom ray.rllib.models.preprocessors import (NoPreprocessor, OneHotPreprocessor,\n Preprocessor)\nfrom ray.rllib.models.fcnet import FullyConnectedNetwork\nfrom ray.rllib.models.visionnet import VisionNetwork\nfrom ray.rllib.utils import try_import_tf\n\ntf = try_import_tf()\n\n\nclass CustomPreprocessor(Preprocessor):\n def _init_shape(self, obs_space, options):\n return [1]\n\n\nclass CustomPreprocessor2(Preprocessor):\n def _init_shape(self, obs_space, options):\n return [1]\n\n\nclass CustomModel(Model):\n def _build_layers(self, *args):\n return tf.constant([[0] * 5]), None\n\n\nclass ModelCatalogTest(unittest.TestCase):\n def tearDown(self):\n ray.shutdown()\n\n def testGymPreprocessors(self):\n p1 = ModelCatalog.get_preprocessor(gym.make(\"CartPole-v0\"))\n self.assertEqual(type(p1), NoPreprocessor)\n\n p2 = ModelCatalog.get_preprocessor(gym.make(\"FrozenLake-v0\"))\n self.assertEqual(type(p2), OneHotPreprocessor)\n\n def testTuplePreprocessor(self):\n ray.init()\n\n class TupleEnv(object):\n def __init__(self):\n self.observation_space = Tuple(\n [Discrete(5),\n Box(0, 5, shape=(3, ), dtype=np.float32)])\n\n p1 = ModelCatalog.get_preprocessor(TupleEnv())\n self.assertEqual(p1.shape, (8, ))\n self.assertEqual(\n list(p1.transform((0, np.array([1, 2, 3])))),\n [float(x) for x in [1, 0, 0, 0, 0, 1, 2, 3]])\n\n def testCustomPreprocessor(self):\n ray.init()\n ModelCatalog.register_custom_preprocessor(\"foo\", CustomPreprocessor)\n ModelCatalog.register_custom_preprocessor(\"bar\", CustomPreprocessor2)\n env = gym.make(\"CartPole-v0\")\n p1 = ModelCatalog.get_preprocessor(env, {\"custom_preprocessor\": \"foo\"})\n self.assertEqual(str(type(p1)), str(CustomPreprocessor))\n p2 = ModelCatalog.get_preprocessor(env, {\"custom_preprocessor\": \"bar\"})\n self.assertEqual(str(type(p2)), str(CustomPreprocessor2))\n p3 = ModelCatalog.get_preprocessor(env)\n self.assertEqual(type(p3), NoPreprocessor)\n\n def testDefaultModels(self):\n ray.init()\n\n with tf.variable_scope(\"test1\"):\n p1 = ModelCatalog.get_model({\n \"obs\": tf.zeros((10, 3), dtype=tf.float32)\n }, Box(0, 1, shape=(3, ), dtype=np.float32), Discrete(5), 5, {})\n self.assertEqual(type(p1), FullyConnectedNetwork)\n\n with tf.variable_scope(\"test2\"):\n p2 = ModelCatalog.get_model({\n \"obs\": tf.zeros((10, 84, 84, 3), dtype=tf.float32)\n }, Box(0, 1, shape=(84, 84, 3), dtype=np.float32), Discrete(5), 5,\n {})\n self.assertEqual(type(p2), VisionNetwork)\n\n def testCustomModel(self):\n ray.init()\n ModelCatalog.register_custom_model(\"foo\", CustomModel)\n p1 = ModelCatalog.get_model({\n \"obs\": tf.constant([1, 2, 3])\n }, Box(0, 1, shape=(3, ), dtype=np.float32), Discrete(5), 5,\n {\"custom_model\": \"foo\"})\n self.assertEqual(str(type(p1)), str(CustomModel))\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kammerje/webbpsf | [
"a3628dcb40e378121cf36872f44f29b195c2c2cf"
] | [
"webbpsf/tests/test_roman.py"
] | [
"import os\nimport numpy as np\nimport pytest\nfrom webbpsf import roman, measure_fwhm\nfrom astropy.table import Table\nfrom numpy import allclose\n\n\nGRISM_FILTERS = roman.GRISM_FILTERS\nPRISM_FILTERS = roman.PRISM_FILTERS\n\ndef detector_substr(detector):\n \"\"\"\n change detector string to match file format\n (e.g., \"SCA01\" -> \"SCA_1\")\n \"\"\"\n return f\"{detector[:3]}_{str(int((detector[3:])))}\"\n\ndef pupil_path(wfi, mask=None):\n \"\"\"\n dynamically generate current pupil path for a given WFI instance\n \"\"\"\n mask = (wfi._pupil_controller._get_filter_mask(wfi.filter) if mask is None\n else mask)\n detector = detector_substr(wfi.detector)\n\n base = wfi._pupil_controller._pupil_basepath\n file = wfi._pupil_controller.pupil_file_formatters[mask]\n\n return os.path.join(base, file).format(detector)\n\ndef test_WFI_psf():\n \"\"\"\n Test that instantiating WFI works and can compute a PSF without\n raising any exceptions\n \"\"\"\n wfi = roman.WFI()\n wfi.calc_psf(fov_pixels=4)\n\ndef test_WFI_filters():\n wfi = roman.WFI()\n filter_list = wfi.filter_list\n\n for filter in filter_list:\n wfi.filter = filter\n wfi.calc_psf(fov_pixels=4, oversample=1, nlambda=3)\n\ndef test_aberration_detector_position_setter():\n detector = roman.FieldDependentAberration(4096, 4096)\n\n with pytest.raises(ValueError) as excinfo:\n detector.field_position = (-1, 1)\n assert 'pixel_x' in str(excinfo.value), 'Failed to raise exception for small out-of-bounds ' \\\n 'x pixel position'\n with pytest.raises(ValueError) as excinfo:\n detector.field_position = (4096+1, 1)\n assert 'pixel_x' in str(excinfo.value), 'Failed to raise exception for large out-of-bounds ' \\\n 'x pixel position'\n with pytest.raises(ValueError) as excinfo:\n detector.field_position = (1, -1)\n assert 'pixel_y' in str(excinfo.value), 'Failed to raise exception for small out-of-bounds ' \\\n 'y pixel position'\n with pytest.raises(ValueError) as excinfo:\n detector.field_position = (1, 4096+1)\n assert 'pixel_y' in str(excinfo.value), 'Failed to raise exception for large out-of-bounds ' \\\n 'y pixel position'\n\n valid_pos = (1.0, 1.0)\n detector.field_position = valid_pos\n assert detector._field_position == valid_pos, 'Setting field position through setter did not ' \\\n 'update private `_field_position` value'\n\ndef test_WFI_fwhm():\n \"\"\"\n Test that computed PSFs are physically realistic, at least relatively.\n Loose test...\n \"\"\"\n wfi = roman.WFI()\n\n wfi.pupilopd = None\n wfi.options['jitter'] = None\n\n wfi.filter = 'F062'\n fwhm_f062 = measure_fwhm(wfi.calc_psf(oversample= 6))\n\n wfi.filter = 'F184'\n fwhm_f184 = measure_fwhm(wfi.calc_psf(oversample= 6))\n\n assert (4.0 > fwhm_f184/fwhm_f062 > 2.0)\n\ndef test_WFI_pupil_controller():\n wfi = roman.WFI()\n\n for detector in wfi.detector_list:\n wfi.detector = detector\n\n assert os.path.isfile(pupil_path(wfi)), f\"Pupil file missing: {pupil_path(wfi)}\"\n\n # Test detector change was successful\n assert wfi.detector == detector, \"WFI detector was not set correctly\"\n assert wfi.pupil == pupil_path(wfi), \"pupil path was not set correctly\"\n\n # Test pupil mask lock/unlock\n for mask in wfi.pupil_mask_list:\n # test lock\n wfi.lock_pupil_mask(mask)\n\n assert wfi.pupil == pupil_path(wfi, mask), \"Pupil path was not set correctly\"\n\n # introduce differing filter to modify\n wfi.filter = \"PRISM\" if mask != \"PRISM\" else \"F062\"\n\n assert wfi._pupil_controller._pupil_mask == wfi.pupil_mask, \"Pupil mask was not set correctly\"\n\n # test unlock\n 
wfi.unlock_pupil_mask()\n\n assert wfi.pupil == pupil_path(wfi), f\"Pupil mask unlock failed\"\n\n assert wfi._pupil_controller._auto_pupil, \"Pupil is locked and should not be\"\n assert wfi._pupil_controller._auto_pupil_mask, \"Pupil mask is locked and should not be\"\n\n # Test pupil lock/unlock\n with pytest.raises(FileNotFoundError) as err:\n assert wfi.lock_pupil(\"file_that_does_not_exist.fits\"), \"FileNotFoundError was not raised\"\n\n this_file = __file__\n wfi.lock_pupil(this_file)\n assert wfi.pupil == this_file, \"Pupil did not lock to proper file.\"\n\n wfi.unlock_pupil()\n assert wfi.pupil == pupil_path(wfi), f\"Pupil unlock failed.\"\n\n assert wfi._pupil_controller._auto_pupil, \"Pupil is locked and should not be\"\n assert wfi._pupil_controller._auto_pupil_mask, \"Pupil mask is locked and should not be\"\n\n # Test effect of changing the filter on pupil path\n for filter in wfi.filter_list:\n wfi.filter = filter\n\n assert wfi.pupil == pupil_path(wfi), f\"Pupil was not set to correct value for filter {filter}\"\n\n # Test persistence of pupil and pupil mask locks through a PSF calculation\n wfi2 = roman.WFI()\n wfi2.detector = detector\n valid_pos = (4000, 1000)\n wfi2.detector_position = valid_pos\n\n wfi2.filter = \"F129\"\n wfi2.lock_pupil_mask(\"GRISM\")\n wfi2.filter = \"F129\"\n assert wfi2.pupil == pupil_path(wfi2, \"GRISM\"), \"Pupil path was not set correctly\"\n wfi2.calc_psf(monochromatic=1.3e-6, fov_pixels=4)\n\n assert wfi.pupil_mask == \"GRISM\", \"Pupil mask changed during PSF calculation\"\n assert wfi2.pupil == pupil_path(wfi2, \"GRISM\"), \"Pupil path changed during PSF calculation\"\n\ndef test_WFI_detector_position_setter():\n wfi = roman.WFI()\n wfi.detector = 'SCA01'\n valid_pos = (4000, 1000)\n wfi.detector_position = valid_pos\n assert wfi._detectors[wfi._detector].field_position == valid_pos, (\n \"Setting field position through Instrument.detector_position did not update field_position \"\n \"for the detector's aberration optic\"\n )\n assert wfi.detector_position == valid_pos, \"`detector_position` getter doesn't reflect \" \\\n \"assignment to setter\"\n\ndef test_WFI_includes_aberrations():\n wfi = roman.WFI()\n wfi.detector = 'SCA01'\n osys = wfi.get_optical_system()\n assert isinstance(osys[2], roman.FieldDependentAberration), (\n \"Third plane of Roman WFI optical system should be the \"\n \"field dependent aberration virtual optic\"\n )\n\ndef test_swapping_modes(wfi=None):\n\n if wfi is None:\n wfi = roman.WFI()\n\n # change detector string to match file format (e.g., \"SCA01\" -> \"SCA_1\")\n detector_substr = lambda det: f\"{det[:3]}_{str(int((det[3:])))}\"\n\n # dynamically generate current pupil path for a given WFI instance\n pupil_path = (\n lambda self, mask=None: os.path.join(\n self._pupil_controller._pupil_basepath,\n self._pupil_controller.pupil_file_formatters[self._pupil_controller._get_filter_mask(self.filter) if mask is None else mask]\n ).format(detector_substr(self.detector))\n )\n\n tests = [\n # [filter, mode, pupil_file]\n ['F146', 'imaging', pupil_path],\n ['F213', 'imaging', pupil_path],\n [PRISM_FILTERS[0], 'prism', pupil_path],\n [GRISM_FILTERS[0], 'grism', pupil_path],\n ]\n\n for test_filter, test_mode, test_pupil in tests:\n wfi.filter = test_filter\n\n fail_str = (f\"failed on {test_filter}, {test_mode}, \"\n f\"{test_pupil(wfi).split('/')[-1]}\")\n\n assert wfi.filter == test_filter, fail_str\n assert wfi.mode == test_mode, fail_str\n assert wfi._current_aberration_file == wfi._aberration_files[test_mode], 
fail_str\n assert wfi.pupil == test_pupil(wfi), fail_str\n\ndef test_custom_aberrations():\n\n wfi = roman.WFI()\n\n # Use grism aberration_file for testing\n test_aberration_file = wfi._aberration_files['grism']\n\n # Test override\n # -------------\n wfi.lock_aberrations(test_aberration_file)\n\n for filter in wfi.filter_list:\n wfi.filter = filter\n assert wfi._current_aberration_file == test_aberration_file, \"Filter change caused override to fail\"\n\n # Test Release Override\n # ---------------------\n wfi.unlock_aberrations()\n assert wfi._aberration_files['custom'] is None, \"Custom aberration file not deleted on override release.\"\n test_swapping_modes(wfi)\n\ndef test_WFI_limits_interpolation_range():\n wfi = roman.WFI()\n det = wfi._detectors['SCA01']\n det.get_aberration_terms(1.29e-6)\n det.field_position = (0, 0)\n det.get_aberration_terms(1.29e-6)\n\n with pytest.raises(ValueError) as excinfo:\n det.field_position = (500000, 0)\n assert 'Requested pixel_x position' in str(excinfo.value), (\n \"FieldDependentAberration did not error on out-of-bounds field point\"\n )\n\n with pytest.raises(ValueError) as excinfo:\n det.field_position = (-1, 0)\n assert 'Requested pixel_x position' in str(excinfo.value), (\n \"FieldDependentAberration did not error on out-of-bounds field point\"\n )\n\n with pytest.raises(ValueError) as excinfo:\n det.field_position = (0, 500000)\n assert 'Requested pixel_y position' in str(excinfo.value), (\n \"FieldDependentAberration did not error on out-of-bounds field point\"\n )\n\n with pytest.raises(ValueError) as excinfo:\n det.field_position = (0, -1)\n assert 'Requested pixel_y position' in str(excinfo.value), (\n \"FieldDependentAberration did not error on out-of-bounds field point\"\n )\n\n det.field_position = (2048, 2048)\n\n # Get min and max valid wavelengths from aberration file\n zern = Table.read(wfi._aberration_files[wfi.mode], format='ascii.csv')\n min_wv = zern['wavelength'][0] * 1e-6 # convert from micron to meter\n max_wv = zern['wavelength'][-1] * 1e-6\n\n # Test that get_aberration_terms() uses an approximated wavelength when\n # called with an out-of-bounds wavelength.\n too_lo_wv = min_wv * .9; too_hi_wv = max_wv / .9\n valid_wv = np.mean([min_wv, max_wv])\n\n assert allclose(det.get_aberration_terms(min_wv),\n det.get_aberration_terms(too_lo_wv)), (\n \"Aberration below wavelength range did not return closest value.\"\n )\n\n assert allclose(det.get_aberration_terms(max_wv),\n det.get_aberration_terms(too_hi_wv)), (\n \"Aberration above wavelength range did not return closest value.\"\n )\n\n # Test border pixels outside the ref data. 
In Cycle 9, (0, 37) is the first\n # pixel, so we check if (0, 0) is approximated to it as the nearest point.\n det.field_position = (0, 0)\n coefficients_outlier = det.get_aberration_terms(valid_wv)\n\n det.field_position = (0, 37)\n coefficients_data = det.get_aberration_terms(valid_wv)\n\n assert np.allclose(coefficients_outlier, coefficients_data), \"nearest point extrapolation \" \\\n \"failed for outlier field point\"\n\ndef test_CGI_detector_position():\n \"\"\" Test existence of the CGI detector position etc, and that you can't set it.\"\"\"\n cgi = roman.CGI()\n\n valid_pos = (512,512)\n assert cgi.detector_position == valid_pos, \"CGI detector position isn't as expected\"\n\n with pytest.raises(RuntimeError) as excinfo:\n cgi.detector_position = valid_pos\n assert 'not adjustable' in str(excinfo.value), (\"Failed to raise exception for\"\\\n \"trying to change CGI detector position.\")\n\ndef test_CGI_psf(display=False):\n \"\"\"\n Just test that instantiating CGI works and can compute a PSF without raising\n any exceptions\n \"\"\"\n char_spc = roman.CGI()\n char_spc.mode = 'CHARSPC_F660'\n\n #print('Reading instrument data from {:s}'.format(charspc._WebbPSF_basepath)\n #print('Filter list: {:}'.format(charspc.filter_list))\n\n monopsf = char_spc.calc_psf(nlambda=1, display=False)\n if display:\n roman.poppy.display_psf(monopsf)\n"
] | [
[
"numpy.mean",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
changliuroger/scikit-learn | [
"0da7c3c8d3ba62e742d63f3bed26947a72e9803d"
] | [
"sklearn/metrics/pairwise.py"
] | [
"# -*- coding: utf-8 -*-\n\n# Authors: Alexandre Gramfort <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Robert Layton <[email protected]>\n# Andreas Mueller <[email protected]>\n# Philippe Gervais <[email protected]>\n# Lars Buitinck\n# Joel Nothman <[email protected]>\n# License: BSD 3 clause\n\nimport itertools\nfrom functools import partial\nimport warnings\n\nimport numpy as np\nfrom scipy.spatial import distance\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse import issparse\nfrom joblib import Parallel, effective_n_jobs\n\nfrom ..utils.validation import _num_samples\nfrom ..utils.validation import check_non_negative\nfrom ..utils import check_array\nfrom ..utils import gen_even_slices\nfrom ..utils import gen_batches, get_chunk_n_rows\nfrom ..utils import is_scalar_nan\nfrom ..utils.extmath import row_norms, safe_sparse_dot\nfrom ..preprocessing import normalize\nfrom ..utils._mask import _get_mask\nfrom ..utils.fixes import delayed\nfrom ..utils.fixes import sp_version, parse_version\n\nfrom ._pairwise_fast import _chi2_kernel_fast, _sparse_manhattan\nfrom ..exceptions import DataConversionWarning\n\n\n# Utility Functions\ndef _return_float_dtype(X, Y):\n \"\"\"\n 1. If dtype of X and Y is float32, then dtype float32 is returned.\n 2. Else dtype float is returned.\n \"\"\"\n if not issparse(X) and not isinstance(X, np.ndarray):\n X = np.asarray(X)\n\n if Y is None:\n Y_dtype = X.dtype\n elif not issparse(Y) and not isinstance(Y, np.ndarray):\n Y = np.asarray(Y)\n Y_dtype = Y.dtype\n else:\n Y_dtype = Y.dtype\n\n if X.dtype == Y_dtype == np.float32:\n dtype = np.float32\n else:\n dtype = float\n\n return X, Y, dtype\n\n\ndef check_pairwise_arrays(\n X,\n Y,\n *,\n precomputed=False,\n dtype=None,\n accept_sparse=\"csr\",\n force_all_finite=True,\n copy=False,\n):\n \"\"\"Set X and Y appropriately and checks inputs.\n\n If Y is None, it is set as a pointer to X (i.e. not a copy).\n If Y is given, this does not happen.\n All distance metrics should use this function first to assert that the\n given parameters are correct and safe to use.\n\n Specifically, this function first ensures that both X and Y are arrays,\n then checks that they are at least two dimensional while ensuring that\n their elements are floats (or dtype if provided). Finally, the function\n checks that the size of the second dimension of the two arrays is equal, or\n the equivalent check for a precomputed distance matrix.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples_X, n_features)\n\n Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)\n\n precomputed : bool, default=False\n True if X is to be treated as precomputed distances to the samples in\n Y.\n\n dtype : str, type, list of type, default=None\n Data type required for X and Y. If None, the dtype will be an\n appropriate float type selected by _return_float_dtype.\n\n .. versionadded:: 0.18\n\n accept_sparse : str, bool or list/tuple of str, default='csr'\n String[s] representing allowed sparse matrix formats, such as 'csc',\n 'csr', etc. If the input is sparse but not in the allowed format,\n it will be converted to the first listed format. True allows the input\n to be any format. False means that a sparse matrix input will\n raise an error.\n\n force_all_finite : bool or 'allow-nan', default=True\n Whether to raise an error on np.inf, np.nan, pd.NA in array. 
The\n possibilities are:\n\n - True: Force all values of array to be finite.\n - False: accepts np.inf, np.nan, pd.NA in array.\n - 'allow-nan': accepts only np.nan and pd.NA values in array. Values\n cannot be infinite.\n\n .. versionadded:: 0.22\n ``force_all_finite`` accepts the string ``'allow-nan'``.\n\n .. versionchanged:: 0.23\n Accepts `pd.NA` and converts it into `np.nan`.\n\n copy : bool, default=False\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n .. versionadded:: 0.22\n\n Returns\n -------\n safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)\n An array equal to X, guaranteed to be a numpy array.\n\n safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)\n An array equal to Y if Y was not None, guaranteed to be a numpy array.\n If Y was None, safe_Y will be a pointer to X.\n\n \"\"\"\n X, Y, dtype_float = _return_float_dtype(X, Y)\n\n estimator = \"check_pairwise_arrays\"\n if dtype is None:\n dtype = dtype_float\n\n if Y is X or Y is None:\n X = Y = check_array(\n X,\n accept_sparse=accept_sparse,\n dtype=dtype,\n copy=copy,\n force_all_finite=force_all_finite,\n estimator=estimator,\n )\n else:\n X = check_array(\n X,\n accept_sparse=accept_sparse,\n dtype=dtype,\n copy=copy,\n force_all_finite=force_all_finite,\n estimator=estimator,\n )\n Y = check_array(\n Y,\n accept_sparse=accept_sparse,\n dtype=dtype,\n copy=copy,\n force_all_finite=force_all_finite,\n estimator=estimator,\n )\n\n if precomputed:\n if X.shape[1] != Y.shape[0]:\n raise ValueError(\n \"Precomputed metric requires shape \"\n \"(n_queries, n_indexed). Got (%d, %d) \"\n \"for %d indexed.\" % (X.shape[0], X.shape[1], Y.shape[0])\n )\n elif X.shape[1] != Y.shape[1]:\n raise ValueError(\n \"Incompatible dimension for X and Y matrices: \"\n \"X.shape[1] == %d while Y.shape[1] == %d\" % (X.shape[1], Y.shape[1])\n )\n\n return X, Y\n\n\ndef check_paired_arrays(X, Y):\n \"\"\"Set X and Y appropriately and checks inputs for paired distances.\n\n All paired distance metrics should use this function first to assert that\n the given parameters are correct and safe to use.\n\n Specifically, this function first ensures that both X and Y are arrays,\n then checks that they are at least two dimensional while ensuring that\n their elements are floats. Finally, the function checks that the size\n of the dimensions of the two arrays are equal.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples_X, n_features)\n\n Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)\n\n Returns\n -------\n safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)\n An array equal to X, guaranteed to be a numpy array.\n\n safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)\n An array equal to Y if Y was not None, guaranteed to be a numpy array.\n If Y was None, safe_Y will be a pointer to X.\n\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n if X.shape != Y.shape:\n raise ValueError(\n \"X and Y should be of same shape. 
They were respectively %r and %r long.\"\n % (X.shape, Y.shape)\n )\n return X, Y\n\n\n# Pairwise distances\ndef euclidean_distances(\n X, Y=None, *, Y_norm_squared=None, squared=False, X_norm_squared=None\n):\n \"\"\"\n Compute the distance matrix between each pair from a vector array X and Y.\n\n For efficiency reasons, the euclidean distance between a pair of row\n vector x and y is computed as::\n\n dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))\n\n This formulation has two advantages over other ways of computing distances.\n First, it is computationally efficient when dealing with sparse data.\n Second, if one argument varies but the other remains unchanged, then\n `dot(x, x)` and/or `dot(y, y)` can be pre-computed.\n\n However, this is not the most precise way of doing this computation,\n because this equation potentially suffers from \"catastrophic cancellation\".\n Also, the distance matrix returned by this function may not be exactly\n symmetric as required by, e.g., ``scipy.spatial.distance`` functions.\n\n Read more in the :ref:`User Guide <metrics>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples_X, n_features)\n An array where each row is a sample and each column is a feature.\n\n Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \\\n default=None\n An array where each row is a sample and each column is a feature.\n If `None`, method uses `Y=X`.\n\n Y_norm_squared : array-like of shape (n_samples_Y,) or (n_samples_Y, 1) \\\n or (1, n_samples_Y), default=None\n Pre-computed dot-products of vectors in Y (e.g.,\n ``(Y**2).sum(axis=1)``)\n May be ignored in some cases, see the note below.\n\n squared : bool, default=False\n Return squared Euclidean distances.\n\n X_norm_squared : array-like of shape (n_samples_X,) or (n_samples_X, 1) \\\n or (1, n_samples_X), default=None\n Pre-computed dot-products of vectors in X (e.g.,\n ``(X**2).sum(axis=1)``)\n May be ignored in some cases, see the note below.\n\n Returns\n -------\n distances : ndarray of shape (n_samples_X, n_samples_Y)\n Returns the distances between the row vectors of `X`\n and the row vectors of `Y`.\n\n See Also\n --------\n paired_distances : Distances betweens pairs of elements of X and Y.\n\n Notes\n -----\n To achieve a better accuracy, `X_norm_squared` and `Y_norm_squared` may be\n unused if they are passed as `np.float32`.\n\n Examples\n --------\n >>> from sklearn.metrics.pairwise import euclidean_distances\n >>> X = [[0, 1], [1, 1]]\n >>> # distance between rows of X\n >>> euclidean_distances(X, X)\n array([[0., 1.],\n [1., 0.]])\n >>> # get distance to origin\n >>> euclidean_distances(X, [[0, 0]])\n array([[1. 
],\n [1.41421356]])\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n\n if X_norm_squared is not None:\n X_norm_squared = check_array(X_norm_squared, ensure_2d=False)\n original_shape = X_norm_squared.shape\n if X_norm_squared.shape == (X.shape[0],):\n X_norm_squared = X_norm_squared.reshape(-1, 1)\n if X_norm_squared.shape == (1, X.shape[0]):\n X_norm_squared = X_norm_squared.T\n if X_norm_squared.shape != (X.shape[0], 1):\n raise ValueError(\n f\"Incompatible dimensions for X of shape {X.shape} and \"\n f\"X_norm_squared of shape {original_shape}.\"\n )\n\n if Y_norm_squared is not None:\n Y_norm_squared = check_array(Y_norm_squared, ensure_2d=False)\n original_shape = Y_norm_squared.shape\n if Y_norm_squared.shape == (Y.shape[0],):\n Y_norm_squared = Y_norm_squared.reshape(1, -1)\n if Y_norm_squared.shape == (Y.shape[0], 1):\n Y_norm_squared = Y_norm_squared.T\n if Y_norm_squared.shape != (1, Y.shape[0]):\n raise ValueError(\n f\"Incompatible dimensions for Y of shape {Y.shape} and \"\n f\"Y_norm_squared of shape {original_shape}.\"\n )\n\n return _euclidean_distances(X, Y, X_norm_squared, Y_norm_squared, squared)\n\n\ndef _euclidean_distances(X, Y, X_norm_squared=None, Y_norm_squared=None, squared=False):\n \"\"\"Computational part of euclidean_distances\n\n Assumes inputs are already checked.\n\n If norms are passed as float32, they are unused. If arrays are passed as\n float32, norms needs to be recomputed on upcast chunks.\n TODO: use a float64 accumulator in row_norms to avoid the latter.\n \"\"\"\n if X_norm_squared is not None:\n if X_norm_squared.dtype == np.float32:\n XX = None\n else:\n XX = X_norm_squared.reshape(-1, 1)\n elif X.dtype == np.float32:\n XX = None\n else:\n XX = row_norms(X, squared=True)[:, np.newaxis]\n\n if Y is X:\n YY = None if XX is None else XX.T\n else:\n if Y_norm_squared is not None:\n if Y_norm_squared.dtype == np.float32:\n YY = None\n else:\n YY = Y_norm_squared.reshape(1, -1)\n elif Y.dtype == np.float32:\n YY = None\n else:\n YY = row_norms(Y, squared=True)[np.newaxis, :]\n\n if X.dtype == np.float32:\n # To minimize precision issues with float32, we compute the distance\n # matrix on chunks of X and Y upcast to float64\n distances = _euclidean_distances_upcast(X, XX, Y, YY)\n else:\n # if dtype is already float64, no need to chunk and upcast\n distances = -2 * safe_sparse_dot(X, Y.T, dense_output=True)\n distances += XX\n distances += YY\n np.maximum(distances, 0, out=distances)\n\n # Ensure that distances between vectors and themselves are set to 0.0.\n # This may not be the case due to floating point rounding errors.\n if X is Y:\n np.fill_diagonal(distances, 0)\n\n return distances if squared else np.sqrt(distances, out=distances)\n\n\ndef nan_euclidean_distances(\n X, Y=None, *, squared=False, missing_values=np.nan, copy=True\n):\n \"\"\"Calculate the euclidean distances in the presence of missing values.\n\n Compute the euclidean distance between each pair of samples in X and Y,\n where Y=X is assumed if Y=None. When calculating the distance between a\n pair of samples, this formulation ignores feature coordinates with a\n missing value in either sample and scales up the weight of the remaining\n coordinates:\n\n dist(x,y) = sqrt(weight * sq. distance from present coordinates)\n where,\n weight = Total # of coordinates / # of present coordinates\n\n For example, the distance between ``[3, na, na, 6]`` and ``[1, na, 4, 5]``\n is:\n\n .. 
math::\n \\\\sqrt{\\\\frac{4}{2}((3-1)^2 + (6-5)^2)}\n\n If all the coordinates are missing or if there are no common present\n coordinates then NaN is returned for that pair.\n\n Read more in the :ref:`User Guide <metrics>`.\n\n .. versionadded:: 0.22\n\n Parameters\n ----------\n X : array-like of shape (n_samples_X, n_features)\n An array where each row is a sample and each column is a feature.\n\n Y : array-like of shape (n_samples_Y, n_features), default=None\n An array where each row is a sample and each column is a feature.\n If `None`, method uses `Y=X`.\n\n squared : bool, default=False\n Return squared Euclidean distances.\n\n missing_values : np.nan or int, default=np.nan\n Representation of missing value.\n\n copy : bool, default=True\n Make and use a deep copy of X and Y (if Y exists).\n\n Returns\n -------\n distances : ndarray of shape (n_samples_X, n_samples_Y)\n Returns the distances between the row vectors of `X`\n and the row vectors of `Y`.\n\n See Also\n --------\n paired_distances : Distances between pairs of elements of X and Y.\n\n References\n ----------\n * John K. Dixon, \"Pattern Recognition with Partly Missing Data\",\n IEEE Transactions on Systems, Man, and Cybernetics, Volume: 9, Issue:\n 10, pp. 617 - 621, Oct. 1979.\n http://ieeexplore.ieee.org/abstract/document/4310090/\n\n Examples\n --------\n >>> from sklearn.metrics.pairwise import nan_euclidean_distances\n >>> nan = float(\"NaN\")\n >>> X = [[0, 1], [1, nan]]\n >>> nan_euclidean_distances(X, X) # distance between rows of X\n array([[0. , 1.41421356],\n [1.41421356, 0. ]])\n\n >>> # get distance to origin\n >>> nan_euclidean_distances(X, [[0, 0]])\n array([[1. ],\n [1.41421356]])\n \"\"\"\n\n force_all_finite = \"allow-nan\" if is_scalar_nan(missing_values) else True\n X, Y = check_pairwise_arrays(\n X, Y, accept_sparse=False, force_all_finite=force_all_finite, copy=copy\n )\n # Get missing mask for X\n missing_X = _get_mask(X, missing_values)\n\n # Get missing mask for Y\n missing_Y = missing_X if Y is X else _get_mask(Y, missing_values)\n\n # set missing values to zero\n X[missing_X] = 0\n Y[missing_Y] = 0\n\n distances = euclidean_distances(X, Y, squared=True)\n\n # Adjust distances for missing values\n XX = X * X\n YY = Y * Y\n distances -= np.dot(XX, missing_Y.T)\n distances -= np.dot(missing_X, YY.T)\n\n np.clip(distances, 0, None, out=distances)\n\n if X is Y:\n # Ensure that distances between vectors and themselves are set to 0.0.\n # This may not be the case due to floating point rounding errors.\n np.fill_diagonal(distances, 0.0)\n\n present_X = 1 - missing_X\n present_Y = present_X if Y is X else ~missing_Y\n present_count = np.dot(present_X, present_Y.T)\n distances[present_count == 0] = np.nan\n # avoid divide by zero\n np.maximum(1, present_count, out=present_count)\n distances /= present_count\n distances *= X.shape[1]\n\n if not squared:\n np.sqrt(distances, out=distances)\n\n return distances\n\n\ndef _euclidean_distances_upcast(X, XX=None, Y=None, YY=None, batch_size=None):\n \"\"\"Euclidean distances between X and Y.\n\n Assumes X and Y have float32 dtype.\n Assumes XX and YY have float64 dtype or are None.\n\n X and Y are upcast to float64 by chunks, which size is chosen to limit\n memory increase by approximately 10% (at least 10MiB).\n \"\"\"\n n_samples_X = X.shape[0]\n n_samples_Y = Y.shape[0]\n n_features = X.shape[1]\n\n distances = np.empty((n_samples_X, n_samples_Y), dtype=np.float32)\n\n if batch_size is None:\n x_density = X.nnz / np.prod(X.shape) if issparse(X) else 1\n 
y_density = Y.nnz / np.prod(Y.shape) if issparse(Y) else 1\n\n # Allow 10% more memory than X, Y and the distance matrix take (at\n # least 10MiB)\n maxmem = max(\n (\n (x_density * n_samples_X + y_density * n_samples_Y) * n_features\n + (x_density * n_samples_X * y_density * n_samples_Y)\n )\n / 10,\n 10 * 2 ** 17,\n )\n\n # The increase amount of memory in 8-byte blocks is:\n # - x_density * batch_size * n_features (copy of chunk of X)\n # - y_density * batch_size * n_features (copy of chunk of Y)\n # - batch_size * batch_size (chunk of distance matrix)\n # Hence x² + (xd+yd)kx = M, where x=batch_size, k=n_features, M=maxmem\n # xd=x_density and yd=y_density\n tmp = (x_density + y_density) * n_features\n batch_size = (-tmp + np.sqrt(tmp ** 2 + 4 * maxmem)) / 2\n batch_size = max(int(batch_size), 1)\n\n x_batches = gen_batches(n_samples_X, batch_size)\n\n for i, x_slice in enumerate(x_batches):\n X_chunk = X[x_slice].astype(np.float64)\n if XX is None:\n XX_chunk = row_norms(X_chunk, squared=True)[:, np.newaxis]\n else:\n XX_chunk = XX[x_slice]\n\n y_batches = gen_batches(n_samples_Y, batch_size)\n\n for j, y_slice in enumerate(y_batches):\n if X is Y and j < i:\n # when X is Y the distance matrix is symmetric so we only need\n # to compute half of it.\n d = distances[y_slice, x_slice].T\n\n else:\n Y_chunk = Y[y_slice].astype(np.float64)\n if YY is None:\n YY_chunk = row_norms(Y_chunk, squared=True)[np.newaxis, :]\n else:\n YY_chunk = YY[:, y_slice]\n\n d = -2 * safe_sparse_dot(X_chunk, Y_chunk.T, dense_output=True)\n d += XX_chunk\n d += YY_chunk\n\n distances[x_slice, y_slice] = d.astype(np.float32, copy=False)\n\n return distances\n\n\ndef _argmin_min_reduce(dist, start):\n indices = dist.argmin(axis=1)\n values = dist[np.arange(dist.shape[0]), indices]\n return indices, values\n\n\ndef pairwise_distances_argmin_min(\n X, Y, *, axis=1, metric=\"euclidean\", metric_kwargs=None\n):\n \"\"\"Compute minimum distances between one point and a set of points.\n\n This function computes for each row in X, the index of the row of Y which\n is closest (according to the specified distance). The minimal distances are\n also returned.\n\n This is mostly equivalent to calling:\n\n (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),\n pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))\n\n but uses much less memory, and is faster for large arrays.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples_X, n_features)\n Array containing points.\n\n Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)\n Array containing points.\n\n axis : int, default=1\n Axis along which the argmin and distances are to be computed.\n\n metric : str or callable, default='euclidean'\n Metric to use for distance computation. Any metric from scikit-learn\n or scipy.spatial.distance can be used.\n\n If metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. The callable\n should take two arrays as input and return one value indicating the\n distance between them. 
This works for Scipy's metrics, but is less\n efficient than passing the metric name as a string.\n\n Distance matrices are not supported.\n\n Valid values for metric are:\n\n - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']\n\n - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',\n 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',\n 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',\n 'yule']\n\n See the documentation for scipy.spatial.distance for details on these\n metrics.\n\n metric_kwargs : dict, default=None\n Keyword arguments to pass to specified metric function.\n\n Returns\n -------\n argmin : ndarray\n Y[argmin[i], :] is the row in Y that is closest to X[i, :].\n\n distances : ndarray\n distances[i] is the distance between the i-th row in X and the\n argmin[i]-th row in Y.\n\n See Also\n --------\n sklearn.metrics.pairwise_distances\n sklearn.metrics.pairwise_distances_argmin\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n\n if metric_kwargs is None:\n metric_kwargs = {}\n\n if axis == 0:\n X, Y = Y, X\n\n indices, values = zip(\n *pairwise_distances_chunked(\n X, Y, reduce_func=_argmin_min_reduce, metric=metric, **metric_kwargs\n )\n )\n indices = np.concatenate(indices)\n values = np.concatenate(values)\n\n return indices, values\n\n\ndef pairwise_distances_argmin(X, Y, *, axis=1, metric=\"euclidean\", metric_kwargs=None):\n \"\"\"Compute minimum distances between one point and a set of points.\n\n This function computes for each row in X, the index of the row of Y which\n is closest (according to the specified distance).\n\n This is mostly equivalent to calling:\n\n pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)\n\n but uses much less memory, and is faster for large arrays.\n\n This function works with dense 2D arrays only.\n\n Parameters\n ----------\n X : array-like of shape (n_samples_X, n_features)\n Array containing points.\n\n Y : array-like of shape (n_samples_Y, n_features)\n Arrays containing points.\n\n axis : int, default=1\n Axis along which the argmin and distances are to be computed.\n\n metric : str or callable, default=\"euclidean\"\n Metric to use for distance computation. Any metric from scikit-learn\n or scipy.spatial.distance can be used.\n\n If metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. The callable\n should take two arrays as input and return one value indicating the\n distance between them. 
This works for Scipy's metrics, but is less\n efficient than passing the metric name as a string.\n\n Distance matrices are not supported.\n\n Valid values for metric are:\n\n - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']\n\n - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',\n 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',\n 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',\n 'yule']\n\n See the documentation for scipy.spatial.distance for details on these\n metrics.\n\n metric_kwargs : dict, default=None\n Keyword arguments to pass to specified metric function.\n\n Returns\n -------\n argmin : numpy.ndarray\n Y[argmin[i], :] is the row in Y that is closest to X[i, :].\n\n See Also\n --------\n sklearn.metrics.pairwise_distances\n sklearn.metrics.pairwise_distances_argmin_min\n \"\"\"\n if metric_kwargs is None:\n metric_kwargs = {}\n\n return pairwise_distances_argmin_min(\n X, Y, axis=axis, metric=metric, metric_kwargs=metric_kwargs\n )[0]\n\n\ndef haversine_distances(X, Y=None):\n \"\"\"Compute the Haversine distance between samples in X and Y.\n\n The Haversine (or great circle) distance is the angular distance between\n two points on the surface of a sphere. The first coordinate of each point\n is assumed to be the latitude, the second is the longitude, given\n in radians. The dimension of the data must be 2.\n\n .. math::\n D(x, y) = 2\\\\arcsin[\\\\sqrt{\\\\sin^2((x1 - y1) / 2)\n + \\\\cos(x1)\\\\cos(y1)\\\\sin^2((x2 - y2) / 2)}]\n\n Parameters\n ----------\n X : array-like of shape (n_samples_X, 2)\n\n Y : array-like of shape (n_samples_Y, 2), default=None\n\n Returns\n -------\n distance : ndarray of shape (n_samples_X, n_samples_Y)\n\n Notes\n -----\n As the Earth is nearly spherical, the haversine formula provides a good\n approximation of the distance between two points of the Earth surface, with\n a less than 1% error on average.\n\n Examples\n --------\n We want to calculate the distance between the Ezeiza Airport\n (Buenos Aires, Argentina) and the Charles de Gaulle Airport (Paris,\n France).\n\n >>> from sklearn.metrics.pairwise import haversine_distances\n >>> from math import radians\n >>> bsas = [-34.83333, -58.5166646]\n >>> paris = [49.0083899664, 2.53844117956]\n >>> bsas_in_radians = [radians(_) for _ in bsas]\n >>> paris_in_radians = [radians(_) for _ in paris]\n >>> result = haversine_distances([bsas_in_radians, paris_in_radians])\n >>> result * 6371000/1000 # multiply by Earth radius to get kilometers\n array([[ 0. , 11099.54035582],\n [11099.54035582, 0. 
]])\n \"\"\"\n from ..metrics import DistanceMetric\n\n return DistanceMetric.get_metric(\"haversine\").pairwise(X, Y)\n\n\ndef manhattan_distances(X, Y=None, *, sum_over_features=True):\n \"\"\"Compute the L1 distances between the vectors in X and Y.\n\n With sum_over_features equal to False it returns the componentwise\n distances.\n\n Read more in the :ref:`User Guide <metrics>`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples_X, n_features)\n An array where each row is a sample and each column is a feature.\n\n Y : array-like of shape (n_samples_Y, n_features), default=None\n An array where each row is a sample and each column is a feature.\n If `None`, method uses `Y=X`.\n\n sum_over_features : bool, default=True\n If True the function returns the pairwise distance matrix\n else it returns the componentwise L1 pairwise-distances.\n Not supported for sparse matrix inputs.\n\n Returns\n -------\n D : ndarray of shape (n_samples_X * n_samples_Y, n_features) or \\\n (n_samples_X, n_samples_Y)\n If sum_over_features is False shape is\n (n_samples_X * n_samples_Y, n_features) and D contains the\n componentwise L1 pairwise-distances (ie. absolute difference),\n else shape is (n_samples_X, n_samples_Y) and D contains\n the pairwise L1 distances.\n\n Notes\n --------\n When X and/or Y are CSR sparse matrices and they are not already\n in canonical format, this function modifies them in-place to\n make them canonical.\n\n Examples\n --------\n >>> from sklearn.metrics.pairwise import manhattan_distances\n >>> manhattan_distances([[3]], [[3]])\n array([[0.]])\n >>> manhattan_distances([[3]], [[2]])\n array([[1.]])\n >>> manhattan_distances([[2]], [[3]])\n array([[1.]])\n >>> manhattan_distances([[1, 2], [3, 4]],\\\n [[1, 2], [0, 3]])\n array([[0., 2.],\n [4., 4.]])\n >>> import numpy as np\n >>> X = np.ones((1, 2))\n >>> y = np.full((2, 2), 2.)\n >>> manhattan_distances(X, y, sum_over_features=False)\n array([[1., 1.],\n [1., 1.]])\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n\n if issparse(X) or issparse(Y):\n if not sum_over_features:\n raise TypeError(\n \"sum_over_features=%r not supported for sparse matrices\"\n % sum_over_features\n )\n\n X = csr_matrix(X, copy=False)\n Y = csr_matrix(Y, copy=False)\n X.sum_duplicates() # this also sorts indices in-place\n Y.sum_duplicates()\n D = np.zeros((X.shape[0], Y.shape[0]))\n _sparse_manhattan(X.data, X.indices, X.indptr, Y.data, Y.indices, Y.indptr, D)\n return D\n\n if sum_over_features:\n return distance.cdist(X, Y, \"cityblock\")\n\n D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]\n D = np.abs(D, D)\n return D.reshape((-1, X.shape[1]))\n\n\ndef cosine_distances(X, Y=None):\n \"\"\"Compute cosine distance between samples in X and Y.\n\n Cosine distance is defined as 1.0 minus the cosine similarity.\n\n Read more in the :ref:`User Guide <metrics>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples_X, n_features)\n Matrix `X`.\n\n Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \\\n default=None\n Matrix `Y`.\n\n Returns\n -------\n distance matrix : ndarray of shape (n_samples_X, n_samples_Y)\n\n See Also\n --------\n cosine_similarity\n scipy.spatial.distance.cosine : Dense matrices only.\n \"\"\"\n # 1.0 - cosine_similarity(X, Y) without copy\n S = cosine_similarity(X, Y)\n S *= -1\n S += 1\n np.clip(S, 0, 2, out=S)\n if X is Y or Y is None:\n # Ensure that distances between vectors and themselves are set to 0.0.\n # This may not be the case due to floating point rounding 
errors.\n S[np.diag_indices_from(S)] = 0.0\n return S\n\n\n# Paired distances\ndef paired_euclidean_distances(X, Y):\n \"\"\"\n Computes the paired euclidean distances between X and Y.\n\n Read more in the :ref:`User Guide <metrics>`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n\n Y : array-like of shape (n_samples, n_features)\n\n Returns\n -------\n distances : ndarray of shape (n_samples,)\n \"\"\"\n X, Y = check_paired_arrays(X, Y)\n return row_norms(X - Y)\n\n\ndef paired_manhattan_distances(X, Y):\n \"\"\"Compute the L1 distances between the vectors in X and Y.\n\n Read more in the :ref:`User Guide <metrics>`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n\n Y : array-like of shape (n_samples, n_features)\n\n Returns\n -------\n distances : ndarray of shape (n_samples,)\n \"\"\"\n X, Y = check_paired_arrays(X, Y)\n diff = X - Y\n if issparse(diff):\n diff.data = np.abs(diff.data)\n return np.squeeze(np.array(diff.sum(axis=1)))\n else:\n return np.abs(diff).sum(axis=-1)\n\n\ndef paired_cosine_distances(X, Y):\n \"\"\"\n Compute the paired cosine distances between X and Y.\n\n Read more in the :ref:`User Guide <metrics>`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n An array where each row is a sample and each column is a feature.\n\n Y : array-like of shape (n_samples, n_features)\n An array where each row is a sample and each column is a feature.\n\n Returns\n -------\n distances : ndarray of shape (n_samples,)\n Returns the distances between the row vectors of `X`\n and the row vectors of `Y`, where `distances[i]` is the\n distance between `X[i]` and `Y[i]`.\n\n Notes\n -----\n The cosine distance is equivalent to the half the squared\n euclidean distance if each sample is normalized to unit norm.\n \"\"\"\n X, Y = check_paired_arrays(X, Y)\n return 0.5 * row_norms(normalize(X) - normalize(Y), squared=True)\n\n\nPAIRED_DISTANCES = {\n \"cosine\": paired_cosine_distances,\n \"euclidean\": paired_euclidean_distances,\n \"l2\": paired_euclidean_distances,\n \"l1\": paired_manhattan_distances,\n \"manhattan\": paired_manhattan_distances,\n \"cityblock\": paired_manhattan_distances,\n}\n\n\ndef paired_distances(X, Y, *, metric=\"euclidean\", **kwds):\n \"\"\"\n Computes the paired distances between X and Y.\n\n Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...\n\n Read more in the :ref:`User Guide <metrics>`.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Array 1 for distance computation.\n\n Y : ndarray of shape (n_samples, n_features)\n Array 2 for distance computation.\n\n metric : str or callable, default=\"euclidean\"\n The metric to use when calculating distance between instances in a\n feature array. If metric is a string, it must be one of the options\n specified in PAIRED_DISTANCES, including \"euclidean\",\n \"manhattan\", or \"cosine\".\n Alternatively, if metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. 
The callable\n should take two arrays from X as input and return a value indicating\n the distance between them.\n\n Returns\n -------\n distances : ndarray of shape (n_samples,)\n\n See Also\n --------\n pairwise_distances : Computes the distance between every pair of samples.\n\n Examples\n --------\n >>> from sklearn.metrics.pairwise import paired_distances\n >>> X = [[0, 1], [1, 1]]\n >>> Y = [[0, 1], [2, 1]]\n >>> paired_distances(X, Y)\n array([0., 1.])\n \"\"\"\n\n if metric in PAIRED_DISTANCES:\n func = PAIRED_DISTANCES[metric]\n return func(X, Y)\n elif callable(metric):\n # Check the matrix first (it is usually done by the metric)\n X, Y = check_paired_arrays(X, Y)\n distances = np.zeros(len(X))\n for i in range(len(X)):\n distances[i] = metric(X[i], Y[i])\n return distances\n else:\n raise ValueError(\"Unknown distance %s\" % metric)\n\n\n# Kernels\ndef linear_kernel(X, Y=None, dense_output=True):\n \"\"\"\n Compute the linear kernel between X and Y.\n\n Read more in the :ref:`User Guide <linear_kernel>`.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples_X, n_features)\n A feature array.\n\n Y : ndarray of shape (n_samples_Y, n_features), default=None\n An optional second feature array. If `None`, uses `Y=X`.\n\n dense_output : bool, default=True\n Whether to return dense output even when the input is sparse. If\n ``False``, the output is sparse if both input arrays are sparse.\n\n .. versionadded:: 0.20\n\n Returns\n -------\n Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)\n The Gram matrix of the linear kernel, i.e. `X @ Y.T`.\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n return safe_sparse_dot(X, Y.T, dense_output=dense_output)\n\n\ndef polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):\n \"\"\"\n Compute the polynomial kernel between X and Y::\n\n K(X, Y) = (gamma <X, Y> + coef0)^degree\n\n Read more in the :ref:`User Guide <polynomial_kernel>`.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples_X, n_features)\n\n Y : ndarray of shape (n_samples_Y, n_features), default=None\n\n degree : int, default=3\n\n gamma : float, default=None\n If None, defaults to 1.0 / n_features.\n\n coef0 : float, default=1\n\n Returns\n -------\n Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n if gamma is None:\n gamma = 1.0 / X.shape[1]\n\n K = safe_sparse_dot(X, Y.T, dense_output=True)\n K *= gamma\n K += coef0\n K **= degree\n return K\n\n\ndef sigmoid_kernel(X, Y=None, gamma=None, coef0=1):\n \"\"\"\n Compute the sigmoid kernel between X and Y::\n\n K(X, Y) = tanh(gamma <X, Y> + coef0)\n\n Read more in the :ref:`User Guide <sigmoid_kernel>`.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples_X, n_features)\n\n Y : ndarray of shape (n_samples_Y, n_features), default=None\n If `None`, uses `Y=X`.\n\n gamma : float, default=None\n If None, defaults to 1.0 / n_features.\n\n coef0 : float, default=1\n\n Returns\n -------\n Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n if gamma is None:\n gamma = 1.0 / X.shape[1]\n\n K = safe_sparse_dot(X, Y.T, dense_output=True)\n K *= gamma\n K += coef0\n np.tanh(K, K) # compute tanh in-place\n return K\n\n\ndef rbf_kernel(X, Y=None, gamma=None):\n \"\"\"\n Compute the rbf (gaussian) kernel between X and Y::\n\n K(x, y) = exp(-gamma ||x-y||^2)\n\n for each pair of rows x in X and y in Y.\n\n Read more in the :ref:`User Guide <rbf_kernel>`.\n\n Parameters\n ----------\n X : ndarray of shape 
(n_samples_X, n_features)\n\n Y : ndarray of shape (n_samples_Y, n_features), default=None\n If `None`, uses `Y=X`.\n\n gamma : float, default=None\n If None, defaults to 1.0 / n_features.\n\n Returns\n -------\n kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n if gamma is None:\n gamma = 1.0 / X.shape[1]\n\n K = euclidean_distances(X, Y, squared=True)\n K *= -gamma\n np.exp(K, K) # exponentiate K in-place\n return K\n\n\ndef laplacian_kernel(X, Y=None, gamma=None):\n \"\"\"Compute the laplacian kernel between X and Y.\n\n The laplacian kernel is defined as::\n\n K(x, y) = exp(-gamma ||x-y||_1)\n\n for each pair of rows x in X and y in Y.\n Read more in the :ref:`User Guide <laplacian_kernel>`.\n\n .. versionadded:: 0.17\n\n Parameters\n ----------\n X : ndarray of shape (n_samples_X, n_features)\n\n Y : ndarray of shape (n_samples_Y, n_features), default=None\n If `None`, uses `Y=X`.\n\n gamma : float, default=None\n If None, defaults to 1.0 / n_features.\n\n Returns\n -------\n kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n if gamma is None:\n gamma = 1.0 / X.shape[1]\n\n K = -gamma * manhattan_distances(X, Y)\n np.exp(K, K) # exponentiate K in-place\n return K\n\n\ndef cosine_similarity(X, Y=None, dense_output=True):\n \"\"\"Compute cosine similarity between samples in X and Y.\n\n Cosine similarity, or the cosine kernel, computes similarity as the\n normalized dot product of X and Y:\n\n K(X, Y) = <X, Y> / (||X||*||Y||)\n\n On L2-normalized data, this function is equivalent to linear_kernel.\n\n Read more in the :ref:`User Guide <cosine_similarity>`.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples_X, n_features)\n Input data.\n\n Y : {ndarray, sparse matrix} of shape (n_samples_Y, n_features), \\\n default=None\n Input data. If ``None``, the output will be the pairwise\n similarities between all samples in ``X``.\n\n dense_output : bool, default=True\n Whether to return dense output even when the input is sparse. If\n ``False``, the output is sparse if both input arrays are sparse.\n\n .. versionadded:: 0.17\n parameter ``dense_output`` for dense output.\n\n Returns\n -------\n kernel matrix : ndarray of shape (n_samples_X, n_samples_Y)\n \"\"\"\n # to avoid recursive import\n\n X, Y = check_pairwise_arrays(X, Y)\n\n X_normalized = normalize(X, copy=True)\n if X is Y:\n Y_normalized = X_normalized\n else:\n Y_normalized = normalize(Y, copy=True)\n\n K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)\n\n return K\n\n\ndef additive_chi2_kernel(X, Y=None):\n \"\"\"Computes the additive chi-squared kernel between observations in X and\n Y.\n\n The chi-squared kernel is computed between each pair of rows in X and Y. X\n and Y have to be non-negative. 
This kernel is most commonly applied to\n histograms.\n\n The chi-squared kernel is given by::\n\n k(x, y) = -Sum [(x - y)^2 / (x + y)]\n\n It can be interpreted as a weighted difference per entry.\n\n Read more in the :ref:`User Guide <chi2_kernel>`.\n\n Notes\n -----\n As the negative of a distance, this kernel is only conditionally positive\n definite.\n\n\n Parameters\n ----------\n X : array-like of shape (n_samples_X, n_features)\n\n Y : ndarray of shape (n_samples_Y, n_features), default=None\n If `None`, uses `Y=X`.\n\n Returns\n -------\n kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)\n\n See Also\n --------\n chi2_kernel : The exponentiated version of the kernel, which is usually\n preferable.\n sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation\n to this kernel.\n\n References\n ----------\n * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.\n Local features and kernels for classification of texture and object\n categories: A comprehensive study\n International Journal of Computer Vision 2007\n https://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf\n \"\"\"\n if issparse(X) or issparse(Y):\n raise ValueError(\"additive_chi2 does not support sparse matrices.\")\n X, Y = check_pairwise_arrays(X, Y)\n if (X < 0).any():\n raise ValueError(\"X contains negative values.\")\n if Y is not X and (Y < 0).any():\n raise ValueError(\"Y contains negative values.\")\n\n result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)\n _chi2_kernel_fast(X, Y, result)\n return result\n\n\ndef chi2_kernel(X, Y=None, gamma=1.0):\n \"\"\"Computes the exponential chi-squared kernel X and Y.\n\n The chi-squared kernel is computed between each pair of rows in X and Y. X\n and Y have to be non-negative. This kernel is most commonly applied to\n histograms.\n\n The chi-squared kernel is given by::\n\n k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])\n\n It can be interpreted as a weighted difference per entry.\n\n Read more in the :ref:`User Guide <chi2_kernel>`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples_X, n_features)\n\n Y : ndarray of shape (n_samples_Y, n_features), default=None\n\n gamma : float, default=1.\n Scaling parameter of the chi2 kernel.\n\n Returns\n -------\n kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)\n\n See Also\n --------\n additive_chi2_kernel : The additive version of this kernel.\n sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation\n to the additive version of this kernel.\n\n References\n ----------\n * Zhang, J. and Marszalek, M. and Lazebnik, S. 
and Schmid, C.\n Local features and kernels for classification of texture and object\n categories: A comprehensive study\n International Journal of Computer Vision 2007\n https://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf\n \"\"\"\n K = additive_chi2_kernel(X, Y)\n K *= gamma\n return np.exp(K, K)\n\n\n# Helper functions - distance\nPAIRWISE_DISTANCE_FUNCTIONS = {\n # If updating this dictionary, update the doc in both distance_metrics()\n # and also in pairwise_distances()!\n \"cityblock\": manhattan_distances,\n \"cosine\": cosine_distances,\n \"euclidean\": euclidean_distances,\n \"haversine\": haversine_distances,\n \"l2\": euclidean_distances,\n \"l1\": manhattan_distances,\n \"manhattan\": manhattan_distances,\n \"precomputed\": None, # HACK: precomputed is always allowed, never called\n \"nan_euclidean\": nan_euclidean_distances,\n}\n\n\ndef distance_metrics():\n \"\"\"Valid metrics for pairwise_distances.\n\n This function simply returns the valid pairwise distance metrics.\n It exists to allow for a description of the mapping for\n each of the valid strings.\n\n The valid distance metrics, and the function they map to, are:\n\n =============== ========================================\n metric Function\n =============== ========================================\n 'cityblock' metrics.pairwise.manhattan_distances\n 'cosine' metrics.pairwise.cosine_distances\n 'euclidean' metrics.pairwise.euclidean_distances\n 'haversine' metrics.pairwise.haversine_distances\n 'l1' metrics.pairwise.manhattan_distances\n 'l2' metrics.pairwise.euclidean_distances\n 'manhattan' metrics.pairwise.manhattan_distances\n 'nan_euclidean' metrics.pairwise.nan_euclidean_distances\n =============== ========================================\n\n Read more in the :ref:`User Guide <metrics>`.\n\n \"\"\"\n return PAIRWISE_DISTANCE_FUNCTIONS\n\n\ndef _dist_wrapper(dist_func, dist_matrix, slice_, *args, **kwargs):\n \"\"\"Write in-place to a slice of a distance matrix.\"\"\"\n dist_matrix[:, slice_] = dist_func(*args, **kwargs)\n\n\ndef _parallel_pairwise(X, Y, func, n_jobs, **kwds):\n \"\"\"Break the pairwise matrix in n_jobs even slices\n and compute them in parallel.\"\"\"\n\n if Y is None:\n Y = X\n X, Y, dtype = _return_float_dtype(X, Y)\n\n if effective_n_jobs(n_jobs) == 1:\n return func(X, Y, **kwds)\n\n # enforce a threading backend to prevent data communication overhead\n fd = delayed(_dist_wrapper)\n ret = np.empty((X.shape[0], Y.shape[0]), dtype=dtype, order=\"F\")\n Parallel(backend=\"threading\", n_jobs=n_jobs)(\n fd(func, ret, s, X, Y[s], **kwds)\n for s in gen_even_slices(_num_samples(Y), effective_n_jobs(n_jobs))\n )\n\n if (X is Y or Y is None) and func is euclidean_distances:\n # zeroing diagonal for euclidean norm.\n # TODO: do it also for other norms.\n np.fill_diagonal(ret, 0)\n\n return ret\n\n\ndef _pairwise_callable(X, Y, metric, force_all_finite=True, **kwds):\n \"\"\"Handle the callable case for pairwise_{distances,kernels}.\"\"\"\n X, Y = check_pairwise_arrays(X, Y, force_all_finite=force_all_finite)\n\n if X is Y:\n # Only calculate metric for upper triangle\n out = np.zeros((X.shape[0], Y.shape[0]), dtype=\"float\")\n iterator = itertools.combinations(range(X.shape[0]), 2)\n for i, j in iterator:\n out[i, j] = metric(X[i], Y[j], **kwds)\n\n # Make symmetric\n # NB: out += out.T will produce incorrect results\n out = out + out.T\n\n # Calculate diagonal\n # NB: nonzero diagonals are allowed for both metrics and kernels\n for i in range(X.shape[0]):\n 
x = X[i]\n out[i, i] = metric(x, x, **kwds)\n\n else:\n # Calculate all cells\n out = np.empty((X.shape[0], Y.shape[0]), dtype=\"float\")\n iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))\n for i, j in iterator:\n out[i, j] = metric(X[i], Y[j], **kwds)\n\n return out\n\n\n_VALID_METRICS = [\n \"euclidean\",\n \"l2\",\n \"l1\",\n \"manhattan\",\n \"cityblock\",\n \"braycurtis\",\n \"canberra\",\n \"chebyshev\",\n \"correlation\",\n \"cosine\",\n \"dice\",\n \"hamming\",\n \"jaccard\",\n \"kulsinski\",\n \"mahalanobis\",\n \"matching\",\n \"minkowski\",\n \"rogerstanimoto\",\n \"russellrao\",\n \"seuclidean\",\n \"sokalmichener\",\n \"sokalsneath\",\n \"sqeuclidean\",\n \"yule\",\n \"wminkowski\",\n \"nan_euclidean\",\n \"haversine\",\n]\n\n_NAN_METRICS = [\"nan_euclidean\"]\n\n\ndef _check_chunk_size(reduced, chunk_size):\n \"\"\"Checks chunk is a sequence of expected size or a tuple of same.\"\"\"\n if reduced is None:\n return\n is_tuple = isinstance(reduced, tuple)\n if not is_tuple:\n reduced = (reduced,)\n if any(isinstance(r, tuple) or not hasattr(r, \"__iter__\") for r in reduced):\n raise TypeError(\n \"reduce_func returned %r. Expected sequence(s) of length %d.\"\n % (reduced if is_tuple else reduced[0], chunk_size)\n )\n if any(_num_samples(r) != chunk_size for r in reduced):\n actual_size = tuple(_num_samples(r) for r in reduced)\n raise ValueError(\n \"reduce_func returned object of length %s. \"\n \"Expected same length as input: %d.\"\n % (actual_size if is_tuple else actual_size[0], chunk_size)\n )\n\n\ndef _precompute_metric_params(X, Y, metric=None, **kwds):\n \"\"\"Precompute data-derived metric parameters if not provided.\"\"\"\n if metric == \"seuclidean\" and \"V\" not in kwds:\n # There is a bug in scipy < 1.5 that will cause a crash if\n # X.dtype != np.double (float64). See PR #15730\n dtype = np.float64 if sp_version < parse_version(\"1.5\") else None\n if X is Y:\n V = np.var(X, axis=0, ddof=1, dtype=dtype)\n else:\n raise ValueError(\n \"The 'V' parameter is required for the seuclidean metric \"\n \"when Y is passed.\"\n )\n return {\"V\": V}\n if metric == \"mahalanobis\" and \"VI\" not in kwds:\n if X is Y:\n VI = np.linalg.inv(np.cov(X.T)).T\n else:\n raise ValueError(\n \"The 'VI' parameter is required for the mahalanobis metric \"\n \"when Y is passed.\"\n )\n return {\"VI\": VI}\n return {}\n\n\ndef pairwise_distances_chunked(\n X,\n Y=None,\n *,\n reduce_func=None,\n metric=\"euclidean\",\n n_jobs=None,\n working_memory=None,\n **kwds,\n):\n \"\"\"Generate a distance matrix chunk by chunk with optional reduction.\n\n In cases where not all of a pairwise distance matrix needs to be stored at\n once, this is used to calculate pairwise distances in\n ``working_memory``-sized chunks. If ``reduce_func`` is given, it is run\n on each chunk and its return values are concatenated into lists, arrays\n or sparse matrices.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples_X, n_samples_X) or \\\n (n_samples_X, n_features)\n Array of pairwise distances between samples, or a feature array.\n The shape the array should be (n_samples_X, n_samples_X) if\n metric='precomputed' and (n_samples_X, n_features) otherwise.\n\n Y : ndarray of shape (n_samples_Y, n_features), default=None\n An optional second feature array. Only allowed if\n metric != \"precomputed\".\n\n reduce_func : callable, default=None\n The function which is applied on each chunk of the distance matrix,\n reducing it to needed values. 
``reduce_func(D_chunk, start)``\n is called repeatedly, where ``D_chunk`` is a contiguous vertical\n slice of the pairwise distance matrix, starting at row ``start``.\n It should return one of: None; an array, a list, or a sparse matrix\n of length ``D_chunk.shape[0]``; or a tuple of such objects. Returning\n None is useful for in-place operations, rather than reductions.\n\n If None, pairwise_distances_chunked returns a generator of vertical\n chunks of the distance matrix.\n\n metric : str or callable, default='euclidean'\n The metric to use when calculating distance between instances in a\n feature array. If metric is a string, it must be one of the options\n allowed by scipy.spatial.distance.pdist for its metric parameter, or\n a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.\n If metric is \"precomputed\", X is assumed to be a distance matrix.\n Alternatively, if metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. The callable\n should take two arrays from X as input and return a value indicating\n the distance between them.\n\n n_jobs : int, default=None\n The number of jobs to use for the computation. This works by breaking\n down the pairwise matrix into n_jobs even slices and computing them in\n parallel.\n\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n working_memory : int, default=None\n The sought maximum memory for temporary distance matrix chunks.\n When None (default), the value of\n ``sklearn.get_config()['working_memory']`` is used.\n\n `**kwds` : optional keyword parameters\n Any further parameters are passed directly to the distance function.\n If using a scipy.spatial.distance metric, the parameters are still\n metric dependent. See the scipy docs for usage examples.\n\n Yields\n ------\n D_chunk : {ndarray, sparse matrix}\n A contiguous slice of distance matrix, optionally processed by\n ``reduce_func``.\n\n Examples\n --------\n Without reduce_func:\n\n >>> import numpy as np\n >>> from sklearn.metrics import pairwise_distances_chunked\n >>> X = np.random.RandomState(0).rand(5, 3)\n >>> D_chunk = next(pairwise_distances_chunked(X))\n >>> D_chunk\n array([[0. ..., 0.29..., 0.41..., 0.19..., 0.57...],\n [0.29..., 0. ..., 0.57..., 0.41..., 0.76...],\n [0.41..., 0.57..., 0. ..., 0.44..., 0.90...],\n [0.19..., 0.41..., 0.44..., 0. ..., 0.51...],\n [0.57..., 0.76..., 0.90..., 0.51..., 0. ...]])\n\n Retrieve all neighbors and average distance within radius r:\n\n >>> r = .2\n >>> def reduce_func(D_chunk, start):\n ... neigh = [np.flatnonzero(d < r) for d in D_chunk]\n ... avg_dist = (D_chunk * (D_chunk < r)).mean(axis=1)\n ... return neigh, avg_dist\n >>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func)\n >>> neigh, avg_dist = next(gen)\n >>> neigh\n [array([0, 3]), array([1]), array([2]), array([0, 3]), array([4])]\n >>> avg_dist\n array([0.039..., 0. , 0. , 0.039..., 0. ])\n\n Where r is defined per sample, we need to make use of ``start``:\n\n >>> r = [.2, .4, .4, .3, .1]\n >>> def reduce_func(D_chunk, start):\n ... neigh = [np.flatnonzero(d < r[i])\n ... for i, d in enumerate(D_chunk, start)]\n ... 
return neigh\n >>> neigh = next(pairwise_distances_chunked(X, reduce_func=reduce_func))\n >>> neigh\n [array([0, 3]), array([0, 1]), array([2]), array([0, 3]), array([4])]\n\n Force row-by-row generation by reducing ``working_memory``:\n\n >>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func,\n ... working_memory=0)\n >>> next(gen)\n [array([0, 3])]\n >>> next(gen)\n [array([0, 1])]\n \"\"\"\n n_samples_X = _num_samples(X)\n if metric == \"precomputed\":\n slices = (slice(0, n_samples_X),)\n else:\n if Y is None:\n Y = X\n # We get as many rows as possible within our working_memory budget to\n # store len(Y) distances in each row of output.\n #\n # Note:\n # - this will get at least 1 row, even if 1 row of distances will\n # exceed working_memory.\n # - this does not account for any temporary memory usage while\n # calculating distances (e.g. difference of vectors in manhattan\n # distance.\n chunk_n_rows = get_chunk_n_rows(\n row_bytes=8 * _num_samples(Y),\n max_n_rows=n_samples_X,\n working_memory=working_memory,\n )\n slices = gen_batches(n_samples_X, chunk_n_rows)\n\n # precompute data-derived metric params\n params = _precompute_metric_params(X, Y, metric=metric, **kwds)\n kwds.update(**params)\n\n for sl in slices:\n if sl.start == 0 and sl.stop == n_samples_X:\n X_chunk = X # enable optimised paths for X is Y\n else:\n X_chunk = X[sl]\n D_chunk = pairwise_distances(X_chunk, Y, metric=metric, n_jobs=n_jobs, **kwds)\n if (X is Y or Y is None) and PAIRWISE_DISTANCE_FUNCTIONS.get(\n metric, None\n ) is euclidean_distances:\n # zeroing diagonal, taking care of aliases of \"euclidean\",\n # i.e. \"l2\"\n D_chunk.flat[sl.start :: _num_samples(X) + 1] = 0\n if reduce_func is not None:\n chunk_size = D_chunk.shape[0]\n D_chunk = reduce_func(D_chunk, sl.start)\n _check_chunk_size(D_chunk, chunk_size)\n yield D_chunk\n\n\ndef pairwise_distances(\n X, Y=None, metric=\"euclidean\", *, n_jobs=None, force_all_finite=True, **kwds\n):\n \"\"\"Compute the distance matrix from a vector array X and optional Y.\n\n This method takes either a vector array or a distance matrix, and returns\n a distance matrix. If the input is a vector array, the distances are\n computed. If the input is a distances matrix, it is returned instead.\n\n This method provides a safe way to take a distance matrix as input, while\n preserving compatibility with many other algorithms that take a vector\n array.\n\n If Y is given (default is None), then the returned matrix is the pairwise\n distance between the arrays from both X and Y.\n\n Valid values for metric are:\n\n - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']. These metrics support sparse matrix\n inputs.\n ['nan_euclidean'] but it does not yet support sparse matrices.\n\n - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',\n 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',\n 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']\n See the documentation for scipy.spatial.distance for details on these\n metrics. These metrics do not support sparse matrix inputs.\n\n Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are\n valid scipy.spatial.distance metrics), the scikit-learn implementation\n will be used, which is faster and has support for sparse matrices (except\n for 'cityblock'). 
For a verbose description of the metrics from\n scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics\n function.\n\n Read more in the :ref:`User Guide <metrics>`.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples_X, n_samples_X) or \\\n (n_samples_X, n_features)\n Array of pairwise distances between samples, or a feature array.\n The shape of the array should be (n_samples_X, n_samples_X) if\n metric == \"precomputed\" and (n_samples_X, n_features) otherwise.\n\n Y : ndarray of shape (n_samples_Y, n_features), default=None\n An optional second feature array. Only allowed if\n metric != \"precomputed\".\n\n metric : str or callable, default='euclidean'\n The metric to use when calculating distance between instances in a\n feature array. If metric is a string, it must be one of the options\n allowed by scipy.spatial.distance.pdist for its metric parameter, or\n a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``.\n If metric is \"precomputed\", X is assumed to be a distance matrix.\n Alternatively, if metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. The callable\n should take two arrays from X as input and return a value indicating\n the distance between them.\n\n n_jobs : int, default=None\n The number of jobs to use for the computation. This works by breaking\n down the pairwise matrix into n_jobs even slices and computing them in\n parallel.\n\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n force_all_finite : bool or 'allow-nan', default=True\n Whether to raise an error on np.inf, np.nan, pd.NA in array. Ignored\n for a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``. The\n possibilities are:\n\n - True: Force all values of array to be finite.\n - False: accepts np.inf, np.nan, pd.NA in array.\n - 'allow-nan': accepts only np.nan and pd.NA values in array. Values\n cannot be infinite.\n\n .. versionadded:: 0.22\n ``force_all_finite`` accepts the string ``'allow-nan'``.\n\n .. versionchanged:: 0.23\n Accepts `pd.NA` and converts it into `np.nan`.\n\n **kwds : optional keyword parameters\n Any further parameters are passed directly to the distance function.\n If using a scipy.spatial.distance metric, the parameters are still\n metric dependent. See the scipy docs for usage examples.\n\n Returns\n -------\n D : ndarray of shape (n_samples_X, n_samples_X) or \\\n (n_samples_X, n_samples_Y)\n A distance matrix D such that D_{i, j} is the distance between the\n ith and jth vectors of the given matrix X, if Y is None.\n If Y is not None, then D_{i, j} is the distance between the ith array\n from X and the jth array from Y.\n\n See Also\n --------\n pairwise_distances_chunked : Performs the same calculation as this\n function, but returns a generator of chunks of the distance matrix, in\n order to limit memory usage.\n paired_distances : Computes the distances between corresponding elements\n of two arrays.\n \"\"\"\n if (\n metric not in _VALID_METRICS\n and not callable(metric)\n and metric != \"precomputed\"\n ):\n raise ValueError(\n \"Unknown metric %s. Valid metrics are %s, or 'precomputed', or a callable\"\n % (metric, _VALID_METRICS)\n )\n\n if metric == \"precomputed\":\n X, _ = check_pairwise_arrays(\n X, Y, precomputed=True, force_all_finite=force_all_finite\n )\n\n whom = (\n \"`pairwise_distances`. 
Precomputed distance \"\n \" need to have non-negative values.\"\n )\n check_non_negative(X, whom=whom)\n return X\n elif metric in PAIRWISE_DISTANCE_FUNCTIONS:\n func = PAIRWISE_DISTANCE_FUNCTIONS[metric]\n elif callable(metric):\n func = partial(\n _pairwise_callable, metric=metric, force_all_finite=force_all_finite, **kwds\n )\n else:\n if issparse(X) or issparse(Y):\n raise TypeError(\"scipy distance metrics do not support sparse matrices.\")\n\n dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None\n\n if dtype == bool and (X.dtype != bool or (Y is not None and Y.dtype != bool)):\n msg = \"Data was converted to boolean for metric %s\" % metric\n warnings.warn(msg, DataConversionWarning)\n\n X, Y = check_pairwise_arrays(\n X, Y, dtype=dtype, force_all_finite=force_all_finite\n )\n\n # precompute data-derived metric params\n params = _precompute_metric_params(X, Y, metric=metric, **kwds)\n kwds.update(**params)\n\n if effective_n_jobs(n_jobs) == 1 and X is Y:\n return distance.squareform(distance.pdist(X, metric=metric, **kwds))\n func = partial(distance.cdist, metric=metric, **kwds)\n\n return _parallel_pairwise(X, Y, func, n_jobs, **kwds)\n\n\n# These distances require boolean arrays, when using scipy.spatial.distance\nPAIRWISE_BOOLEAN_FUNCTIONS = [\n \"dice\",\n \"jaccard\",\n \"kulsinski\",\n \"matching\",\n \"rogerstanimoto\",\n \"russellrao\",\n \"sokalmichener\",\n \"sokalsneath\",\n \"yule\",\n]\n\n# Helper functions - distance\nPAIRWISE_KERNEL_FUNCTIONS = {\n # If updating this dictionary, update the doc in both distance_metrics()\n # and also in pairwise_distances()!\n \"additive_chi2\": additive_chi2_kernel,\n \"chi2\": chi2_kernel,\n \"linear\": linear_kernel,\n \"polynomial\": polynomial_kernel,\n \"poly\": polynomial_kernel,\n \"rbf\": rbf_kernel,\n \"laplacian\": laplacian_kernel,\n \"sigmoid\": sigmoid_kernel,\n \"cosine\": cosine_similarity,\n}\n\n\ndef kernel_metrics():\n \"\"\"Valid metrics for pairwise_kernels.\n\n This function simply returns the valid pairwise distance metrics.\n It exists, however, to allow for a verbose description of the mapping for\n each of the valid strings.\n\n The valid distance metrics, and the function they map to, are:\n =============== ========================================\n metric Function\n =============== ========================================\n 'additive_chi2' sklearn.pairwise.additive_chi2_kernel\n 'chi2' sklearn.pairwise.chi2_kernel\n 'linear' sklearn.pairwise.linear_kernel\n 'poly' sklearn.pairwise.polynomial_kernel\n 'polynomial' sklearn.pairwise.polynomial_kernel\n 'rbf' sklearn.pairwise.rbf_kernel\n 'laplacian' sklearn.pairwise.laplacian_kernel\n 'sigmoid' sklearn.pairwise.sigmoid_kernel\n 'cosine' sklearn.pairwise.cosine_similarity\n =============== ========================================\n\n Read more in the :ref:`User Guide <metrics>`.\n \"\"\"\n return PAIRWISE_KERNEL_FUNCTIONS\n\n\nKERNEL_PARAMS = {\n \"additive_chi2\": (),\n \"chi2\": frozenset([\"gamma\"]),\n \"cosine\": (),\n \"linear\": (),\n \"poly\": frozenset([\"gamma\", \"degree\", \"coef0\"]),\n \"polynomial\": frozenset([\"gamma\", \"degree\", \"coef0\"]),\n \"rbf\": frozenset([\"gamma\"]),\n \"laplacian\": frozenset([\"gamma\"]),\n \"sigmoid\": frozenset([\"gamma\", \"coef0\"]),\n}\n\n\ndef pairwise_kernels(\n X, Y=None, metric=\"linear\", *, filter_params=False, n_jobs=None, **kwds\n):\n \"\"\"Compute the kernel between arrays X and optional array Y.\n\n This method takes either a vector array or a kernel matrix, and returns\n a kernel matrix. 
If the input is a vector array, the kernels are\n computed. If the input is a kernel matrix, it is returned instead.\n\n This method provides a safe way to take a kernel matrix as input, while\n preserving compatibility with many other algorithms that take a vector\n array.\n\n If Y is given (default is None), then the returned matrix is the pairwise\n kernel between the arrays from both X and Y.\n\n Valid values for metric are:\n ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',\n 'laplacian', 'sigmoid', 'cosine']\n\n Read more in the :ref:`User Guide <metrics>`.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples_X, n_samples_X) or \\\n (n_samples_X, n_features)\n Array of pairwise kernels between samples, or a feature array.\n The shape of the array should be (n_samples_X, n_samples_X) if\n metric == \"precomputed\" and (n_samples_X, n_features) otherwise.\n\n Y : ndarray of shape (n_samples_Y, n_features), default=None\n A second feature array only if X has shape (n_samples_X, n_features).\n\n metric : str or callable, default=\"linear\"\n The metric to use when calculating kernel between instances in a\n feature array. If metric is a string, it must be one of the metrics\n in pairwise.PAIRWISE_KERNEL_FUNCTIONS.\n If metric is \"precomputed\", X is assumed to be a kernel matrix.\n Alternatively, if metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. The callable\n should take two rows from X as input and return the corresponding\n kernel value as a single number. This means that callables from\n :mod:`sklearn.metrics.pairwise` are not allowed, as they operate on\n matrices, not single samples. Use the string identifying the kernel\n instead.\n\n filter_params : bool, default=False\n Whether to filter invalid parameters or not.\n\n n_jobs : int, default=None\n The number of jobs to use for the computation. This works by breaking\n down the pairwise matrix into n_jobs even slices and computing them in\n parallel.\n\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n **kwds : optional keyword parameters\n Any further parameters are passed directly to the kernel function.\n\n Returns\n -------\n K : ndarray of shape (n_samples_X, n_samples_X) or \\\n (n_samples_X, n_samples_Y)\n A kernel matrix K such that K_{i, j} is the kernel between the\n ith and jth vectors of the given matrix X, if Y is None.\n If Y is not None, then K_{i, j} is the kernel between the ith array\n from X and the jth array from Y.\n\n Notes\n -----\n If metric is 'precomputed', Y is ignored and X is returned.\n\n \"\"\"\n # import GPKernel locally to prevent circular imports\n from ..gaussian_process.kernels import Kernel as GPKernel\n\n if metric == \"precomputed\":\n X, _ = check_pairwise_arrays(X, Y, precomputed=True)\n return X\n elif isinstance(metric, GPKernel):\n func = metric.__call__\n elif metric in PAIRWISE_KERNEL_FUNCTIONS:\n if filter_params:\n kwds = {k: kwds[k] for k in kwds if k in KERNEL_PARAMS[metric]}\n func = PAIRWISE_KERNEL_FUNCTIONS[metric]\n elif callable(metric):\n func = partial(_pairwise_callable, metric=metric, **kwds)\n else:\n raise ValueError(\"Unknown kernel %r\" % metric)\n\n return _parallel_pairwise(X, Y, func, n_jobs, **kwds)\n"
] | [
[
"numpy.dot",
"numpy.sqrt",
"numpy.asarray",
"numpy.concatenate",
"numpy.fill_diagonal",
"numpy.var",
"numpy.exp",
"scipy.sparse.issparse",
"numpy.clip",
"numpy.arange",
"numpy.diag_indices_from",
"numpy.zeros",
"scipy.spatial.distance.cdist",
"scipy.sparse.csr_matrix",
"numpy.cov",
"numpy.tanh",
"numpy.maximum",
"numpy.abs",
"scipy.spatial.distance.pdist",
"numpy.prod",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
MatthieuDartiailh/vispy | [
"09d429be361a148b0614a192f56d4070c624072c"
] | [
"examples/howto/rotate-cube.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vispy: gallery 50\n\"\"\"\nThis example shows how to display 3D objects.\nYou should see a colored outlined spinning cube.\n\"\"\"\n\nimport numpy as np\nfrom vispy import app, gloo\nfrom vispy.util.transforms import perspective, translate, rotate\n\n\nvert = \"\"\"\n// Uniforms\n// ------------------------------------\nuniform mat4 u_model;\nuniform mat4 u_view;\nuniform mat4 u_projection;\nuniform vec4 u_color;\n\n// Attributes\n// ------------------------------------\nattribute vec3 a_position;\nattribute vec4 a_color;\n\n// Varying\n// ------------------------------------\nvarying vec4 v_color;\n\nvoid main()\n{\n v_color = a_color * u_color;\n gl_Position = u_projection * u_view * u_model * vec4(a_position,1.0);\n}\n\"\"\"\n\n\nfrag = \"\"\"\n// Varying\n// ------------------------------------\nvarying vec4 v_color;\n\nvoid main()\n{\n gl_FragColor = v_color;\n}\n\"\"\"\n\n\n# -----------------------------------------------------------------------------\ndef cube():\n \"\"\"\n Build vertices for a colored cube.\n\n V is the vertices\n I1 is the indices for a filled cube (use with GL_TRIANGLES)\n I2 is the indices for an outline cube (use with GL_LINES)\n \"\"\"\n vtype = [('a_position', np.float32, 3),\n ('a_normal', np.float32, 3),\n ('a_color', np.float32, 4)]\n # Vertices positions\n v = [[1, 1, 1], [-1, 1, 1], [-1, -1, 1], [1, -1, 1],\n [1, -1, -1], [1, 1, -1], [-1, 1, -1], [-1, -1, -1]]\n # Face Normals\n n = [[0, 0, 1], [1, 0, 0], [0, 1, 0],\n [-1, 0, 1], [0, -1, 0], [0, 0, -1]]\n # Vertice colors\n c = [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 1, 0, 1],\n [1, 1, 0, 1], [1, 1, 1, 1], [1, 0, 1, 1], [1, 0, 0, 1]]\n\n V = np.array([(v[0], n[0], c[0]), (v[1], n[0], c[1]),\n (v[2], n[0], c[2]), (v[3], n[0], c[3]),\n (v[0], n[1], c[0]), (v[3], n[1], c[3]),\n (v[4], n[1], c[4]), (v[5], n[1], c[5]),\n (v[0], n[2], c[0]), (v[5], n[2], c[5]),\n (v[6], n[2], c[6]), (v[1], n[2], c[1]),\n (v[1], n[3], c[1]), (v[6], n[3], c[6]),\n (v[7], n[3], c[7]), (v[2], n[3], c[2]),\n (v[7], n[4], c[7]), (v[4], n[4], c[4]),\n (v[3], n[4], c[3]), (v[2], n[4], c[2]),\n (v[4], n[5], c[4]), (v[7], n[5], c[7]),\n (v[6], n[5], c[6]), (v[5], n[5], c[5])],\n dtype=vtype)\n I1 = np.resize(np.array([0, 1, 2, 0, 2, 3], dtype=np.uint32), 6 * (2 * 3))\n I1 += np.repeat(4 * np.arange(2 * 3), 6)\n\n I2 = np.resize(\n np.array([0, 1, 1, 2, 2, 3, 3, 0], dtype=np.uint32), 6 * (2 * 4))\n I2 += np.repeat(4 * np.arange(6), 8)\n\n return V, I1, I2\n\n\n# -----------------------------------------------------------------------------\nclass Canvas(app.Canvas):\n\n def __init__(self):\n app.Canvas.__init__(self, close_keys='escape')\n self.size = 800, 600\n\n self.vertices, self.filled, self.outline = cube()\n self.filled_buf = gloo.IndexBuffer(self.filled)\n self.outline_buf = gloo.IndexBuffer(self.outline)\n\n self.program = gloo.Program(vert, frag)\n self.program.bind(gloo.VertexBuffer(self.vertices))\n\n self.view = np.eye(4, dtype=np.float32)\n self.model = np.eye(4, dtype=np.float32)\n self.projection = np.eye(4, dtype=np.float32)\n\n translate(self.view, 0, 0, -5)\n self.program['u_model'] = self.model\n self.program['u_view'] = self.view\n\n self.theta = 0\n self.phi = 0\n\n self._timer = app.Timer(1.0 / 60)\n self._timer.connect(self.on_timer)\n self._timer.start()\n\n # ---------------------------------\n def on_initialize(self, event):\n gloo.set_clear_color((1, 1, 1, 1))\n gloo.set_state('opaque')\n gloo.set_polygon_offset(1, 1)\n # gl.glEnable( gl.GL_LINE_SMOOTH 
)\n\n # ---------------------------------\n def on_timer(self, event):\n self.theta += .5\n self.phi += .5\n self.model = np.eye(4, dtype=np.float32)\n rotate(self.model, self.theta, 0, 0, 1)\n rotate(self.model, self.phi, 0, 1, 0)\n self.program['u_model'] = self.model\n self.update()\n\n # ---------------------------------\n def on_resize(self, event):\n width, height = event.size\n gloo.set_viewport(0, 0, width, height)\n self.projection = perspective(45.0, width / float(height), 2.0, 10.0)\n self.program['u_projection'] = self.projection\n\n # ---------------------------------\n def on_draw(self, event):\n gloo.clear()\n\n # Filled cube\n \n gloo.set_state(blend=False, depth_test=True, polygon_offset_fill=True)\n self.program['u_color'] = 1, 1, 1, 1\n self.program.draw('triangles', self.filled_buf)\n\n # Outline\n gloo.set_state(blend=True, depth_test=True, polygon_offset_fill=False)\n gloo.set_depth_mask(False)\n self.program['u_color'] = 0, 0, 0, 1\n self.program.draw('lines', self.outline_buf)\n gloo.set_depth_mask(True)\n\n\n# -----------------------------------------------------------------------------\nif __name__ == '__main__':\n c = Canvas()\n c.show()\n app.run()\n"
] | [
[
"numpy.arange",
"numpy.eye",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hyz-xmaster/swa_object_detection | [
"8c82c4c178f0b6bba077ff9d906a81bf8e04789c"
] | [
"mmdet/core/utils/swa_hook.py"
] | [
"import os.path as osp\nfrom copy import deepcopy\n\nimport torch\nfrom mmcv.runner import HOOKS, Hook\nfrom mmcv.runner.checkpoint import save_checkpoint\nfrom mmcv.runner.log_buffer import LogBuffer\n\nfrom mmdet.core import DistEvalHook, EvalHook\n\n\[email protected]_module()\nclass SWAHook(Hook):\n r\"\"\"SWA Object Detection Hook.\n\n This hook works together with SWA training config files to train\n SWA object detectors <https://arxiv.org/abs/2012.12645>.\n\n Args:\n swa_eval (bool): Whether to evaluate the swa model.\n Defaults to True.\n eval_hook (Hook): Hook class that contains evaluation functions.\n Defaults to None.\n swa_interval (int): The epoch interval to perform swa\n \"\"\"\n\n def __init__(self, swa_eval=True, eval_hook=None, swa_interval=1):\n if not isinstance(swa_eval, bool):\n raise TypeError('swa_eval must be a bool, but got'\n f'{type(swa_eval)}')\n if swa_eval:\n if not isinstance(eval_hook, EvalHook) and \\\n not isinstance(eval_hook, DistEvalHook):\n raise TypeError('eval_hook must be either a EvalHook or a '\n 'DistEvalHook when swa_eval = True, but got'\n f'{type(eval_hook)}')\n self.swa_eval = swa_eval\n self.eval_hook = eval_hook\n self.swa_interval = swa_interval\n\n def before_run(self, runner):\n \"\"\"Construct the averaged model which will keep track of the running\n averages of the parameters of the model.\"\"\"\n model = runner.model\n self.model = AveragedModel(model)\n\n self.meta = runner.meta\n if self.meta is None:\n self.meta = dict()\n self.meta.setdefault('hook_msgs', dict())\n if isinstance(self.meta, dict) and 'hook_msgs' not in self.meta:\n self.meta.setdefault('hook_msgs', dict())\n self.log_buffer = LogBuffer()\n\n def after_train_epoch(self, runner):\n \"\"\"Update the parameters of the averaged model, save and evaluate the\n updated averaged model.\"\"\"\n model = runner.model\n # Whether to perform swa\n if (runner.epoch + 1) % self.swa_interval == 0:\n swa_flag = True\n else:\n swa_flag = False\n # update the parameters of the averaged model\n if swa_flag:\n self.model.update_parameters(model)\n\n # save the swa model\n runner.logger.info(\n f'Saving swa model at swa-training {runner.epoch + 1} epoch')\n filename = 'swa_model_{}.pth'.format(runner.epoch + 1)\n filepath = osp.join(runner.work_dir, filename)\n optimizer = runner.optimizer\n self.meta['hook_msgs']['last_ckpt'] = filepath\n save_checkpoint(\n self.model.module,\n filepath,\n optimizer=optimizer,\n meta=self.meta)\n\n # evaluate the swa model\n if self.swa_eval and swa_flag:\n self.work_dir = runner.work_dir\n self.rank = runner.rank\n self.epoch = runner.epoch\n self.logger = runner.logger\n self.meta['hook_msgs']['last_ckpt'] = filename\n self.eval_hook.after_train_epoch(self)\n for name, val in self.log_buffer.output.items():\n name = 'swa_' + name\n runner.log_buffer.output[name] = val\n runner.log_buffer.ready = True\n self.log_buffer.clear()\n\n def after_run(self, runner):\n # since BN layers in the backbone are frozen,\n # we do not need to update the BN for the swa model\n pass\n\n def before_epoch(self, runner):\n pass\n\n\nclass AveragedModel(torch.nn.Module):\n r\"\"\"Implements averaged model for Stochastic Weight Averaging (SWA).\n AveragedModel class creates a copy of the provided model on the device\n and allows to compute running averages of the parameters of the model.\n Args:\n model (torch.nn.Module): model to use with SWA\n device (torch.device, optional): if provided, the averaged model\n will be stored on the device. 
Defaults to None.\n avg_fn (function, optional): the averaging function used to update\n parameters; the function must take in the current value of the\n AveragedModel parameter, the current value of model\n parameter and the number of models already averaged; if None,\n equally weighted average is used. Defaults to None.\n \"\"\"\n\n def __init__(self, model, device=None, avg_fn=None):\n super(AveragedModel, self).__init__()\n self.module = deepcopy(model)\n if device is not None:\n self.module = self.module.to(device)\n self.register_buffer('n_averaged',\n torch.tensor(0, dtype=torch.long, device=device))\n if avg_fn is None:\n\n def avg_fn(averaged_model_parameter, model_parameter,\n num_averaged):\n return averaged_model_parameter + (\n model_parameter - averaged_model_parameter) / (\n num_averaged + 1)\n\n self.avg_fn = avg_fn\n\n def forward(self, *args, **kwargs):\n return self.module(*args, **kwargs)\n\n def update_parameters(self, model):\n for p_swa, p_model in zip(self.parameters(), model.parameters()):\n device = p_swa.device\n p_model_ = p_model.detach().to(device)\n if self.n_averaged == 0:\n p_swa.detach().copy_(p_model_)\n else:\n p_swa.detach().copy_(\n self.avg_fn(p_swa.detach(), p_model_,\n self.n_averaged.to(device)))\n self.n_averaged += 1\n"
] | [
[
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
UCL-RITS/DashPy-kpi | [
"3fc85fe18017cf8dcda4e0ed389774c73396b1f7"
] | [
"DashPykpi/kpistats.py"
] | [
"from __future__ import print_function\nimport os\nfrom github3 import login\nimport pandas as pd\nimport numpy as np\nimport codecs\nimport getpass\nimport json\nimport requests\nfrom tinydb import TinyDB, Query\nfrom bokeh.charts import Area, defaults\nfrom bokeh.models import HoverTool, ColumnDataSource\nfrom bokeh.charts import Area, defaults\nfrom bokeh.plotting import figure\nfrom bokeh.embed import components\n\n\nclass KpiStats(object):\n \"\"\"**Gathers repo statistics from a list of github urls into a TinyDB**\n\n The class uses github3.py to create an authenticated Github session.\n Session is authenticaed either by:\n\n #. Setting up a `github-token\n <https://help.github.com/articles/creating-an-access-token-for-command-line-use/>`_\n and copying the key into a local file in the cwd called 'secret_key'.\n #. If no 'secret_key' file is detected, the user is prompted to\n enter a username and password.\n #. A github-token can be set as an environment variable\n 'GHUB_API_TOKEN' (used this on Travis).\n\n public repo stats can be retrieved). Two methods can be used to log in.\n Firstly using a github api token: The program will look for this in a file\n called 'secret_key' in the local folder. If this is not found the software\n will default to asking for a username and password. The class should be\n instansiated with a list of github repo url strings.\n\n :param urls: list of url strings ['https://github.com/<user>/<repo>',]\n\n :returns: KpiStats() object\n\n :Example:\n\n The follwing use case shows how to feed a list of urls to KpiStats\n and then read the TinyDB object produced (by default) into pandas.\n\n >>> from DashPykpi.kpistats import KpiStats, GitURLs, GraphKPIs\n >>> url_fetch = GitURLs()\n >>> urls = url_fetch.urls\n # If looking through all UCL associated repos need to remove the following\n # lines:\n # urls.remove('https://github.com/UCL/ucl')\n # urls.remove('https://github.com/UCL-RITS/ucl-rits')\n >>> test = KpiStats(urls=urls)\n >>> test.work(status=True)\n >>> db = TinyDB('tinydb_for_KPI.json')\n >>> df = pd.DataFrame(db.all())\n \"\"\"\n def __init__(self, urls):\n if os.path.isfile('secret_key'):\n fn = open(\"secret_key\")\n # Locally, with a secret_key file\n self.gh = login(token=fn.read().split()[0])\n elif os.environ.get('GHUB_API_TOKEN'):\n # On Travis? 
(GHUB_API_TOKEN could be set...)\n self.gh = login(token=os.environ['GHUB_API_TOKEN'])\n else:\n # Or just use username/password method\n self.gh_name = input(\"Username to access github with:\")\n pss = getpass.getpass(prompt='Ghub pswd {0}:'.format(self.gh_name))\n self.gh = login(self.gh_name, pss)\n self.urls = urls # A list of URL strings\n self.repo = None\n self.stats = None\n self.db = TinyDB('tinydb_for_KPI.json') # create new or open existing\n\n def __str__(self):\n print(\"A KPI back-end to extract data from Github.\")\n\n def get_repo_object_from_url(self, url):\n \"\"\"Get a repository object for a given github url\n\n Retrieves a github3.py.Repository() object from a url string under an\n authenticated session.\n\n :param url: a string of format 'https://github.com/<user>/<repo>'\n\n :returns: github3.py.Repository() object as self.repo()\n \"\"\"\n demo = 'https://github.com/<user>/<repo>'\n er1 = \"Error: url should be a string in format of \"\n er2 = \"Error: {0} isn't valid \".format(url)\n assert type(url) == str, er1 + demo\n assert url.split('/')[-3] == 'github.com', er2\n user_str, repo_str = url.split('/')[-2:]\n self.repo = self.gh.repository(user_str, repo_str)\n return\n\n def get_repo_stats(self, debug=False):\n \"\"\"Identify the statistics of an individual repo\n\n Examines self.repo() to identify key statistics from a repository.\n\n :param: self.repo()\n :rtype: A dictionary object as self.stats\n\n :Example:\n\n >>> from DashPykpi.kpistats import KpiStats\n >>> test = KpiStats(urls=[\"https://github.com/UCL-RITS/RSD-Dashboard\"])\n >>> test.get_repo_object_from_url(url=test.urls[0])\n >>> test.get_repo_stats()\n >>> test.stats # print a dictionary of retrieved stats\n \"\"\"\n if debug:\n print('\\nExamining repo {0}'.format(self.repo))\n contribs = [(str(contrib.author), contrib.total)\n for contrib in self.repo.iter_contributor_statistics()]\n total = sum([user_num[1] for user_num in contribs])\n branch_count = len([branch for branch in self.repo.iter_branches()])\n commits_over_time = [commit for commit in self.repo.iter_commit_activity()]\n weekly_commits = [week['total'] for week in commits_over_time]\n self.stats = {\n 'stargazers': self.repo.stargazers,\n 'fork_count': self.repo.fork_count,\n 'commits_by_author': contribs,\n 'num_contributors': len(contribs),\n 'total_commits': total,\n 'repo_owner': self.repo.owner.login,\n 'repo_name': self.repo.name,\n 'branches': branch_count,\n 'language': self.repo.language,\n \"weekly_commits\": weekly_commits,\n }\n return\n\n def add_db_row(self):\n \"\"\"KpiStats.add_db_row(self)\n\n Checks if there is a database and entry already present, if there isn't\n it adds a row to a database. If there is one already, it checks to see\n if the newly retrieved dictionary has updated info. If so, it removes\n the old row, and adds in the new one. 
If there is an error, and there\n is more than one row per repo it throws an assert error.\n\n :param: self\n :rtype: updates database connected to self.db\n \"\"\"\n DBfield = Query()\n results = self.db.search(DBfield.repo_name == self.repo.name)\n assert len(results) < 2, \"Error, repeat entries in DB for same repo.\"\n if len(results) == 0: # if no record then add the results\n self.db.insert(self.stats)\n if len(results) == 1: # if record exists, but the user has rerun code\n eid = results[0].eid\n if results[0]['total_commits'] < self.stats['total_commits']:\n self.db.remove(eids=[eid]) # remove the old entry\n self.db.insert(self.stats) # add the new entry\n else:\n # condition where an entry exists in DB,\n # and new stats are no diffrent (no repo changes)\n pass\n return\n\n def clean_state(self):\n \"\"\"Cleans the stats and repo objects from the class between updates\n\n Clean temporary data in the class before attempting to get a new repo\n object, specfically setting self.repo and self.stats to None.\n\n :param: self\n :rtype self.repo: None\n :rtype self.stats: None\n \"\"\"\n self.repo = None\n self.stats = None\n\n def work(self, status=False, debug=False, verbose=False, add_to_db=True):\n \"\"\"\n function:: KpiStats.work(self, status=False, debug=False,\n verbose=False, add_to_db=True)\n\n Main routine that handels passing single url strings to\n self.get_repo_object() to populate self.repo, and then calls\n self.get_repo_stats() to put statistics for each repo in a dictionary\n in self.stats. It then calls self.add_db_row() to write the dic data\n to a TinyDB file, and cleans the repo and stats objects from memory.\n\n Optionally, it also reports on the stats, progress, and execution of\n the called functions can be provided by via a status, debug and\n verbose flags.\n\n :Example:\n\n See DashPykpi.kpistats.KpiStats()\n \"\"\"\n for i, url in enumerate(self.urls):\n if status:\n print(\"\\rComplete...{0:2.0f}%\".format(((i+1)/len(self.urls)\n )*100.,), end=\"\")\n self.get_repo_object_from_url(url=url)\n self.get_repo_stats(debug=debug)\n # Deal with html get timeout bug here -> retry if no commits found\n timeout_bug = self.stats['total_commits'] < 1\n if timeout_bug:\n self.get_repo_stats()\n if add_to_db:\n self.add_db_row()\n if verbose:\n for k in sorted(self.stats):\n print(k, '-->', self.stats[k])\n self.clean_state()\n\n\nclass GitURLs(object):\n \"\"\"Get all repo urls associated with a github account.\n\n Return a list of url strings as self.urls, useful during testing. In\n deployment this list will likely come directly from the Dashboard database.\n\n :Example:\n\n >>> from DashPykpi import GitURLs\n >>> url_list = GitURLs()\n >>> url_list.urls[0:3]\n ['https://github.com/benlaken/Comment_BadruddinAslam2014.git',\n 'https://github.com/benlaken/Composite_methods_LC13.git',\n 'https://github.com/benlaken/ECCO.git']\n \"\"\"\n def __init__(self):\n if os.path.isfile('secret_key'):\n fn = open(\"secret_key\")\n # Locally, with a secret_key file\n self.gh = login(token=fn.read().split()[0])\n elif os.environ.get('GHUB_API_TOKEN'):\n # On Travis? 
(GHUB_API_TOKEN can be set...)\n self.gh = login(token=os.environ['GHUB_API_TOKEN'])\n else:\n # Or just use username/password method\n self.gh_name = input(\"Username to access github with:\")\n pss = getpass.getpass(prompt='Ghub pswd {0}:'.format(self.gh_name))\n self.gh = login(self.gh_name, pss)\n self.urls = [r.clone_url.split('.git')[0]\n for r in self.gh.iter_repos()]\n\n\nclass GraphKPIs(object):\n \"\"\"Graph key statistics from specified repos.\n\n How to embed bokeh plots into Django (`see example\n <http://bokeh.pydata.org/en/latest/docs/user_guide/embed.html>`_).\n Essentially, insert the script and div returned into an html template and\n the div will be replaced by the plot objet. This assumes BokehJS has been\n loaded, either inline or via CDN. (See the link above to copy CDN lines.)\n \"\"\"\n def __init__(self):\n\n if os.path.exists('tinydb_for_KPI.json'):\n self.db = TinyDB('tinydb_for_KPI.json')\n self.df = pd.DataFrame(self.db.all())\n else:\n raise IOError('DB file not present')\n\n def __str__(self):\n print(\"Class for graphing the output of KPIStats held in a DB.\")\n\n def auto_title(self, x, y):\n \"\"\"Plot title creator\n\n Automatically generate a title from two strings. The strings\n can include underscores (as they are column names from a DB), these\n are removed. Possible values for x and y are 'fork_count', 'stargazers'\n , 'num_contributors' or 'total_commits'.\n\n :param x: string\n :param y: string\n :return: string Title for plots\n \"\"\"\n x = x.split('_')\n y = y.split('_')\n return ' '.join(x + ['vs.'] + y).title()\n\n def xy_scatter(self, x, y, ptitle=None, give_script_div=False):\n \"\"\" Create an x y scatterplot coloured by total_commits\n\n Using Bokeh to insert into a webpage or a Jupyter notebook an x y\n scatter from the TinyDB. Valid inputs are column name strings for\n the numeric data in the DB, including: 'fork_count', 'stargazers',\n 'num_contributors' or 'total_commits'.\n\n :param x: e.g. 'fork_count', 'stargazers', 'num_contributors' or 'total_commits'\n :param y: e.g. 'fork_count', 'stargazers', 'num_contributors' or 'total_commits'\n :type x: string\n :type y: string\n :return: Bokeh object or script and div string items\n\n :Example:\n\n 1. Assume a database object created by KpiStats() exists and you\n wish to create the divs and script to insert into a HTML page.\n\n >>> from DashPykpi.kpistats import GraphKPIs\n >>> grobj = GraphKPIs()\n >>> script, div = grobj.xy_scatter(x='stargazers', y='fork_count',\n give_script_div=True)\n\n 2. 
Assume a database object created by KpiStats() exists and you\n wish to plot a test figure in a Jupyter notebook.\n\n >>> from DashPykpi.kpistats import GraphKPIs\n >>> from bokeh.plotting import figure, show, output_notebook\n >>> grobj = GraphKPIs()\n >>> p = grobj.xy_scatter(x='stargazers', y='fork_count')\n >>> show(p)\n \"\"\"\n if not ptitle:\n ptitle = self.auto_title(x=x, y=y)\n\n tmpdf = self.df[self.df['total_commits'] > 0]\n tmpdf = tmpdf[tmpdf['num_contributors'] < 80]\n df = tmpdf\n colormap = {\n \"low\": \"#8400FF\",\n \"mid\": \"#FF00FF\",\n \"high\": \"#FF0088\",\n \"highest\": \"#FF0000\",\n }\n # colour points by commit numbers\n colour_list = []\n for comitnum in df.total_commits:\n if comitnum < 10:\n colour_list.append(colormap['low'])\n elif comitnum >= 10 and comitnum < 100:\n colour_list.append(colormap['mid'])\n elif comitnum >= 100 and comitnum < 1000:\n colour_list.append(colormap['high'])\n else:\n colour_list.append(colormap['highest'])\n source = ColumnDataSource(\n data=dict(\n fork_count=df.fork_count,\n repo_name=df.repo_name,\n repo_owner=df.repo_owner,\n stargazers=df.stargazers,\n num_contributors=df.num_contributors,\n total_commits=df.total_commits,\n color_by_commits=colour_list,\n )\n )\n hover = HoverTool(\n tooltips=[\n (\"Repo\", \"@repo_name\"),\n (\"Owner\", \"@repo_owner\"),\n (\"Stargazers\", \"@stargazers\"),\n (\"Total commits\", \"@total_commits\"),\n (\"Fork count\", \"@fork_count\"),\n (\"Num. contributors\", \"@num_contributors\"),\n ]\n )\n tools = \"pan, resize, wheel_zoom, reset, box_select, save\"\n p = figure(title=ptitle, tools=[tools, hover])\n p.xaxis.axis_label = x\n p.yaxis.axis_label = y\n p.circle(x, y, source=source, color=\"color_by_commits\")\n if give_script_div:\n script, div = components(p)\n return script, div\n else:\n return(p)\n\n def weekly_activity(self, bin=None, per_repo=False, width=800, height=400,\n give_script_div=False, verbose=False):\n \"\"\"Create a stacked area plot covering the past 52 weeks of acvitity.\n Plot in the notebook (assuming a TinyDB file exists).\n bin = Number of weekly bins (if none then the resolution is weekly)\n\n :Example:\n\n >>> from bokeh.charts import show, output_notebook\n >>> from DashPykpi.kpistats import GraphKPIs\n >>> output_notebook()\n >>> bk = GraphKPIs()\n >>> show(bk.weekly_activity())\n >>> #Or, a version with all repos individually and feedback\n >>> show(bk.weekly_activity(per_repo=True, verbose=True))\n \"\"\"\n df = self.df\n defaults.width = width\n defaults.height = height\n if per_repo:\n running = 0\n num_repos = 0\n tmp_hold = {}\n for n, weekly in enumerate(df['weekly_commits']):\n if sum(weekly) > 1:\n tmp = weekly\n # If binning is required...\n if bin:\n width = bin\n tmp = np.array(tmp)\n tmp = tmp[:(tmp.size // width) * width].reshape(-1, width).mean(axis=1)\n xlab = \"months since now\"\n else:\n xlab = 'weeks since now'\n tmp_hold[df['repo_name'][n]] = tmp\n running += sum(weekly)\n num_repos += 1\n if verbose:\n print(\"{0:3,} commits, in {1} active repos (out of {2} total repos), during past 52 weeks\".format(\n running, num_repos, len(df)))\n area = Area(tmp_hold, title=\"Commits to all repos\", legend=None,\n stack=True, xlabel=xlab,\n ylabel='Master repo commits/week')\n\n if give_script_div:\n # Iincase you want to add the graphics to a HTML template file\n script, div = components(area)\n return script, div\n else:\n return(area)\n if not per_repo:\n tmp = []\n for n, weekly in enumerate(df['weekly_commits']):\n if sum(weekly) > 1:\n 
tmp.append(weekly)\n tmp = np.array(tmp)\n tmp = tmp.sum(axis=0)\n # If binning is required\n if bin:\n width = bin\n tmp = tmp[:(tmp.size // width) * width].reshape(-1, width).mean(axis=1)\n xlab = \"months since now\"\n else:\n xlab = \"weeks since now\"\n all_weekly_commits = {\"All repos\": tmp}\n area = Area(all_weekly_commits, title=\"Commits to repos\",\n legend=None, stack=True, xlabel=xlab,\n ylabel='Master repo commits/week')\n if give_script_div:\n # Incase you want to add the graphics to an HTML template\n script, div = components(area)\n return script, div\n else:\n return(area)\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cclauss/dgl | [
"6589969e45eeb694366bf530f9a195bd78a456df"
] | [
"python/dgl/backend/mxnet/tensor.py"
] | [
"from __future__ import absolute_import\n\nimport numpy as np\nimport mxnet as mx\nimport mxnet.ndarray as nd\n\ndef data_type_dict():\n return {'float16' : np.float16,\n 'float32' : np.float32,\n 'float64' : np.float64,\n 'uint8' : np.uint8,\n 'int8' : np.int8,\n 'int16' : np.int16,\n 'int32' : np.int32,\n 'int64' : np.int64}\n\ndef cpu():\n return mx.cpu()\n\ndef tensor(data, dtype=None):\n return nd.array(data, dtype=dtype)\n\ndef sparse_matrix(data, index, shape, force_format=False):\n fmt = index[0]\n if fmt == 'coo':\n if force_format:\n raise TypeError('MXNet backend only supports CSR format,'\n ' but COO format is forced.')\n coord = index[1]\n # generate convert idx\n # FIXME: cannot use int64\n tmp_data = nd.arange(len(coord[0]), dtype=data.dtype, ctx=coord[0].context)\n tmp_spmat = nd.sparse.csr_matrix((tmp_data, (coord[0], coord[1])),\n tuple(shape), ctx=data.context)\n convert_idx = nd.cast(tmp_spmat.data, dtype='int64')\n # shuffle the data\n data = data[convert_idx]\n spmat = nd.sparse.csr_matrix((data, tmp_spmat.indices, tmp_spmat.indptr),\n tuple(shape), ctx=data.context)\n return spmat, convert_idx\n elif fmt == 'csr':\n indices = index[1]\n indptr = index[2]\n spmat = nd.sparse.csr_matrix((data, indices, indptr),\n tuple(shape), ctx=data.context)\n # No conversion is required.\n return spmat, None\n else:\n raise TypeError('Invalid format: %s.' % fmt)\n\ndef sparse_matrix_indices(spmat):\n return ('csr', spmat.indices, spmat.indptr)\n\ndef is_tensor(obj):\n return isinstance(obj, nd.NDArray)\n\ndef shape(input):\n # NOTE: the input cannot be a symbol\n return input.shape\n\ndef dtype(input):\n # NOTE: the input cannot be a symbol\n return input.dtype\n\ndef ndim(input):\n return input.ndim\n\ndef context(input):\n return input.context\n\ndef astype(input, ty):\n return nd.cast(input, ty)\n\ndef asnumpy(input):\n return input.asnumpy()\n\ndef copy_to(input, ctx):\n return input.as_in_context(ctx)\n\ndef sum(input, dim):\n return nd.sum(input, axis=dim)\n\ndef mean(input, dim):\n return nd.mean(input, axis=dim)\n\ndef max(input, dim):\n return nd.max(input, axis=dim)\n\ndef cat(seq, dim):\n return nd.concat(*seq, dim=dim)\n\ndef stack(seq, dim):\n return nd.stack(*seq, dim=dim)\n\ndef split(x, sizes_or_sections, dim):\n if isinstance(sizes_or_sections, list) or isinstance(sizes_or_sections, np.ndarray):\n # TODO: fallback to numpy is unfortunate\n np_arr = x.asnumpy()\n indices = np.cumsum(sizes_or_sections)[:-1]\n res = np.split(np_arr, indices, axis=dim)\n return [tensor(arr, dtype=x.dtype) for arr in res]\n else:\n return nd.split(x, sizes_or_sections, axis=dim)\n\ndef gather_row(data, row_index):\n if isinstance(row_index, nd.NDArray):\n return nd.take(data, row_index)\n else:\n return data[row_index,]\n\ndef narrow_row(data, start, stop):\n return nd.slice(data, begin=start, end=stop)\n\ndef scatter_row(data, row_index, value):\n return mx.nd.contrib.index_copy(data, row_index, value)\n\ndef scatter_row_inplace(data, row_index, value):\n data[row_index] = value\n\ndef squeeze(input, dim):\n return nd.squeeze(input, axis=dim)\n\ndef unsqueeze(input, dim):\n return nd.expand_dims(input, axis=dim)\n\ndef reshape(input, shape):\n # NOTE: the input cannot be a symbol\n return nd.reshape(input ,shape)\n\ndef zeros(shape, dtype, ctx):\n return nd.zeros(shape, dtype=dtype, ctx=ctx)\n\ndef ones(shape, dtype, ctx):\n return nd.ones(shape, dtype=dtype, ctx=ctx)\n\ndef spmm(x, y):\n return nd.dot(x, y)\n\ndef unsorted_1d_segment_sum(input, seg_id, n_segs, dim):\n # TODO: support 
other dimensions\n assert dim == 0, 'MXNet only supports segment sum on first dimension'\n\n # Use SPMV to simulate segment sum\n ctx = input.context\n n_inputs = input.shape[0]\n input_shape_suffix = input.shape[1:]\n input = input.reshape(n_inputs, -1)\n n_range = nd.arange(n_inputs, dtype='int64').as_in_context(input.context)\n w_nnz = nd.ones(n_inputs).as_in_context(input.context)\n w_nid = nd.stack(seg_id, n_range, axis=0)\n w = nd.sparse.csr_matrix((w_nnz, (seg_id, n_range)), (n_segs, n_inputs))\n w = w.as_in_context(input.context)\n y = nd.dot(w, input)\n y = nd.reshape(y, (n_segs,) + input_shape_suffix)\n return y\n\ndef unsorted_1d_segment_mean(input, seg_id, n_segs, dim):\n # TODO: support other dimensions\n assert dim == 0, 'MXNet only supports segment mean on first dimension'\n\n n_ones = nd.ones_like(seg_id).astype(input.dtype)\n w = unsorted_1d_segment_sum(n_ones, seg_id, n_segs, 0)\n w = nd.clip(w, a_min=1, a_max=np.inf)\n y = unsorted_1d_segment_sum(input, seg_id, n_segs, dim)\n y /= w.reshape((-1,) + (1,) * (y.ndim - 1))\n return y\n\ndef unique(input):\n # TODO: fallback to numpy is unfortunate\n tmp = input.asnumpy()\n tmp = np.unique(tmp)\n return nd.array(tmp, ctx=input.context, dtype=input.dtype)\n\ndef full_1d(length, fill_value):\n return nd.full((length,), fill_value)\n\ndef nonzero_1d(input):\n # TODO: fallback to numpy is unfortunate\n tmp = input.asnumpy()\n tmp = np.nonzero(tmp)[0]\n return nd.array(tmp, ctx=input.context, dtype=input.dtype)\n\ndef sort_1d(input):\n # TODO: this isn't an ideal implementation.\n val = nd.sort(input, axis=None, is_ascend=True)\n idx = nd.argsort(input, is_ascend=True)\n idx = nd.cast(idx, dtype='int64')\n return val, idx\n\ndef arange(start, stop):\n return nd.arange(start, stop, dtype=np.int64)\n\ndef rand_shuffle(arr):\n return mx.nd.random.shuffle(arr)\n\ndef zerocopy_to_dlpack(arr):\n return arr.to_dlpack_for_read()\n\ndef zerocopy_from_dlpack(dlpack_arr):\n return nd.from_dlpack(dlpack_arr)\n\ndef zerocopy_to_numpy(arr):\n # NOTE: not zerocopy\n return arr.asnumpy()\n\ndef zerocopy_from_numpy(np_data):\n # NOTE: not zerocopy\n return nd.array(np_data, dtype=np_data.dtype)\n"
] | [
[
"numpy.split",
"numpy.cumsum",
"numpy.nonzero",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hontlong/vue-admin-template | [
"2c476a169c47a85f785aea6d9b1dbd94c02a7edc"
] | [
"server/extract_cnn_vgg16_keras.py"
] | [
"# extract_cnn_vgg16_keras.py\n# -*- coding: utf-8 -*-\n\n# keras/tensorflow 使用flask部署服务的常见错误的解决办法\n# http://www.luyixian.cn/news_show_381268.aspx\n# 非常重要,需要如此才能嵌入flask中使用\n# 当前仅能在非dev模式下使用,\n# dev会二次load,会有找不到模块的错误。当前没办法解决。\n\nimport os\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf # 这个是需要的\nfrom keras.applications.vgg16 import VGG16\nfrom keras.applications.vgg16 import preprocess_input\nfrom keras.backend import set_session\nfrom keras.preprocessing import image\nfrom numpy import linalg\n\n# 下面这些方法是无法运行的\n# from tensorflow_core.python.keras.api.keras.preprocessing import image\n# from tensorflow_core.python.keras.applications.vgg16 import VGG16, preprocess_input\n# from tensorflow_core.python.keras.backend import set_session\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nsess = tf.Session()\ngraph = tf.get_default_graph()\n\n__all__ = [\n \"vgg16_model\"\n]\n\n\nclass VGGNet:\n def __init__(self):\n set_session(sess)\n # weights: 'imagenet'\n # pooling: 'max' or 'avg'\n # input_shape: (width, height, 3), width and height should >= 48\n self.input_shape = (224, 224, 3)\n self.weight = 'imagenet'\n self.pooling = 'max'\n # include_top:是否保留顶层的3个全连接网络\n # weights:None代表随机初始化,即不加载预训练权重。'imagenet'代表加载预训练权重\n # input_tensor:可填入Keras tensor作为模型的图像输出tensor\n # input_shape:可选,仅当include_top=False有效,应为长为3的tuple,指明输入图片的shape,图片的宽高必须大于48,如(200,200,3)\n # pooling:当include_top = False时,该参数指定了池化方式。None代表不池化,最后一个卷积层的输出为4D张量。‘avg’代表全局平均池化,‘max’代表全局最大值池化。\n # classes:可选,图片分类的类别数,仅当include_top = True并且不加载预训练权重时可用。\n self.model_vgg = VGG16(weights=self.weight,\n input_shape=(self.input_shape[0], self.input_shape[1], self.input_shape[2]),\n pooling=self.pooling, include_top=False)\n # with graph.as_default():\n self.model_vgg.predict(np.zeros((1, 224, 224, 3)))\n\n '''\n Use vgg16/Resnet model to extract features\n Output normalized feature vector\n '''\n\n # 提取vgg16最后一层卷积特征\n def vgg_extract_feat(self, img_path):\n global sess, graph\n with graph.as_default():\n img = image.load_img(img_path, target_size=(self.input_shape[0], self.input_shape[1]))\n img = image.img_to_array(img)\n img = np.expand_dims(img, axis=0)\n set_session(sess)\n img = preprocess_input(img)\n feat = self.model_vgg.predict(img)\n # print(feat.shape)\n # print(feat[0])\n # print(LA.norm(feat[0]))\n norm_feat = feat[0] / linalg.norm(feat[0])\n return norm_feat\n\n # 提取vgg16最后一层卷积特征\n def vgg_extract_feat_batch(self, img_path_arr):\n global sess, graph\n set_session(sess)\n imgs = []\n for img_path in img_path_arr:\n img = image.load_img(img_path, target_size=(self.input_shape[0], self.input_shape[1]))\n img = image.img_to_array(img)\n img = np.expand_dims(img, axis=0)\n imgs.append(img)\n with graph.as_default():\n set_session(sess)\n img = np.concatenate([x for x in imgs])\n img = preprocess_input(img)\n # with graph.as_default():\n feat = self.model_vgg.predict(img)\n # print(feat.shape)\n # print(feat[0])\n # print(LA.norm(feat[0]))\n # norm_feat_arr = []\n # for i in xrange(0, feat.shape[0]):\n # norm_feat = feat[i] / LA.norm(feat[i])\n # norm_feat_arr.append(norm_feat)\n # return norm_feat_arr\n return feat / linalg.norm(feat)\n\n\nvgg16_model = VGGNet()\n"
] | [
[
"tensorflow.compat.v1.get_default_graph",
"numpy.expand_dims",
"numpy.linalg.norm",
"tensorflow.compat.v1.Session",
"numpy.concatenate",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ThomasTNO/mpyc | [
"11ea79be06a676a8b2ae2d81d94621ec35c638e8"
] | [
"demos/ridgeregression.py"
] | [
"\"\"\"Demo Linear and Ridge Regression.\n\nMPyC demo accompanying the paper 'Efficient Secure Ridge Regression from\nRandomized Gaussian Elimination' by Frank Blom, Niek J. Bouman, Berry\nSchoenmakers, and Niels de Vreede, presented at TPMPC 2019 by Frank Blom.\nSee https://eprint.iacr.org/2019/773 (or https://ia.cr/2019/773).\n\nThe following datasets from the UCI Machine Learning Repository are used:\n\n1. 'student-mat' (student.zip)\n2. 'winequality-red' (winequality-red.csv)\n3. 'winequality-white' (winequality-red.csv)\n4. 'YearPredictionMSD' (YearPredictionMSD.txt.zip)\n5. 'ethylene_methane' (data.zip)\n6. 'ethylene_CO' (data.zip)\n7. 'HIGGS' (HIGGS.csv.gz)\n\nThe first three datasets are included in this demo (see directory ./data/regr/).\nThe other ones can be downloaded from https://archive.ics.uci.edu/ml/datasets/\n(use the -u --data-url command line option to get the full URL for each dataset).\nSimply put the files indicated above in directory ./data/regr/, no need to (g)unzip!\n\nBy default, the demo runs with synthetic data (with n=1000 samples, d=10 features,\nand e=1 target). The default accuracy varies from 4 to 7 fractional bits. Setting\nthe regularization parameter -l --lambda to 0 will revert to linear regression.\n\nUse the -h --help command line option for more help.\n\nThe code below is based on Frank Blom's original implementation used for the paper.\n\"\"\"\n\nimport os\nimport time\nimport argparse\nimport logging\nimport random\nimport io\nimport gzip\nimport zipfile\nimport csv\nimport numpy as np\nimport sklearn.datasets\nimport sklearn.model_selection\nimport sklearn.linear_model\nimport sklearn.metrics\nfrom mpyc.runtime import mpc\n\n\nasync def synthesize_data(n_samples, n_features, n_targets):\n rnd = await mpc.transfer(random.randrange(2**31), senders=0)\n X, Y = sklearn.datasets.make_regression(n_samples=n_samples,\n n_features=n_features,\n n_informative=max(1, n_features - 5),\n n_targets=n_targets, bias=42,\n effective_rank=max(1, n_features - 3),\n tail_strength=0.5, noise=1.2,\n random_state=rnd) # all parties use same rnd\n if n_targets == 1:\n Y = np.transpose([Y])\n X = np.concatenate((X, Y), axis=1)\n b_m = np.min(X, axis=0)\n b_M = np.max(X, axis=0)\n coef_add = [-(m + M) / 2 for m, M in zip(b_m, b_M)]\n coef_mul = [2 / (M - m) for m, M in zip(b_m, b_M)]\n for xi in X:\n for j in range(len(xi)):\n # map to [-1,1] range\n xi[j] = (xi[j] + coef_add[j]) * coef_mul[j]\n return X\n\n\ndef read_data(infofile):\n with open(infofile, newline='') as file:\n reader = csv.reader(file)\n\n # process first line\n datafile, delim, skip_header, split, n, d_, e = next(reader)\n skip_header = int(skip_header) # number of lines to skip at start of datafile\n split = int(split) # train-test split (0 for random split)\n n = int(n) # number of samples\n d_ = int(d_) # number of features in datafile\n e = int(e) # number of targets\n\n # process remaining lines\n d = 0\n L = d_ + e # total number of columns in datafile\n categories = [None] * L\n coef_add = [None] * L\n coef_mul = [None] * L\n for j in range(L):\n line = next(reader)\n feature_type = line[0]\n if feature_type == 'numerical':\n m, M = float(line[1]), float(line[2])\n coef_add[j] = -(m + M) / 2\n coef_mul[j] = 2 / (M - m)\n d += 1\n elif feature_type == 'categorical':\n while not line[-1]: # drop trailing empty columns\n line.pop()\n categories[j] = line[1:]\n d += len(categories[j]) # one hot encoding\n elif feature_type == 'exclude':\n categories[j] = []\n else:\n raise ValueError('unknown feature 
type')\n d -= e # number of features\n\n datafile = os.path.join('data', 'regr', datafile)\n if datafile.endswith('.gz'):\n open_file = lambda f: gzip.open(f, mode='rt', newline='')\n elif datafile.find('.zip!') >= 0:\n archive, datafile = datafile.split('!')\n open_file = lambda f: io.TextIOWrapper(zipfile.ZipFile(archive).open(f), newline='')\n else:\n open_file = lambda f: open(f, newline='')\n\n offset = 0\n if datafile.find('Year') >= 0 or datafile.find('HIGGS') >= 0:\n offset = 1 - L # hack: rotate left for YearPrediction and HIGGS\n elif datafile.find('ethylene') >= 0:\n offset = 3 - L # hack: rotate left by 3 for ethylene\n csv.register_dialect('ethylene', delimiter=' ', skipinitialspace=True)\n\n X = np.empty((n, d + e), dtype=float)\n float1 = float(1)\n float_1 = float(-1)\n with open_file(datafile) as file:\n reader = csv.reader(file, delimiter=delim)\n if datafile.find('ethylene') >= 0:\n reader = csv.reader(file, dialect='ethylene')\n for _ in range(skip_header):\n next(reader)\n n100 = n // 100\n for i, row in enumerate(reader):\n if not i % n100:\n print(f'Loading ... {round(100*i/n)}%', end='\\r')\n if len(row) > L:\n row = row[:L] # ignore spurious columns\n x = X[i]\n l = 0 # column index for row x\n for j in range(L):\n if categories[j] is None: # numerical feature\n # map to [-1,1] range\n x[l] = (float(row[offset + j]) + coef_add[j]) * coef_mul[j]\n l += 1\n elif categories[j]: # categorical feature\n # one hot encoding of row[j]\n for item in categories[j]:\n x[l] = float1 if item == row[j] else float_1\n l += 1\n return X, d, e, split\n\n\ndef bareiss(Zp, A):\n \"\"\"Bareiss-like integer-preserving Gaussian elimination adapted for Zp.\n\n Using exactly one modular inverse in Zp per row of A.\n \"\"\"\n p = Zp.modulus\n d, d_e = A.shape # d by d+e matrix A\n\n # convert A elementwise from Zp to int\n for i in range(d):\n for j in range(d_e):\n A[i, j] = A[i, j].value\n\n # division-free Gaussian elimination\n for k in range(d):\n for i in range(k+1, d):\n for j in range(k+1, d_e):\n A[i, j] = (A[k, k] * A[i, j] - A[k, j] * A[i, k]) % p\n\n # back substitution\n for i in range(d-1, -1, -1):\n inv = (1 / Zp(A[i, i])).value\n if i < d-2:\n A[i, i] = inv # keep reciprocal for determinant\n for j in range(d, d_e):\n s = A[i, j]\n for k in range(i+1, d):\n s -= A[i, k] * A[k, j]\n s %= p\n A[i, j] = (s * inv) % p\n\n # postponed division for determinant\n inv = 1\n det = A[d-1, d-1]\n for i in range(d-2):\n inv = (inv * A[i, i]) % p\n det = (det * inv) % p\n\n return A[:, d:], det\n\n\ndef random_matrix_determinant(secfld, d):\n d_2 = d * (d-1) // 2\n L = np.diagflat([secfld(1)] * d)\n L[np.tril_indices(d, -1)] = mpc._randoms(secfld, d_2)\n L[np.triu_indices(d, 1)] = [secfld(0)] * d_2\n diag = mpc._randoms(secfld, d)\n U = np.diagflat(diag)\n U[np.tril_indices(d, -1)] = [secfld(0)] * d_2\n U[np.triu_indices(d, 1)] = mpc._randoms(secfld, d_2)\n R = mpc.matrix_prod(L.tolist(), U.tolist())\n detR = mpc.prod(diag) # detR != 0 with overwhelming probability\n return R, detR\n\n\[email protected]\nasync def linear_solve(A, B):\n secfld = type(A[0][0])\n d, e = len(A), len(B[0])\n await mpc.returnType(secfld, d * e + 1)\n\n R, detR = random_matrix_determinant(secfld, d)\n RA = mpc.matrix_prod(R, A)\n RA = await mpc.output([a for row in RA for a in row])\n RA = np.reshape(RA, (d, d))\n RB = mpc.matrix_prod(R, B)\n RB = await mpc.gather(RB) # NB: RB is secret-shared\n\n invA_B, detRA = bareiss(secfld.field, np.concatenate((RA, RB), axis=1))\n detA = detRA / detR\n adjA_B = 
[secfld(a) * detA for row in invA_B for a in row]\n return adjA_B + [detA]\n\n\ndef rmse(Y, P):\n return np.sqrt(sklearn.metrics.mean_squared_error(Y, P, multioutput='raw_values'))\n\n\nasync def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--dataset', type=int, metavar='I',\n help=('dataset 0=synthetic (default), 1=student, 2=wine-red, '\n '3=wine-white, 4=year, 5=gas-methane, 6=gas-CO, 7=higgs'))\n parser.add_argument('-u', '--data-url', action='store_true', default=False,\n help='show URL for downloading dataset I')\n parser.add_argument('-l', '--lambda_', type=float, metavar='L',\n help='regularization L>=0.0 (default=1.0)')\n parser.add_argument('-a', '--accuracy', type=int, metavar='A',\n help='accuracy A (number of fractional bits)')\n parser.add_argument('-n', '--samples', type=int, metavar='N',\n help='number of samples in synthetic data (default=1000)')\n parser.add_argument('-d', '--features', type=int, metavar='D',\n help='number of features in synthetic data (default=10)')\n parser.add_argument('-e', '--targets', type=int, metavar='E',\n help='number of targets in synthetic data (default=1)')\n parser.set_defaults(dataset=0, lambda_=1.0, accuracy=-1,\n samples=1000, features=10, targets=1)\n args = parser.parse_args()\n\n await mpc.start()\n\n if not args.dataset:\n range_alpha = range(4, 8)\n n, d, e, split = args.samples, args.features, args.targets, 0\n name = 'SYNTHETIC'\n logging.info('Generating synthetic data')\n X = await synthesize_data(n, d, e)\n else:\n settings = [('student+performance', 'student-mat', 6),\n ('Wine+Quality', 'winequality-red', 7),\n ('Wine+Quality', 'winequality-white', 8),\n ('Yearpredictionmsd', 'YearPredictionMSD', 6),\n ('Gas+sensor+array+under+dynamic+gas+mixtures', 'ethylene_methane', 8),\n ('Gas+sensor+array+under+dynamic+gas+mixtures', 'ethylene_CO', 9),\n ('HIGGS', 'HIGGS', 5)]\n url, name, alpha = settings[args.dataset - 1]\n url = 'https://archive.ics.uci.edu/ml/datasets/' + url\n if args.data_url:\n print(f'URL: {url}')\n range_alpha = range(alpha, alpha + 1)\n infofile = os.path.join('data', 'regr', 'info-' + name + '.csv')\n logging.info(f'Loading dataset {name}')\n X, d, e, split = read_data(infofile)\n n = len(X)\n logging.info(f'Loaded {n} samples')\n print(f'dataset: {name} with {n} samples, {d} features, and {e} target(s)')\n print(f'regularization lambda: {args.lambda_}')\n\n # split in train set and test set\n if split:\n # fixed split\n X1, X2 = X[:split], X[split:]\n else:\n # random split (all parties use same rnd)\n rnd = await mpc.transfer(random.randrange(2**31), senders=0)\n X1, X2 = sklearn.model_selection.train_test_split(X, train_size=0.7, random_state=rnd)\n del X\n X1, Y1 = X1[:, :d], X1[:, d:]\n X2, Y2 = X2[:, :d], X2[:, d:]\n n1 = len(X1)\n d = d + 1 # add (virtual) feature column X_d = [1, ..., 1] for vertical intercept\n\n # ridge regression \"in the clear\"\n ridge = sklearn.linear_model.Ridge(alpha=args.lambda_,\n fit_intercept=True,\n copy_X=True,\n solver='cholesky')\n ridge.fit(X1, Y1)\n error_train_skit = rmse(Y1, ridge.predict(X1))\n error_test_skit = rmse(Y2, ridge.predict(X2))\n print(f'scikit train error: {error_train_skit}')\n print(f'scikit test error: {error_test_skit}')\n\n if args.accuracy >= 0:\n alpha = args.accuracy\n range_alpha = range(alpha, alpha + 1)\n for alpha in range_alpha: # accuracy parameter\n print('accuracy alpha:', alpha)\n # set parameters accordingly\n beta = 2**alpha\n lambda_ = round(args.lambda_ * beta**2)\n gamma = n1 * beta**2 + lambda_\n 
secint = mpc.SecInt(gamma.bit_length() + 1)\n print(f'secint prime size: |q| = {secint.field.modulus.bit_length()} bits'\n f' (secint bit length: {secint.bit_length})')\n bound = round(d**(d/2)) * gamma**d\n secfld = mpc.SecFld(min_order=2*bound + 1, signed=True)\n print(f'secfld prime size: |p| = {secfld.field.modulus.bit_length()} bits')\n\n f2 = float(beta)\n q = secint.field.modulus\n logging.info('Transpose, scale, and create (degree 0) shares for X and Y')\n # enforce full size shares (mod q numbers) by adding q to each element\n Xt = [[int(a * f2) + q for a in col] for col in X1.transpose()]\n Yt = [[int(a * f2) + q for a in col] for col in Y1.transpose()]\n\n timeStart = time.process_time()\n logging.info('Compute A = X^T X + lambda I and B = X^T Y')\n\n AB = []\n for i in range(d-1):\n xi = Xt[i]\n for j in range(i, d-1):\n xj = Xt[j]\n s = 0\n for k in range(n1):\n s += xi[k] * xj[k]\n AB.append(s) # X_i dot X_j\n AB.append(sum(xi) * beta) # X_i dot X_d\n for j in range(e):\n yj = Yt[j]\n s = 0\n for k in range(n1):\n s += xi[k] * yj[k]\n AB.append(s) # X_i dot Y_j\n AB.append(n1 * beta**2) # X_d dot X_d\n for j in range(e):\n AB.append(beta * sum(Yt[j])) # X_d dot Y_j\n\n del Xt, Yt\n AB = [secint.field(a) for a in AB]\n AB = await mpc._reshare(AB)\n\n timeMiddle = time.process_time()\n logging.info('Compute w = A^-1 B')\n\n # convert secint to secfld\n AB = [secint(a) for a in AB]\n AB = mpc.convert(AB, secfld)\n\n # extract A and B from the AB array\n A = [[None] * d for _ in range(d)]\n B = [[None] * e for _ in range(d)]\n index = 0\n for i in range(d):\n A[i][i] = AB[index] + lambda_\n index += 1\n for j in range(i+1, d):\n A[i][j] = A[j][i] = AB[index]\n index += 1\n for j in range(e):\n B[i][j] = AB[index]\n index += 1\n\n # solve A w = B\n w_det = linear_solve(A, B)\n w_det = await mpc.output(w_det)\n w_det = list(map(int, w_det))\n w = np.reshape(w_det[:-1], (d, e))\n w /= w_det[-1]\n\n timeEnd = time.process_time()\n logging.info(f'Total time {timeEnd - timeStart} = '\n f'A and B in {timeMiddle - timeStart} + '\n f'A^-1 B in {timeEnd - timeMiddle} seconds')\n\n error_train_mpyc = rmse(Y1, np.dot(X1, w[:-1]) + w[-1])\n error_test_mpyc = rmse(Y2, np.dot(X2, w[:-1]) + w[-1])\n print(f'MPyC train error: {error_train_mpyc}')\n print(f'MPyC test error: {error_test_mpyc}')\n print(f'relative train error: {(error_train_mpyc - error_train_skit) / error_train_skit}')\n print(f'relative test error: {(error_test_mpyc - error_test_skit) / error_test_skit}')\n\n await mpc.shutdown()\n\nif __name__ == '__main__':\n mpc.run(main())\n"
] | [
[
"numpy.dot",
"numpy.min",
"numpy.diagflat",
"numpy.reshape",
"numpy.tril_indices",
"numpy.triu_indices",
"numpy.concatenate",
"numpy.max",
"numpy.transpose",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
michaelnt/pycircuit | [
"ef3110c1c3789c1e5f30c35e3f5dd15ed4bd349e"
] | [
"pycircuit/sim/simulation.py"
] | [
"# -*- coding: latin-1 -*-\n# Copyright (c) 2008 Pycircuit Development Team\n# See LICENSE for details.\n\nimport numpy as np\nfrom pycircuit.utilities import Parameter, ParameterDict, isiterable\nimport types\nfrom copy import copy\n\nclass Simulation(object):\n \"\"\"Base class for simulations\n \n A Simulation object is responsible for setting up and running \n analyses and finally providing the results gathered from them.\n\n \"\"\"\n supported_analyses = []\n sim_options = []\n def __init__(self, circuit):\n self.circuit = circuit\n\n ## Init environment parameters\n self.epar = ParameterDict(Parameter(\"T\", \"Temperature\", \"K\"))\n \n ## Init simulation option parameters\n self.options = ParameterDict(*self.sim_options)\n \n ## Init design variables\n self.var = ParameterDict()\n\n ## Add run_XXXX methods for each analysis\n def method_factory(analysis_class):\n def func(self, *args, **kvargs):\n ana = analysis_class(self, *args, **kvargs)\n return self.run_analysis(ana)\n\n name = 'run_%s'%analysis_class.__name__.lower()\n \n return name, types.MethodType(func, self, self.__class__)\n\n for analysis_class in self.supported_analyses:\n name, method = method_factory(analysis_class)\n setattr(self, name, method)\n \n self.analyses = []\n\n def clear(self):\n \"\"\"Clear analyses\"\"\"\n self.analyses = []\n\n def add_analysis(self, analysis):\n \"\"\"Add an analysis to simulation\"\"\"\n self.analyses.append(analysis)\n\n def run_analysis(self, analysis):\n \"\"\"Run an analysis by analysis object\"\"\"\n self.clear()\n self.add_analysis(analysis)\n return self.run()\n\n def set_sweep(*sweeps):\n pass\n \n def run(self):\n \"\"\"Run all analyses\"\"\"\n\nclass Circuit(object):\n \"\"\"Base class for circuits\"\"\"\n pass\n\nclass IParam(Parameter):\n \"\"\"Instance parameter\"\"\"\n\nclass Variable(Parameter):\n \"\"\"Design variable\"\"\"\n\nclass Sweep(object):\n \"\"\"Parametric sweep of parameters\"\"\"\n def __init__(self, iter, pardict=None, parname=None):\n self.pardict = pardict\n self.parname = parname\n self.iter = iter.__iter__()\n\n def __iter__(self):\n return self\n\n def next(self):\n return self.iter.next()\n \nclass LinSweep(Sweep):\n \"\"\"Linear sweep\"\"\"\n def __init__(self, start, stop, n, pardict=None, parname=None):\n self.start = start\n self.stop = stop\n self.n = n\n self.iter = np.linspace(start, stop, n).__iter__()\n\n def __eq__(self, other):\n return (self.start == other.start) and (self.stop == other.stop) and (self.n == other.n)\n\n def __repr__(self):\n return self.__class__.__name__ + '(%f, %f, %d)'%(self.start, self.stop, self.n)\n\n @property\n def step(self):\n \"\"\"Difference between adjacent steps\"\"\"\n if self.n <= 1:\n return 1\n \n return float(self.stop - self.start) / (self.n - 1.)\n\nclass LogSweep(Sweep):\n \"\"\"Logarithmic sweep\"\"\"\n def __init__(self, start, stop, n=None, decade=None, \n pardict=None, parname=None):\n self.start = start\n self.stop = stop\n\n self.n = n\n self.decade = decade\n\n if n == None and decade == None or \\\n n != None and decade != None:\n raise ValueError(\"Either n or decade must be != None\")\n \n if decade != None:\n n = (np.log10(stop) - np.log10(start)) * decade + 1\n\n self.iter = np.logspace(np.log10(start), np.log10(stop), n).__iter__()\n\n @property\n def factor(self):\n \"\"\"Ratio between adjacent steps\"\"\"\n if self.decade:\n n = (np.log10(self.stop) - np.log10(self.start)) * self.decade + 1\n else:\n n = self.n\n return (self.stop / self.start) ** (1. 
/ (n-1))\n\ndef identify_sweep(x, maxerr = 1e-6):\n \"\"\"Identifies sweep from sweep values and return sweep instance\"\"\"\n if isinstance(x, (LinSweep, LogSweep)):\n return x \n\n if not isiterable(x):\n return LinSweep(x, x, 1)\n \n values = np.array(list(x))\n\n if len(values) == 1:\n return LinSweep(values[0], values[0], 1)\n\n linstep = np.diff(values)\n\n if np.linalg.norm(linstep - linstep[0]) <= maxerr:\n return LinSweep(values[0], values[-1], len(values))\n else:\n return Sweep(values)\n"
] | [
[
"numpy.log10",
"numpy.diff",
"numpy.linalg.norm",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ObukhovVladislav/python-adv | [
"ffab71d28e54d2c9b9c53c5fec453873242291f3"
] | [
"210222/step_3.py"
] | [
"from random import randint\nfrom time import perf_counter\n\nimport numpy as np\nfrom random import randint\nfrom time import perf_counter\n\nimport numpy as np\n\nnums = [randint(1, 1000) for _ in range(10 ** 5)]\n\nnums_by_7 = [num for num in nums]\nstart = perf_counter()\nprint(sum(nums_by_7), perf_counter() - start)\n\nnums_by_7_np = np.array([num for num in nums]).astype('int64')\nstart = perf_counter()\nprint(sum(nums_by_7_np), perf_counter() - start)\n# _sum = nums_by_7_np[0] # int32\n# _sum += nums_by_7_np[1] # int32\n# _sum += nums_by_7_np[2] # int32\nstart = perf_counter()\nprint(nums_by_7_np.sum(), perf_counter() - start)\n\nprint(nums_by_7_np.mean(), sum(nums_by_7) / len(nums_by_7))\nprint(nums_by_7_np.std())\nprint(nums_by_7_np.min())\nprint(nums_by_7_np.max())\nprint(nums_by_7_np.size, nums_by_7_np.shape)"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sahilm89/stack_overflow_random | [
"ad2e5361305eb53367452ebe6dd7dd1125700817"
] | [
"trial2.py"
] | [
"import pandas as pd\nimport Tkinter\nimport tkFileDialog\nimport matplotlib\n\nmatplotlib.use(\"TkAgg\")\n\nclass App():\n def __init__(self):\n self.root = Tkinter.Tk()\n self.root.geometry(\"500x100\")\n self.root.wm_title(\"Main\")\n\n labelframe = Tkinter.LabelFrame(self.root, text=\"Open file and do something to it\")\n labelframe.pack(fill=\"both\", expand=\"yes\")\n right = Tkinter.Button(labelframe, text = 'Give file location', command=self.open_file)\n right.pack()\n\n button = Tkinter.Button(self.root, text = 'Close', command=self.quit)\n button.pack()\n self.root.mainloop()\n\n def browse_file(self):\n file_location = tkFileDialog.askopenfilename(filetypes = ((\"Template files\", \"*.csv\"), (\"All files\", \"*\")))\n return(file_location)\n\n\n def open_file(self):\n file_location = self.browse_file()\n df1 = pd.read_csv(file_location, header=17, index_col=False, usecols=range(1,13), encoding='latin-1') \n print (df1)\n\n\n def quit(self):\n self.root.destroy() \n\napp = App()\n"
] | [
[
"matplotlib.use"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Joeper214/blueoil | [
"5aeb6634770011753b8d5ae283b91b7a0050101e",
"5aeb6634770011753b8d5ae283b91b7a0050101e"
] | [
"tests/converter/test_optimizer.py",
"blueoil/cmd/output_event.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright 2018 The Blueoil Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"Test file for Optimizer.\"\"\"\nimport unittest\nfrom blueoil.converter.core.data_types import Float32, PackedUint32, Int32, QUANTIZED_PACKED\nfrom blueoil.converter.core.optimizer import pass_remove_identities, \\\n pass_transpose, pass_constant_folding, \\\n pass_propagate_quantization_details_into_conv, pass_compute_thresholds, pass_pack_weights, \\\n pass_quantize_convolutions, pass_propagate_datatypes, pass_propagate_output_type_backward\nfrom blueoil.converter.core.graph import Graph\nfrom blueoil.converter.core.operators import Add, AveragePool, \\\n BatchNormalization, Constant, Conv, Identity, Input, \\\n MaxPool, Operator, Output, Transpose, BinaryMeanScalingQuantizer, \\\n LinearMidTreadHalfQuantizer, Reshape, Softmax, SpaceToDepth\n\nimport numpy as np\n\n\nclass TestPassTranspose(unittest.TestCase):\n \"\"\"Test class for transposing pass.\"\"\"\n def test_pass_transpose(self) -> None:\n \"\"\"Test code for transposing optimizer pass.\"\"\"\n data = np.random.rand(3, 2, 2, 1)\n graph1 = self.create_sample_graph(data)\n graph2 = self.create_expected_graph(data)\n\n pass_transpose(graph1)\n\n self.assertEqual(graph1, graph2, 'transpose to NHWC failed.')\n\n print(\"Test pass #1 transpose passed!\")\n\n @staticmethod\n def create_sample_graph(data: np.ndarray) -> Graph:\n graph = Graph()\n\n # input\n x = Input('placeholder', [3, 5, 5, 1], Float32(), dimension_format='CWHN')\n\n # constant and internal nodes\n w = Constant('weight', Float32(), data, dimension_format='CWHN')\n i1 = Identity('identity1', [3, 2, 2, 1], Float32(), {'input': w}, dimension_format='CWHN')\n q = BinaryMeanScalingQuantizer('qtz1', [3, 2, 2, 1], Float32(), {'input': i1}, dimension_format='CWHN')\n\n # Conv\n conv = Conv('conv', [3, 4, 4, 1], Float32(), {'X': x, 'W': q}, kernel_shape=[2, 2], dimension_format='CWHN')\n\n # One output\n rs = Reshape('reshape', [1, 48], Float32(), {'data': conv})\n y = Output('output', [1, 48], Float32(), {'input': rs},)\n\n # add ops to the graph\n graph.add_op_and_inputs(y)\n\n return graph\n\n @staticmethod\n def create_expected_graph(data: np.ndarray) -> Graph:\n graph = Graph()\n\n data = data.transpose([3, 2, 1, 0])\n\n # input\n x = Input('placeholder', [1, 5, 5, 3], Float32(), dimension_format='NHWC')\n\n # constant and internal nodes\n w = Constant('weight', Float32(), data, dimension_format='NHWC')\n i1 = Identity('identity1', [1, 2, 2, 3], Float32(), {'input': w}, dimension_format='NHWC')\n q = BinaryMeanScalingQuantizer('qtz1', [1, 2, 2, 3], Float32(), {'input': i1}, dimension_format='NHWC')\n\n # Conv\n conv = Conv('conv', [1, 4, 4, 3], Float32(), {'X': x, 'W': q}, kernel_shape=[2, 2], dimension_format='NHWC')\n\n # One output\n rs = Reshape('reshape', [1, 48], Float32(), {'data': conv})\n y = Output('output', [1, 48], Float32(), {'input': 
rs},)\n\n # add ops to the graph\n graph.add_op_and_inputs(y)\n\n return graph\n\n\nclass TestPassRemoveIdentities(unittest.TestCase):\n \"\"\"Test class for removing identity pass.\"\"\"\n def test_pass_remove_identities(self) -> None:\n \"\"\"Test code for removing identities optimizer pass.\"\"\"\n data = np.random.rand(1, 2, 2, 3)\n graph1 = self.create_sample_graph(data)\n graph2 = self.create_expected_graph(data)\n\n pass_remove_identities(graph1)\n\n self.assertEqual(graph1, graph2, 'remove identities failed.')\n\n print(\"Test pass #2 remove identities passed!\")\n\n @staticmethod\n def create_sample_graph(data: np.ndarray) -> Graph:\n graph = Graph()\n\n # input\n x = Input('placeholder', [1, 5, 5, 3], Float32())\n\n # constant and internal nodes\n w = Constant('weight', Float32(), data)\n i1 = Identity('identity1', [1, 2, 2, 3], Float32(), {'input': w})\n q = BinaryMeanScalingQuantizer('qtz1', [1, 2, 2, 3], Float32(), {'input': i1})\n\n # Conv\n conv = Conv('conv', [1, 4, 4, 3], Float32(), {'X': x, 'W': q}, kernel_shape=[2, 2])\n\n # One output\n i2 = Identity('identity2', [1, 4, 4, 3], Float32(), {'input': conv})\n rs = Reshape('reshape', [1, 48], Float32(), {'data': i2})\n y = Output('output', [1, 48], Float32(), {'input': rs},)\n\n # add ops to the graph\n graph.add_op_and_inputs(y)\n\n return graph\n\n @staticmethod\n def create_expected_graph(data: np.ndarray) -> Graph:\n graph = Graph()\n\n # input\n x = Input('placeholder', [1, 5, 5, 3], Float32())\n\n # constant and internal nodes\n w = Constant('weight', Float32(), data)\n q = BinaryMeanScalingQuantizer('qtz1', [1, 2, 2, 3], Float32(), {'input': w})\n\n # Conv\n conv = Conv('conv', [1, 4, 4, 3], Float32(), {'X': x, 'W': q}, kernel_shape=[2, 2])\n\n # One output\n rs = Reshape('reshape', [1, 48], Float32(), {'data': conv})\n y = Output('output', [1, 48], Float32(), {'input': rs},)\n\n # add ops to the graph\n graph.add_op_and_inputs(y)\n\n return graph\n\n\nclass TestPassPropagateQuantizationDetailsIntoConv(unittest.TestCase):\n \"\"\"Test class for propagating quantization details into conv.\"\"\"\n def test_pass_propagate_quantization_details_into_conv(self) -> None:\n \"\"\"Test pass.\"\"\"\n data1 = np.random.rand(1, 2, 2, 3)\n data2 = np.random.rand(1, 2, 2, 3)\n graph1 = self.create_sample_graph(data1, data2)\n graph2 = self.create_expected_graph(data1, data2)\n\n pass_propagate_quantization_details_into_conv(graph1)\n aq_g1 = graph1.get_op('conv2').a_quantizer\n aq_g2 = graph2.get_op('conv2').a_quantizer\n kq_g1 = graph1.get_op('conv2').quantizer\n kq_g2 = graph2.get_op('conv2').quantizer\n\n self.assertEqual(len(aq_g1), len(aq_g2), '[Failed] Found number of activation quantizer not matched')\n if aq_g1 and aq_g2:\n self.assertEqual(aq_g1[0].op_type, aq_g2[0].op_type,\n '[Failed] Found type of activation quantizer not matched')\n self.assertEqual(kq_g1.op_type, kq_g2.op_type, '[Failed] Found type of kernel quantizer not matched')\n self.assertEqual(graph1, graph2, '[Failed] Expected graph not matched')\n\n print(\"Test pass #3 propagate_quantization_details_into_conv passed!\")\n\n @staticmethod\n def create_sample_graph(data1: np.ndarray, data2: np.ndarray) -> Graph:\n graph = Graph()\n\n # input\n x = Input('placeholder', [1, 5, 5, 3], Float32())\n\n # Conv1\n w1 = Constant('weight1', Float32(), data1)\n conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])\n\n # activation quantizer\n s1 = Constant('aq_const1', Float32(), np.array(1))\n s2 = Constant('aq_const2', Float32(), 
np.array(2))\n aq = LinearMidTreadHalfQuantizer('aqtz1', [1, 4, 4, 3], Float32(), {'X': conv1, 'Y': s1, 'Z': s2})\n\n # Conv2\n w2 = Constant('weight2', Float32(), data2)\n kq = BinaryMeanScalingQuantizer('kqtz1', [1, 2, 2, 3], Float32(), {'input': w2})\n conv2 = Conv('conv2', [1, 3, 3, 3], Float32(), {'X': aq, 'W': kq}, kernel_shape=[2, 2])\n\n # One output\n y = Output('output', [1, 3, 3, 3], Float32(), {'input': conv2})\n\n # add ops to the graph\n graph.add_op_and_inputs(y)\n\n return graph\n\n @staticmethod\n def create_expected_graph(data1: np.ndarray, data2: np.ndarray) -> Graph:\n graph = Graph()\n\n # input\n x = Input('placeholder', [1, 5, 5, 3], Float32())\n\n # Conv1\n w1 = Constant('weight1', Float32(), data1)\n conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])\n\n # activation quantizer\n s1 = Constant('aq_const1', Float32(), np.array(1))\n s2 = Constant('aq_const2', Float32(), np.array(2))\n aq = LinearMidTreadHalfQuantizer('aqtz1', [1, 4, 4, 3], Float32(), {'X': conv1, 'Y': s1, 'Z': s2})\n\n # Conv2\n w2 = Constant('weight2', Float32(), data2)\n kq = BinaryMeanScalingQuantizer('kqtz1', [1, 2, 2, 3], Float32(), {'input': w2})\n conv2 = Conv('conv2', [1, 3, 3, 3], Float32(), {'X': aq, 'W': kq}, kernel_shape=[2, 2])\n conv2.a_quantizer = [aq]\n conv2.quantizer = kq\n\n # One output\n y = Output('output', [1, 3, 3, 3], Float32(), {'input': conv2})\n\n # add ops to the graph\n graph.add_op_and_inputs(y)\n\n return graph\n\n\nclass TestPassPackWeights(unittest.TestCase):\n \"\"\"Test class for packing weight.\"\"\"\n def test_pass_pack_weights(self) -> None:\n \"\"\"Test pass.\"\"\"\n data1 = np.float32(np.random.rand(1, 2, 2, 3))\n data2 = np.float32(np.random.rand(1, 2, 2, 3))\n\n graph1 = self.create_sample_graph(data1, data2)\n pass_pack_weights(graph1)\n self.assertEqual(graph1.get_op('conv2').input_ops['W'].op_type, 'Constant',\n '[Failed] Found input kernel weights not a constant')\n\n graph_2_1 = self.create_sample_graph_2(data1)\n graph_2_2 = self.create_sample_graph_2(data1)\n pass_pack_weights(graph_2_2)\n self.assertEqual(graph_2_1, graph_2_2,\n '[Failed] Found optimized graph not the same')\n\n print(\"Test pass #4 pack_weights passed!\")\n\n @staticmethod\n def create_sample_graph(data1: np.ndarray, data2: np.ndarray) -> Graph:\n graph = Graph()\n\n # input\n x = Input('placeholder', [1, 5, 5, 3], Float32())\n\n # Conv1\n w1 = Constant('weight1', Float32(), data1)\n conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])\n\n # activation quantizer\n s1 = Constant('aq_const1', Float32(), np.array(1))\n s2 = Constant('aq_const2', Float32(), np.array(2))\n aq = LinearMidTreadHalfQuantizer('aqtz1', [1, 4, 4, 3], Float32(), {'X': conv1, 'Y': s1, 'Z': s2})\n\n # Conv2\n w2 = Constant('weight2', Float32(), data2)\n kq = BinaryMeanScalingQuantizer('kqtz1', [1, 2, 2, 3], Float32(), {'input': w2})\n conv2 = Conv('conv2', [1, 3, 3, 3], Float32(), {'X': aq, 'W': kq}, kernel_shape=[2, 2])\n conv2.a_quantizer = [aq]\n conv2.quantizer = kq\n\n # One output\n y = Output('output', [1, 3, 3, 3], Float32(), {'input': conv2})\n\n # add ops to the graph\n graph.add_op_and_inputs(y)\n\n return graph\n\n @staticmethod\n def create_sample_graph_2(data1: np.ndarray) -> Graph:\n graph = Graph()\n\n # input\n x = Input('placeholder', [1, 5, 5, 3], Float32())\n\n # Conv1\n w1 = Constant('weight1', Float32(), data1)\n conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])\n\n s1 = 
Constant('const1', Float32(), np.zeros([1, 4, 4, 3]))\n add1 = Add('add', [1, 4, 4, 3], Float32(), {'A': conv1, 'B': s1})\n\n y = Output('output', [1, 4, 4, 3], Float32(), {'input': add1})\n\n # add ops to the graph\n graph.add_op_and_inputs(y)\n\n return graph\n\n\nclass TestPassQuantizeConvolutions(unittest.TestCase):\n \"\"\"Test class for packing weight.\"\"\"\n def test_pass_quantize_convolutions(self) -> None:\n \"\"\"Test pass.\"\"\"\n data1 = np.float32(np.random.rand(1, 2, 2, 3))\n data2 = np.float32(np.random.rand(1, 2, 2, 3))\n graph1 = self.create_sample_graph(data1, data2)\n\n pass_quantize_convolutions(graph1)\n\n self.assertEqual(graph1.get_op('aqtz1').dtype, QUANTIZED_PACKED(),\n '[Failed] Found output dtype of activation quantizer not proper')\n self.assertEqual(graph1.get_op('kqtz1').dtype, PackedUint32(),\n '[Failed] Found output dtype of kernel quantizer not proper')\n self.assertEqual(graph1.get_op('conv2').dtype, Float32(),\n '[Failed] Found output dtype of conv not proper')\n\n print(\"Test pass #5 quantize_convolutions passed!\")\n\n @staticmethod\n def create_sample_graph(data1: np.ndarray, data2: np.ndarray) -> Graph:\n graph = Graph()\n\n # input\n x = Input('placeholder', [1, 5, 5, 3], Float32())\n\n # Conv1\n w1 = Constant('weight1', Float32(), data1)\n conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])\n\n # activation quantizer\n s1 = Constant('aq_const1', Float32(), np.array(1))\n s2 = Constant('aq_const2', Float32(), np.array(2))\n aq = LinearMidTreadHalfQuantizer('aqtz1', [1, 4, 4, 3], Float32(), {'X': conv1, 'Y': s1, 'Z': s2})\n\n # Conv2\n w2 = Constant('weight2', Float32(), data2)\n kq = BinaryMeanScalingQuantizer('kqtz1', [1, 2, 2, 3], Float32(), {'input': w2})\n conv2 = Conv('conv2', [1, 3, 3, 3], Float32(), {'X': aq, 'W': kq}, kernel_shape=[2, 2])\n conv2.a_quantizer = [aq]\n conv2.quantizer = kq\n\n # One output\n y = Output('output', [1, 3, 3, 3], Float32(), {'input': conv2})\n\n # add ops to the graph\n graph.add_op_and_inputs(y)\n\n return graph\n\n\nclass TestPassPropagateDatatypes(unittest.TestCase):\n \"\"\"Test class for packing weight.\"\"\"\n def test_pass_propagate_datatypes(self) -> None:\n \"\"\"Test pass.\"\"\"\n data1 = np.float32(np.random.rand(1, 2, 2, 3))\n graph1 = self.create_sample_graph(data1)\n # graph2 = self.create_expected_graph(data1, data2)\n\n pass_propagate_datatypes(graph1)\n\n self.assertEqual(graph1.get_op('s2d').dtype, QUANTIZED_PACKED(),\n '[Failed] Found dtype of SpaceToDepth not propagate correctly')\n\n print(\"Test pass #6 propagate data types passed!\")\n\n @staticmethod\n def create_sample_graph(data1: np.ndarray) -> Graph:\n graph = Graph()\n\n # input\n x = Input('placeholder', [1, 5, 5, 3], Float32())\n\n # Conv1\n w1 = Constant('weight1', Float32(), data1)\n conv1 = Conv('conv1', [1, 4, 4, 3], QUANTIZED_PACKED(), {'X': x, 'W': w1}, kernel_shape=[2, 2])\n\n pool1 = SpaceToDepth('s2d', [1, 2, 2, 12], Float32(), {'input': conv1})\n\n # One output\n y = Output('output', [1, 2, 2, 12], Float32(), {'input': pool1})\n\n # add ops to the graph\n graph.add_op_and_inputs(y)\n\n return graph\n\n\nclass TestPassPropagateOutputTypeBackward(unittest.TestCase):\n \"\"\"Test class for packing weight.\"\"\"\n def test_pass_propagate_output_type_backward(self) -> None:\n \"\"\"Test pass.\"\"\"\n data1 = np.float32(np.random.rand(1, 2, 2, 3))\n graph1 = self.create_sample_graph(data1)\n\n pass_propagate_output_type_backward(graph1)\n\n self.assertEqual(graph1.get_op('conv1').dtype, 
Float32(),\n '[Failed] Found dtype of SpaceToDepth not propagate correctly')\n\n print(\"Test pass #7 propagate output type backward passed!\")\n\n @staticmethod\n def create_sample_graph(data1: np.ndarray) -> Graph:\n graph = Graph()\n\n # input\n x = Input('placeholder', [1, 5, 5, 3], Float32())\n\n # Conv1\n w1 = Constant('weight1', Float32(), data1)\n conv1 = Conv('conv1', [1, 4, 4, 3], QUANTIZED_PACKED(), {'X': x, 'W': w1}, kernel_shape=[2, 2])\n conv1.is_quantized = True\n\n pool1 = SpaceToDepth('s2d', [1, 2, 2, 12], Float32(), {'input': conv1})\n\n # One output\n y = Output('output', [1, 2, 2, 12], Float32(), {'input': pool1})\n\n # add ops to the graph\n graph.add_op_and_inputs(y)\n\n return graph\n\n\nclass TestPassComputeThresholds(unittest.TestCase):\n \"\"\"Test class for packing weight.\"\"\"\n def test_pass_compute_thresholds(self) -> None:\n \"\"\"Test pass.\"\"\"\n data1 = np.float32(np.random.rand(1, 2, 2, 3))\n data2 = np.float32(np.random.rand(1, 2, 2, 3))\n graph1 = self.create_sample_graph(data1, data2)\n\n pass_compute_thresholds(graph1)\n\n self.assertEqual(graph1.get_op('conv2').has_thresholds, True,\n '[Failed] Found threshold of Conv not calculated')\n\n print(\"Test pass #8 compute_thresholds passed!\")\n\n def test_pass_compute_thresholds_for_huge_threshold_values(self) -> None:\n \"\"\"Test pass.\"\"\"\n data1 = np.float32(np.random.rand(1, 2, 2, 3))\n data2 = np.float32(np.random.uniform(10 ** (-30), 10 ** (-40), size=(1, 2, 2, 3)))\n graph1 = self.create_sample_graph(data1, data2)\n\n pass_compute_thresholds(graph1)\n\n self.assertEqual(graph1.get_op('conv2').has_thresholds, True,\n '[Failed] Found threshold of Conv not calculated')\n\n print(\"Test pass #8-1 compute_thresholds of enormous values passed!\")\n\n @staticmethod\n def create_sample_graph(data1: np.ndarray, data2: np.ndarray) -> Graph:\n graph = Graph()\n\n # input\n x = Input('placeholder', [1, 5, 5, 3], Float32())\n\n # Conv1\n w1 = Constant('weight1', Float32(), data1)\n conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])\n\n # activation quantizer\n s1 = Constant('aq_const1', Int32(), np.array([2], dtype=np.int32))\n s2 = Constant('aq_const2', Float32(), np.array([2.0], dtype=np.float32))\n aq1 = LinearMidTreadHalfQuantizer('aqtz1', [1, 4, 4, 3], Float32(), {'X': conv1, 'Y': s1, 'Z': s2})\n\n # Conv2\n w2 = Constant('weight2', Float32(), data2)\n kq = BinaryMeanScalingQuantizer('kqtz1', [1, 2, 2, 3], Float32(), {'input': w2})\n conv2 = Conv('conv2', [1, 3, 3, 3], Float32(), {'X': aq1, 'W': kq}, kernel_shape=[2, 2])\n conv2.a_quantizer = [aq1]\n conv2.quantizer = kq\n conv2.is_quantized = True\n\n sc = Constant('bn_scale', Float32(), np.random.rand(3))\n be = Constant('bn_b', Float32(), np.random.rand(3))\n mu = Constant('bn_mu', Float32(), np.random.rand(3))\n va = Constant('bn_var', Float32(), np.random.rand(3))\n bn = BatchNormalization('bn', [1, 3, 3, 3], Float32(), {'X': conv2,\n 'scale': sc,\n 'B': be,\n 'mean': mu,\n 'var': va})\n\n # activation quantizer\n s3 = Constant('aq_const3', Int32(), np.array([2], dtype=np.int32))\n s4 = Constant('aq_const4', Float32(), np.array([2.0], dtype=np.float32))\n aq2 = LinearMidTreadHalfQuantizer('aqtz2', [1, 3, 3, 3], Float32(), {'X': bn, 'Y': s3, 'Z': s4})\n\n # One output\n y = Output('output', [1, 3, 3, 3], Float32(), {'input': aq2})\n\n # add ops to the graph\n graph.add_op_and_inputs(y)\n\n return graph\n\n\nclass TestPassConstantFolding(unittest.TestCase):\n \"\"\"Test class for packing weight.\"\"\"\n def 
test_pass_constant_folding(self) -> None:\n \"\"\"Test pass.\"\"\"\n graph1 = self.create_sample_graph()\n\n pass_constant_folding(graph1)\n\n self.assertEqual(set(graph1.get_op('potatoes_new').data), set(np.array([2, 5])),\n '[Failed] Found folded constant not correct')\n\n print(\"Test pass #9 constant folding passed!\")\n\n @staticmethod\n def create_sample_graph() -> Graph:\n graph = Graph()\n\n x = Input('placeholder', [2], Float32())\n\n s1 = Constant('potato_1', Float32(), np.array([1, 2]))\n s2 = Constant('potato_2', Float32(), np.array([1, 3]))\n add1 = Add('potatoes', [2], Float32(), {'A': s1, 'B': s2})\n add2 = Add('more_potatoes', [2], Float32(), {'A': x, 'B': add1})\n\n # One output\n y = Output('output', [2], Float32(), {'input': add2})\n\n # add ops to the graph\n graph.add_op_and_inputs(y)\n\n return graph\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# -*- coding: utf-8 -*-\n# Copyright 2018 The Blueoil Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\nimport itertools\nimport os\n\nimport click\nimport pandas as pd\nimport pytablewriter\nfrom tensorboard.backend.event_processing.event_accumulator import EventAccumulator\nfrom tensorboard.backend.event_processing.io_wrapper import GetLogdirSubdirectories\n\nfrom blueoil import environment\n\n\ndef _get_metrics_keys(event_accumulator):\n return event_accumulator.Tags()[\"scalars\"]\n\n\ndef _value_step_list(event_accumulator, metrics_key):\n try:\n events = event_accumulator.Scalars(metrics_key)\n return [(event.value, event.step) for event in events]\n except KeyError as e:\n print(\"Key {} was not found in {}\\n{}\".format(metrics_key, event_accumulator.path, e))\n return []\n\n\ndef _column_name(event_accumulator, metrics_key):\n return \"{}:{}\".format(os.path.basename(event_accumulator.path), metrics_key)\n\n\ndef output(tensorboard_dir, output_dir, metrics_keys, steps, output_file_base=\"metrics\"):\n \"\"\"Output csv and markdown file which accumulated tensorflow event by step and metrics_keys.\"\"\"\n subdirs = GetLogdirSubdirectories(tensorboard_dir)\n\n event_accumulators = []\n for subdir in subdirs:\n event_accumulator = EventAccumulator(subdir)\n # init event accumulator\n event_accumulator.Reload()\n\n event_accumulators.append(event_accumulator)\n\n if not metrics_keys:\n metrics_keys = {\n metrics_key\n for event_accumulator in event_accumulators\n for metrics_key in _get_metrics_keys(event_accumulator)\n }\n\n columns = [_column_name(event_accumulator, metrics_key)\n for event_accumulator, metrics_key in itertools.product(event_accumulators, metrics_keys)]\n columns.sort()\n df = pd.DataFrame([], columns=columns)\n\n for event_accumulator in event_accumulators:\n for metrics_key in metrics_keys:\n value_step_list = _value_step_list(event_accumulator, metrics_key)\n for value, step in value_step_list:\n column_name = _column_name(event_accumulator, metrics_key)\n df.loc[step, column_name] = value\n\n if steps:\n df = df[steps, :]\n\n df = df.sort_index(ascending=False)\n\n # index to column. 
and re-order column.\n df[\"step\"] = df.index\n df = df[[\"step\"] + columns]\n\n output_csv = os.path.join(output_dir, \"{}.csv\".format(output_file_base))\n df.to_csv(output_csv, index=False)\n\n output_md = os.path.join(output_dir, \"{}.md\".format(output_file_base))\n writer = pytablewriter.MarkdownTableWriter()\n writer.char_left_side_row = \"|\" # fix for github\n writer.from_dataframe(df)\n\n with open(output_md, \"w\") as file_stream:\n writer.stream = file_stream\n writer.write_table()\n\n message = \"\"\"\noutput success\n\noutput csv: {}\noutput md: {}\n\"\"\".format(output_csv, output_md)\n\n print(message)\n\n\[email protected](context_settings=dict(help_option_names=[\"-h\", \"--help\"]))\[email protected](\"-i\", \"--experiment_id\", help=\"id of target experiment\", required=True)\[email protected](\n \"-k\",\n \"--metrics_keys\",\n help=\"\"\"Target metrics name of tensorboard scalar summaries for output.\n When it is empty, collect all scalar keys from tensorboard event.\n i.e. -k metrics/accuracy -k loss\"\"\",\n default=[],\n multiple=True,\n)\[email protected](\n \"-s\",\n \"--steps\",\n help=\"Target step for output. When it is empty, target is all steps.\",\n default=[],\n multiple=True,\n type=int,\n)\[email protected](\n \"-o\",\n \"--output_file_base\",\n help=\"output file base name. default: `metrics`.\",\n default=os.path.join(\"metrics\"),\n)\ndef main(output_file_base, metrics_keys, steps, experiment_id):\n environment.init(experiment_id)\n\n output(\n environment.TENSORBOARD_DIR,\n environment.EXPERIMENT_DIR,\n metrics_keys,\n steps,\n output_file_base=\"metrics\",\n )\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.random.rand"
],
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
phschaad/dace | [
"1c2abdf775c45c3b1c6ef7886007c917cebcd176"
] | [
"tests/fpga/pipeline_scope.py"
] | [
"# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nimport copy\nimport dace\n\n\ndef make_sdfg(dtype, name=\"pipeline_test\"):\n\n n = dace.symbol(\"N\")\n k = dace.symbol(\"K\")\n m = dace.symbol(\"M\")\n\n sdfg = dace.SDFG(name)\n\n pre_state = sdfg.add_state(name + \"_pre\")\n state = sdfg.add_state(name)\n post_state = sdfg.add_state(name + \"_post\")\n sdfg.add_edge(pre_state, state, dace.InterstateEdge())\n sdfg.add_edge(state, post_state, dace.InterstateEdge())\n\n _, desc_input_host = sdfg.add_array(\"a\", (n, k, m), dtype)\n _, desc_output_host = sdfg.add_array(\"b\", (n, k, m), dtype)\n desc_input_device = copy.copy(desc_input_host)\n desc_input_device.storage = dace.StorageType.FPGA_Global\n desc_input_device.location[\"memorytype\"] = \"ddr\"\n desc_input_device.location[\"bank\"] = \"0\"\n desc_input_device.transient = True\n desc_output_device = copy.copy(desc_output_host)\n desc_output_device.storage = dace.StorageType.FPGA_Global\n desc_output_device.location[\"memorytype\"] = \"ddr\"\n desc_output_device.location[\"bank\"] = \"1\"\n desc_output_device.transient = True\n sdfg.add_datadesc(\"a_device\", desc_input_device)\n sdfg.add_datadesc(\"b_device\", desc_output_device)\n\n # Host to device\n pre_read = pre_state.add_read(\"a\")\n pre_write = pre_state.add_write(\"a_device\")\n pre_state.add_memlet_path(pre_read,\n pre_write,\n memlet=dace.Memlet(\"a_device[0:N, 0:K, 0:M]\"))\n\n # Device to host\n post_read = post_state.add_read(\"b_device\")\n post_write = post_state.add_write(\"b\")\n post_state.add_memlet_path(post_read,\n post_write,\n memlet=dace.Memlet(\"b[0:N, 0:K, 0:M]\"))\n\n # Compute state\n read_memory = state.add_read(\"a_device\")\n write_memory = state.add_write(\"b_device\")\n\n # Memory streams\n sdfg.add_stream(\"a_stream\",\n dtype,\n storage=dace.StorageType.FPGA_Local,\n transient=True)\n sdfg.add_stream(\"b_stream\",\n dtype,\n storage=dace.StorageType.FPGA_Local,\n transient=True)\n produce_input_stream = state.add_write(\"a_stream\")\n consume_input_stream = state.add_read(\"a_stream\")\n produce_output_stream = state.add_write(\"b_stream\")\n consume_output_stream = state.add_write(\"b_stream\")\n\n entry, exit = state.add_pipeline(name, {\n \"n\": \"0:N\",\n \"k\": \"0:K\",\n \"m\": \"0:M\",\n },\n schedule=dace.ScheduleType.FPGA_Device,\n init_size=k * m,\n init_overlap=True,\n drain_size=k * m,\n drain_overlap=True,\n additional_iterators={'user_var': 0})\n # for the sake of testing, use the additional user_var to set to zero the last element of each row\n tasklet = state.add_tasklet(\n name, {\"_in\"}, {\"_out\"}, \"\"\"\\\n_out = _in + (0 if user_var==M-1 else (1 if {} else (3 if {} else 2)))\nif user_var == M-1:\n user_var = 0\nelse:\n user_var = user_var + 1 \n\"\"\".format(entry.pipeline.init_condition(), entry.pipeline.drain_condition()))\n\n # Container-to-container copies between arrays and streams\n state.add_memlet_path(read_memory,\n produce_input_stream,\n memlet=dace.Memlet(\"a_device[0:N, 0:K, 0:M]\",\n other_subset=\"0\",\n volume=n * k * m))\n state.add_memlet_path(consume_output_stream,\n write_memory,\n memlet=dace.Memlet(\"b_device[0:N, 0:K, 0:M]\",\n other_subset=\"0\",\n volume=n * k * m))\n\n # Input stream to buffer\n state.add_memlet_path(consume_input_stream,\n entry,\n tasklet,\n dst_conn=\"_in\",\n memlet=dace.Memlet(\"a_stream[0]\", dynamic=True))\n\n # Buffer to output stream\n state.add_memlet_path(tasklet,\n exit,\n produce_output_stream,\n src_conn=\"_out\",\n 
memlet=dace.Memlet(\"b_stream[0]\", dynamic=True))\n\n return sdfg\n\n\nif __name__ == \"__main__\":\n\n import numpy as np\n\n dtype = np.float64\n n = 16\n k = 24\n m = 32\n\n jacobi = make_sdfg(dtype=dtype)\n jacobi.specialize({\"N\": n, \"K\": k, \"M\": m})\n\n a = np.arange(n * k * m, dtype=dtype).reshape((n, k, m))\n b = np.empty((n, k, m), dtype=dtype)\n\n jacobi(a=a, b=b)\n\n ref = copy.copy(a)\n ref[0, :, 0:-1] += 1\n ref[1:-1, :, 0:-1] += 2\n ref[-1, :, 0:-1] += 3\n\n if (b != ref).any():\n print(b)\n print(ref)\n raise ValueError(\"Unexpected output.\")\n"
] | [
[
"numpy.arange",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ksomf/workbook | [
"12fcac2c08e235ffa9c1706d6fee4f65d97d63b9"
] | [
"alexeygrigorev_zoomcamp/capstone_project/train.py"
] | [
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as pl\nimport seaborn as sns\nimport tensorflow as tf\n\nimport re\nimport json\n\nfrom functools import partial\nfrom itertools import filterfalse\nfrom wordcloud import WordCloud\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\ndf = pd.read_csv('data.csv')\ncolumns = ['speaker','headline','description','event','duration','date_published','views_as_of_06162017','tags','transcript']\ndf = df[columns]\ndf['duration'] = pd.to_timedelta(df['duration']).dt.total_seconds()\ndf['date_published'] = pd.to_datetime(df['date_published'])\ndf = df.rename(columns={'views_as_of_06162017':'views'})\ndf = df.dropna()\nwc = WordCloud()\n\ndef transcript_to_tokens(s):\n s = list(map(lambda s: s.strip(), filter(len,s.split('\\r'))))\n s = ' '.join(filterfalse(partial(re.match,'[0-9]+\\:[0-9]+'),s))\n s = s.replace('.','').replace(',','').replace('!','').replace('?','').replace(':','').replace(';','').replace('\"','').lower()\n emotes = re.findall('\\(([^)]+)\\)',s)\n speech = ' '.join(re.split('\\(([^)]+)\\)',s)).split()\n emotes = emotes + list(filter(lambda s: s in ['applause','laughter'],speech)) # Inconsistent annotation in transcript\n speech = filter(lambda s: not s in ['applause','laughter'],speech)\n speech = list(filter(lambda s: s not in wc.stopwords, speech))\n return (emotes,speech)\n\ndef word_count(s):\n return len(pd.value_counts(s))\n\ndef translate_df(df):\n emotes, words = zip(*df['transcript'].apply(transcript_to_tokens).to_list())\n df.loc[:,'emotes'] = list(emotes)\n df.loc[:,'words'] = list(words)\n df['unique_words'] = df['words'].apply(word_count)\n df['year_published'] = df['date_published'].dt.year\n df['month_published'] = df['date_published'].dt.month\n return df\n\ndf = translate_df(df)\nall_words = [ x for xs in df['words'].to_list() for x in xs ]\nword_counts = pd.value_counts(all_words)\n\nall_emotes = [ x for xs in df['emotes'] for x in xs ]\nemote_counts = pd.value_counts(all_emotes)\n\nn_words_analyse = 50\nfor word in word_counts.head(n=n_words_analyse).keys():\n df[f'num_{word}'] = df['words'].apply(lambda xs: xs.count(word))\n\nn_emotes_analyse = 2\nfor emote in emote_counts.head(n=n_emotes_analyse).keys():\n df[f'times_{emote}'] = df['emotes'].apply(lambda xs: xs.count(emote))\n\nnumerical_columns = df.select_dtypes(include='number').columns\n\nval_frac = 0.2\ntest_frac = 0.2\ntrain_frac = 1.0 - val_frac - test_frac\n\ndf_model = df[numerical_columns]\n\ndf_full_train = df_model.sample(frac=train_frac + val_frac,random_state=0)\ndf_test = df_model.drop(df_full_train.index)\n\ny_full_train = np.log1p(df_full_train.pop('views'))\ny_test = np.log1p(df_test .pop('views'))\n\ndef train_NN(df_train,y_train,df_val,y_val,inner_layers=[64],learning_rate=0.1,droprate=None,input_droprate=None):\n normalizer = tf.keras.layers.Normalization(axis=-1)\n normalizer.adapt(np.asarray(df_train))\n\n model = tf.keras.Sequential()\n model.add(normalizer)\n if input_droprate:\n model.add(layers.Dropout(droprate))\n for layer_size in inner_layers:\n model.add(layers.Dense(layer_size, activation='relu'))\n if droprate:\n model.add(layers.Dropout(droprate))\n model.add(layers.Dense(units=1))\n model.summary()\n\n model.compile(optimizer=tf.optimizers.Adam(learning_rate=learning_rate)\n ,loss='mean_squared_error')\n history = model.fit(df_train,y_train,epochs=200,validation_data=(np.asarray(df_val),y_val))\n return history\n\nbest_ddn2_layer_size = [16,16]\nbest_ddn2_learning_rate = 0.33\nbest_ddn2_droprate 
= 0.4\nbest_ddn2_input_droprate = 0.0\n\nbest = train_NN(df_full_train,y_full_train,df_test,y_test\n ,inner_layers=best_ddn2_layer_size\n ,droprate=best_ddn2_droprate\n ,learning_rate=best_ddn2_learning_rate\n ,input_droprate=best_ddn2_input_droprate)\n\n#best.model.save('keras_model')\ntf.saved_model.save(best.model, 'view-model')\nmodel_spec = { 'columns': list(filter(lambda x: x != 'views',df[numerical_columns].columns.to_list())),\n 'trained_words': word_counts.head(n=n_words_analyse).keys().to_list(),\n 'trained_emotes': emote_counts.head(n=n_emotes_analyse).keys().to_list()}\n\nopen('keras_model_spec.json','w+').write(json.dumps(model_spec))\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime",
"numpy.asarray",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Sequential",
"tensorflow.saved_model.save",
"pandas.to_timedelta",
"tensorflow.keras.layers.Normalization",
"pandas.value_counts",
"tensorflow.keras.layers.Dropout",
"tensorflow.optimizers.Adam"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
lzmisscc/emoran | [
"f7360ac21b0c8657244d75ec927020fb26c41fea"
] | [
"tools/dataset.py"
] | [
"import random\nimport torch\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as transforms\nfrom torch.utils.data import sampler\nimport lmdb\nimport six\nimport sys\nfrom PIL import Image\nimport numpy as np\n\n\nclass lmdbDataset(Dataset):\n\n def __init__(self, root=None, transform=None, reverse=False, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):\n self.env = lmdb.open(\n root,\n max_readers=1,\n readonly=True,\n lock=False,\n readahead=False,\n meminit=False)\n\n if not self.env:\n print('cannot creat lmdb from %s' % (root))\n sys.exit(0)\n\n with self.env.begin(write=False) as txn:\n nSamples = int(txn.get('num-samples'.encode()))\n self.nSamples = nSamples\n\n self.transform = transform\n self.alphabet = alphabet\n self.reverse = reverse\n\n def __len__(self):\n return self.nSamples\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n index += 1\n with self.env.begin(write=False) as txn:\n img_key = 'image-%09d' % index\n imgbuf = txn.get(img_key.encode())\n\n buf = six.BytesIO()\n buf.write(imgbuf)\n buf.seek(0)\n try:\n img = Image.open(buf).convert('L')\n except IOError:\n print('Corrupted image for %d' % index)\n return self[index + 1]\n\n label_key = 'label-%09d' % index\n label = str(txn.get(label_key.encode()).decode('utf-8'))\n\n label = ''.join(label[i] if label[i].lower() in self.alphabet else ''\n for i in range(len(label)))\n if len(label) <= 0:\n return self[index + 1]\n if self.reverse:\n label_rev = label[-1::-1]\n label_rev += '$'\n label += '$'\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.reverse:\n return (img, label, label_rev)\n else:\n return (img, label)\n\n\nclass resizeNormalize(object):\n\n def __init__(self, size, interpolation=Image.BILINEAR):\n self.size = size\n self.interpolation = interpolation\n self.toTensor = transforms.ToTensor()\n\n def __call__(self, img):\n img = img.resize(self.size, self.interpolation)\n img = self.toTensor(img)\n img.sub_(0.5).div_(0.5)\n return img\n\n\nclass randomSequentialSampler(sampler.Sampler):\n\n def __init__(self, data_source, batch_size):\n self.num_samples = len(data_source)\n self.batch_size = batch_size\n\n def __len__(self):\n return self.num_samples\n\n def __iter__(self):\n n_batch = len(self) // self.batch_size\n tail = len(self) % self.batch_size\n index = torch.LongTensor(len(self)).fill_(0)\n for i in range(n_batch):\n random_start = random.randint(0, len(self) - self.batch_size)\n batch_index = random_start + torch.arange(0, self.batch_size)\n index[i * self.batch_size:(i + 1) * self.batch_size] = batch_index\n # deal with tail\n if tail:\n random_start = random.randint(0, len(self) - self.batch_size)\n tail_index = random_start + torch.arange(0, tail)\n index[(i + 1) * self.batch_size:] = tail_index\n\n return iter(index)\n"
] | [
[
"torch.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
leherbert/Informer2020 | [
"45d4a656550e90399b3934a8f51e7ebc5a32e745"
] | [
"models/model.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom utils.masking import TriangularCausalMask, ProbMask\nfrom models.encoder import Encoder, EncoderLayer, ConvLayer\nfrom models.decoder import Decoder, DecoderLayer\nfrom models.attn import FullAttention, ProbAttention, AttentionLayer\nfrom models.embed import DataEmbedding\n\nclass Informer(nn.Module):\n def __init__(self, enc_in, dec_in, c_out, seq_len, label_len, out_len, \n factor=5, d_model=512, n_heads=8, e_layers=3, d_layers=2, d_ff=512, \n dropout=0.0, attn='prob', embed='fixed', data='ETTh', activation='gelu', \n device=torch.device('cuda:0')):\n super(Informer, self).__init__()\n self.pred_len = out_len\n self.attn = attn\n\n # Encoding\n self.enc_embedding = DataEmbedding(enc_in, d_model, embed, data, dropout)\n self.dec_embedding = DataEmbedding(dec_in, d_model, embed, data, dropout)\n # Attention\n Attn = ProbAttention if attn=='prob' else FullAttention\n # Encoder\n self.encoder = Encoder(\n [\n EncoderLayer(\n AttentionLayer(Attn(False, factor, attention_dropout=dropout), \n d_model, n_heads),\n d_model,\n d_ff,\n dropout=dropout,\n activation=activation\n ) for l in range(e_layers)\n ],\n [\n ConvLayer(\n d_model\n ) for l in range(e_layers-1)\n ],\n norm_layer=torch.nn.LayerNorm(d_model)\n )\n # Decoder\n self.decoder = Decoder(\n [\n DecoderLayer(\n AttentionLayer(FullAttention(True, factor, attention_dropout=dropout), \n d_model, n_heads),\n AttentionLayer(FullAttention(False, factor, attention_dropout=dropout), \n d_model, n_heads),\n d_model,\n d_ff,\n dropout=dropout,\n activation=activation,\n )\n for l in range(d_layers)\n ],\n norm_layer=torch.nn.LayerNorm(d_model)\n )\n # self.end_conv1 = nn.Conv1d(in_channels=label_len+out_len, out_channels=out_len, kernel_size=1, bias=True)\n # self.end_conv2 = nn.Conv1d(in_channels=d_model, out_channels=c_out, kernel_size=1, bias=True)\n self.projection = nn.Linear(d_model, c_out, bias=True)\n \n def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, \n enc_self_mask=None, dec_self_mask=None, dec_enc_mask=None):\n enc_out = self.enc_embedding(x_enc, x_mark_enc)\n enc_out = self.encoder(enc_out, attn_mask=enc_self_mask)\n\n dec_out = self.dec_embedding(x_dec, x_mark_dec)\n dec_out = self.decoder(dec_out, enc_out, x_mask=dec_self_mask, cross_mask=dec_enc_mask)\n dec_out = self.projection(dec_out)\n \n # dec_out = self.end_conv1(dec_out)\n # dec_out = self.end_conv2(dec_out.transpose(2,1)).transpose(1,2)\n return dec_out[:,-self.pred_len:,:] # [B, L, D]\n"
] | [
[
"torch.device",
"torch.nn.Linear",
"torch.nn.LayerNorm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Will-Smith11/BettyBot | [
"2b9f6b62b1d6394fbb974b39ba93ad3c02023a7e"
] | [
"BettyBot/EntireGameScreen.py"
] | [
"import numpy as np\nimport cv2\nfrom mss import mss\nimport time\n\n# location of the screen grab image\nbbox = {'top': 57, 'left': 0, 'width': 420, 'height': 750}\n#resizing pct of img\nheight_percent = 53\nwidth_percent = 60\nsct = mss()\nlast_time = time.time()\n\ndef convert_Gray(original_image):\n processed_img = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)\n return processed_img\n\n\nwhile True:\n #get screen img\n sct_img = sct.grab(bbox)\n #convert img to array\n screen = np.array(sct_img)\n # resizing the img to fit better on the screen\n width = int(screen.shape[1] * width_percent / 100)\n height = int(screen.shape[0] * height_percent / 100)\n dim = (width, height)\n resized = cv2.resize(screen, dim, interpolation = cv2.INTER_AREA)\n #final_screen = process_img(resized)\n\n processed_img = cv2.cvtColor(resized, cv2.COLOR_RGB2BGR)\n processed_img = convert_Gray(processed_img)\n\n cv2.imshow('screen', processed_img)\n print(f'running at {1/(time.time()-last_time)} FPS')\n last_time = time.time()\n # quit key\n if cv2.waitKey(1) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n break\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mrjleo/fastforward | [
"3178257a7095dc2f23528ce0a509280b2c0f6a00"
] | [
"fast_forward/index.py"
] | [
"import abc\nimport time\nimport pickle\nimport logging\nfrom enum import Enum\nfrom pathlib import Path\nfrom queue import PriorityQueue\nfrom collections import OrderedDict, defaultdict\nfrom typing import Callable, Dict, Iterable, Iterator, List, Sequence, Set, Tuple, Union\n\nimport numpy as np\nfrom tqdm import tqdm\nfrom scipy.spatial.distance import cosine\n\nfrom fast_forward.ranking import Ranking\nfrom fast_forward.encoder import QueryEncoder\nfrom fast_forward.util import interpolate\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass Mode(Enum):\n \"\"\"Enum used to set the retrieval mode of an index.\"\"\"\n\n PASSAGE = 1\n MAXP = 2\n FIRSTP = 3\n AVEP = 4\n\n\nclass Index(abc.ABC):\n \"\"\"Abstract base class for Fast-Forward indexes.\"\"\"\n\n def __init__(\n self,\n encoder: QueryEncoder = None,\n mode: Mode = Mode.PASSAGE,\n encoder_batch_size: int = 32,\n ) -> None:\n \"\"\"Constructor.\n\n Args:\n encoder (QueryEncoder, optional): The query encoder to use. Defaults to None.\n mode (Mode, optional): Indexing mode. Defaults to Mode.PASSAGE.\n encoder_batch_size (int, optional): Query encoder batch size. Defaults to 32.\n \"\"\"\n super().__init__()\n self.encoder = encoder\n self.mode = mode\n self._encoder_batch_size = encoder_batch_size\n\n def encode(self, queries: Sequence[str]) -> List[np.ndarray]:\n \"\"\"Encode queries.\n\n Args:\n queries (Sequence[str]): The queries to encode.\n\n Raises:\n RuntimeError: When no query encoder exists.\n\n Returns:\n List[np.ndarray]: The query representations.\n \"\"\"\n if self._encoder is None:\n raise RuntimeError(\"This index does not have a query encoder.\")\n\n result = []\n for i in range(0, len(queries), self._encoder_batch_size):\n batch = queries[i : i + self._encoder_batch_size]\n result.extend(self._encoder.encode(batch))\n return result\n\n @property\n def encoder(self) -> QueryEncoder:\n \"\"\"Return the query encoder.\n\n Returns:\n QueryEncoder: The encoder.\n \"\"\"\n return self._encoder\n\n @encoder.setter\n def encoder(self, encoder: QueryEncoder) -> None:\n \"\"\"Set the query encoder.\n\n Args:\n encoder (QueryEncoder): The encoder.\n \"\"\"\n assert encoder is None or isinstance(encoder, QueryEncoder)\n self._encoder = encoder\n\n @property\n def mode(self) -> Mode:\n \"\"\"Return the indexing mode.\n\n Returns:\n Mode: The mode.\n \"\"\"\n return self._mode\n\n @mode.setter\n def mode(self, mode: Mode) -> None:\n \"\"\"Set the indexing mode.\n\n Args:\n mode (Mode): The indexing mode.\n \"\"\"\n assert isinstance(mode, Mode)\n self._mode = mode\n\n @property\n def doc_ids(self) -> Set[str]:\n \"\"\"Return all unique document IDs.\n\n Returns:\n Set[str]: The document IDs.\n \"\"\"\n return self._get_doc_ids()\n\n @abc.abstractmethod\n def _get_doc_ids(self) -> Set[str]:\n \"\"\"Return all unique document IDs.\n\n Returns:\n Set[str]: The document IDs.\n \"\"\"\n pass\n\n @property\n def psg_ids(self) -> Set[str]:\n \"\"\"Return all unique passage IDs.\n\n Returns:\n Set[str]: The passage IDs.\n \"\"\"\n return self._get_psg_ids()\n\n @abc.abstractmethod\n def _get_psg_ids(self) -> Set[str]:\n \"\"\"Return all unique passage IDs.\n\n Returns:\n Set[str]: The passage IDs.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def _add(\n self,\n vectors: np.ndarray,\n doc_ids: Sequence[Union[str, None]],\n psg_ids: Sequence[Union[str, None]],\n ) -> None:\n \"\"\"Add vector representations and corresponding IDs to the index. 
Each vector is guaranteed to\n have either a document or passage ID associated.\n\n Args:\n vectors (np.ndarray): The representations, shape (num_vectors, dim).\n doc_ids (Sequence[Union[str, None]]): The corresponding document IDs (may be duplicate).\n psg_ids (Sequence[Union[str, None]]): The corresponding passage IDs (must be unique).\n \"\"\"\n pass\n\n def add(\n self,\n vectors: np.ndarray,\n doc_ids: Sequence[str] = None,\n psg_ids: Sequence[str] = None,\n ) -> None:\n \"\"\"Add vector representations and corresponding IDs to the index. Only one of \"doc_ids\" and \"psg_ids\"\n may be None. For performance reasons, this function should not be called frequently with few items.\n\n Args:\n vectors (np.ndarray): The representations, shape (num_vectors, dim).\n doc_id (Sequence[str], optional): The corresponding document IDs (may be duplicate). Defaults to None.\n psg_id (Sequence[str], optional): The corresponding passage IDs (must be unique). Defaults to None.\n\n Raises:\n ValueError: When there are no document IDs and no passage IDs.\n \"\"\"\n if doc_ids is None and psg_ids is None:\n raise ValueError(\n 'At least one of \"doc_ids\" and \"psg_ids\" must be provided.'\n )\n\n num_vectors = vectors.shape[0]\n if num_vectors < 100:\n LOGGER.warning(\n 'calling \"Index.add()\" repeatedly with few vectors may be slow'\n )\n if doc_ids is None:\n doc_ids = [None] * num_vectors\n if psg_ids is None:\n psg_ids = [None] * num_vectors\n\n assert num_vectors == len(doc_ids) == len(psg_ids)\n self._add(vectors, doc_ids, psg_ids)\n\n @abc.abstractmethod\n def _get_vectors(\n self, ids: Iterable[str], mode: Mode\n ) -> Tuple[np.ndarray, List[Union[List[int], int, None]]]:\n \"\"\"Return:\n * A single array containing all vectors necessary to compute the scores for each document/passage.\n * For each document/passage (in the same order as the IDs), either\n * a list of integers (MAXP, AVEP),\n * a single integer (FIRSTP, PASSAGE),\n * None (the document/passage is not indexed and has no vector)\n\n The integers will be used to get the corresponding representations from the array.\n The output of this function depends on the current mode.\n\n Args:\n ids (Iterable[str]): The document/passage IDs to get the representations for.\n mode (Mode): The index mode.\n\n Returns:\n Tuple[np.ndarray, List[Union[List[int], int, None]]]: The vectors and corresponding indices.\n \"\"\"\n pass\n\n def _compute_scores(self, q_rep: np.ndarray, ids: Iterable[str]) -> Iterator[float]:\n \"\"\"Compute scores based on the current mode.\n\n Args:\n q_rep (np.ndarray): Query representation.\n ids (Iterable[str]): Document/passage IDs.\n\n Yields:\n float: The scores, preserving the order of the IDs.\n \"\"\"\n vectors, id_indices = self._get_vectors(ids, self.mode)\n all_scores = np.dot(q_rep, vectors.T)\n\n for ind in id_indices:\n if ind is None:\n yield None\n else:\n if self.mode == Mode.MAXP:\n yield np.max(all_scores[ind])\n elif self.mode == Mode.AVEP:\n yield np.average(all_scores[ind])\n elif self.mode in (Mode.FIRSTP, Mode.PASSAGE):\n yield all_scores[ind]\n\n def _early_stopping(\n self,\n ids: Iterable[str],\n dense_scores: Iterable[float],\n sparse_scores: Iterable[float],\n alpha: float,\n cutoff: int,\n ) -> Dict[str, float]:\n \"\"\"Interpolate scores with early stopping.\n\n Args:\n ids (Iterable[str]): Document/passage IDs.\n dense_scores (Iterable[float]): Corresponding dense scores.\n sparse_scores (Iterable[float]): Corresponding sparse scores.\n alpha (float): Interpolation parameter.\n cutoff 
(int): Cut-off depth.\n\n Returns:\n Dict[str, float]: Document/passage IDs mapped to scores.\n \"\"\"\n result = {}\n relevant_scores = PriorityQueue(cutoff)\n min_relevant_score = float(\"-inf\")\n max_dense_score = float(\"-inf\")\n for id, dense_score, sparse_score in zip(ids, dense_scores, sparse_scores):\n if relevant_scores.qsize() >= cutoff:\n\n # check if approximated max possible score is too low to make a difference\n min_relevant_score = relevant_scores.get_nowait()\n max_possible_score = (\n alpha * sparse_score + (1 - alpha) * max_dense_score\n )\n\n # early stopping\n if max_possible_score <= min_relevant_score:\n break\n\n if dense_score is None:\n LOGGER.warning(f\"{id} not indexed, skipping\")\n continue\n\n max_dense_score = max(max_dense_score, dense_score)\n score = alpha * sparse_score + (1 - alpha) * dense_score\n result[id] = score\n\n # the new score might be ranked higher than the one we removed\n relevant_scores.put_nowait(max(score, min_relevant_score))\n return result\n\n def get_scores(\n self,\n ranking: Ranking,\n queries: Dict[str, str],\n alpha: Union[float, Iterable[float]] = 0.0,\n cutoff: int = None,\n early_stopping: bool = False,\n ) -> Dict[float, Ranking]:\n \"\"\"Compute corresponding dense scores for a ranking and interpolate.\n\n Args:\n ranking (Ranking): The ranking to compute scores for and interpolate with.\n queries (Dict[str, str]): Query IDs mapped to queries.\n alpha (Union[float, Iterable[float]], optional): Interpolation weight(s). Defaults to 0.0.\n cutoff (int, optional): Cut-off depth (documents/passages per query). Defaults to None.\n early_stopping (bool, optional): Whether to use early stopping. Defaults to False.\n\n Raises:\n ValueError: When the cut-off depth is missing for early stopping.\n\n Returns:\n Dict[float, Ranking]: Alpha mapped to interpolated scores.\n \"\"\"\n if isinstance(alpha, float):\n alpha = [alpha]\n\n if early_stopping and cutoff is None:\n raise ValueError(\"A cut-off depth is required for early stopping.\")\n\n t0 = time.time()\n\n # batch encode queries\n q_id_list = list(ranking)\n q_reps = self.encode([queries[q_id] for q_id in q_id_list])\n\n result = {}\n if not early_stopping:\n # here we can simply compute the dense scores once and interpolate for each alpha\n dense_run = defaultdict(OrderedDict)\n for q_id, q_rep in zip(tqdm(q_id_list), q_reps):\n ids = list(ranking[q_id].keys())\n for id, score in zip(ids, self._compute_scores(q_rep, ids)):\n if score is None:\n LOGGER.warning(f\"{id} not indexed, skipping\")\n else:\n dense_run[q_id][id] = score\n for a in alpha:\n result[a] = interpolate(\n ranking, Ranking(dense_run, sort=False), a, sort=True\n )\n if cutoff is not None:\n result[a].cut(cutoff)\n else:\n # early stopping requries the ranking to be sorted\n # this should normally be the case anyway\n if not ranking.is_sorted:\n LOGGER.warning(\"input ranking not sorted. 
sorting...\")\n ranking.sort()\n\n # since early stopping depends on alpha, we have to run the algorithm more than once\n for a in alpha:\n run = defaultdict(OrderedDict)\n for q_id, q_rep in zip(tqdm(q_id_list), q_reps):\n ids, sparse_scores = zip(*ranking[q_id].items())\n dense_scores = self._compute_scores(q_rep, ids)\n scores = self._early_stopping(\n ids, dense_scores, sparse_scores, a, cutoff\n )\n for id, score in scores.items():\n run[q_id][id] = score\n result[a] = Ranking(run, sort=True, copy=False)\n result[a].cut(cutoff)\n\n LOGGER.info(f\"computed scores in {time.time() - t0}s\")\n return result\n\n\nclass InMemoryIndex(Index):\n \"\"\"Fast-Forward index that is held in memory.\"\"\"\n\n def __init__(\n self,\n encoder: QueryEncoder = None,\n mode: Mode = Mode.PASSAGE,\n encoder_batch_size: int = 32,\n ) -> None:\n \"\"\"Constructor.\n\n Args:\n encoder (QueryEncoder, optional): The query encoder to use. Defaults to None.\n mode (Mode, optional): Indexing mode. Defaults to Mode.PASSAGE.\n encoder_batch_size (int, optional): Query encoder batch size. Defaults to 32.\n \"\"\"\n self._vectors = None\n self._doc_ids = []\n self._psg_ids = []\n self._doc_id_to_idx = defaultdict(list)\n self._psg_id_to_idx = {}\n super().__init__(encoder, mode, encoder_batch_size)\n\n def _add(\n self,\n vectors: np.ndarray,\n doc_ids: Sequence[Union[str, None]],\n psg_ids: Sequence[Union[str, None]],\n ) -> None:\n if self._vectors is None:\n idx = 0\n self._vectors = vectors.copy()\n else:\n idx = self._vectors.shape[0]\n self._vectors = np.append(self._vectors, vectors, axis=0)\n\n for doc_id, psg_id in zip(doc_ids, psg_ids):\n if doc_id is not None:\n self._doc_id_to_idx[doc_id].append(idx)\n if psg_id is not None:\n assert psg_id not in self._psg_id_to_idx\n self._psg_id_to_idx[psg_id] = idx\n idx += 1\n\n self._doc_ids.extend(doc_ids)\n self._psg_ids.extend(psg_ids)\n\n def _get_doc_ids(self) -> Set[str]:\n return set(self._doc_id_to_idx.keys())\n\n def _get_psg_ids(self) -> Set[str]:\n return set(self._psg_id_to_idx.keys())\n\n def _get_vectors(\n self, ids: Iterable[str], mode: Mode\n ) -> Tuple[np.ndarray, List[Union[List[int], int, None]]]:\n # a list of all vectors to take from the main vector array\n vector_indices = []\n\n # for each ID, keep a list of indices to get the corresponding vectors from \"vector_indices\"\n id_indices = []\n i = 0\n\n if mode in (Mode.MAXP, Mode.AVEP):\n for id in ids:\n if id in self._doc_id_to_idx:\n doc_indices = self._doc_id_to_idx[id]\n vector_indices.extend(doc_indices)\n id_indices.append(list(range(i, i + len(doc_indices))))\n i += len(doc_indices)\n else:\n id_indices.append(None)\n elif mode == Mode.FIRSTP:\n for id in ids:\n if id in self._doc_id_to_idx:\n vector_indices.append(self._doc_id_to_idx[id][0])\n id_indices.append(i)\n i += 1\n else:\n id_indices.append(None)\n elif mode == Mode.PASSAGE:\n for id in ids:\n if id in self._psg_id_to_idx:\n vector_indices.append(self._psg_id_to_idx[id])\n id_indices.append(i)\n i += 1\n else:\n id_indices.append(None)\n else:\n LOGGER.error(f\"invalid mode: {mode}\")\n return self._vectors[vector_indices], id_indices\n\n def save(self, target: Path) -> None:\n \"\"\"Save the index in a file on disk.\n\n Args:\n target (Path): Target file to create.\n \"\"\"\n target.parent.mkdir(parents=True, exist_ok=True)\n LOGGER.info(f\"writing {target}\")\n with open(target, \"wb\") as fp:\n pickle.dump((self._vectors, self._doc_ids, self._psg_ids), fp)\n\n @classmethod\n def from_disk(\n cls,\n index_file: Path,\n 
encoder: QueryEncoder = None,\n mode: Mode = Mode.PASSAGE,\n encoder_batch_size: int = 32,\n ) -> \"InMemoryIndex\":\n \"\"\"Read an index from disk.\n\n Args:\n index_file (Path): The index file.\n encoder (QueryEncoder, optional): The query encoder to use. Defaults to None.\n mode (Mode, optional): Indexing mode. Defaults to Mode.PASSAGE.\n encoder_batch_size (int, optional): Query encoder batch size. Defaults to 32.\n\n Returns:\n InMemoryIndex: The index.\n \"\"\"\n LOGGER.info(f\"reading {index_file}\")\n with open(index_file, \"rb\") as fp:\n vectors, doc_ids, psg_ids = pickle.load(fp)\n\n index = cls(encoder, mode, encoder_batch_size)\n if vectors is not None:\n index.add(vectors, doc_ids, psg_ids)\n index.mode = mode\n return index\n\n\ndef create_coalesced_index(\n source_index: Index,\n target_index: Index,\n delta: float,\n distance: Callable[[np.ndarray, np.ndarray], float] = cosine,\n buffer_size: int = None,\n) -> None:\n \"\"\"Create a compressed index using sequential coalescing.\n\n Args:\n source_index (Index): The source index. Should contain multiple vectors for each document.\n target_index (Index): The target index. Must be empty.\n delta (float): The coalescing threshold.\n distance (Callable[[np.ndarray, np.ndarray], float]): The distance function. Defaults to cosine.\n buffer_size (int, optional): Use a buffer instead of adding all vectors at the end. Defaults to None.\n \"\"\"\n assert len(target_index.doc_ids) == 0\n buffer_size = buffer_size or len(source_index.doc_ids)\n\n def _coalesce(P):\n P_new = []\n A = []\n A_avg = None\n first_iteration = True\n for v in P:\n if first_iteration:\n first_iteration = False\n elif distance(v, A_avg) >= delta:\n P_new.append(A_avg)\n A = []\n A.append(v)\n A_avg = np.mean(A, axis=0)\n P_new.append(A_avg)\n return P_new\n\n vectors, doc_ids = [], []\n for doc_id in tqdm(source_index.doc_ids):\n\n # check if buffer is full\n if len(vectors) == buffer_size:\n target_index.add(np.array(vectors), doc_ids=doc_ids)\n vectors, doc_ids = [], []\n\n v_old, _ = source_index._get_vectors([doc_id], Mode.MAXP)\n v_new = _coalesce(v_old)\n vectors.extend(v_new)\n doc_ids.extend([doc_id] * len(v_new))\n\n if len(vectors) > 0:\n target_index.add(np.array(vectors), doc_ids=doc_ids)\n\n assert source_index.doc_ids == target_index.doc_ids\n"
] | [
[
"numpy.dot",
"numpy.max",
"numpy.append",
"numpy.mean",
"numpy.average",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sudo-rajarshi/covid19indialive.com | [
"1ad5677b9c2f6a5ad8b250ba2ee39ff231fac10b"
] | [
"track.py"
] | [
"import pandas as pd\nimport numpy as np\nimport requests\nfrom datetime import datetime\nimport ftplib\nimport time as ts\nimport os\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n# # State Data\napi_url_data = 'https://api.covid19india.org/data.json'\n\nr_data = requests.get(api_url_data)\ndata_time_series = r_data.json()\nstate_data = data_time_series.get('statewise')\n\nstate_list = []\nconfirmed_list = []\nrecovered_list = []\ndeaths_list = []\n\ndeltaconfirmed_list = []\ndeltarecovered_list = []\ndeltadeaths_list = []\nlastupdatedtime_list = []\n\nfor j in range(len(state_data)):\n state = state_data[j].get('state')\n \n confirmed = state_data[j].get('confirmed') \n recovered = state_data[j].get('recovered')\n deaths = state_data[j].get('deaths')\n \n deltaconfirmed = state_data[j].get('deltaconfirmed')\n deltarecovered = state_data[j].get('deltarecovered')\n deltadeaths = state_data[j].get('deltadeaths')\n \n lastupdatedtime = state_data[j].get('lastupdatedtime')\n \n \n if confirmed != '0':\n state_list.append(state)\n \n confirmed_list.append(confirmed)\n recovered_list.append(recovered)\n deaths_list.append(deaths)\n \n deltaconfirmed_list.append(deltaconfirmed)\n deltarecovered_list.append(deltarecovered)\n deltadeaths_list.append(deltadeaths)\n\n lastupdatedtime_list.append(lastupdatedtime)\n\n\nconfirmed_list = np.array(confirmed_list, dtype = int)\nrecovered_list = np.array(recovered_list, dtype = int)\ndeaths_list = np.array(deaths_list, dtype = int)\n\nactive = np.subtract(confirmed_list, np.add(recovered_list, deaths_list))\ndeath_rate = np.divide(deaths_list, confirmed_list)*100\nrecovery_rate = np.divide(recovered_list, confirmed_list)*100\n\n\ndeath_rate_list = []\nrecovery_rate_list = []\n\nfor s in death_rate:\n s = round(s,1)\n death_rate_list = np.append(death_rate_list, s)\nfor p in recovery_rate:\n p = round(p,1)\n recovery_rate_list = np.append(recovery_rate_list, p)\n\nconfirmed_list_ = []\nrecovered_list_ = []\ndeaths_list_ = []\n \ni = 0\nfor i in range(len(confirmed_list)):\n confirmed_list_ = np.append(confirmed_list_, \"\"\"<b>\"\"\" + str(confirmed_list[i]) + \"\"\"</b> <br><i class=\"fas fa-arrow-up\"></i> \"\"\" + str(deltaconfirmed_list[i]))\n recovered_list_ = np.append(recovered_list_, \"\"\"<b>\"\"\" + str(recovered_list[i]) + \"\"\"</b> <br><i class=\"fas fa-arrow-up\"></i> \"\"\" + str(deltarecovered_list[i]))\n deaths_list_ = np.append(deaths_list_, \"\"\"<b>\"\"\" + str(deaths_list[i]) + \"\"\"</b> <br><i class=\"fas fa-arrow-up\"></i> \"\"\" + str(deltadeaths_list[i]))\n\n\nstate_data = {'State':state_list[1:],'Confirmed':confirmed_list_[1:], 'Active':active[1:], 'Recovered':recovered_list_[1:], 'Deaths':deaths_list_[1:], 'Recovery(%)':recovery_rate_list[1:], 'Death(%)':death_rate_list[1:], 'Updated':lastupdatedtime_list[1:]}\ndf_state_data = pd.DataFrame(state_data)\ndf_state_data.to_csv(os.path.join(dir_path, 'CSV', 'State_data.csv'))\ndf_state_data.to_html(os.path.join(dir_path, 'HTML', 'State_data.html'), border=0, justify = 'left', index = False, table_id = \"state_data_table\", escape=False)\n\nwith open(os.path.join(dir_path, 'HTML', 'State_data.html')) as rd:\n state_wise = rd.read()\n state_wise = state_wise[:25] + \"table table-striped table-bordered table-sm text-left \" + state_wise[:35] + str(' align=\"left\"') + state_wise[35:]\n state_wise = state_wise[:159] + ' class=\"thead-dark\"' + state_wise[159:]\n\n\n# # Case Time Plot:\ntime = data_time_series.get('cases_time_series')\n\ndate_list = []\ndailyconfirmed_list = 
[]\ndailydeceased_list = []\ndailyrecovered_list = []\n\ntotalconfirmed_list = []\ntotaldeceased_list = []\ntotalrecovered_list = []\nt = 0\nfor t in range(len(time)):\n \n date = time[t].get('date')\n date_list.append(date)\n \n dailyconfirmed = time[t].get('dailyconfirmed')\n dailyconfirmed_list.append(dailyconfirmed)\n \n dailydeceased = time[t].get('dailydeceased')\n dailydeceased_list.append(dailydeceased)\n \n dailyrecovered = time[t].get('dailyrecovered')\n dailyrecovered_list.append(dailyrecovered)\n \n totalconfirmed = time[t].get('totalconfirmed')\n totalconfirmed_list.append(totalconfirmed)\n \n totalrecovered = time[t].get('totalrecovered')\n totalrecovered_list.append(totalrecovered)\n \n totaldeceased = time[t].get('totaldeceased')\n totaldeceased_list.append(totaldeceased)\n\nday_range = 14\n\ndaily_data = {'Daily Confirmations':dailyconfirmed_list[-day_range:], 'Daily Recoveries':dailyrecovered_list[-day_range:], 'Daily Deaths':dailydeceased_list[-day_range:], 'Total Confirmations':totalconfirmed_list[-day_range:], 'Total Recoveries':totalrecovered_list[-day_range:], 'Total Deaths':totaldeceased_list[-day_range:]}\ndf_daily_data = pd.DataFrame(daily_data, index = [date_list[-day_range:]])\ndf_daily_data.to_csv(os.path.join(dir_path, 'CSV', 'daily_data.csv'))\ndf_daily_data.to_html(os.path.join(dir_path, 'HTML', 'daily_data.html'), border=0, justify = 'left')\n\nwith open(os.path.join(dir_path, 'HTML', 'daily_data.html')) as rd: \n daily = rd.read()\n daily = daily[:25] + \"table table-striped table-bordered table-sm text-left \" + daily[:35] + str(' align=\"left\"') + daily[35:]\n daily = daily[:137] + ' class=\"thead-dark\"' + daily[137:]\n\ntotalconfirmed_list = np.array(totalconfirmed_list, dtype=int)\ntotalrecovered_list = np.array(totalrecovered_list, dtype=int)\ntotaldeceased_list = np.array(totaldeceased_list, dtype=int)\n\ndailyconfirmed_list = np.array(dailyconfirmed_list, dtype=int)\ndailyrecovered_list = np.array(dailyrecovered_list, dtype=int)\ndailydeceased_list = np.array(dailydeceased_list, dtype=int)\n\n\n\n# # ICMR Reports\ntested = data_time_series.get('tested')\n\ntotalsamplestested_list = []\nreport_date_list = []\n\nfor i in range(len(tested)):\n report_date = tested[i].get('updatetimestamp')\n report_date = report_date[0:10]\n report_date_list.append(report_date)\n \n totalsamplestested = tested[i].get('totalsamplestested')\n totalsamplestested_list.append(totalsamplestested)\n\ndt_list_total = report_date_list[-28:]\ntst_list_total = np.array(totalsamplestested_list[-28:], dtype=int)\n\ndt_list_daily = report_date_list[-28:-1]\n\ni = 0\ntst_list_daily = []\nfor i in range(len(tst_list_total)):\n tst_daily = tst_list_total[i] - tst_list_total[i-1]\n tst_list_daily.append(tst_daily)\n\ntst_list_daily = np.array(tst_list_daily[1:])\ncnf_list = totalconfirmed_list[-29:-1]\n\nconf_rate = str(totalconfirmed_list[-1] / tst_list_total[-1]*100)\nconf_rate = conf_rate[:1]\n\nreport_date_list_chart = []\ntotalsamplestested_list_chart = []\ndt_list_daily_chart = []\ntotalsamplestested_daily_list_chart = []\n\ni,j,k,l = 0,0,0,0\n \nfor i in tst_list_total:\n totalsamplestested_list_chart.append(i)\n \nfor j in dt_list_total:\n report_date_list_chart.append(j)\n \nfor k in dt_list_daily:\n dt_list_daily_chart.append(k)\n \nfor l in tst_list_daily:\n totalsamplestested_daily_list_chart.append(l)\n\n\n# # index.html\ndate_list_chart = []\ntotalconfirmed_list_chart = []\ntotalrecovered_list_chart = []\ntotaldeceased_list_chart = []\ndailyconfirmed_list_chart = 
[]\ndailyrecovered_list_chart = []\ndailydeceased_list_chart = []\n\npopulation = 1352600000\n\ni,j,k,l,m,n,o = 0,0,0,0,0,0,0\n\nfor i in date_list:\n date_list_chart.append(i)\n \nfor j in totalconfirmed_list:\n totalconfirmed_list_chart.append(j)\n\nfor k in totalrecovered_list:\n totalrecovered_list_chart.append(k)\n \nfor l in totaldeceased_list:\n totaldeceased_list_chart.append(l)\n \nfor m in dailyconfirmed_list:\n dailyconfirmed_list_chart.append(m)\n \nfor n in dailyrecovered_list:\n dailyrecovered_list_chart.append(n)\n \nfor o in dailydeceased_list:\n dailydeceased_list_chart.append(o)\n\nper_mil_test = int(totalsamplestested_list_chart[-1]/population*1000000)\nper_mil_conf = int(((totalconfirmed_list[-1] / tst_list_total[-1]*100)*per_mil_test)/100)\nper_mil_rec = int((recovery_rate_list[0]*per_mil_conf)/100)\nper_mil_det = int((death_rate_list[0]*per_mil_conf)/100)\n\nreports = pd.read_csv(os.path.join(dir_path, 'CSV', 'REPORTS.csv'))\n\nx_conf_long_pred = reports['pred_conf'].values[0]\nx_act_long_pred = reports['pred_act'].values[0]\nx_rec_long_pred = reports['pred_rec'].values[0]\nx_det_long_pred = reports['pred_det'].values[0]\n\ndate_pred = reports['date'].values\n\nstate_list[state_list.index('Uttarakhand')]='IN-UT'\nstate_list[state_list.index('Odisha')]='Orissa'\n\nsss = []\nsss = [['State', 'Confirmed Cases', 'Recovered Cases']]\n\nfor jj in range(len(state_list)):\n sss.append([state_list[jj], confirmed_list[jj], recovered_list[jj]])\nsss.pop(1)\n\ntime = data_time_series.get('cases_time_series')\n\nwith open(os.path.join(dir_path, 'HTML', 'day14.html')) as rd: \n pred_day14 = rd.read()\n pred_day14 = pred_day14[:25] + \"table table-striped table-bordered table-sm text-left \" + pred_day14[:35] + str(' align=\"left\"') + pred_day14[35:]\n pred_day14 = pred_day14[:137] + ' class=\"thead-dark\"' + pred_day14[137:]\n\n\nnow = datetime.now()\ndate = now.strftime(\"%d-%m-%Y\")\ndate_time = now.strftime(\"%d-%m-%Y at %H:%M\")\n\nindex = \"\"\"\n<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <!-- Global site tag (gtag.js) - Google Analytics -->\n <script async src=\"https://www.googletagmanager.com/gtag/js?id=UA-168253703-1\"></script>\n <script>\n window.dataLayer = window.dataLayer || [];\n function gtag(){dataLayer.push(arguments);}\n gtag('js', new Date());\n\n gtag('config', 'UA-168253703-1');\n </script>\n <meta charset=\"utf-8\">\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=0.77, shrink-to-fit=yes\">\n <meta name=\"Description\" content=\"Insights and Future Prediction of Covid-19 Pandemic in India. 
Get Detailed Analysis of the spread of the virus in India.\">\n <meta name=\"keywords\" content=\"coronavirus, corona, covid, covid19, covid-19, covidindia, covid-19 india, covid-19 india tracker, india, virus, pandemic, world\">\n <meta http-equiv=\"Cache-Control\" content=\"no-cache, no-store, must-revalidate\" />\n <meta http-equiv=\"Pragma\" content=\"no-cache\" />\n <meta http-equiv=\"Expires\" content=\"0\" />\n <meta name=\"cf-2fa-verify\" content=\"e85e8a45e5ef19b\">\n <title>Insights of Covid-19 in India</title>\n <link rel=\"stylesheet\" href=\"https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.4.1/css/bootstrap.min.css\">\n <link rel=\"stylesheet\" href=\"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.13.0/css/all.min.css\">\n <link rel=\"stylesheet\" href=\"https://cdn.datatables.net/v/bs4/dt-1.10.20/datatables.min.css\">\n <script src=\"https://cdn.jsdelivr.net/npm/[email protected]/dist/Chart.min.js\"></script>\n <script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js\"></script>\n <script src=\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js\"></script>\n <script src=\"https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js\"></script>\n <style>\n @import url('https://fonts.googleapis.com/css?family=Roboto+Slab&display=swap');\n *{font-family: 'Roboto Slab', serif}\n </style>\n</head>\n<body>\n\n<nav class=\"navbar bg-light navbar-expand-sm navbar-light sticky-top\">\n <a class=\"navbar-brand\" href=\"index\"\"><b>Insights of Covid-19</b> <i class=\"fas fa-virus\"></i></a>\n <button class=\"navbar-toggler\" type=\"button\" data-toggle=\"collapse\" data-target=\"#navbarSupportedContent\">\n <span class=\"navbar-toggler-icon\"></span>\n </button>\n <div class=\"collapse navbar-collapse\" id=\"navbarSupportedContent\">\n <ul class=\"navbar-nav ml-auto\">\n <li class=\"nav-item active\">\n <a class=\"nav-link\" href=\"index\"><b>Home <i class=\"fas fa-home\"></i></b></a>\n </li>\n <li class=\"nav-item\">\n <a class=\"nav-link\" href=\"trendAnalysis\"><b>Trend Analysis</b></a>\n </li>\n <li class=\"nav-item\">\n <a class=\"nav-link\" href=\"helpfulResources\"><b>Helpful Resources</b></a>\n </li>\n </ul>\n </div>\n</nav>\n\n<div class=\"text-center\">\n <br>\n <h1 class=\"text-center\"><b>Insights of Covid-19 Pandemic in India</b></h1>\n <p class=\"text-center\">\n Live information about Novel Corona Virus spread in India\n <br>\n Check out the <a href=\"https://telegra.ph/Data-Sources-for-Covidtracker-indiaml-04-05\" target=\"_blank\" rel=\"noopener\">data sources</a> for this website\n </p>\n \n</div>\n\n<div class=\"container\">\n <div class=\"row justify-content-center\">\n <div class=\"col-lg-6 col mb-3\">\n <script src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script>\n google.charts.load('current', {\n 'packages':['geochart'],\n 'mapsApiKey': 'AIzaSyD-9tSrke72PouQMnMX-a7eZSW0jkFMBWY'\n });\n google.charts.setOnLoadCallback(drawRegionsMap);\n\n function drawRegionsMap() {\n var data = google.visualization.arrayToDataTable(\"\"\" + str(sss) + \"\"\");\n\n var options = {\n region: 'IN',\n displayMode: 'regions',\n resolution: 'provinces',\n datalessRegionColor: '#ffffff'\n };\n\n var chart = new google.visualization.GeoChart(document.getElementById('geochart-colors'));\n chart.draw(data, options);\n };\n </script>\n <div id=\"geochart-colors\" align=\"center\"></div>\n </div>\n </div>\n </div>\n</div>\n\n\n<div class=\"container\">\n <div class=\"row justify-content-center\">\n\n 
<div class=\"col-lg-2 col mb-3\">\n <div class=\"card border-0\">\n <div class=\"text-primary\">\n <div class=\"card-body\">\n <div class=\"text-center\">\n <h6 class=\"card-title\"><b>Total <br> Confirmed</b></h6>\n <h4 class=\"card-text\"><b>\"\"\" + str(confirmed_list[0]) + \"\"\"</b></h4>\n <h6 class=\"card-text\"> +\"\"\" + str(deltaconfirmed_list[0]) + \"\"\"</h6>\n </div>\n </div>\n </div>\n </div>\n </div>\n\n <div class=\"col-lg-2 col mb-3\">\n <div class=\"card border-0\">\n <div class=\"text-success\">\n <div class=\"card-body\">\n <div class=\"text-center\">\n <h6 class=\"card-title\"><b>Total <br> Recovered</b></h6>\n <h4 class=\"card-text\"><b>\"\"\" + str(recovered_list[0]) + \"\"\"</b></h4>\n <h6 class=\"card-text\"> +\"\"\" + str(deltarecovered_list[0]) + \"\"\"</h6>\n </div>\n </div>\n </div>\n </div>\n </div>\n\n <div class=\"col-lg-2 col mb-3\">\n <div class=\"card border-0\">\n <div class=\"text-dark\">\n <div class=\"card-body\">\n <div class=\"text-center\">\n <h6 class=\"card-title\"><b>Total <br> Deceased</b></h6>\n <h4 class=\"card-text\"><b>\"\"\" + str(deaths_list[0]) + \"\"\"</b></h4>\n <h6 class=\"card-text\"> +\"\"\" + str(deltadeaths_list[0]) + \"\"\"</h6>\n </div>\n </div>\n </div>\n </div>\n </div>\n\n <div class=\"col-lg-2 col mb-3\">\n <div class=\"card border-0\">\n <div class=\"text-primary\">\n <div class=\"card-body\">\n <div class=\"text-center\">\n <h6 class=\"card-title\"><b>Confirmation <br> Rate</b></h6>\n <h4 class=\"card-text\"><b>\"\"\" + conf_rate + '%' + \"\"\"</b></h4>\n <a data-toggle=\"tooltip\" data-placement=\"bottom\" title='Out of 100 tests \"\"\"+ conf_rate +\"\"\" people have tested positive'>\n <i class=\"fas fa-question-circle\"></i>\n </a>\n </div>\n </div>\n </div>\n </div>\n </div>\n\n <div class=\"col-lg-2 col mb-3\">\n <div class=\"card border-0\">\n <div class=\"text-success\">\n <div class=\"card-body\">\n <div class=\"text-center\">\n <h6 class=\"card-title\"><b>Recovery <br> Rate</b></h6>\n <h4 class=\"card-text\"><b>\"\"\" + str(int(recovery_rate_list[0])) + '%' + \"\"\"</b></h4>\n <a data-toggle=\"tooltip\" data-placement=\"top\" title='Out of 100 confirmed cases \"\"\"+ str(int(recovery_rate_list[0])) +\"\"\" people have recovered'>\n <i class=\"fas fa-question-circle\"></i>\n </a>\n </div>\n </div>\n </div>\n </div>\n </div>\n \n <div class=\"col-lg-2 col mb-3\">\n <div class=\"card border-0\">\n <div class=\"text-dark\">\n <div class=\"card-body\">\n <div class=\"text-center\">\n <h6 class=\"card-title\"><b>Mortality <br> Rate</b></h6>\n <h4 class=\"card-text\"><b>\"\"\" + str(int(death_rate_list[0])) + '%' + \"\"\"</b></h4>\n <a data-toggle=\"tooltip\" data-placement=\"bottom\" title='Out of 100 confirmed cases \"\"\"+ str(int(death_rate_list[0])) +\"\"\" people have died'>\n <i class=\"fas fa-question-circle\"></i>\n </a>\n </div>\n </div>\n </div>\n </div>\n </div>\n\n <script>\n $('a[data-toggle=\"tooltip\"]').tooltip({\n animated: 'fade',\n trigger: 'click'\n });\n </script>\n\n <div class=\"text-center\">\n <a href=\"trendAnalysis\" class=\"btn btn-warning\" role=\"button\" aria-pressed=\"true\">\n <i class=\"text-center fas fa-chart-line fa-1x\"></i> Trend of the Spread <i class=\"fas fa-external-link-square-alt\"></i>\n </a>\n </div>\n\n </div>\n</div>\n\n<br>\n\n<h4 class=\"text-center\">\n <b>Spread per million people</b>\n</h4>\n\n<p class=\"text-center\">\n Testing, confirmations, recoveries and deaths per million people\n</p>\n\n \n<div class=\"container\">\n <div class=\"row justify-content-center\">\n 
\n <div class=\"col-lg-6 col mb-3\">\n <div class=\"card bg-info border-0\">\n <div class=\"text-white\">\n <div class=\"card-body\">\n <div class=\"text-left\">\n <h5 class=\"card-title\"><b>Samples Testing</b></h5>\n <p class=\"card-text\">Out of 1 million people <b>\"\"\" + str(per_mil_test) + \"\"\"</b> people have been tested</p>\n </div>\n </div>\n </div>\n </div>\n </div>\n \n <div class=\"col-lg-6 col mb-3\">\n <div class=\"card bg-primary border-0\">\n <div class=\"text-white\">\n <div class=\"card-body\">\n <div class=\"text-left\">\n <h5 class=\"card-title\"><b>Confirmations</b></h5>\n <p class=\"card-text\">Out of \"\"\" + str(per_mil_test) + \"\"\" tests <b>\"\"\" + str(per_mil_conf) + \"\"\"</b> people have been tested positive</p>\n </div>\n </div>\n </div>\n </div>\n </div>\n \n </div>\n</div>\n \n<div class=\"container\">\n <div class=\"row justify-content-center\">\n \n <div class=\"col-lg-6 col mb-3\">\n <div class=\"card bg-success border-0\">\n <div class=\"text-white\">\n <div class=\"card-body\">\n <div class=\"text-left\">\n <h5 class=\"card-title\"><b>Recoveries</b></h5>\n <p class=\"card-text\">Out of \"\"\" + str(per_mil_conf) + \"\"\" positive cases <b>\"\"\" + str(per_mil_rec) + \"\"\"</b> people have been recovered</p>\n </div>\n </div>\n </div>\n </div>\n </div>\n \n <div class=\"col-lg-6 col mb-3\">\n <div class=\"card bg-dark border-0\">\n <div class=\"text-white\">\n <div class=\"card-body\">\n <div class=\"text-left\">\n <h5 class=\"card-title\"><b>Deaths</b></h5>\n <p class=\"card-text\">Out of \"\"\" + str(per_mil_conf) + \"\"\" positive cases <b>\"\"\" + str(per_mil_det) + \"\"\"</b> people have been deceased</p>\n </div>\n </div>\n </div>\n </div>\n </div>\n \n </div>\n</div>\n\n<div class=\"container\">\n <div class=\"row justify-content-center\">\n \n <div class=\"col-lg-8 col mb-3\">\n <div class=\"card border-0\">\n <div class=\"text-dark\">\n <div class=\"card-body\">\n <div class=\"text-center\">\n <h4 class=\"card-title\"><b>Detailed Analysis of Different States</b></h4>\n <p class=\"card-text\">Get detailed analytics of confirmations, recoveries and deaths across different states</p>\n <button class=\"btn btn-warning\" type=\"button\" data-toggle=\"collapse\" data-target=\"#state\" aria-expanded=\"false\" aria-controls=\"state\">\n State Data <i class=\"fas fa-chevron-circle-down fa-1x\"></i>\n </button>\n </div>\n </div>\n </div>\n </div>\n </div>\n \n </div>\n</div>\n\n<div class=\"collapse\" id=\"state\">\n\n <div class=\"text-center\">\n <input type=\"text\" name=\"\" id=\"state_data_search\" placeholder=\" Search by State\" onkeyup=\"search()\">\n </div>\n <br>\n\n<div class=\"container mb-3\">\n<div class=\"table-responsive\">\n\"\"\" + state_wise +\"\"\"\n</div>\n</div>\n<script>\n const search = () =>{\n let filter = document.getElementById('state_data_search').value.toUpperCase();\n let resources_table = document.getElementById('state_data_table');\n let tr = resources_table.getElementsByTagName('tr');\n for(var i=0; i<tr.length; i++){\n let td = tr[i].getElementsByTagName('td')[0];\n if(td){\n let textvalue = td.textContent || td.innerHTML;\n if(textvalue.toUpperCase().indexOf(filter) > -1){\n tr[i].style.display = \"\";\n }\n else{\n tr[i].style.display = \"none\";\n }\n }\n }\n }\n</script>\n\n</div>\n\n<h4 class=\"text-center\">\n <b>Predictions of \"\"\" + date_pred[0] + \"\"\" using AI</b>\n</h4>\n\n<p class='text-center'>\n The following predictions are made by considering the current situation doesnot change with respect to 
time.\n</p>\n\n<div class=\"container\">\n <div class=\"row justify-content-center\">\n \n <div class=\"col-lg-3 col mb-3\">\n <div class=\"card bg-primary border-0\">\n <div class=\"text-white\">\n <div class=\"card-header\">Today's Predictions</div>\n <div class=\"card-body\">\n <h4 class=\"card-title\"><b>Confirmation</b></h4>\n <p class=\"card-text\">\"\"\" + str(x_conf_long_pred) + \"\"\"</p>\n </div>\n </div>\n </div>\n </div>\n \n <div class=\"col-lg-3 col mb-3\">\n <div class=\"card bg-danger border-0\">\n <div class=\"text-white\">\n <div class=\"card-header\">Today's Predictions</div>\n <div class=\"card-body\"> \n <h4 class=\"card-title\"><b>Active</b></h4>\n <p class=\"card-text\">\"\"\" + str(x_act_long_pred) + \"\"\"</p>\n </div>\n </div>\n </div>\n </div>\n\n <div class=\"col-lg-3 col mb-3\">\n <div class=\"card bg-success border-0\">\n <div class=\"text-white\">\n <div class=\"card-header\">Today's Predictions</div>\n <div class=\"card-body\"> \n <h4 class=\"card-title\"><b>Recovery</b></h4>\n <p class=\"card-text\">\"\"\" + str(x_rec_long_pred) + \"\"\"</p>\n </div>\n </div>\n </div>\n </div>\n\n <div class=\"col-lg-3 col mb-3\">\n <div class=\"card bg-dark border-0\">\n <div class=\"text-white\">\n <div class=\"card-header\">Today's Predictions</div>\n <div class=\"card-body\">\n <h4 class=\"card-title\"><b>Death</b></h4>\n <p class=\"card-text\">\"\"\" + str(x_det_long_pred) + \"\"\"</p>\n </div>\n </div>\n </div>\n </div>\n \n </div>\n</div>\n\n<div class=\"container\">\n <div class=\"row justify-content-center\">\n \n <div class=\"col-lg-8 col mb-3\">\n <div class=\"card border-0\">\n <div class=\"text-dark\">\n <div class=\"card-body\">\n <div class=\"text-center\">\n <p class=\"card-text\">Get detailed Predictions of confirmations, recoveries and deaths for next 14 days</p>\n <button class=\"btn btn-warning\" type=\"button\" data-toggle=\"collapse\" data-target=\"#pred\" aria-expanded=\"false\" aria-controls=\"pred\">\n Predictions of each day for next 14 days <i class=\"fas fa-chevron-circle-down fa-1x\"></i>\n </button>\n </div>\n </div>\n </div>\n </div>\n </div>\n \n </div>\n</div>\n\n<div class=\"collapse\" id=\"pred\">\n<div class=\"container mb-3\">\n<div class=\"table-responsive\">\n\"\"\" + pred_day14 + \"\"\"\n</div>\n</div>\n</div>\n\n<footer class=\"py-3\">\n <div class=\"sticky-bottom\">\n <div class=\"container\">\n <div class=\"row justify-content-center\">\n <p class=\"m-0 text-center text-dark\">\n Copyright <i class=\"fa fa-copyright\"></i> 2020. 
Made with <i class=\"fa fa-heart\"></i> by <a href=\"https://github.com/sudo-rajarshi\" target=\"_blank\" rel=\"noopener\">Rajarshi Bhadra</a>\n </p>\n </div>\n </div>\n </div>\n</footer>\n</body>\n</html>\n\"\"\"\n\nindex_ = open(os.path.join(dir_path, 'HTML', 'index.htm'),\"w\")\nindex_.write(index)\nindex_.close()\n\n\n# # trend.html\n\nnow = datetime.now()\ndate = now.strftime(\"%d-%m-%Y\")\ndate_time = now.strftime(\"%d-%m-%Y at %H:%M:%S\")\n\nintro = \"\"\"\n<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <!-- Global site tag (gtag.js) - Google Analytics -->\n <script async src=\"https://www.googletagmanager.com/gtag/js?id=UA-168253703-1\"></script>\n <script>\n window.dataLayer = window.dataLayer || [];\n function gtag(){dataLayer.push(arguments);}\n gtag('js', new Date());\n\n gtag('config', 'UA-168253703-1');\n </script>\n\n <meta charset=\"utf-8\">\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=0.77, shrink-to-fit=yes\">\n <meta name=\"Description\" content=\"Insights and Future Prediction of Covid-19 Pandemic in India\">\n <meta http-equiv=\"Cache-Control\" content=\"no-cache, no-store, must-revalidate\" />\n <meta http-equiv=\"Pragma\" content=\"no-cache\" />\n <meta http-equiv=\"Expires\" content=\"0\" />\n <title>Insights of Covid'19 in India</title>\n <link rel=\"stylesheet\" href=\"https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.4.1/css/bootstrap.min.css\">\n <link rel=\"stylesheet\" href=\"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.13.0/css/all.min.css\">\n <link rel=\"stylesheet\" href=\"https://cdn.datatables.net/v/bs4/dt-1.10.20/datatables.min.css\">\n <script src=\"https://cdn.jsdelivr.net/npm/[email protected]/dist/Chart.min.js\"></script>\n <script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js\"></script>\n <script src=\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js\"></script>\n <script src=\"https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js\"></script>\n <style>\n @import url('https://fonts.googleapis.com/css?family=Roboto+Slab&display=swap');\n *{font-family: 'Roboto Slab', serif}\n </style>\n</head>\n<body>\n\n<nav class=\"navbar bg-light navbar-expand-sm navbar-light sticky-top\">\n <a class=\"navbar-brand\" href=\"index\"\"><b>Insights of Covid-19</b> <i class=\"fas fa-virus\"></i></a>\n <button class=\"navbar-toggler\" type=\"button\" data-toggle=\"collapse\" data-target=\"#navbarSupportedContent\">\n <span class=\"navbar-toggler-icon\"></span>\n </button>\n <div class=\"collapse navbar-collapse\" id=\"navbarSupportedContent\">\n <ul class=\"navbar-nav ml-auto\">\n <li class=\"nav-item\">\n <a class=\"nav-link\" href=\"index\"><b>Home</b></a>\n </li>\n <li class=\"nav-item active\">\n <a class=\"nav-link\" href=\"trendAnalysis\"><b>Trend Analysis <i class=\"fas fa-chart-line\"></i></b></a>\n </li>\n <li class=\"nav-item\">\n <a class=\"nav-link\" href=\"helpfulResources\"><b>Helpful Resources</b></a>\n </li>\n </ul>\n </div>\n</nav>\n\n<br>\n\n<div class=\"text-center\">\n <h4 class=\"text-center\"><b>Today's Current Status</b></h4>\n <br>\n</div>\n\n<div class=\"container\">\n <div class=\"row justify-content-center\">\n \n <div class=\"col-lg-3 col mb-3\">\n <div class=\"card bg-primary border-0\">\n <div class=\"text-white\">\n <div class=\"card-body\">\n <h3 class=\"card-title\"><b>Confirmed</b></h3>\n <p class=\"card-text\">\"\"\" + str(confirmed_list[0]) + ' (+' + 
str(deltaconfirmed_list[0]) + ')' + \"\"\"</p>\n </div>\n </div>\n </div>\n </div>\n\n <div class=\"col-lg-3 col mb-3\">\n <div class=\"card bg-danger border-0\">\n <div class=\"text-white\">\n <div class=\"card-body\">\n <h3 class=\"card-title\"><b>Active</b></h3>\n <p class=\"card-text\">\"\"\" + str(active[0]) + \"\"\"</p>\n </div>\n </div>\n </div>\n </div>\n\n <div class=\"col-lg-3 col mb-3\">\n <div class=\"card bg-success border-0\">\n <div class=\"text-white\">\n <div class=\"card-body\">\n <h3 class=\"card-title\"><b>Recovered</b></h3>\n <p class=\"card-text\">\"\"\" + str(recovered_list[0]) + ' (+' + str(deltarecovered_list[0]) + ')' + \"\"\"</p>\n </div>\n </div>\n </div>\n </div>\n\n\n <div class=\"col-lg-3 col mb-3\">\n <div class=\"card bg-dark border-0\">\n <div class=\"text-white\">\n <div class=\"card-body\">\n <h3 class=\"card-title\"><b>Deceased</b></h3>\n <p class=\"card-text\">\"\"\" + str(deaths_list[0]) + ' (+' + str(deltadeaths_list[0]) + ')' + \"\"\"</p>\n </div>\n </div>\n </div>\n </div>\n </div>\n</div>\n\"\"\"\n\nintro_ = open(os.path.join(dir_path, 'HTML', 'trend_intro.html'),\"w\")\nintro_.write(intro)\nintro_.close()\n\n\nconfirmed_list_chart = []\n\ni = 0\nfor i in confirmed_list:\n confirmed_list_chart.append(i)\n\n\n\ndaily_data_intro = \"\"\"\n<div class=\"container\">\n <div class=\"row justify-content-center\">\n <div class=\"col-lg-8 col mb-3\">\n <div class=\"card border-0\">\n <div class=\"text-dark\">\n <div class=\"card-body\">\n <div class=\"text-center\">\n <h4 class=\"card-title\"><b>In depth Trend Analysis of last 14 days</b></h4>\n <p class=\"card-text\">Detailed daily analysis of Confirmations, Recoveries and Deaths of last 14 days</p>\n <button class=\"btn btn-warning\" type=\"button\" data-toggle=\"collapse\" data-target=\"#trend\" aria-expanded=\"false\" aria-controls=\"state\">\n Trend Analysis <i class=\"fas fa-chevron-circle-down fa-1x\"></i>\n </button>\n </div>\n </div>\n </div>\n </div>\n </div>\n </div>\n</div>\n\n<div class=\"collapse\" id=\"trend\">\n<div class=\"container mb-3\">\n<div class=\"table-responsive\">\n\"\"\"+ daily +\"\"\"\n</div>\n</div>\n</div>\n\"\"\"\n\ndaily_data_intro_ = open(os.path.join(dir_path, 'HTML', 'daily_data_intro.html'), 'w')\ndaily_data_intro_.write(daily_data_intro)\ndaily_data_intro_.close()\n\n\n\n\nchart_total = \"\"\"\n</div>\n</div>\n\n<h4 class=\"text-center\">\n <b>Overall Analysis of last 56 days</b>\n</h4>\n\n<br>\n\n<div class=\"container\">\n <div class=\"row justify-content-center\">\n \n <div class=\"col-lg-6 col mb-3\">\n <script src=\"https://cdn.jsdelivr.net/npm/[email protected]/dist/Chart.min.js\"></script>\n <canvas id=\"myChart_3\"></canvas>\n <script>\n var ctx_3 = document.getElementById('myChart_3').getContext('2d');\n var mixedChart_3 = new Chart(ctx_3, {\n type: 'bar',\n data: {\n datasets: [{\n label: 'Daily Confirmations',\n data: \"\"\" + str(dailyconfirmed_list_chart[-56:]) + \"\"\",\n backgroundColor: [\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 
'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n ],\n },{\n label: '',\n data: \"\"\" + str(dailyconfirmed_list_chart[-56:]) + \"\"\",\n pointRadius: 0,\n borderColor: \"rgba(0,0,255,1)\",\n type: 'line',\n fill: true,\n order: 1\n }],\n labels: \"\"\" + str(date_list_chart[-56:]) + \"\"\"\n },\n options: {\n scales: {\n xAxes: [{\n display: false\n }],\n yAxes: [{\n display: false\n }]\n }\n }\n });\n </script>\n </div>\n\n <div class=\"col-lg-6 col mb-3\">\n <script src=\"https://cdn.jsdelivr.net/npm/[email protected]/dist/Chart.min.js\"></script>\n <canvas id=\"myChart_4\"></canvas>\n <script>\n var ctx_4 = document.getElementById('myChart_4').getContext('2d');\n var mixedChart_4 = new Chart(ctx_4, {\n type: 'bar',\n data: {\n datasets: [{\n label: 'Total Confirmations',\n data: \"\"\" + str(totalconfirmed_list_chart[-56:]) + \"\"\",\n backgroundColor: [\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n ],\n },{\n label: '',\n data: \"\"\" + str(totalconfirmed_list_chart[-56:]) + \"\"\",\n pointRadius: 0,\n borderColor: \"rgba(0,0,255,1)\",\n type: 'line',\n fill: true,\n order: 1\n }],\n labels: \"\"\" + str(date_list_chart[-56:]) + \"\"\"\n },\n options: {\n scales: {\n xAxes: [{\n display: false\n }],\n yAxes: [{\n display: false\n }]\n }\n }\n });\n </script>\n </div>\n\n <div class=\"col-lg-6 col mb-3\">\n <script src=\"https://cdn.jsdelivr.net/npm/[email protected]/dist/Chart.min.js\"></script>\n <canvas id=\"myChart_41\"></canvas>\n <script>\n var ctx_41 = document.getElementById('myChart_41').getContext('2d');\n var mixedChart_41 = new Chart(ctx_41, {\n type: 'bar',\n data: {\n datasets: [{\n label: 'Daily Recoveries',\n data: \"\"\" + str(dailyrecovered_list_chart[-56:]) + \"\"\",\n backgroundColor: [\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 
'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n ],\n },{\n label: '',\n data: \"\"\" + str(dailyrecovered_list_chart[-56:]) + \"\"\",\n borderColor: \"rgba(92,184,92,1)\",\n pointRadius: 0,\n type: 'line',\n fill: true,\n order: 1\n }],\n labels: \"\"\" + str(date_list_chart[-56:]) + \"\"\"\n },\n options: {\n scales: {\n xAxes: [{\n display: false\n }],\n yAxes: [{\n display: false\n }]\n }\n }\n });\n </script>\n </div>\n\n <div class=\"col-lg-6 col mb-3\">\n <script src=\"https://cdn.jsdelivr.net/npm/[email protected]/dist/Chart.min.js\"></script>\n <canvas id=\"myChart_31\"></canvas>\n <script>\n var ctx_31 = document.getElementById('myChart_31').getContext('2d');\n var mixedChart_31 = new Chart(ctx_31, {\n type: 'bar',\n data: {\n datasets: [{\n label: 'Total Recoveries',\n data: \"\"\" + str(totalrecovered_list_chart[-56:]) + \"\"\",\n backgroundColor: [\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(92,184,92,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n 'rgba(0,0,255,0)',\n ],\n },{\n label: '',\n data: \"\"\" + str(totalrecovered_list_chart[-56:]) + \"\"\",\n borderColor: \"rgba(92,184,92,1)\",\n pointRadius: 0,\n type: 'line',\n fill: true,\n order: 1\n }],\n labels: \"\"\" + str(date_list_chart[-56:]) + \"\"\"\n },\n options: {\n scales: {\n xAxes: [{\n display: false\n }],\n yAxes: [{\n display: false\n }]\n }\n }\n });\n </script>\n 
</div>\n\n <div class=\"col-lg-6 col mb-3\">\n <script src=\"https://cdn.jsdelivr.net/npm/[email protected]/dist/Chart.min.js\"></script>\n <canvas id=\"myChart_32\"></canvas>\n <script>\n var ctx_32 = document.getElementById('myChart_32').getContext('2d');\n var mixedChart_32 = new Chart(ctx_32, {\n type: 'bar',\n data: {\n datasets: [{\n label: 'Daily Deaths',\n data: \"\"\" + str(dailydeceased_list_chart[-56:]) + \"\"\",\n backgroundColor: [\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n ],\n },{\n label: '',\n data: \"\"\" + str(dailydeceased_list_chart[-56:]) + \"\"\",\n borderColor: \"rgba(255,0,0,1)\",\n pointRadius: 0,\n type: 'line',\n fill: true,\n order: 1\n }],\n labels: \"\"\" + str(date_list_chart[-56:]) + \"\"\"\n },\n options: {\n scales: {\n xAxes: [{\n display: false\n }],\n yAxes: [{\n display: false\n }]\n }\n }\n });\n </script>\n </div>\n\n <div class=\"col-lg-6 col mb-3\">\n <script src=\"https://cdn.jsdelivr.net/npm/[email protected]/dist/Chart.min.js\"></script>\n <canvas id=\"myChart_42\"></canvas>\n <script>\n var ctx_42 = document.getElementById('myChart_42').getContext('2d');\n var mixedChart_42 = new Chart(ctx_42, {\n type: 'bar',\n data: {\n datasets: [{\n label: 'Total Deaths',\n data: \"\"\" + str(totaldeceased_list_chart[-56:]) + \"\"\",\n backgroundColor: [\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 
'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n 'rgba(255,0,0,0)',\n ],\n },{\n label: '',\n data: \"\"\" + str(totaldeceased_list_chart[-56:]) + \"\"\",\n borderColor: \"rgba(255,0,0,1)\",\n pointRadius: 0,\n type: 'line',\n fill: true,\n order: 1\n }],\n labels: \"\"\" + str(date_list_chart[-56:]) + \"\"\"\n },\n options: {\n scales: {\n xAxes: [{\n display: false\n }],\n yAxes: [{\n display: false\n }]\n }\n }\n });\n </script>\n </div>\n </div>\n</div>\n\n<br>\n\"\"\"\n\nchart_total_ = open(os.path.join(dir_path, 'HTML', 'chart_total.html'),\"w\")\nchart_total_.write(chart_total)\nchart_total_.close()\n\n\nICMR_report_data_intro = \"\"\"\n</div>\n</div>\n\n<br>\n\n<h4 class=\"text-center\">\n <b>Test data of last 28 days from ICMR</b>\n</h4>\n\"\"\"\n\nICMR_report_data_intro_ = open(os.path.join(dir_path, 'HTML', 'ICMR_report_data_intro.html'), 'w')\nICMR_report_data_intro_.write(ICMR_report_data_intro)\nICMR_report_data_intro_.close()\n\nreport_date_list_chart = []\ntotalsamplestested_list_chart = []\ndt_list_daily_chart = []\ntotalsamplestested_daily_list_chart = []\n\ni,j,k,l = 0,0,0,0\n\n \nfor i in tst_list_total:\n totalsamplestested_list_chart.append(i)\n \nfor j in dt_list_total:\n report_date_list_chart.append(j)\n \nfor k in dt_list_daily:\n dt_list_daily_chart.append(k)\n \nfor l in tst_list_daily:\n totalsamplestested_daily_list_chart.append(l)\n\nicmr_stat_sample = \"\"\"\n<div class=\"container\">\n <div class=\"row justify-content-center\">\n \n <div class=\"col-lg-6 col mb-3\">\n <div class=\"card border-0\">\n <div class=\"text-info\">\n <div class=\"text-center\">\n <div class=\"card-body\">\n <h5 class=\"card-title\"><b>Tests per Million</b></h5>\n <p class=\"card-text\">\"\"\" + str(int((totalsamplestested_list_chart[-1]/population)*1000000)) + \"\"\"</p>\n </div>\n </div>\n </div>\n </div>\n </div>\n \n <div class=\"col-lg-6 col mb-3\">\n <div class=\"card border-0\">\n <div class=\"text-info\">\n <div class=\"text-center\">\n <div class=\"card-body\">\n <h5 class=\"card-title\"><b>Samples Tested</b></h5>\n <p class=\"card-text\">\"\"\" + str(totalsamplestested_list_chart[-1]) + \"\"\"</p>\n </div>\n </div>\n </div>\n </div>\n </div>\n \n </div>\n</div>\n\n<div class=\"container\">\n <div class=\"row justify-content-center\">\n \n <div class=\"col-lg-6 col mb-3\">\n <canvas id=\"myChart_12\"></canvas>\n <script>\n var ctx_12 = document.getElementById('myChart_12').getContext('2d');\n var mixedChart_12 = new Chart(ctx_12, {\n type: 'bar',\n data: {\n labels: \"\"\" + str(dt_list_daily_chart) + \"\"\",\n datasets: [{\n label: 'Daily Samples Tested',\n data: \"\"\" + str(totalsamplestested_daily_list_chart) + \"\"\",\n backgroundColor: [\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n 'rgba(255, 0, 0, 0.3)',\n ],\n borderWidth: 2,\n fill: true\n }]\n },\n options: {\n scales: {\n xAxes: [{\n display: false\n 
}],\n yAxes: [{\n display: false\n }]\n }\n }\n\n });\n </script>\n </div>\n\n <div class=\"col-lg-6 col mb-3\">\n <script src=\"https://cdn.jsdelivr.net/npm/[email protected]/dist/Chart.min.js\"></script>\n <canvas id=\"myChart_1\"></canvas>\n <script>\n var ctx_1 = document.getElementById('myChart_1').getContext('2d');\n var mixedChart_1 = new Chart(ctx_1, {\n type: 'bar',\n data: {\n labels: \"\"\" + str(report_date_list_chart) + \"\"\",\n datasets: [{\n label: 'Total Samples Tested',\n data: \"\"\" + str(totalsamplestested_list_chart) + \"\"\",\n backgroundColor: [\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n 'rgba(255, 0, 0, 0)',\n ],\n borderWidth: 0,\n fill: true\n },{\n label: '',\n data: \"\"\" + str(totalsamplestested_list_chart) + \"\"\",\n borderColor: \"rgba(255,0,0,1)\",\n pointRadius: 0,\n type: 'line',\n fill: true,\n order: 1\n }]\n },\n options: {\n scales: {\n xAxes: [{\n display: false\n }],\n yAxes: [{\n display: false\n }]\n }\n }\n\n });\n </script>\n </div>\n </div>\n</div>\n\"\"\"\n\nicmr_stat_sample_ = open(os.path.join(dir_path, 'HTML', 'icmr_stat_sample.html'),\"w\")\nicmr_stat_sample_.write(icmr_stat_sample)\nicmr_stat_sample_.close()\n\nfooter = \"\"\"\n<footer class=\"py-3\">\n <div class=\"sticky-bottom\">\n <div class=\"container\">\n <div class=\"row justify-content-center\">\n <p class=\"m-0 text-center text-dark\">\n Copyright <i class=\"fa fa-copyright\"></i> 2020. 
Made with <i class=\"fa fa-heart\"></i> by <a href=\"https://github.com/sudo-rajarshi\" target=\"_blank\" rel=\"noopener\">Rajarshi Bhadra</a>\n </p>\n </div>\n </div>\n </div>\n</footer>\n</body>\n</html>\n\"\"\"\n\nfooter_ = open(os.path.join(dir_path, 'HTML', 'footer.html'),\"w\")\nfooter_.write(footer)\nfooter_.close()\n\n\n\nwith open(os.path.join(dir_path, 'HTML', 'trend_intro.html')) as rd: \n intro = rd.read()\n \nwith open(os.path.join(dir_path, 'HTML', 'daily_data_intro.html')) as rd: \n daily_intro = rd.read()\n \nwith open(os.path.join(dir_path, 'HTML', 'ICMR_report_data_intro.html')) as rd: \n ICMR_intro = rd.read()\n \nwith open(os.path.join(dir_path, 'HTML', 'icmr_stat_sample.html')) as rd: \n ICMR_chart = rd.read()\n \nwith open(os.path.join(dir_path, 'HTML', 'chart_total.html')) as rd: \n line_chart_total = rd.read()\n \nwith open(os.path.join(dir_path, 'HTML', 'footer.html')) as rd: \n footer = rd.read()\n \ndata = intro + ICMR_intro + ICMR_chart + daily_intro + line_chart_total + footer\n\nwith open (os.path.join(dir_path, 'HTML', 'trendAnalysis.htm'), 'w') as fp: \n fp.write(data)\n \ndate_time_exe = now.strftime(\"%H:%M:%S\")\n\n# ftp = ftplib.FTP('ftpupload.net')\n# ftp.login('*****','*****')\n# ftp.cwd('htdocs')\n\n# file1 = open(os.path.join(dir_path, 'HTML', 'index.htm'),'rb')\n# file2 = open(os.path.join(dir_path, 'HTML', 'trendAnalysis.htm'),'rb')\n\n# ftp.storlines('STOR index.htm', file1)\n# ftp.storlines('STOR trendAnalysis.htm', file2)\n\n# file1.close()\n# file2.close()\n\ndate_time_upload = now.strftime(\"%H:%M:%S\")\n\nprint(\"Operation Executed at {} and Files Uploaded ot {}\".format(date_time_exe, date_time_upload))\n"
] | [
[
"pandas.DataFrame",
"numpy.append",
"numpy.add",
"numpy.array",
"numpy.divide"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
teamwong111/Data-mining-course | [
"719ea546f7f0f3904e888ac94c19d02daff32a9b"
] | [
"svm-digital-recognition/code/test.py"
] | [
"# -*- coding: utf-8 -*-\nimport numpy as np\nimport tensorflow as tf\nimport itertools\nimport matplotlib.pyplot as plt\nimport _pickle as pickle\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\nimport data\nimport mydata\n\n'''步骤五:评估模型'''\n# 加载模型\nwith open('.\\\\saved_model\\\\paramater.pkl','rb') as file:\n model = pickle.load(file)\n\n# 获取自己写的数字\n(my_x_test,my_y_test) = mydata.getmydata()\n\n# 评估模型\n# 输出混淆矩阵\nprint(f\"我的数据集混淆矩阵如下:\")\npredictions = [int(a) for a in model.predict(my_x_test)]\nprint(confusion_matrix(my_y_test, predictions))\nprint(classification_report(my_y_test, np.array(predictions), labels=np.unique(predictions)))\n#计算准确度\nprint('accuracy=', accuracy_score(my_y_test, predictions))"
] | [
[
"numpy.unique",
"numpy.array",
"sklearn.metrics.confusion_matrix",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
anuranrc/Paddle | [
"21fa3eb0688459d3b71141d316e8358d31882b8d"
] | [
"paddle/py_paddle/util.py"
] | [
"# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nSome Useful method for py_paddle.\n\"\"\"\n\nimport swig_paddle\nimport os\nimport paddle.trainer.PyDataProviderWrapper\nimport paddle.proto.ParameterConfig_pb2\nimport paddle.proto.ModelConfig_pb2\nimport paddle.proto.TrainerConfig_pb2\nimport weakref\nimport numpy\nimport struct\nimport sys\nimport copy\n\n\ndef initializePaddle(*args):\n \"\"\"\n To initialize paddle process.\n :param args: Command line options, such as --use_gpu=0, etc.\n :return: Nothing.\n \"\"\"\n old_argv = copy.deepcopy(sys.argv)\n old_pypath = os.getenv(\"PYTHONPATH\")\n pypath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n if old_pypath is not None:\n pypath = os.pathsep.join([pypath, old_pypath])\n os.putenv(\"PYTHONPATH\", pypath)\n args = [\"\"] + list(args) # argv[0] is command name, it is not important.\n swig_paddle.__initPaddle__(args)\n sys.argv = old_argv\n\n\ndef __monkeypatch_init_paddle__():\n swig_paddle.__initPaddle__ = swig_paddle.initPaddle\n swig_paddle.initPaddle = initializePaddle\n\n\nclass __ParameterCallbackWrapper__(swig_paddle.UpdateCallback):\n \"\"\"\n Wrap the python callable object to paddle.UpdateCallback.\n\n INTERNAL USE ONLY.\n \"\"\"\n\n def __init__(self, callback):\n swig_paddle.UpdateCallback.__init__(self)\n self.callback = callback\n\n def apply(self, param):\n self.callback(param)\n\n @staticmethod\n def wrap(callback):\n \"\"\"\n Cast the python callable object/paddle.UpdateCallback to\n swig_paddle.UpdateCallback.__disown__\n :param callback: callable or swig_paddle.UpdateCallback object.\n \"\"\"\n if isinstance(callback, swig_paddle.UpdateCallback):\n return callback.__disown__()\n elif isinstance(callback, weakref.ProxyType):\n raise RuntimeError(\"Should not pass __disown__ object\")\n else:\n return __ParameterCallbackWrapper__(callback).__disown__()\n\n\ndef __arguments_to_numpy__(i, arg):\n assert isinstance(arg, swig_paddle.Arguments)\n value = arg.getSlotValue(i)\n ids = arg.getSlotIds(i)\n if value is not None:\n assert isinstance(value, swig_paddle.Matrix)\n value = value.copyToNumpyMat()\n if ids is not None:\n assert isinstance(ids, swig_paddle.IVector)\n ids = ids.copyToNumpyArray()\n return {\"value\": value, \"id\": ids}\n\n\ndef __monkeypatch_gradient_machine__():\n \"\"\"\n Add some class methods to GradientMachine.\n This method should be only used internally.\n \"\"\"\n swig_paddle.GradientMachine.loadFromConfigFile = \\\n staticmethod(loadGradientMachine)\n\n def __matrix_to_numpy__(m):\n if isinstance(m, swig_paddle.Matrix):\n return m.copyToNumpyMat()\n elif isinstance(m, swig_paddle.IVector):\n return m.copyToNumpyArra()\n else:\n raise RuntimeError(\"Input arg should be matrix or vecotr.\")\n\n def createFromConfigProto(protoObj,\n createMode=swig_paddle.CREATE_MODE_NORMAL,\n paramTypes=[\n swig_paddle.PARAMETER_VALUE,\n swig_paddle.PARAMETER_GRADIENT,\n swig_paddle.PARAMETER_MOMENTUM\n ]):\n \"\"\"\n Create 
Gradient Machine From Proto object.\n :param protoObj: Model config\n :type protoObj: proto.ModelConfig_pb2.ModelConfig\n :param createMode: Create Mode, default is normal.\n :type createMode: int\n :param paramTypes: the gradient machine parameter type.\n :type paramTypes: list of int\n :return: paddle.GradientMachine\n \"\"\"\n assert isinstance(protoObj, paddle.proto.ModelConfig)\n return swig_paddle.GradientMachine.createByConfigProtoStr(\n protoObj.SerializeToString(), createMode, paramTypes)\n\n swig_paddle.GradientMachine.createFromConfigProto = \\\n staticmethod(createFromConfigProto)\n\n def forwardTest(self, inArgs):\n \"\"\"\n forwardTest. forward gradient machine in test mode, and return a numpy\n matrix dict.\n\n :param inArgs: The input arguments\n :type inArgs: paddle.Arguments\n :return: A dictionary with keys ['id', 'value'], each value is a\n numpy.ndarray.\n \"\"\"\n outArgs = swig_paddle.Arguments.createArguments(0)\n self.forward(inArgs, outArgs, swig_paddle.PASS_TEST)\n return [\n __arguments_to_numpy__(i, outArgs)\n for i in xrange(outArgs.getSlotNum())\n ]\n\n swig_paddle.GradientMachine.forwardTest = forwardTest\n\n # Monkey patching backward\n swig_paddle.GradientMachine.__backward__ = swig_paddle.GradientMachine.backward\n\n def backward(self, callback):\n \"\"\"\n GradientMachine Backward\n :param callback: a callback which parameter is (paddle.Parameter) or\n a paddle.UpdateCallback object.\n \"\"\"\n self.__backward__(__ParameterCallbackWrapper__.wrap(callback))\n\n swig_paddle.GradientMachine.backward = backward\n\n # Monkey patching forwardBackward.\n swig_paddle.GradientMachine.__forwardBackward__ = \\\n swig_paddle.GradientMachine.forwardBackward\n\n def forwardBackward(self,\n inArgs,\n outArgs,\n passType,\n callback=swig_paddle.UpdateCallback()):\n \"\"\"\n GradientMachine forward backward.\n :param inArgs: Input Arguments for GradientMachine.\n :type inArgs: paddle.Arguments\n :param outArgs: Output Arguments for GradientMachine.\n :type outArgs: paddle.Arguments\n :param passType: gradient machine's pass type.\n :type passType: paddle.PassType\n :param callback: a callable object with arguments (paddle.Parameter) or\n a paddle.UpdateCallback it will be called when\n backward\n \"\"\"\n self.__forwardBackward__(inArgs, outArgs, passType,\n __ParameterCallbackWrapper__.wrap(callback))\n\n swig_paddle.GradientMachine.forwardBackward = forwardBackward\n\n def getParameters(self):\n return (self.getParameter(i) for i in xrange(self.getParameterSize()))\n\n swig_paddle.GradientMachine.getParameters = getParameters\n\n def getNonStaticParameters(self):\n return (self.getNonStaticParameter(i)\n for i in xrange(self.getNonStaticParameterSize()))\n\n swig_paddle.GradientMachine.getNonStaticParameters = getNonStaticParameters\n\n def getLayerOutputs(self, layerNames):\n \"\"\"\n getLayerOutputs. 
get outputs of layers and return a numpy matrix dict.\n :param layerNames: layer names.\n :type layerNames: string or list.\n \"\"\"\n if isinstance(layerNames, basestring):\n layerNames = [layerNames]\n elif not isinstance(layerNames, list):\n raise RuntimeError(\"Input args shuld be string or a sting list.\")\n\n output = dict()\n for name in layerNames:\n output[name] = __arguments_to_numpy__(0, self.getLayerOutput(name))\n return output\n\n swig_paddle.GradientMachine.getLayerOutputs = getLayerOutputs\n\n\ndef loadGradientMachine(config_filename, model_dir=None):\n \"\"\"\n Load a gradient machine from config file name/path.\n :param config_filename: The trainer config file name/path\n :param model_dir: The model parameter directory. None if same as the\n directory of config_filename\n :return: GradientMachine with some enhance methods.\n :rtype: paddle.GradientMachine\n \"\"\"\n trainer_config = swig_paddle.TrainerConfig.createFromTrainerConfigFile(\n config_filename)\n assert isinstance(trainer_config, swig_paddle.TrainerConfig)\n model_conf = trainer_config.getModelConfig()\n network = swig_paddle.GradientMachine.createByModelConfig(model_conf)\n assert isinstance(network, swig_paddle.GradientMachine)\n if model_dir is None:\n model_dir = os.path.dirname(config_filename)\n network.loadParameters(model_dir)\n return network\n\n\ndef loadParameterFile(fn):\n \"\"\"\n Load Paddle Parameter file to numpy.ndarray\n :param fn: file name or file like object.\n :type fn: str or file like object.\n :return: numpy array\n :rtype: numpy.ndarray\n :raise: paddle.UnsupportError when parameter format is wrong.\n \"\"\"\n if isinstance(fn, str):\n with open(fn, 'rb') as f:\n return loadParameterFile(f)\n elif hasattr(fn, 'read'): # File like object\n version, = struct.unpack('i', fn.read(4))\n if version != 0:\n raise swig_paddle.UnsupportError()\n value_length, = struct.unpack(\"I\", fn.read(4))\n if value_length != 4 and value_length != 8:\n raise swig_paddle.UnsupportError()\n dtype = 'float32' if value_length == 4 else 'float64'\n param_size, = struct.unpack(\"L\", fn.read(8))\n value = numpy.fromfile(fn, dtype)\n if len(value) != param_size:\n raise swig_paddle.UnsupportError()\n return value\n else:\n raise swig_paddle.UnsupportError()\n\n\nclass DataProviderWrapperConverter(object):\n \"\"\"\n A class convert DataFormat from PyDataProvider Wrapper to\n py_paddle.paddle.Arguemnts.\n \"\"\"\n\n class DenseValueConverter(object):\n \"\"\"\n Internal class\n \"\"\"\n\n def __init__(self, header_def):\n self.__dim__ = header_def.dim\n self.buf = []\n\n def append(self, other):\n assert len(other) == self.__dim__\n self.buf += other\n\n def __call__(self, slot_idx, arg):\n mat = swig_paddle.Matrix.createDense(self.buf,\n len(self.buf) / self.__dim__,\n self.__dim__)\n arg.setSlotValue(slot_idx, mat)\n\n class IdValueConverter(object):\n \"\"\"\n Internal class\n \"\"\"\n\n def __init__(self, *args):\n self.buf = []\n\n def append(self, other):\n assert isinstance(other, int)\n self.buf.append(other)\n\n def __call__(self, slot_idx, arg):\n arg.setSlotIds(slot_idx, swig_paddle.IVector.create(self.buf))\n\n class SparseNonValueConverter(object):\n \"\"\"\n Internal class\n \"\"\"\n\n def __init__(self, slot_def):\n self.indices = [0]\n self.cols = []\n self.dim = slot_def.dim\n\n def append(self, other):\n self.indices.append(self.indices[-1] + len(other))\n self.cols += other\n\n def __call__(self, slot_idx, arg):\n mat = swig_paddle.Matrix.createSparse(\n len(self.indices) - 1, self.dim, 
len(self.cols), True)\n assert isinstance(mat, swig_paddle.Matrix)\n mat.sparseCopyFrom(self.indices, self.cols)\n self.putIntoArg(slot_idx, arg, mat)\n\n def putIntoArg(self, slot_idx, arg, mat):\n arg.setSlotValue(slot_idx, mat)\n\n class SparseValueConverter(SparseNonValueConverter):\n \"\"\"\n Internal class\n \"\"\"\n\n def __init__(self, slot_def):\n super(DataProviderWrapperConverter.SparseValueConverter,\n self).__init__(slot_def)\n self.values = []\n\n def append(self, other):\n super(DataProviderWrapperConverter.SparseValueConverter,\n self).append(map(lambda x: x[0], other))\n self.values += map(lambda x: x[1], other)\n\n def __call__(self, slot_idx, arg):\n mat = swig_paddle.Matrix.createSparse(\n len(self.indices) - 1, self.dim, len(self.cols), False)\n assert isinstance(mat, swig_paddle.Matrix)\n mat.sparseCopyFrom(self.indices, self.cols, self.values)\n self.putIntoArg(slot_idx, arg, mat)\n\n __SLOT_VALUE_CONVERTER_MAP__ = {\n paddle.trainer.PyDataProviderWrapper.DenseSlot: DenseValueConverter,\n paddle.trainer.PyDataProviderWrapper.IndexSlot: IdValueConverter,\n paddle.trainer.PyDataProviderWrapper.SparseNonValueSlot:\n SparseNonValueConverter,\n paddle.trainer.PyDataProviderWrapper.SparseValueSlot:\n SparseValueConverter\n }\n\n def __init__(self, use_seq, header):\n \"\"\"\n Ctor\n :param use_seq: True if use sequence.\n :param header: List of slots type,\n trainer.PyDataProviderWrapper.SlotType\n \"\"\"\n self.__use_seq__ = use_seq\n self.__header__ = header\n\n def convert(self, wrapper_data, argument=None):\n \"\"\"\n Convert PyDataProviderWrapper format to paddle.Argument\n :param wrapper_data: PyDataProviderWrapper yield's data list.\n :param argument: The output paddle.Arguments.\n If it is not None, it will assign data in this\n arguments, else it will create new arguments.\n :return: arguments that contains data.\n :rtype: paddle.Arguments\n \"\"\"\n if argument is None:\n argument = swig_paddle.Arguments.createArguments(0)\n assert isinstance(argument, swig_paddle.Arguments)\n argument.resize(len(self.__header__))\n\n values = map(\n lambda x: DataProviderWrapperConverter.__SLOT_VALUE_CONVERTER_MAP__[x.__class__](x),\n self.__header__)\n\n if self.__use_seq__:\n seq_dim = [[] for _ in xrange(self.__header__.__len__())]\n seq_start_pos = [[0] for _ in xrange(self.__header__.__len__())]\n\n for each_sample in wrapper_data:\n for slot_idx, sequence in enumerate(each_sample):\n for raw_data in sequence:\n values[slot_idx].append(raw_data)\n seq_start_pos[slot_idx].append(seq_start_pos[slot_idx][-1] +\n len(sequence))\n seq_dim[slot_idx].append(len(sequence))\n\n for slot_idx in xrange(len(self.__header__)):\n argument.setSlotSequenceDim(\n slot_idx, swig_paddle.IVector.create(seq_dim[slot_idx]))\n argument.setSlotSequenceStartPositions(\n slot_idx,\n swig_paddle.IVector.create(seq_start_pos[slot_idx]))\n else:\n for each_sample in wrapper_data:\n for raw_data, value in zip(each_sample, values):\n value.append(raw_data)\n\n for i, v in enumerate(values):\n v(i, argument)\n\n return argument\n\n def __call__(self, wrapper_data, argument=None):\n \"\"\"\n Invoke self.convert. 
See documents in self.convert.\n \"\"\"\n return self.convert(wrapper_data, argument)\n\n\ndef __monkey_patch_protobuf_objects__():\n def ParameterConfig_toProto(self):\n \"\"\"\n Convert paddle.ParameterConfig to\n proto.ParameterConfig_pb2.ParameterConfig\n\n :return: proto.ParameterConfig_pb2.ParameterConfig object.\n \"\"\"\n param_conf = paddle.proto.ParameterConfig_pb2.ParameterConfig()\n param_conf.ParseFromString(self.toProtoString())\n return param_conf\n\n swig_paddle.ParameterConfig.toProto = ParameterConfig_toProto\n\n def OptimizationConfig_toProto(self):\n \"\"\"\n Convert paddle.OptimizationConfig to\n proto.TrainerConfig_pb2.OptimizationConfig\n\n :return: proto.TrainerConfig_pb2.OptimizationConfig\n \"\"\"\n opt_conf = proto.TrainerConfig_pb2.OptimizationConfig()\n opt_conf.ParseFromString(self.toProtoString())\n return opt_conf\n\n swig_paddle.OptimizationConfig.toProto = OptimizationConfig_toProto\n\n def OptimizationConfig_createFromProto(protoObj):\n \"\"\"\n Create a new paddle.OptimizationConfig from\n proto.TrainerConfig_pb2.OptimizationConfig\n\n :param protoObj: proto.TrainerConfig_pb2.OptimizationConfig\n :return: paddle.OptimizationConfig\n \"\"\"\n\n assert isinstance(protoObj, paddle.proto.OptimizationConfig)\n return swig_paddle.OptimizationConfig.createFromProtoString(\n protoObj.SerializeToString())\n\n swig_paddle.OptimizationConfig.createFromProto = staticmethod(\n OptimizationConfig_createFromProto)\n\n def TrainerConfig_createFromProto(protoObj):\n \"\"\"\n Create a new paddle.TrainerConfig from\n proto.OptimizationConfig\n\n :param protoObj: proto.TrainerConfig\n :return: paddle.TrainerConfig\n \"\"\"\n assert isinstance(protoObj, paddle.proto.TrainerConfig)\n return swig_paddle.TrainerConfig.createFromProtoString(\n protoObj.SerializeToString())\n\n swig_paddle.TrainerConfig.createFromProto = staticmethod(\n TrainerConfig_createFromProto)\n\n\ndef __monkey_patch_parameter__():\n def getBufs(self):\n \"\"\"\n get all parameter vectors.\n NOTE: the return value is a generator. Maybe you need to cast to\n list or tuple or something else.\n\n :return: generator of all parameter vectors.\n :rtype: generator\n \"\"\"\n return (self.getBuf(i) for i in xrange(swig_paddle.NUM_PARAMETER_TYPES))\n\n swig_paddle.Parameter.getBufs = getBufs\n\n\ndef __monkey_patch_trainer__():\n swig_paddle.Trainer.__create__ = staticmethod(swig_paddle.Trainer.create)\n\n def Trainer_create(config, model=None):\n \"\"\"\n Create a trainer for model with TrainerCOnfig trainer_config\n trainer_config.model_config will be ignored when model is supplied.\n Trainer.trainOneBatch() and Trainer.forwardOneBatch() can be used only\n when trainer_config.data_config is set.\n\n A typical usage for Trainer is:\n .. 
code-block:: python\n trainer = Trainer.create(trainer_config, model)\n for p in xrange(num_passes)\n while True:\n data = get_next_batch(batch_size)\n if not data:\n break\n trainer.trainOneDataBatch(batch_size, data)\n trainer.finishTrainPass()\n trainer.finishTrain()\n\n The trainer will take care of logging, model saving, distributed\n training, etc.\n\n :param config: trainer configuration\n :type config: paddle.proto.TrainerConfig\n :param model: the model to be trained\n :type model: swig_paddle.GradientMachine\n :return: a trainer\n :rtype swig_paddle.Trainer\n\n \"\"\"\n assert isinstance(config, paddle.proto.TrainerConfig)\n if model is not None:\n assert isinstance(model, swig_paddle.GradientMachine)\n return swig_paddle.Trainer.__create__(\n swig_paddle.TrainerConfig.createFromProto(config), model)\n\n swig_paddle.Trainer.create = staticmethod(Trainer_create)\n\n swig_paddle.Trainer.__getForwardOutput__ = \\\n swig_paddle.Trainer.getForwardOutput\n\n def getForwardOutput(self):\n \"\"\"\n Get the netword outputs from the previous trainOneBatch(),\n trainOneDataBatch(), testOneDataPatch(), or forwardOneBatch() call.\n\n :return: list of dictionary with keys ['id', 'value'], each value is a\n numpy.ndarray.\n \"\"\"\n outArgs = self.__getForwardOutput__()\n return [\n __arguments_to_numpy__(i, outArgs)\n for i in xrange(outArgs.getSlotNum())\n ]\n\n swig_paddle.Trainer.getForwardOutput = getForwardOutput\n\n\ndef monkeypatches():\n patches = [\n __monkeypatch_init_paddle__, __monkeypatch_gradient_machine__,\n __monkey_patch_protobuf_objects__, __monkey_patch_parameter__,\n __monkey_patch_trainer__\n ]\n for patch in patches:\n patch()\n"
] | [
[
"numpy.fromfile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
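The row above wraps the SWIG-generated paddle bindings by keeping each original method under a dunder alias and installing a Python wrapper in its place. Since py_paddle is not assumed to be installed here, the following is a minimal sketch of that monkey-patching pattern on a plain Python class; Machine and forward are hypothetical stand-ins, not part of the paddle API.

# Sketch of the alias-then-wrap monkey-patching pattern used throughout the row above.
class Machine:
    def forward(self, x):
        return x * 2

# Keep the original method reachable, mirroring
# swig_paddle.GradientMachine.__backward__ = swig_paddle.GradientMachine.backward
Machine.__forward__ = Machine.forward

def forward(self, x):
    """Wrapper that post-processes the original result into a dict."""
    raw = self.__forward__(x)
    return {"value": raw}

Machine.forward = forward

if __name__ == "__main__":
    assert Machine().forward(3) == {"value": 6}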
marcgonzmont/SurfaceInspection | [
"8b8171be69eb617898801a2072fd3451b279e377"
] | [
"main.py"
] | [
"import sys\r\nfrom os.path import basename\r\nimport argparse\r\nimport numpy as np\r\nimport glob\r\nimport cv2\r\nfrom myPackage import tools as tl\r\nfrom myPackage import image_processing as imp\r\nfrom myPackage import Gabor_filter as gf\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n group = parser.add_mutually_exclusive_group()\r\n group.add_argument(\"-tr\", \"--training_path\",\r\n help=\"-tr Training path of the samples\")\r\n group.add_argument(\"-te\", \"--test_path\",\r\n help=\"-te Test path of the samples\")\r\n args = vars(parser.parse_args())\r\n # Show intermediate images\r\n show = False\r\n debug = False\r\n # Margin to ensure to get the wood without bg\r\n margin = 15\r\n # Gabor filter configuration\r\n theta = np.arange(0, np.pi, np.pi / 16) # orientation of the normal to the parallel stripes of the Gabor function\r\n sigma = np.arange(2, 5, 1) # controls the width of Gaussian envelope used in Gabor kernel\r\n size = 3 # the size of convolution kernel varies\r\n prev_file = ''\r\n results = []\r\n\r\n if args[\"training_path\"] is not None:\r\n # Get the TRAINING and test images to process and the GT to evaluate the algorithm\r\n all_files = tl.natSort(tl.getSamples(args[\"training_path\"]))\r\n text = (\"\\n--- TRAINIG RESULTS ---\\n\"\r\n \"Intersection over Union (mean): {}\\n\"\r\n \"True rejected: {}\\n\"\r\n \"False rejected: {}\\n\"\r\n \"False accepted: {}\\n\"\r\n \"True accepted: {}\\n\")\r\n else:\r\n # Get the TEST images to process\r\n all_files = tl.natSort(tl.getSamples(args[\"test_path\"]))\r\n text = (\"\\n--- TEST RESULTS ---\\n\"\r\n \"Intersection over Union (mean): {}\\n\"\r\n \"True rejected: {}\\n\"\r\n \"False rejected: {}\\n\"\r\n \"False accepted: {}\\n\"\r\n \"True accepted: {}\\n\")\r\n\r\n for idx in range(len(all_files)):\r\n substr = '..' 
+ all_files[idx].split(\".\")[-2]\r\n # Read all files except .directory\r\n if substr != prev_file and substr.split(\"/\")[-1] != '':\r\n file = substr.split(\"/\")[-1] + '.png'\r\n # print(\"Image '{}'\".format(file))\r\n img, gt = tl.parseSample(substr)\r\n img_fg, coords = imp.removeBG(img.copy(), margin) #debug\r\n filters = gf.build_filters(size, theta, sigma) #debug\r\n img_filtered = gf.convolve(img_fg, filters) #debug\r\n defects_detected = imp.detectDefects(img_filtered, debug) #debug\r\n defects_detected = imp.getOriginalCoords(defects_detected, coords, margin)\r\n\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n # Draw GT\r\n if gt is not None:\r\n for x, y, w, h in gt:\r\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n # Draw detections\r\n if defects_detected is not None:\r\n for x, y, w, h in defects_detected:\r\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n if show:\r\n titles = []\r\n images = [img]\r\n title = \"Detection result for '{}'\".format(file)\r\n tl.plotImages(titles, images, title, 1, 1)\r\n results.append(imp.evaluate(gt, defects_detected)) # debug\r\n\r\n\r\n prev_file = substr\r\n # break\r\n results = np.array(results)\r\n\r\n iou_avg = np.mean([np.mean(iou) for iou in results[:, 0]])\r\n TR = np.sum(results[:, 1]).astype(int)\r\n FR = np.sum(results[:, 2]).astype(int)\r\n FA = np.sum(results[:, 3]).astype(int)\r\n TA = np.sum(results[:, 4]).astype(int)\r\n print(text.format(iou_avg, TR, FR, FA, TA))\r\n\r\n\r\n classes = ['rejected', 'accepted']\r\n cnf_matrix = np.zeros((2,2), dtype= int)\r\n cnf_matrix[0][0] = TR\r\n cnf_matrix[0][1] = FR\r\n cnf_matrix[1][0] = FA\r\n cnf_matrix[1][1] = TA\r\n\r\n tl.computeMetrics(cnf_matrix)\r\n\r\n np.set_printoptions(precision=3)\r\n # Plot normalized confusion matrix\r\n tl.plot_confusion_matrix(cnf_matrix, classes=classes, normalize=True,\r\n title=\"Normalized confusion matrix\")\r\n\r\n\r\n print(\"\\n\\n---- FINISHED!! ----\")\r\n sys.exit(0)"
] | [
[
"numpy.arange",
"numpy.set_printoptions",
"numpy.mean",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
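main.py above accumulates true/false rejected/accepted counts into a 2x2 confusion matrix and then plots a normalized version through the repo's myPackage.tools helpers. The sketch below shows only the matrix construction and row-wise normalization with numpy; the counts are invented and the plotting helper is not reproduced.

import numpy as np

# Hypothetical counts standing in for TR, FR, FA, TA computed by the script.
TR, FR, FA, TA = 40, 5, 7, 48

cnf_matrix = np.array([[TR, FR],
                       [FA, TA]], dtype=int)

# Row-wise normalization, the usual preparation for a normalized confusion matrix plot.
normalized = cnf_matrix.astype(float) / cnf_matrix.sum(axis=1, keepdims=True)

np.set_printoptions(precision=3)
print(normalized)  # each row sums to 1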
casperbh96/Statistical_Significance_Testing | [
"bfddbfc48bb9ad7f97183ec2b6f104bb2c101903"
] | [
"src/significance.py"
] | [
"import numpy as np\nfrom scipy import stats\nfrom scipy.stats import levene, f_oneway\n\ndef gaussian_test(col, values):\n stat1, p1 = stats.shapiro(values)\n stat2, p2 = stats.normaltest(values)\n\n print(f\"Gaussian: {col}\\n\\t{p1:5f} (Shapiro-Wilk)\\n\\t{p2:5f} (D'Agostino's)\")\n\n\ndef correlation_test(df):\n pearson_stat = df.corr(method=lambda x, y: stats.pearsonr(x, y)[0])\n pearson_p = df.corr(method=lambda x, y: stats.pearsonr(x, y)[1])\n spearman_stat = df.corr(method=lambda x, y: stats.spearmanr(x, y)[0])\n spearman_p = df.corr(method=lambda x, y: stats.spearmanr(x, y)[1])\n\n pearson_p = (pearson_p - np.eye(df.shape[1])).round(4)\n spearman_p = (spearman_p - np.eye(df.shape[1])).round(4)\n\n return pearson_stat, pearson_p, spearman_stat, spearman_p\n\n\ndef boldness_test(bold1, bold2, bold3):\n rng = np.random.RandomState(42)\n a_ton_of_text_boldness = rng.uniform(low=0.7, high=bold2 + 0.5, size=200)\n\n variance_check = [[bold1, bold2], [bold2, bold3]]\n\n for check in variance_check:\n stat1, p1 = levene(a_ton_of_text_boldness, check, center='mean')\n stat2, p2 = f_oneway(a_ton_of_text_boldness, check)\n\n print(f'{p1:5f}')\n print(f'{p2:5f}')\n"
] | [
[
"scipy.stats.f_oneway",
"scipy.stats.normaltest",
"numpy.eye",
"scipy.stats.pearsonr",
"scipy.stats.shapiro",
"scipy.stats.levene",
"scipy.stats.spearmanr",
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
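gaussian_test in the row above applies the Shapiro-Wilk and D'Agostino normality tests to one column of values. A self-contained version on synthetic data is shown below; the sample is drawn here only for illustration and is not data from the project.

import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
values = rng.normal(loc=0.0, scale=1.0, size=500)  # synthetic sample

stat1, p1 = stats.shapiro(values)      # Shapiro-Wilk
stat2, p2 = stats.normaltest(values)   # D'Agostino's K^2

print(f"Gaussian: example\n\t{p1:5f} (Shapiro-Wilk)\n\t{p2:5f} (D'Agostino's)")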
bsridatta/robotfashion | [
"594853c59a536a83e5c81d3a825dcfa72f7ac03b"
] | [
"robotfashion/robotfashion/data/deepfashion2.py"
] | [
"import os\nimport json\n\nimport numpy as np\nimport torch as t\n\nfrom .util import has_correct_folder_structure, maybe_download_and_unzip_data\n\nfrom torchvision.datasets import VisionDataset\nfrom PIL import Image\n\n\nclass DeepFashion2(VisionDataset):\n train_mode = \"train\"\n val_mode = \"val\"\n test_mode = \"test\"\n modes = [train_mode, val_mode, test_mode]\n\n def __init__(\n self,\n working_path: str,\n mode: str,\n password: str = None,\n download_if_missing: bool = False,\n subset_ratio=1,\n transform=None,\n ):\n super().__init__(working_path, transform=transform, target_transform=None)\n\n if not has_correct_folder_structure(\n self._get_root_data_folder(), self.get_folders(), self.get_dataset_name()\n ):\n if not download_if_missing:\n raise ValueError(\n f\"cannot find (valid) {self.get_dataset_name()} data.\"\n + \" Set download_if_missing=True to download dataset\"\n )\n\n if not password:\n raise PermissionError(\n f\"Cannot download {self.get_dataset_name()} data without the password.\"\n + \" See https://github.com/switchablenorms/DeepFashion2#download-the-data\"\n )\n\n maybe_download_and_unzip_data(\n self._get_root_data_folder(),\n self.get_download_links(),\n password=password,\n )\n\n if not has_correct_folder_structure(\n self._get_root_data_folder(),\n self.get_folders(),\n self.get_dataset_name(),\n ):\n raise Exception(\"Downloading and/or unzipping data failed\")\n\n if mode not in DeepFashion2.modes:\n raise ValueError(f\"mode {mode} should be one of {DeepFashion2.modes}\")\n\n if subset_ratio <= 0 or subset_ratio > 1:\n raise ValueError(f\"subset ratio {subset_ratio} needs to be in (0, 1]\")\n else:\n self.subset_ratio = subset_ratio\n\n self.mode = mode\n\n if mode == DeepFashion2.train_mode:\n self.image_paths, self.label_paths = self.load_train_data()\n elif mode == DeepFashion2.val_mode:\n self.image_paths, self.label_paths = self.load_val_data()\n else:\n self.image_paths, self.label_paths = self.load_test_data()\n\n def _get_root_data_folder(self):\n return os.path.join(self.root, self.get_data_folder_name())\n\n def load_train_data(self):\n return self.load_data(os.path.join(self._get_root_data_folder(), \"train\"))\n\n def load_val_data(self):\n return self.load_data(os.path.join(self._get_root_data_folder(), \"validation\"))\n\n def load_test_data(self):\n # return self.load_data(\n # os.path.join(get_root_data_folder(self.root), \"test\")\n # )\n raise NotImplementedError(\"labels of test data are not published\")\n\n def __getitem__(self, index):\n image = self.load_image(self.image_paths[index])\n label = self.load_label(self.label_paths[index])\n\n if self.transform is not None:\n image = self.transform(image)\n\n return image, label\n\n def __len__(self):\n n = len(self.image_paths)\n\n return int(self.subset_ratio * n)\n\n @staticmethod\n def load_data(data_dir):\n annos_dir = os.path.join(data_dir, \"annos\")\n image_dir = os.path.join(data_dir, \"image\")\n\n image_paths = [\n os.path.join(image_dir, f)\n for f in sorted(os.listdir(image_dir))\n if os.path.isfile(os.path.join(image_dir, f))\n ]\n label_paths = [\n os.path.join(annos_dir, f)\n for f in sorted(os.listdir(annos_dir))\n if os.path.isfile(os.path.join(annos_dir, f))\n ]\n\n if len(image_paths) != len(label_paths):\n raise ValueError(\"length of images and labels doesn't match\")\n\n return image_paths, label_paths\n\n @staticmethod\n def load_image(image_path):\n img = Image.open(image_path)\n\n return img\n\n @staticmethod\n def load_label(label_path):\n # During training, 
the model expects both the input tensors, as well as a targets (list of dictionary),\n # containing:\n # - boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values\n # between 0 and H and 0 and W\n # - labels (Int64Tensor[N]): the class label for each ground-truth box\n\n with open(label_path, \"r\") as f:\n obj = json.load(f)\n\n items = []\n count = 0\n while True:\n count += 1\n key = f\"item{count}\"\n\n if key in obj:\n items.append(obj[key])\n else:\n break\n\n n = len(items)\n boxes = np.zeros((n, 4))\n labels = np.zeros((n,))\n\n for idx, item in enumerate(items):\n boxes[idx, :] = item[\"bounding_box\"]\n labels[idx] = item[\"category_id\"]\n\n return {\"boxes\": t.tensor(boxes).float(), \"labels\": t.tensor(labels).long()}\n\n @classmethod\n def get_data_folder_name(cls):\n return f\"{cls.get_dataset_name()}_data_folder\"\n\n @staticmethod\n def get_dataset_name():\n return \"deepfashion2\"\n\n @staticmethod\n def get_download_links():\n return [\n # order:\n # 1. google drive id,\n # 2. file name,\n # 3. sha256 hash of zipfile,\n # 4. data length of zipfile\n (\n \"12DmrxXNtl0U9hnN1bzue4XX7nw1fSMZ5\",\n \"json_for_validation.zip\",\n \"1899b133c15b961c317cf03f589cdc8423fe16b290e534b642accad538656ab4\",\n 14895000,\n ),\n (\n \"1hsa-UE-LX8sks8eAcGLL-9QDNyNt6VgP\",\n \"test.zip\",\n \"1a85367dc9c75fbac8645e397b93af11c86bc059ab718c1eee31b559b5b4598b\",\n 3341995077,\n ),\n (\n \"1lQZOIkO-9L0QJuk_w1K8-tRuyno-KvLK\",\n \"train.zip\",\n \"ec6f5d83f896f3abbb46bcfb9fdd6b9f544c0585344f862c214f6de899c495c7\",\n 10633411064,\n ),\n (\n \"1O45YqhREBOoLudjA06HcTehcEebR0o9y\",\n \"validation.zip\",\n \"edabbdb57fae4b5039ff06e436cc0dfa15326424244bfac938e4a4d6f8db0259\",\n 1816223824,\n ),\n ]\n\n @staticmethod\n def get_folders():\n return [\n # order:\n # 1. folder name\n # 2. sha256 hash of all file and subfolder names\n # concatenated to a string (without spaces as separation)\n (\n \"validation\",\n \"a87d16eee207a902b5d3b5bb2ad9f92f0456ffd992b326e1f3a1dfbbc260d38e\",\n ),\n (\n \"json_for_test\",\n \"38f8e52f2a4d6e99b190d2ad71ecabdd397d9dc60673b303613ee16f99b0fdac\",\n ),\n (\n \"train\",\n \"a87d16eee207a902b5d3b5bb2ad9f92f0456ffd992b326e1f3a1dfbbc260d38e\",\n ),\n (\n \"json_for_validation\",\n \"0868b572600747de8308160e4cf9eaaeeccf9a3ceab76e6e9bb1a29ba49e07db\",\n ),\n (\n \"test\",\n \"6105d6cc76af400325e94d588ce511be5bfdbb73b437dc51eca43917d7a43e3d\",\n ),\n ]\n"
] | [
[
"numpy.zeros",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
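load_label above walks the item1, item2, ... keys of a DeepFashion2 annotation and packs them into the boxes/labels target expected by torchvision detection models. The sketch below repeats that conversion on an in-memory dict; the annotation values are invented and torch must be installed.

import numpy as np
import torch as t

# Hypothetical DeepFashion2-style annotation with item1, item2, ... keys.
obj = {
    "item1": {"bounding_box": [10, 20, 110, 220], "category_id": 3},
    "item2": {"bounding_box": [50, 60, 150, 260], "category_id": 7},
}

items = []
count = 0
while True:
    count += 1
    key = f"item{count}"
    if key in obj:
        items.append(obj[key])
    else:
        break

boxes = np.zeros((len(items), 4))
labels = np.zeros((len(items),))
for idx, item in enumerate(items):
    boxes[idx, :] = item["bounding_box"]
    labels[idx] = item["category_id"]

target = {"boxes": t.tensor(boxes).float(), "labels": t.tensor(labels).long()}
print(target)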
bhazelton/lunarsky | [
"4453fca33ad51e7eb1a02ee7f154cd698e5048ca"
] | [
"lunarsky/tests/test_time.py"
] | [
"\nimport numpy as np\nfrom astropy.coordinates import ICRS\nfrom astropy.time import TimeDelta\nimport pytest\n\nfrom lunarsky import MoonLocation, SkyCoord, Time\n\n\[email protected]('lat', np.linspace(-89, 89, 5))\[email protected]('lon', np.linspace(0, 360, 5))\ndef test_sidereal_time_calculation(lat, lon):\n # Confirm that the ra of the zenith is close to the calculated LST.\n\n t0 = Time.now()\n loc = MoonLocation.from_selenodetic(lon, lat, 0)\n t0.location = loc\n\n Ntimes = 200\n Ndays = 28\n times = t0 + TimeDelta(np.linspace(0, Ndays, Ntimes) * 3600 * 24, format='sec')\n\n for tt in times:\n src = SkyCoord(alt='90d', az='0d', frame='lunartopo', obstime=tt, location=loc)\n lst = tt.sidereal_time('mean')\n assert np.isclose(lst.deg, src.transform_to(ICRS()).ra.deg, atol=1e-4)\n"
] | [
[
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bennimmo/elastiknn | [
"544c8f22eb37067d632c479e2a1a23d0be4e5bca"
] | [
"elastiknn-benchmarks/python/preprocess.py"
] | [
"import array\nimport gzip\nimport json\nimport os\nimport sys\nfrom io import BytesIO\nfrom itertools import islice\nfrom math import *\nfrom random import Random\nfrom time import time\nfrom typing import List\n\nimport PIL\nimport boto3\nimport h5py\nfrom PIL import Image\nfrom botocore.exceptions import ClientError\nfrom elastiknn.api import Vec\nfrom elastiknn.utils import ndarray_to_sparse_bool_vectors\nfrom imagehash import phash\nfrom sklearn.neighbors import NearestNeighbors\nfrom tqdm import tqdm\n\n\ndef exists(s3, bucket: str, key: str) -> bool:\n ex = 'Contents' in s3.list_objects(Bucket=bucket, Prefix=key)\n if ex:\n print(f\"Key {key} already exists in bucket {bucket}\")\n return True\n return False\n\n\ndef rounded_dense_float(values: List[float], n: int = 7) -> Vec.DenseFloat:\n def f(v):\n return round(float(v), n - int(floor(log10(abs(v)))) if abs(v) >= 1 else n)\n return Vec.DenseFloat(values = [f(v) for v in values])\n\n\ndef write_vec(fp, vec: Vec.Base):\n s = json.dumps(vec.to_dict(), separators=(',', ':')) + '\\n'\n fp.write(s)\n\n\ndef annb(name: str, hdf5_s3_bucket: str, hdf5_s3_key: str, local_data_dir: str, output_s3_bucket: str,\n output_s3_prefix: str, scale_by_max: bool = False):\n\n s3 = boto3.client('s3')\n train_key = f\"{output_s3_prefix}/train.json.gz\"\n test_key = f\"{output_s3_prefix}/test.json.gz\"\n dist_key = f\"{output_s3_prefix}/dist.json.gz\"\n\n hdf5_file = f\"{local_data_dir}/vecs-{name}.hdf5\"\n train_file = f\"{local_data_dir}/train-{name}.json.gz\"\n test_file = f\"{local_data_dir}/test-{name}.json.gz\"\n dist_file = f\"{local_data_dir}/dist-{name}.json.gz\"\n\n if not os.path.exists(hdf5_file):\n print(f\"Downloading s3://{hdf5_s3_bucket}/{hdf5_s3_key} to {hdf5_file}\")\n s3.download_file(Bucket=hdf5_s3_bucket, Key=hdf5_s3_key, Filename=hdf5_file)\n\n hdf5_fp = h5py.File(hdf5_file, 'r')\n is_sparse = hdf5_fp['train'].dtype == bool\n\n train = hdf5_fp['train'][...]\n test = hdf5_fp['test'][...]\n\n if scale_by_max:\n max_scaler = train.max()\n train /= max_scaler\n test /= max_scaler\n\n metric = hdf5_fp.attrs['distance']\n if metric == 'angular':\n metric = 'cosine'\n knn = NearestNeighbors(n_neighbors=100, algorithm='brute', metric=metric).fit(train)\n (distances, _) = knn.kneighbors(test, return_distance=True)\n\n def write(iter_arr, fp):\n for arr in iter_arr:\n if is_sparse:\n vec = Vec.SparseBool([x for x, b in enumerate(arr) if b], len(arr))\n else:\n vec = rounded_dense_float(list(arr))\n write_vec(fp, vec)\n\n with gzip.open(train_file, \"wt\") as gzfp:\n write(tqdm(train, desc=\"train\"), gzfp)\n\n with gzip.open(test_file, \"wt\") as gzfp:\n write(tqdm(test, desc=\"test\"), gzfp)\n\n with gzip.open(dist_file, \"wt\") as gzfp:\n for arr in tqdm(distances, desc=\"distances\"):\n lst = list(map(float, arr))\n gzfp.write(json.dumps(lst) + '\\n')\n\n for (loc, key) in [(train_file, train_key), (test_file, test_key), (dist_file, dist_key)]:\n print(f\"Copying {loc} to s3://{output_s3_bucket}/{key}\")\n s3.upload_file(loc, output_s3_bucket, key)\n\n\ndef amazon_raw(features_s3_bucket: str, features_s3_key: str, local_data_dir: str, output_s3_bucket: str,\n output_s3_prefix: str, normalize: bool, total_size: int, test_size: int):\n\n s3 = boto3.client('s3')\n\n # Check if it exists first.\n train_key = f\"{output_s3_prefix}/train.json.gz\"\n test_key = f\"{output_s3_prefix}/test.json.gz\"\n if exists(s3, output_s3_bucket, train_key) and exists(s3, output_s3_bucket, test_key):\n return\n\n features_file = f\"{local_data_dir}/vecs.b.gz\"\n 
if not os.path.exists(features_file):\n print(f\"Downloading s3://{features_s3_bucket}/{features_s3_key} to {features_file}\")\n s3.download_file(Bucket=features_s3_bucket, Key=features_s3_key, Filename=features_file)\n\n features_fp = gzip.open(features_file, 'rb')\n\n train_file = f\"{local_data_dir}/train.json.gz\"\n test_file = f\"{local_data_dir}/test.json.gz\"\n\n # Setup to sample test vectors from the file.\n rng = Random(0)\n test_indexes = set(rng.sample(range(total_size), test_size))\n\n with gzip.open(train_file, \"wt\") as train_fp, gzip.open(test_file, \"wt\") as test_fp:\n i = 0\n t0 = time()\n while True:\n asin = features_fp.read(10).decode()\n if len(asin) == 0:\n break\n arr = array.array('f')\n arr.fromfile(features_fp, 4096)\n if normalize:\n norm = sqrt(sum(map(lambda n: n * n, arr.tolist())))\n unit_values = [v / norm for v in arr.tolist()]\n vec = rounded_dense_float(unit_values)\n norm_check = round(sqrt(sum(map(lambda n: n * n, vec.values))), 2)\n assert norm_check == 1.0, (vec, norm_check)\n else:\n vec = rounded_dense_float(arr.tolist())\n\n if i in test_indexes:\n write_vec(test_fp, asin, vec)\n else:\n write_vec(train_fp, asin, vec)\n\n print(f\"Processed {i}: {asin} - {((i + 1) / ((time() - t0) / 60)):.1f} vecs / minute\")\n i += 1\n\n print(f\"Copying {train_file} to s3://{output_s3_bucket}/{train_key}\")\n s3.upload_file(train_file, output_s3_bucket, train_key)\n print(f\"Copying {test_file} to s3://{output_s3_bucket}/{test_key}\")\n s3.upload_file(test_file, output_s3_bucket, test_key)\n\n\ndef amazon_phash(metadata_s3_bucket: str, metadata_s3_key: str, imgs_s3_bucket: str, imgs_s3_prefix: str,\n local_data_dir: str, output_s3_bucket: str, output_s3_prefix: str, n: int = sys.maxsize):\n\n s3 = boto3.client('s3')\n\n # Check if it exists first.\n output_key = f\"{output_s3_prefix}/vecs.json.gz\"\n if exists(s3, output_s3_bucket, output_key):\n return\n\n metadata_file = f\"{local_data_dir}/metadata.json.gz\"\n if not os.path.exists(metadata_file):\n print(f\"Downloading s3://{metadata_s3_bucket}/{metadata_s3_key} to {metadata_file}\")\n s3.download_file(Bucket=metadata_s3_bucket, Key=metadata_s3_key, Filename=metadata_file)\n\n vecs_file = f\"{local_data_dir}/vecs.json.gz\"\n vecs_fp = gzip.open(vecs_file, \"wt\")\n\n hash_size = 64 # end up with a 4096-dimensional bit vector.\n\n print(f\"Writing vectors to {vecs_file}\")\n\n with gzip.open(metadata_file) as gzfp:\n lines = islice(gzfp, 0, n)\n t0 = time()\n for i, d in enumerate(map(eval, lines)):\n if \"imUrl\" not in d or not d[\"imUrl\"].endswith(\"jpg\"):\n continue\n asin = d['asin']\n try:\n obj = s3.get_object(Bucket=imgs_s3_bucket, Key=f\"{imgs_s3_prefix}/{asin}.jpg\")\n bytes = BytesIO(obj['Body'].read())\n img = Image.open(bytes)\n except (PIL.UnidentifiedImageError, ClientError) as ex:\n print(f\"Error for image {asin}: {ex}\\n\", file=sys.stderr)\n ph = phash(img, hash_size)\n for vec in ndarray_to_sparse_bool_vectors(ph.hash.reshape((1, ph.hash.size))):\n write_vec(vecs_fp, asin, vec)\n print(f\"Processed {i}: {asin} - {((i + 1) / ((time() - t0) / 60)):.1f} vecs / minute\")\n vecs_fp.close() # Very important. 
Otherwise gzip file is invalid!\n\n print(f\"Copying {vecs_file} to s3://{output_s3_bucket}/{output_key}\")\n s3.upload_file(vecs_file, output_s3_bucket, output_key)\n\n\ndef main(argv: List[str]) -> int:\n assert len(argv) == 5, \"Usage: <script.py> <dataset name> <local data dir> <s3 bucket> <s3 prefix>\"\n [dataset_name, local_data_dir, s3_bucket, s3_prefix] = argv[1:]\n benchmarks_bucket = \"elastiknn-benchmarks\"\n if dataset_name == \"amazonhome\":\n amazon_raw(\n benchmarks_bucket,\n \"data/raw/amazon-reviews/image_features_Home_and_Kitchen.b.gz\",\n local_data_dir,\n s3_bucket,\n s3_prefix,\n False,\n 436988,\n 10000\n )\n elif dataset_name == \"amazonhomeunit\":\n amazon_raw(\n benchmarks_bucket,\n \"data/raw/amazon-reviews/image_features_Home_and_Kitchen.b.gz\",\n local_data_dir,\n s3_bucket,\n s3_prefix,\n True,\n 436988,\n 10000\n )\n elif dataset_name == \"amazonhomephash\":\n amazon_phash(\n benchmarks_bucket,\n \"data/raw/amazon-reviews/meta_Home_and_Kitchen.json.gz\",\n benchmarks_bucket,\n \"data/raw/amazon-reviews/images\",\n local_data_dir,\n s3_bucket,\n s3_prefix\n )\n elif dataset_name == \"annbdeep1b\":\n annb(\n dataset_name,\n benchmarks_bucket,\n \"data/raw/annb/deep-image-96-angular.hdf5\",\n local_data_dir,\n s3_bucket,\n s3_prefix\n )\n elif dataset_name == \"annbfashionmnist\":\n annb(\n dataset_name,\n benchmarks_bucket,\n \"data/raw/annb/fashion-mnist-784-euclidean.hdf5\",\n local_data_dir,\n s3_bucket,\n s3_prefix,\n scale_by_max=True\n )\n elif dataset_name == \"annbgist\":\n annb(\n dataset_name,\n benchmarks_bucket,\n \"data/raw/annb/gist-960-euclidean.hdf5\",\n local_data_dir,\n s3_bucket,\n s3_prefix\n )\n elif dataset_name == \"annbglove25\":\n annb(\n dataset_name,\n benchmarks_bucket,\n \"data/raw/annb/glove-25-angular.hdf5\",\n local_data_dir,\n s3_bucket,\n s3_prefix\n )\n elif dataset_name == \"annbglove100\":\n annb(\n dataset_name,\n benchmarks_bucket,\n \"data/raw/annb/glove-100-angular.hdf5\",\n local_data_dir,\n s3_bucket,\n s3_prefix\n )\n elif dataset_name == \"annbkosarak\":\n annb(\n dataset_name,\n benchmarks_bucket,\n \"data/raw/annb/kosarak-jaccard.hdf5\",\n local_data_dir,\n s3_bucket,\n s3_prefix\n )\n elif dataset_name == \"annbmnist\":\n annb(\n dataset_name,\n benchmarks_bucket,\n \"data/raw/annb/mnist-784-euclidean.hdf5\",\n local_data_dir,\n s3_bucket,\n s3_prefix,\n scale_by_max=True\n )\n elif dataset_name == \"annbnyt\":\n annb(\n dataset_name,\n benchmarks_bucket,\n \"data/raw/annb/nytimes-256-angular.hdf5\",\n local_data_dir,\n s3_bucket,\n s3_prefix\n )\n elif dataset_name == \"annbsift\":\n annb(\n dataset_name,\n benchmarks_bucket,\n \"data/raw/annb/sift-128-euclidean.hdf5\",\n local_data_dir,\n s3_bucket,\n s3_prefix,\n scale_by_max=True\n )\n else:\n raise RuntimeError(f\"Unknown dataset: {dataset_name}\")\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n"
] | [
[
"sklearn.neighbors.NearestNeighbors"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
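annb above computes exact neighbours with sklearn's brute-force NearestNeighbors, mapping the 'angular' datasets onto the cosine metric, and streams one JSON list of distances per query into a gzipped file. A reduced sketch with random vectors follows; the array sizes and output path are arbitrary stand-ins.

import gzip
import json

import numpy as np
from sklearn.neighbors import NearestNeighbors

rng = np.random.RandomState(0)
train = rng.rand(1000, 25)   # stand-in corpus
test = rng.rand(10, 25)      # stand-in queries

# 'angular' in the ann-benchmarks HDF5 files corresponds to cosine distance here.
knn = NearestNeighbors(n_neighbors=100, algorithm='brute', metric='cosine').fit(train)
distances, _ = knn.kneighbors(test, return_distance=True)

with gzip.open("dist-example.json.gz", "wt") as gzfp:   # hypothetical output file
    for arr in distances:
        gzfp.write(json.dumps(list(map(float, arr))) + '\n')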
rahulbhadani/hcipy | [
"b52726cb9502b5225ddff9d7b1ff417f2350cda8"
] | [
"hcipy/mode_basis/zernike.py"
] | [
"import numpy as np\nfrom math import sqrt, floor\nfrom scipy.special import binom\n\ndef noll_to_zernike(i):\n\t'''Get the Zernike index from a Noll index.\n\n\tParameters\n\t----------\n\ti : int\n\t\tThe Noll index.\n\t\n\tReturns\n\t-------\n\tn : int\n\t\tThe radial Zernike order.\n\tm : int\n\t\tThe azimuthal Zernike order.\n\t'''\n\tn = int(sqrt(2 * i - 1) + 0.5) - 1\n\tif n % 2:\n\t\tm = 2 * int((2 * (i + 1) - n * (n + 1)) // 4) - 1\n\telse:\n\t\tm = 2 * int((2 * i + 1 - n * (n + 1)) // 4)\n\treturn n, m * (-1)**(i % 2)\n\ndef ansi_to_zernike(i):\n\t'''Get the Zernike index from an ANSI index.\n\n\tParameters\n\t----------\n\ti : int\n\t\tThe ANSI index.\n\t\n\tReturns\n\t-------\n\tn : int\n\t\tThe radial Zernike order.\n\tm : int\n\t\tThe azimuthal Zernike order.\n\t'''\n\tn = int((sqrt(8 * i + 1) - 1) / 2)\n\tm = 2 * i - n * (n + 2)\n\treturn (n, m)\n\ndef zernike_to_ansi(n, m):\n\t'''Get the ANSI index for a pair of Zernike indices.\n\n\tParameters\n\t----------\n\tn : int\n\t\tThe radial Zernike order.\n\tm : int\n\t\tThe azimuthal Zernike order.\n\t\n\tReturns\n\t-------\n\tint\n\t\tThe ANSI index.\n\t'''\n\treturn (m + n * n) // 2 + n\n\ndef zernike_to_noll(n, m):\n\t'''Get the Noll index for a pair of Zernike indices.\n\n\tParameters\n\t----------\n\tn : int\n\t\tThe radial Zernike order.\n\tm : int\n\t\tThe azimuthal Zernike order.\n\t\n\tReturns\n\t-------\n\tint\n\t\tThe Noll index.\n\t'''\n\ti = int(((n + 0.5)**2 + 1) / 2) + 1\n\tNn = (n + 1) * (n + 2) // 2 + 1\n\n\t# Brute force search\n\tfor j in range(i, i+Nn):\n\t\tnn, mm = noll_to_zernike(j)\n\t\tif nn == n and mm == m:\n\t\t\treturn j\n\traise ValueError('Could not find noll index for (%d,%d)' % n, m)\n\ndef zernike_radial(n, m, r, cache=None):\n\t'''The radial component of a Zernike polynomial.\n\n\tWe use the q-recursive method, which uses recurrence relations to calculate the radial\n\tZernike polynomials without using factorials. A description of the method can be found\n\tin [1]_. Additionally, this function optionally caches results of previous calls.\n\n\t.. [1] Chong, C. W., Raveendran, P., & Mukundan, R. (2003). A comparative analysis of algorithms for fast computation of Zernike moments. Pattern Recognition, 36(3), 731-742.\n\n\tParameters\n\t----------\n\tn : int\n\t\tThe radial Zernike order.\n\tm : int\n\t\tThe azimuthal Zernike order.\n\tr : array_like\n\t\tThe (normalized) radial coordinates on which to calculate the polynomial.\n\tcache : dictionary or None\n\t\tA dictionary containing previously calculated Zernike modes on the same grid.\n\t\tThis function is for speedup only, and therefore the cache is expected to be \n\t\tvalid. 
You can reuse the cache for future calculations on the same exact grid.\n\t\tThe given dictionary is updated with the current calculation.\n\t\n\tReturns\n\t-------\n\tarray_like\n\t\tThe radial component of the evaluated Zernike polynomial.\n\t'''\n\tm = abs(m)\n\n\tif cache is not None:\n\t\tif ('rad', n, m) in cache:\n\t\t\treturn cache[('rad', n, m)]\n\t\n\tif n == m:\n\t\tres = r**n\n\telif (n - m) == 2:\n\t\tz1 = zernike_radial(n, n, r, cache)\n\t\tz2 = zernike_radial(n - 2, n - 2, r, cache)\n\n\t\tres = n * z1 - (n - 1) * z2\n\telse:\n\t\tp = n\n\t\tq = m + 4\n\n\t\th3 = -4 * (q - 2) * (q - 3) / float((p + q - 2) * (p - q + 4))\n\t\th2 = h3 * (p + q) * (p - q + 2) / float(4 * (q - 1)) + (q - 2)\n\t\th1 = q * (q - 1) / 2.0 - q * h2 + h3 * (p + q + 2) * (p - q) / 8.0\n\n\t\tr2 = zernike_radial(2, 2, r, cache)\n\t\tres = h1 * zernike_radial(p, q, r, cache) + (h2 + h3 / r2) * zernike_radial(n, q - 2, r, cache)\n\n\tif cache is not None:\n\t\tcache[('rad', n, m)] = res\n\t\n\treturn res\n\ndef zernike_azimuthal(m, theta, cache=None):\n\t'''The azimuthal component of a Zernike polynomial.\n\n\tThis function optionally caches results of previous calls.\n\n\tParameters\n\t----------\n\tm : int\n\t\tThe azimuthal Zernike order.\n\ttheta : array_like\n\t\tThe azimuthal coordinates on which to calculate the polynomial.\n\tcache : dictionary or None\n\t\tA dictionary containing previously calculated Zernike modes on the same grid.\n\t\tThis function is for speedup only, and therefore the cache is expected to be \n\t\tvalid. You can reuse the cache for future calculations on the same exact grid.\n\t\tThe given dictionary is updated with the current calculation.\n\t\n\tReturns\n\t-------\n\tarray_like\n\t\tThe azimuthal component of the evaluated Zernike polynomial.\n\t'''\n\tif cache is not None:\n\t\tif ('azim', m) in cache:\n\t\t\treturn cache[('azim', m)]\n\t\n\tif m < 0:\n\t\tres = sqrt(2) * np.sin(-m * theta)\n\telif m == 0:\n\t\treturn 1\n\telse:\n\t\tres = sqrt(2) * np.cos(m * theta)\n\t\n\tif cache is not None:\n\t\tcache[('azim', m)] = res\n\t\n\treturn res\n\t\ndef zernike(n, m, D=1, grid=None, radial_cutoff=True, cache=None):\n\t'''Evaluate the Zernike polynomial on a grid.\n\n\tParameters\n\t----------\n\tn : int\n\t\tThe radial Zernike order.\n\tm : int\n\t\tThe azimuthal Zernike order.\n\tD : scalar\n\t\tThe diameter of the Zernike polynomial.\n\tgrid : Grid\n\t\tThe grid on which to evaluate the Zernike polynomial. If this is None,\n\t\ta Field generator will be returned.\n\tradial_cutoff : boolean\n\t\tWhether to apply a circular aperture to cutoff the modes.\n\tcache : dictionary or None\n\t\tA dictionary containing previously calculated Zernike modes on the same grid.\n\t\tThis function is for speedup only, and therefore the cache is expected to be \n\t\tvalid. You can reuse the cache for future calculations on the same exact grid.\n\t\tThe given dictionary is updated with the current calculation.\n\t\n\tReturns\n\t-------\n\tField or Field generator\n\t\tThe evaluated Zernike polynomial. 
If `grid` is None, a Field generator is returned,\n\t\twhich evaluates the Zernike polynomial on the supplied grid.\n\t'''\n\tfrom ..field import Field\n\n\tif grid is None:\n\t\treturn lambda grid: zernike(n, m, D, grid)\n\t\n\tif grid.is_separated and grid.is_('polar'):\n\t\tR, Theta = grid.separated_coords\n\t\tz_r = zernike_radial(n, m, 2 * R / D, cache)\n\t\tif radial_cutoff:\n\t\t\tz_r *= (2 * R) < D\n\t\tz = sqrt(n + 1) * np.outer(zernike_azimuthal(m, Theta, cache), z_r).flatten()\n\telse:\n\t\tr, theta = grid.as_('polar').coords\n\t\tz = sqrt(n + 1) * zernike_azimuthal(m, theta, cache) * zernike_radial(n, m, 2 * r / D, cache)\n\t\tif radial_cutoff:\n\t\t\tz *= (2 * r) < D\n\t\n\treturn Field(z, grid)\n\ndef zernike_ansi(i, D=1, grid=None, radial_cutoff=True, cache=None):\n\t'''Evaluate the Zernike polynomial on a grid using an ANSI index.\n\n\tParameters\n\t----------\n\ti : int\n\t\tThe ANSI index.\n\tD : scalar\n\t\tThe diameter of the Zernike polynomial.\n\tgrid : Grid or None\n\t\tThe grid on which to evaluate the Zernike polynomial. If this is None,\n\t\ta Field generator will be returned.\n\tradial_cutoff : boolean\n\t\tWhether to apply a circular aperture to cutoff the modes.\n\tcache : dictionary or None\n\t\tA dictionary containing previously calculated Zernike modes on the same grid.\n\t\tThis function is for speedup only, and therefore the cache is expected to be \n\t\tvalid. You can reuse the cache for future calculations on the same exact grid.\n\t\tThe given dictionary is updated with the current calculation.\n\t\n\tReturns\n\t-------\n\tField or Field generator\n\t\tThe evaluated Zernike polynomial. If `grid` is None, a Field generator is returned,\n\t\twhich evaluates the Zernike polynomial on the supplied grid.\n\t'''\n\tn, m = ansi_to_zernike(i)\n\treturn zernike(n, m, D, grid, radial_cutoff, cache)\n\ndef zernike_noll(i, D=1, grid=None, radial_cutoff=True, cache=None):\n\t'''Evaluate the Zernike polynomial on a grid using a Noll index.\n\n\tParameters\n\t----------\n\ti : int\n\t\tThe Noll index.\n\tD : scalar\n\t\tThe diameter of the Zernike polynomial.\n\tgrid : Grid or None\n\t\tThe grid on which to evaluate the Zernike polynomial. If this is None,\n\t\ta Field generator will be returned.\n\tradial_cutoff : boolean\n\t\tWhether to apply a circular aperture to cutoff the modes.\n\tcache : dictionary or None\n\t\tA dictionary containing previously calculated Zernike modes on the same grid.\n\t\tThis function is for speedup only, and therefore the cache is expected to be \n\t\tvalid. You can reuse the cache for future calculations on the same exact grid.\n\t\tThe given dictionary is updated with the current calculation.\n\t\n\tReturns\n\t-------\n\tField or Field generator\n\t\tThe evaluated Zernike polynomial. If `grid` is None, a Field generator is returned,\n\t\twhich evaluates the Zernike polynomial on the supplied grid.\n\t'''\n\tn, m = noll_to_zernike(i)\n\treturn zernike(n, m, D, grid, radial_cutoff, cache)\n\ndef make_zernike_basis(num_modes, D, grid, starting_mode=1, ansi=False, radial_cutoff=True, use_cache=True):\n\t'''Make a ModeBasis of Zernike polynomials.\n\n\tParameters\n\t----------\n\tnum_modes : int\n\t\tThe number of Zernike polynomials to generate.\n\tD : scalar\n\t\tThe diameter of the Zernike polynomial.\n\tgrid : Grid or None\n\t\tThe grid on which to evaluate the Zernike polynomials. 
If this is None,\n\t\ta list of Field generators will be returned.\n\tstarting_mode : int\n\t\tThe first mode to evaluate.\n\tansi : boolean\n\t\tIf this is True, the modes will be indexed using ANSI indices. Otherwise, a Noll \n\t\tindexing scheme is used.\n\tradial_cutoff : boolean\n\t\tWhether to apply a circular aperture to cutoff the modes.\n\tuse_cache : boolean\n\t\tWhether to use a cache while calculating the modes. A cache uses memory, so turn it\n\t\toff when you are limited on memory.\n\t\n\tReturns\n\t-------\n\tModeBasis or list of Field generators\n\t\tThe evaluated mode basis of Zernike polynomials, or a list of Field generators for\n\t\teach of the Zernike polynomials.\n\t'''\n\tfrom .mode_basis import ModeBasis\n\tf = zernike_ansi if ansi else zernike_noll\n\n\tif grid is None:\n\t\tpolar_grid = None\n\telse:\n\t\tpolar_grid = grid.as_('polar')\n\t\n\tif use_cache:\n\t\tcache = {}\n\telse:\n\t\tcache = None\n\n\tmodes = [f(i, D, polar_grid, radial_cutoff, cache) for i in range(starting_mode, starting_mode + num_modes)]\n\n\tif grid is None:\n\t\treturn modes\n\telse:\n\t\treturn ModeBasis(modes)\n"
] | [
[
"numpy.cos",
"numpy.sin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
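The index helpers in zernike.py above can be spot-checked with a round trip: an ANSI index converted to an (n, m) pair and back should be unchanged, and the pair should satisfy the Zernike constraints |m| <= n with n - |m| even. The sketch below reuses the same two formulas, so it only assumes the standard math module.

from math import sqrt

def ansi_to_zernike(i):
    # Same formula as in the module above.
    n = int((sqrt(8 * i + 1) - 1) / 2)
    m = 2 * i - n * (n + 2)
    return n, m

def zernike_to_ansi(n, m):
    return (m + n * n) // 2 + n

for i in range(36):
    n, m = ansi_to_zernike(i)
    assert zernike_to_ansi(n, m) == i             # round trip is the identity
    assert abs(m) <= n and (n - abs(m)) % 2 == 0  # valid Zernike index pair
print("ANSI <-> (n, m) round trip holds for the first 36 modes")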
evcano/seisflows | [
"d7c52827891c11f4b9681418b91bd4ab6a264e9b"
] | [
"seisflows/preprocess/pyatoa.py"
] | [
"#!/usr/bin/env python\n\"\"\"\nThis is the base class seisflows.preprocess.Pyatoa\n\nThis is a main Seisflows class, it controls the preprocessing.\nThis class uses the Python package Pyatoa to perform preprocessing, and\nmisfit measurement.\n\n..warning::\n This might break if no residuals are written for a given event\n\"\"\"\nimport os\nimport sys\nimport pyatoa\nimport numpy as np\nfrom glob import glob\nfrom seisflows.tools import unix\nfrom seisflows.config import Dict\nfrom pyatoa.utils.images import merge_pdfs\nfrom seisflows.tools.err import ParameterError\n\nPAR = sys.modules[\"seisflows_parameters\"]\nPATH = sys.modules[\"seisflows_paths\"]\n\n\nclass Pyatoa:\n \"\"\"\n Data preprocessing class using the Pyatoa package\n \"\"\"\n def __init__(self):\n \"\"\"\n These parameters should not be set by __init__!\n Attributes are just initialized as NoneTypes for clarity and docstrings\n\n :type data: str\n :param data: directory where data from the preprocessing is stored\n :type figures: str\n :param figures: directory where figures are stored\n \"\"\"\n self.path_datasets = None\n self.path_figures = None\n\n def check(self):\n \"\"\" \n Checks Parameter and Path files, will be run at the start of a Seisflows\n workflow to ensure that things are set appropriately.\n \"\"\"\n # Check the path requirements\n if \"PREPROCESS\" not in PATH:\n setattr(PATH, \"PREPROCESS\", \n os.path.join(PATH.SCRATCH, \"preprocess\"))\n\n if \"DATA\" not in PATH:\n setattr(PATH, \"DATA\", None)\n\n if \"RESPONSE\" not in PATH:\n setattr(PATH, \"RESPONSE\", None)\n\n # Check the existence of required parameters\n required_parameters = [\"COMPONENTS\", \"UNIT_OUTPUT\", \"MIN_PERIOD\",\n \"MAX_PERIOD\", \"CORNERS\", \"ROTATE\",\n \"ADJ_SRC_TYPE\", \"PYFLEX_PRESET\",\n \"FIX_WINDOWS\", \"PLOT\", \"FORMAT\"\n ]\n for req in required_parameters:\n if req not in PAR:\n raise ParameterError(PAR, req)\n\n # Check specific parameter requirements\n if PAR.FORMAT != \"ascii\":\n raise ValueError(\"Pyatoa preprocess currently only works with \"\n \"the 'ascii' format\")\n\n # Set default values parameters for any non-set parameters\n if \"PLOT\" not in PAR:\n setattr(PAR, \"PLOT\", True)\n\n if \"LOGGING\" not in PAR:\n setattr(PAR, \"LOGGING\", \"DEBUG\")\n\n if \"MAP_CORNERS\" not in PAR:\n setattr(PAR, \"MAP_CORNERS\", None)\n\n if \"CLIENT\" not in PAR:\n setattr(PAR, \"CLIENT\", None)\n\n if \"SNAPSHOT\" not in PAR:\n setattr(PAR, \"SNAPSHOT\", True)\n\n # Used to define the start time of fetched observation waveforms\n if \"START_PAD\" not in PAR:\n setattr(PAR, \"START_PAD\", 20)\n\n # Used to define the end time of fetched observation waveforms\n if \"END_PAD\" not in PAR:\n setattr(PAR, \"END_PAD\", PAR.DT * PAR.NT + PAR.START_PAD + 5)\n else:\n if PAR.DT * PAR.NT >= PAR.START_PAD + PAR.END_PAD:\n raise ValueError(\"Pyatoa preprocess parameters START_PAD and \"\n \"END_PAD will not provide long enough obs.\"\n \"traces to match the length of synthetics\")\n\n def setup(self):\n \"\"\"\n Sets up data preprocessing machinery by establishing an internally\n defined directory structure that will be used to store the outputs \n of the preprocessing workflow\n\n Akin to an __init__ class, but to be called externally by the workflow.\n \"\"\"\n # Late import because preprocess is loaded before optimize\n solver = sys.modules[\"seisflows_solver\"]\n\n # Inititate a Pyaflowa object to make sure the machinery works\n pyaflowa = pyatoa.Pyaflowa(structure=\"seisflows\", sfpaths=PATH, \n sfpar=PAR)\n\n # Pull path 
names from Pyaflowa to keep path structure in one place\n self.path_datasets = pyaflowa.path_structure.datasets\n self.path_figures = pyaflowa.path_structure.figures\n\n def prepare_eval_grad(self, path, source_name):\n \"\"\"\n Prepare the gradient evaluation by gathering, preprocessing waveforms, \n and measuring misfit between observations and synthetics using Pyatoa.\n \n This is a process specific task and intended to be run in parallel\n\n :type path: str\n :param path: path to the current function evaluation for saving residual\n :type source_name: str\n :param source_name: the event id to be used for tagging and data lookup\n \"\"\"\n # Late import because preprocess is loaded before optimize,\n # Optimize required to know which iteration/step_count we are at\n optimize = sys.modules[\"seisflows_optimize\"]\n\n # Inititate the Pyaflowa class which abstracts processing functions\n # Communicate to Pyaflowa the current iteration and step count\n pyaflowa = pyatoa.Pyaflowa(structure=\"seisflows\", sfpaths=PATH, \n sfpar=PAR, iteration=optimize.iter,\n step_count=optimize.line_search.step_count)\n\n # Process all the stations for a given event using Pyaflowa\n misfit = pyaflowa.process(source_name, fix_windows=PAR.FIX_WINDOWS)\n\n # Generate the necessary files to continue the inversion\n if misfit:\n # Event misfit defined by Tape et al. (2010)\n self.write_residuals(path=path, scaled_misfit=misfit,\n source_name=source_name)\n \n self.snapshot()\n\n def finalize(self):\n \"\"\"\n Run some serial finalization tasks specific to Pyatoa, which will help\n aggregate the collection of output information:\n Aggregate misfit windows using the Inspector class\n Generate PDFS of waveform figures for easy access\n \"\"\"\n unix.cd(self.path_datasets)\n insp = pyatoa.Inspector(PAR.TITLE, verbose=False)\n insp.discover()\n insp.save() \n\n self.make_final_pdfs()\n\n def write_residuals(self, path, scaled_misfit, source_name):\n \"\"\"\n Computes residuals and saves them to a text file in the appropriate path\n\n :type path: str \n :param path: scratch directory path, e.g. PATH.GRAD or PATH.FUNC\n :type scaled_misfit: float\n :param scaled_misfit: the summation of misfit from each \n source-receiver pair calculated by prepare_eval_grad()\n :type source_name: str\n :param source_name: name of the source related to the misfit, used\n for file naming\n \"\"\"\n residuals_dir = os.path.join(path, \"residuals\") \n\n if not os.path.exists(residuals_dir):\n unix.mkdir(residuals_dir)\n \n event_residual = os.path.join(residuals_dir, source_name) \n \n np.savetxt(event_residual, [scaled_misfit], fmt=\"%11.6e\")\n\n def sum_residuals(self, files):\n \"\"\"\n Averages the event misfits and returns the total misfit.\n Total misfit defined by Tape et al. 
(2010)\n\n :type files: str\n :param files: list of single-column text files containing residuals\n that will have been generated using prepare_eval_grad()\n :rtype: float\n :return: average misfit\n \"\"\"\n assert(len(files) == PAR.NTASK), \\\n \"Number of misfit files does not match the number of events\"\n\n total_misfit = 0\n for filename in files:\n total_misfit += np.sum(np.loadtxt(filename))\n\n total_misfit /= PAR.NTASK\n\n return total_misfit\n\n def snapshot(self):\n \"\"\"\n Copy all ASDFDataSets in the data directory into a separate snapshot\n directory for redundancy\n \"\"\"\n if PAR.SNAPSHOT:\n snapshot_dir = os.path.join(self.path_datasets, \"snapshot\")\n if not os.path.exists(snapshot_dir):\n unix.mkdir(snapshot_dir)\n \n srcs = glob(os.path.join(self.path_datasets, \"*.h5\"))\n for src in srcs:\n dst = os.path.join(snapshot_dir, os.path.basename(src))\n unix.cp(src, dst)\n\n def make_final_pdfs(self):\n \"\"\"\n Utility function to combine all pdfs for a given event, iteration, and\n step count into a single pdf. To reduce on file count and provide easier\n visualization. Removes the original event-based pdfs.\n \n .. warning::\n This is a simple function because it won't account for missed \n iterations i.e. if this isn't run in the finalization, it will \n probably break the next time\n\n :raises AssertionError: When tags don't match the mainsolvers first tag\n \"\"\"\n # Late import because preprocess is loaded before optimize\n solver = sys.modules[\"seisflows_solver\"]\n\n # Relative pathing from here on out boys\n unix.cd(self.path_figures)\n sources = []\n for source_name in solver.source_names:\n sources += glob(os.path.join(source_name, \"*.pdf\"))\n\n # Incase this is run out of turn and pdfs were already deleted\n if not sources:\n return\n\n iter_steps = set([os.path.basename(_).split(\"_\")[0] for _ in sources])\n for iter_step in iter_steps:\n # Merge pdfs that correspond to the same iteration and step count \n fids = [_ for _ in sources if iter_step in _]\n merge_pdfs(fids=sorted(fids), fid_out=f\"{iter_step}.pdf\")\n unix.rm(fids)\n\n\n"
] | [
[
"numpy.savetxt",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
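write_residuals and sum_residuals above exchange per-event misfits through single-value text files: each event writes one "%11.6e" number, and the total misfit is the sum over files divided by the number of events (Tape et al. 2010). The sketch below reproduces that exchange in a temporary directory; the event count and misfit values are invented.

import os
import tempfile
from glob import glob

import numpy as np

ntask = 3                         # stand-in for PAR.NTASK
misfits = [0.12, 0.34, 0.56]      # stand-in per-event misfits

residuals_dir = tempfile.mkdtemp()
for i, scaled_misfit in enumerate(misfits):
    # One "%11.6e"-formatted value per event, as in write_residuals().
    np.savetxt(os.path.join(residuals_dir, f"event_{i:03d}"), [scaled_misfit], fmt="%11.6e")

files = sorted(glob(os.path.join(residuals_dir, "*")))
assert len(files) == ntask, "Number of misfit files does not match the number of events"
total_misfit = sum(np.sum(np.loadtxt(f)) for f in files) / ntask
print(f"total misfit: {total_misfit:.6f}")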
AmazingSean/spotify_top_tracks_analysis | [
"f1f58751abf77cacd55ca7271f8a285614b0a8d3"
] | [
"data/billboard_data_process.py"
] | [
"import pandas as pd\nimport numpy as np\nimport sys\n\nimport spotipy\nimport spotipy.util as util\nfrom spotipy.oauth2 import SpotifyClientCredentials\n\nfrom pandarallel import pandarallel\n\n\ndef get_features_with_id(id, sp):\n \"\"\"\n Get audio features provided by Spotify Web API.\n \"\"\"\n print(id)\n\n track = sp.audio_features(id)[0]\n feature_values = [track[feature] for feature in feature_names]\n\n return feature_values\n\ndef get_unique_track_keys(sections):\n unique_keys_list = np.unique([str(section['key']) for section in sections])\n return ','.join(unique_keys_list)\n\ndef get_unique_track_modes(sections):\n unique_modes_list = np.unique([str(section['mode']) for section in sections])\n return ','.join(unique_modes_list)\n\ndef get_unique_track_modes(sections):\n unique_time_list = np.unique([str(section['time_signature']) for section in sections])\n return ','.join(unique_time_list)\n\n\ndef get_features_from_audio_analysis(id, sp):\n \"\"\"\n Construct new features from the audio analysis provided by Spotify Web API.\n \"\"\"\n print(id)\n analysis = sp.audio_analysis(id)\n\n # analysis is a dictionary that has keys: meta(irrelevant info), track, bars, beats, tatums, sections, segments\n track = analysis['track']\n bars = analysis['bars']\n beats = analysis['beats']\n tatums = analysis['tatums']\n sections = analysis['sections']\n segments = analysis['segments']\n\n # construct new features:\n tempo_confidence = track['tempo_confidence']\n time_signature_confidence = track['time_signature_confidence']\n key_confidence = track['key_confidence']\n mode_confidence = track['mode_confidence']\n\n #bars\n num_bars = len(bars)\n avg_bar_len = np.mean([bar['duration'] for bar in bars])\n avg_bar_conf = np.mean([bar['confidence'] for bar in bars])\n \n #beats\n num_beats = len(beats)\n avg_beat_len = np.mean([beat['duration'] for beat in beats])\n avg_beat_conf = np.mean([beat['confidence'] for beat in beats])\n\n #tatums\n num_tatums = len(tatums)\n avg_tatum_len = np.mean([tatum['duration'] for tatum in tatums])\n avg_tatum_conf = np.mean([tatum['confidence'] for tatum in tatums])\n\n #sections\n num_sections = len(sections)\n avg_section_len = np.mean([section['duration'] for section in sections])\n avg_tempo = np.mean([section['tempo'] for section in sections])\n avg_tempo_conf = np.mean([section['tempo_confidence'] for section in sections])\n key = get_unique_track_keys(sections)\n avg_key_conf = np.mean([section['key_confidence'] for section in sections])\n mode = get_unique_track_modes(sections)\n avg_mode_conf = np.mean([section['mode_confidence'] for section in sections])\n time_signature = get_unique_time_signatures(sections)\n avg_time_conf = np.mean([section['time_signature_confidence'] for section in sections])\n\n #segments\n num_segments = len(segments)\n\n\n result_list = [tempo_confidence, time_signature_confidence, key_confidence, mode_confidence,\n num_bars, num_beats, num_tatums, num_sections, num_segments, avg_bar_len,\n avg_bar_conf, avg_beat_len, avg_bar_conf, avg_tatum_len, avg_tatum_conf,\n avg_section_len, avg_tempo, avg_tempo_conf, key, avg_key_conf, mode, \n avg_mode_conf, time_signature, avg_time_conf\n ]\n\n return result_list\n\n\nif __name__ == '__main__':\n # construct spotify object\n \n client_credentials_manager = SpotifyClientCredentials(client_id='ff9ff248e80e428bbb9796e1a7d62aeb', client_secret='7f70a7c884494c6c96d2e6f03c4d2388')\n sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)\n\n 
pandarallel.initialize(progress_bar=True)\n\n billboard = pd.read_csv('billboard_with_ids.csv')\n\n # list of features 1. provided by Spotify 2. newly constructed features from audio analysis:\n feature_names = ['duration_ms', 'key', 'mode', 'time_signature', 'acousticness', 'danceability', \n 'energy', 'instrumentalness', 'liveness', 'loudness', 'speechiness', 'valence', 'tempo']\n \n # expand the df to fit the new features:\n column_names = list(billboard.columns)\n column_names.extend(feature_names)\n billboard = billboard.reindex(columns = column_names)\n\n # assign the new features\n print('Adding features...')\n billboard[feature_names] = billboard['sp_id'].apply(lambda x: pd.Series(get_features_with_id(x, sp)))\n\n\n # construct features from audio analysis:\n new_feature_names = ['tempo_confidence', 'time_signature_confidence', 'key_confidence', 'mode_confidence',\n 'num_bars', 'num_beats', 'num_tatums', 'num_sections', 'num_segments', 'avg_bar_len',\n 'avg_bar_conf', 'avg_beat_len', 'avg_bar_conf', 'avg_tatum_len', 'avg_tatum_conf',\n 'avg_section_len', 'avg_tempo', 'avg_tempo_conf', 'key', 'avg_key_conf', 'mode', \n 'avg_mode_conf', 'time_signature', 'avg_time_conf']\n\n # expand the df\n column_names.extend(new_feature_names)\n billboard = billboard.reindex(columns = column_names)\n \n # assign the new features:\n print('Adding audio analysis...')\n billboard[new_feature_names] = billboard['sp_id'].apply(lambda x: pd.Series(get_features_from_audio_analysis(x, sp)))\n\n print(billboard.head())\n print('Saving to csv...')\n billboard.to_csv('billboard_with_features.csv')"
] | [
[
"pandas.read_csv",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
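billboard_data_process.py above widens the DataFrame with reindex(columns=...) and fills the new columns by applying a per-row function that returns a pd.Series. The sketch below keeps only that pandas pattern; get_features_with_id here is a dummy stand-in because no Spotify client is assumed.

import pandas as pd

def get_features_with_id(track_id):
    # Hypothetical stand-in for the Spotify audio-features lookup.
    return [len(track_id), track_id.count("a")]

feature_names = ["duration_ms", "key"]          # a subset of the real feature list
billboard = pd.DataFrame({"sp_id": ["abc123", "a1b2c3d4"]})

# Expand the frame to hold the new columns, then fill them row by row.
billboard = billboard.reindex(columns=list(billboard.columns) + feature_names)
billboard[feature_names] = billboard["sp_id"].apply(
    lambda x: pd.Series(get_features_with_id(x)))

print(billboard)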
ikngtty/solve_maze | [
"9e3f4bd2b35057b14ccb0fe6a58e6cb4f9958cd6"
] | [
"mylib/maze.py"
] | [
"\"\"\"Use A-star algorithm.\"\"\"\nimport heapq as hq\nimport typing\n\nimport scipy.spatial as spatial\n\nfrom . import util\n\n\ndef is_passable(pixel) -> bool:\n \"\"\"\n Judge whether we can pass through the pixel or not.\n\n Parameters\n ----------\n pixel\n The value of the pixel in the image. Unless the maze image is\n grayscale, the grayscale integer is passed.\n\n \"\"\"\n return pixel > 200\n\n\ndef heuristic_cost(point1: util.Point,\n point2: util.Point) -> float:\n return spatial.distance.euclidean(point1, point2)\n\n\nclass Node:\n __slots__ = (\"_point\", \"_cost\", \"_heuristic_cost\", \"_parent\")\n\n def __init__(self,\n point: util.Point,\n cost: int,\n heuristic_cost: float,\n parent):\n self._point = point\n self._cost = cost\n self._heuristic_cost = heuristic_cost\n self._parent = parent\n\n @property\n def priority_score(self):\n return self._cost + self._heuristic_cost\n\n @property\n def _compare_values(self):\n return (self.priority_score,\n self.cost,\n self.point)\n\n def __eq__(self, other):\n return self._compare_values < other._compare_values\n\n def __lt__(self, other):\n return self._compare_values < other._compare_values\n\n\n# Define getters of `Node` class.\nfor attr in Node.__slots__:\n util.def_getter(Node, attr)\n\n\ndef get_path(maze_img,\n start_point: util.Point,\n goal_point: util.Point) -> typing.List[util.Point]:\n goal_node = _get_goal_node(maze_img, start_point, goal_point)\n if goal_node is None:\n raise Exception(\"The goal is not found.\")\n\n path_reverse = [goal_node.point]\n parent_node = goal_node.parent\n while(parent_node is not None):\n path_reverse.append(parent_node.point)\n parent_node = parent_node.parent\n\n return list(reversed(path_reverse))\n\n\ndef _get_goal_node(maze_img,\n start_point: util.Point,\n goal_point: util.Point) -> Node:\n\n def is_goal(node: Node):\n return node.point == goal_point\n\n start_node = Node(point=start_point,\n cost=0,\n heuristic_cost=heuristic_cost(goal_point, start_point),\n parent=None)\n if is_goal(start_node):\n return start_node\n\n shape = maze_img.shape\n open_node_map = {start_node.point: start_node}\n node_hq = [start_node]\n while (len(node_hq) > 0):\n center_node = hq.heappop(node_hq)\n next_cost = center_node.cost + 1\n next_nodes = [Node(point=p,\n cost=next_cost,\n heuristic_cost=heuristic_cost(goal_point, p),\n parent=center_node)\n for p in center_node.point.udlr()\n if p.y >= 0\n and p.x >= 0\n and p.y < shape[0]\n and p.x < shape[1]\n and open_node_map.get(p) is None\n and is_passable(maze_img[p])]\n for node in next_nodes:\n if is_goal(node):\n return node\n open_node_map[node.point] = node\n # Visualize cost for debugging.\n # maze_img[node.point] = 255 - node.cost\n hq.heappush(node_hq, node)\n\n return None\n"
] | [
[
"scipy.spatial.distance.euclidean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
AjitPant/AprilTag_Detection | [
"fd93d0803d0f4b5f7a862f77f2db26a189ebeedb"
] | [
"detection/inference/data_validator.py"
] | [
"import numpy as np\nimport pickle\nimport cv2\nimport glob\nimport os\nimport argparse\nfrom multiprocessing import Process, Manager\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport itertools\n\n\n\n\ndef process(args):\n corners, ids, counter = pickle.load( open( args.pkl_path, \"rb\" ) )[:3]\n\n\n\n colors = itertools.cycle([\"r\", \"b\", \"g\", \"black\"])\n\n for img_corners, img_ids, img_counter in zip(corners, ids, counter):\n print(img_ids)\n print(\"hi\")\n print(np.array(img_corners).shape)\n img_corners = np.array(img_corners).reshape(-1,4, 2)\n img_corners[:,[0, 1,2, 3], :] = img_corners[:, [2,3,0,1], :]\n img_corners = img_corners.reshape(-1, 2)\n\n\n for corner in range(4):\n series = img_corners[corner::4]\n x, y = series.T\n y = -y\n plt.scatter(x,y, color=next(colors))\n\n img_corners = np.array(img_corners).reshape(-1,4, 2)\n for i in range(len(img_corners)):\n x = np.mean(img_corners[i,:,0])\n y = -np.mean(img_corners[i,:,1])\n plt.annotate(str(img_ids[i]),xy=(x,y))\n\n plt.show()\n\n\n\n\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--pkl_path',\n type=str,\n required=True,\n help=\"Pickle file containing the corners list\")\n args = parser.parse_args()\n process(args)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array",
"matplotlib.pyplot.show",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
weareno1/cupy | [
"ac52cce00b69d97b5d99bd1f91caed720b32b2d3",
"5cf50a93bbdebe825337ed7996c464e84b1495ba"
] | [
"tests/cupyx_tests/scipy_tests/sparse_tests/test_csc.py",
"tests/cupy_tests/core_tests/test_ndarray_ufunc.py"
] | [
"import unittest\n\nimport numpy\ntry:\n import scipy.sparse\n scipy_available = True\nexcept ImportError:\n scipy_available = False\n\nimport cupy\nfrom cupy import testing\nfrom cupyx.scipy import sparse\n\n\ndef _make(xp, sp, dtype):\n data = xp.array([0, 1, 3, 2], dtype)\n indices = xp.array([0, 0, 2, 1], 'i')\n indptr = xp.array([0, 1, 2, 3, 4], 'i')\n # 0, 1, 0, 0\n # 0, 0, 0, 2\n # 0, 0, 3, 0\n return sp.csc_matrix((data, indices, indptr), shape=(3, 4))\n\n\ndef _make_complex(xp, sp, dtype):\n data = xp.array([0, 1, 2, 3], dtype)\n if dtype in [numpy.complex64, numpy.complex128]:\n data = data - 1j\n indices = xp.array([0, 1, 3, 2], 'i')\n indptr = xp.array([0, 2, 3, 4], 'i')\n # 0, 1 - 1j, 0, 0\n # 0, 0, 0, 2 - 1j\n # 0, 0, 3 - 1j, 0\n return sp.csr_matrix((data, indices, indptr), shape=(3, 4))\n\n\ndef _make2(xp, sp, dtype):\n data = xp.array([2, 1, 3, 4], dtype)\n indices = xp.array([1, 0, 1, 2], 'i')\n indptr = xp.array([0, 0, 1, 4, 4], 'i')\n # 0, 0, 1, 0\n # 0, 2, 3, 0\n # 0, 0, 4, 0\n return sp.csc_matrix((data, indices, indptr), shape=(3, 4))\n\n\ndef _make3(xp, sp, dtype):\n data = xp.array([1, 4, 3, 2, 5], dtype)\n indices = xp.array([0, 3, 1, 1, 3], 'i')\n indptr = xp.array([0, 2, 3, 5], 'i')\n # 1, 0, 0\n # 0, 3, 2\n # 0, 0, 0\n # 4, 0, 5\n return sp.csc_matrix((data, indices, indptr), shape=(4, 3))\n\n\ndef _make_unordered(xp, sp, dtype):\n data = xp.array([1, 2, 3, 4], dtype)\n indices = xp.array([1, 0, 1, 2], 'i')\n indptr = xp.array([0, 0, 0, 2, 4], 'i')\n return sp.csc_matrix((data, indices, indptr), shape=(3, 4))\n\n\ndef _make_duplicate(xp, sp, dtype):\n data = xp.array([1, 4, 3, 0, 2, 5], dtype)\n indices = xp.array([0, 1, 0, 2, 1, 1], 'i')\n indptr = xp.array([0, 3, 4, 6, 6], 'i')\n # 4, 0, 0, 0\n # 4, 0, 7, 0\n # 0, 0, 0, 0\n return sp.csc_matrix((data, indices, indptr), shape=(3, 4))\n\n\ndef _make_empty(xp, sp, dtype):\n data = xp.array([], dtype)\n indices = xp.array([], 'i')\n indptr = xp.array([0, 0, 0, 0, 0], 'i')\n return sp.csc_matrix((data, indices, indptr), shape=(3, 4))\n\n\ndef _make_shape(xp, sp, dtype):\n return sp.csc_matrix((3, 4))\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n}))\nclass TestCscMatrix(unittest.TestCase):\n\n def setUp(self):\n self.m = _make(cupy, sparse, self.dtype)\n\n def test_dtype(self):\n self.assertEqual(self.m.dtype, self.dtype)\n\n def test_data(self):\n self.assertEqual(self.m.data.dtype, self.dtype)\n testing.assert_array_equal(\n self.m.data, cupy.array([0, 1, 3, 2], self.dtype))\n\n def test_indices(self):\n self.assertEqual(self.m.indices.dtype, numpy.int32)\n testing.assert_array_equal(\n self.m.indices, cupy.array([0, 0, 2, 1], self.dtype))\n\n def test_indptr(self):\n self.assertEqual(self.m.indptr.dtype, numpy.int32)\n testing.assert_array_equal(\n self.m.indptr, cupy.array([0, 1, 2, 3, 4], self.dtype))\n\n def test_init_copy(self):\n n = sparse.csc_matrix(self.m)\n self.assertIsNot(n, self.m)\n cupy.testing.assert_array_equal(n.data, self.m.data)\n cupy.testing.assert_array_equal(n.indices, self.m.indices)\n cupy.testing.assert_array_equal(n.indptr, self.m.indptr)\n self.assertEqual(n.shape, self.m.shape)\n\n def test_init_copy_other_sparse(self):\n n = sparse.csc_matrix(self.m.tocsr())\n cupy.testing.assert_array_equal(n.data, self.m.data)\n cupy.testing.assert_array_equal(n.indices, self.m.indices)\n cupy.testing.assert_array_equal(n.indptr, self.m.indptr)\n self.assertEqual(n.shape, self.m.shape)\n\n 
@unittest.skipUnless(scipy_available, 'requires scipy')\n def test_init_copy_scipy_sparse(self):\n m = _make(numpy, scipy.sparse, self.dtype)\n n = sparse.csc_matrix(m)\n self.assertIsInstance(n.data, cupy.ndarray)\n self.assertIsInstance(n.indices, cupy.ndarray)\n self.assertIsInstance(n.indptr, cupy.ndarray)\n cupy.testing.assert_array_equal(n.data, m.data)\n cupy.testing.assert_array_equal(n.indices, m.indices)\n cupy.testing.assert_array_equal(n.indptr, m.indptr)\n self.assertEqual(n.shape, m.shape)\n\n @unittest.skipUnless(scipy_available, 'requires scipy')\n def test_init_copy_other_scipy_sparse(self):\n m = _make(numpy, scipy.sparse, self.dtype)\n n = sparse.csc_matrix(m.tocsr())\n self.assertIsInstance(n.data, cupy.ndarray)\n self.assertIsInstance(n.indices, cupy.ndarray)\n self.assertIsInstance(n.indptr, cupy.ndarray)\n cupy.testing.assert_array_equal(n.data, m.data)\n cupy.testing.assert_array_equal(n.indices, m.indices)\n cupy.testing.assert_array_equal(n.indptr, m.indptr)\n self.assertEqual(n.shape, m.shape)\n\n def test_init_dense(self):\n m = cupy.array([[0, 1, 0, 2],\n [0, 0, 0, 0],\n [0, 0, 0, 3]], dtype=self.dtype)\n n = sparse.csc_matrix(m)\n self.assertEqual(n.nnz, 3)\n self.assertEqual(n.shape, (3, 4))\n cupy.testing.assert_array_equal(n.data, [1, 2, 3])\n cupy.testing.assert_array_equal(n.indices, [0, 0, 2])\n cupy.testing.assert_array_equal(n.indptr, [0, 0, 1, 1, 3])\n\n def test_init_dense_empty(self):\n m = cupy.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]], dtype=self.dtype)\n n = sparse.csc_matrix(m)\n self.assertEqual(n.nnz, 0)\n self.assertEqual(n.shape, (3, 4))\n cupy.testing.assert_array_equal(n.data, [])\n cupy.testing.assert_array_equal(n.indices, [])\n cupy.testing.assert_array_equal(n.indptr, [0, 0, 0, 0, 0])\n\n def test_init_dense_one_dim(self):\n m = cupy.array([0, 1, 0, 2], dtype=self.dtype)\n n = sparse.csc_matrix(m)\n self.assertEqual(n.nnz, 2)\n self.assertEqual(n.shape, (1, 4))\n cupy.testing.assert_array_equal(n.data, [1, 2])\n cupy.testing.assert_array_equal(n.indices, [0, 0])\n cupy.testing.assert_array_equal(n.indptr, [0, 0, 1, 1, 2])\n\n def test_init_dense_zero_dim(self):\n m = cupy.array(1, dtype=self.dtype)\n n = sparse.csc_matrix(m)\n self.assertEqual(n.nnz, 1)\n self.assertEqual(n.shape, (1, 1))\n cupy.testing.assert_array_equal(n.data, [1])\n cupy.testing.assert_array_equal(n.indices, [0])\n cupy.testing.assert_array_equal(n.indptr, [0, 1])\n\n @unittest.skipUnless(scipy_available, 'requires scipy')\n @testing.numpy_cupy_raises(sp_name='sp', accept_error=TypeError)\n def test_init_dense_invalid_ndim(self, xp, sp):\n m = xp.zeros((1, 1, 1), dtype=self.dtype)\n sp.csc_matrix(m)\n\n def test_copy(self):\n n = self.m.copy()\n self.assertIsInstance(n, sparse.csc_matrix)\n self.assertIsNot(n, self.m)\n self.assertIsNot(n.data, self.m.data)\n self.assertIsNot(n.indices, self.m.indices)\n self.assertIsNot(n.indptr, self.m.indptr)\n cupy.testing.assert_array_equal(n.data, self.m.data)\n cupy.testing.assert_array_equal(n.indices, self.m.indices)\n cupy.testing.assert_array_equal(n.indptr, self.m.indptr)\n self.assertEqual(n.shape, self.m.shape)\n\n def test_shape(self):\n self.assertEqual(self.m.shape, (3, 4))\n\n def test_ndim(self):\n self.assertEqual(self.m.ndim, 2)\n\n def test_nnz(self):\n self.assertEqual(self.m.nnz, 4)\n\n def test_conj(self):\n n = _make_complex(cupy, sparse, self.dtype)\n cupy.testing.assert_array_equal(n.conj().data, n.data.conj())\n\n @unittest.skipUnless(scipy_available, 'requires scipy')\n def test_get(self):\n 
m = self.m.get()\n self.assertIsInstance(m, scipy.sparse.csc_matrix)\n expect = [\n [0, 1, 0, 0],\n [0, 0, 0, 2],\n [0, 0, 3, 0]\n ]\n numpy.testing.assert_allclose(m.toarray(), expect)\n\n @unittest.skipUnless(scipy_available, 'requires scipy')\n def test_str(self):\n if numpy.dtype(self.dtype).kind == 'f':\n expect = ''' (0, 0)\\t0.0\n (0, 1)\\t1.0\n (2, 2)\\t3.0\n (1, 3)\\t2.0'''\n elif numpy.dtype(self.dtype).kind == 'c':\n expect = ''' (0, 0)\\t0j\n (0, 1)\\t(1+0j)\n (2, 2)\\t(3+0j)\n (1, 3)\\t(2+0j)'''\n\n self.assertEqual(str(self.m), expect)\n\n def test_toarray(self):\n m = self.m.toarray()\n expect = [\n [0, 1, 0, 0],\n [0, 0, 0, 2],\n [0, 0, 3, 0]\n ]\n self.assertTrue(m.flags.c_contiguous)\n cupy.testing.assert_allclose(m, expect)\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n}))\[email protected](scipy_available, 'requires scipy')\nclass TestCscMatrixInit(unittest.TestCase):\n\n def setUp(self):\n self.shape = (3, 4)\n\n def data(self, xp):\n return xp.array([1, 2, 3, 4], self.dtype)\n\n def indices(self, xp):\n return xp.array([0, 0, 2, 1], 'i')\n\n def indptr(self, xp):\n return xp.array([0, 1, 2, 3, 4], 'i')\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_shape_none(self, xp, sp):\n x = sp.csc_matrix(\n (self.data(xp), self.indices(xp), self.indptr(xp)), shape=None)\n self.assertEqual(x.shape, (3, 4))\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_dtype(self, xp, sp):\n data = self.data(xp).real.astype('i')\n x = sp.csc_matrix(\n (data, self.indices(xp), self.indptr(xp)), dtype=self.dtype)\n self.assertEqual(x.dtype, self.dtype)\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_copy_true(self, xp, sp):\n data = self.data(xp)\n indices = self.indices(xp)\n indptr = self.indptr(xp)\n x = sp.csc_matrix((data, indices, indptr), copy=True)\n\n self.assertIsNot(data, x.data)\n self.assertIsNot(indices, x.indices)\n self.assertIsNot(indptr, x.indptr)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_init_with_shape(self, xp, sp):\n s = sp.csc_matrix(self.shape)\n self.assertEqual(s.shape, self.shape)\n self.assertEqual(s.dtype, 'd')\n self.assertEqual(s.size, 0)\n return s\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_init_with_shape_and_dtype(self, xp, sp):\n s = sp.csc_matrix(self.shape, dtype=self.dtype)\n self.assertEqual(s.shape, self.shape)\n self.assertEqual(s.dtype, self.dtype)\n self.assertEqual(s.size, 0)\n return s\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_shape_invalid(self, xp, sp):\n sp.csc_matrix(\n (self.data(xp), self.indices(xp), self.indptr(xp)), shape=(2,))\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_data_invalid(self, xp, sp):\n sp.csc_matrix(\n ('invalid', self.indices(xp), self.indptr(xp)), shape=self.shape)\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_data_invalid_ndim(self, xp, sp):\n sp.csc_matrix(\n (self.data(xp)[None], self.indices(xp), self.indptr(xp)),\n shape=self.shape)\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_indices_invalid(self, xp, sp):\n sp.csc_matrix(\n (self.data(xp), 'invalid', self.indptr(xp)), shape=self.shape)\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_indices_invalid_ndim(self, xp, sp):\n sp.csc_matrix(\n (self.data(xp), self.indices(xp)[None], self.indptr(xp)),\n shape=self.shape)\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_indptr_invalid(self, xp, sp):\n sp.csc_matrix(\n (self.data(xp), self.indices(xp), 'invalid'), shape=self.shape)\n\n 
@testing.numpy_cupy_raises(sp_name='sp')\n def test_indptr_invalid_ndim(self, xp, sp):\n sp.csc_matrix(\n (self.data(xp), self.indices(xp), self.indptr(xp)[None]),\n shape=self.shape)\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_data_indices_different_length(self, xp, sp):\n data = xp.arange(5, dtype=self.dtype)\n sp.csc_matrix(\n (data, self.indices(xp), self.indptr(xp)), shape=self.shape)\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_indptr_invalid_length(self, xp, sp):\n indptr = xp.array([0, 1], 'i')\n sp.csc_matrix(\n (self.data(xp), self.indices(xp), indptr), shape=self.shape)\n\n def test_unsupported_dtype(self):\n with self.assertRaises(ValueError):\n sparse.csc_matrix(\n (self.data(cupy), self.indices(cupy), self.indptr(cupy)),\n shape=self.shape, dtype='i')\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_conj(self, xp, sp):\n n = _make_complex(xp, sp, self.dtype)\n cupy.testing.assert_array_equal(n.conj().data, n.data.conj())\n\n\[email protected](*testing.product({\n 'make_method': [\n '_make', '_make_unordered', '_make_empty', '_make_duplicate',\n '_make_shape'],\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n}))\[email protected](scipy_available, 'requires scipy')\nclass TestCscMatrixScipyComparison(unittest.TestCase):\n\n @property\n def make(self):\n return globals()[self.make_method]\n\n @testing.numpy_cupy_raises(sp_name='sp', accept_error=TypeError)\n def test_len(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n len(m)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_asfptype(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.asfptype()\n\n @testing.numpy_cupy_allclose(sp_name='sp', contiguous_check=False)\n def test_toarray(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n a = m.toarray()\n if sp is sparse:\n self.assertTrue(a.flags.c_contiguous)\n return a\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_toarray_c_order(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n a = m.toarray(order='C')\n self.assertTrue(a.flags.c_contiguous)\n return a\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_toarray_f_order(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n a = m.toarray(order='F')\n self.assertTrue(a.flags.f_contiguous)\n return a\n\n @testing.numpy_cupy_raises(sp_name='sp', accept_error=TypeError)\n def test_toarray_unknown_order(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m.toarray(order='#')\n\n @testing.numpy_cupy_allclose(sp_name='sp', contiguous_check=False)\n def test_A(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.A\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_tocoo(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.tocoo()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_tocoo_copy(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = m.tocoo(copy=True)\n self.assertIsNot(m.data, n.data)\n return n\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_tocsc(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.tocsc()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_tocsc_copy(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = m.tocsc(copy=True)\n self.assertIsNot(m.data, n.data)\n self.assertIsNot(m.indices, n.indices)\n self.assertIsNot(m.indptr, n.indptr)\n return n\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_tocsr(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.tocsr()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def 
test_tocsr_copy(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = m.tocsr(copy=True)\n self.assertIsNot(m.data, n.data)\n self.assertIsNot(m.indices, n.indices)\n self.assertIsNot(m.indptr, n.indptr)\n return n\n\n # dot\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_scalar(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.dot(2.0)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_numpy_scalar(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.dot(numpy.dtype(self.dtype).type(2.0))\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_csr(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype)\n return m.dot(x)\n\n @testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)\n def test_dot_csr_invalid_shape(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = sp.csr_matrix((5, 3), dtype=self.dtype)\n m.dot(x)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_csc(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocsc()\n return m.dot(x)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_sparse(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocoo()\n return m.dot(x)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_zero_dim(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.array(2, dtype=self.dtype)\n return m.dot(x)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_dense_vector(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(4).astype(self.dtype)\n return m.dot(x)\n\n @testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)\n def test_dot_dense_vector_invalid_shape(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(5).astype(self.dtype)\n m.dot(x)\n\n @testing.numpy_cupy_allclose(sp_name='sp', contiguous_check=False)\n def test_dot_dense_matrix(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(8).reshape(4, 2).astype(self.dtype)\n return m.dot(x)\n\n @testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)\n def test_dot_dense_matrix_invalid_shape(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(10).reshape(5, 2).astype(self.dtype)\n m.dot(x)\n\n @testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)\n def test_dot_dense_ndim3(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(24).reshape(4, 2, 3).astype(self.dtype)\n m.dot(x)\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_dot_unsupported(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m.dot(None)\n\n # __add__\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_add_zero(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m + 0\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_add_scalar(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m + 1\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_add_csr(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = _make2(xp, sp, self.dtype)\n return m + n\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_add_coo(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = _make2(xp, sp, self.dtype).tocoo()\n return m + n\n\n @testing.numpy_cupy_allclose(sp_name='sp', contiguous_check=False)\n def test_add_dense(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = xp.arange(12).reshape(3, 4)\n return m + n\n\n # __radd__\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_radd_zero(self, xp, sp):\n m = 
self.make(xp, sp, self.dtype)\n return 0 + m\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_radd_scalar(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n 1 + m\n\n @testing.numpy_cupy_allclose(sp_name='sp', contiguous_check=False)\n def test_radd_dense(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = xp.arange(12).reshape(3, 4)\n return n + m\n\n # __sub__\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sub_zero(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m - 0\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_sub_scalar(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m - 1\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sub_csr(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = _make2(xp, sp, self.dtype)\n return m - n\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sub_coo(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = _make2(xp, sp, self.dtype).tocoo()\n return m - n\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sub_dense(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = xp.arange(12).reshape(3, 4)\n return m - n\n\n # __rsub__\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rsub_zero(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return 0 - m\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_rsub_scalar(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n 1 - m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rsub_dense(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = xp.arange(12).reshape(3, 4)\n return n - m\n\n # __mul__\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_scalar(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m * 2.0\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_numpy_scalar(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m * numpy.dtype(self.dtype).type(2.0)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_csr(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype)\n return m * x\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_csc(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocsc()\n return m * x\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_sparse(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocoo()\n return m * x\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_zero_dim(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.array(2, dtype=self.dtype)\n return m * x\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_dense_vector(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(4).astype(self.dtype)\n return m * x\n\n @testing.numpy_cupy_allclose(sp_name='sp', contiguous_check=False)\n def test_mul_dense_matrix(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(8).reshape(4, 2).astype(self.dtype)\n return m * x\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_mul_dense_ndim3(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(24).reshape(4, 2, 3).astype(self.dtype)\n m * x\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_mul_unsupported(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m * None\n\n # __rmul__\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rmul_scalar(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return 2.0 * m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rmul_numpy_scalar(self, xp, sp):\n 
m = self.make(xp, sp, self.dtype)\n return numpy.dtype(self.dtype).type(2.0) * m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rmul_csr(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype)\n return x * m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rmul_csc(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocsc()\n return x * m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rmul_sparse(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocoo()\n return x * m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rmul_zero_dim(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.array(2, dtype=self.dtype)\n return x * m\n\n @testing.numpy_cupy_allclose(sp_name='sp', contiguous_check=False)\n def test_rmul_dense_matrix(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(12).reshape(4, 3).astype(self.dtype)\n return x * m\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_rmul_dense_ndim3(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(24).reshape(4, 2, 3).astype(self.dtype)\n x * m\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_rmul_unsupported(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n # TODO(unno): When a sparse matrix has no element, scipy.sparse\n # does not raise an error.\n if m.nnz == 0:\n raise Exception\n None * m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sort_indices(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m.sort_indices()\n return m\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_sum_tuple_axis(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m.sum(axis=(0, 1))\n\n @testing.numpy_cupy_raises(sp_name='sp')\n def test_sum_too_large_axis(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m.sum(axis=3)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sum_duplicates(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m.sum_duplicates()\n self.assertTrue(m.has_canonical_format)\n return m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_transpose(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.transpose()\n\n @testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)\n def test_transpose_axes_int(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m.transpose(axes=0)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_eliminate_zeros(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m.eliminate_zeros()\n return m\n\n @testing.numpy_cupy_equal(sp_name='sp')\n @unittest.skipIf(\n cupy.cuda.runtime.runtimeGetVersion() < 8000,\n 'CUDA <8 cannot keep number of non-zero entries ')\n def test_eliminate_zeros_nnz(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m.eliminate_zeros()\n return m.nnz\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64],\n 'ret_dtype': [None, numpy.float32, numpy.float64],\n 'axis': [None, 0, 1, -1, -2],\n}))\[email protected](scipy_available, 'requires scipy')\nclass TestCscMatrixSum(unittest.TestCase):\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sum(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m.sum(axis=self.axis, dtype=self.ret_dtype)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sum_with_out(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n if self.axis is None:\n shape = ()\n else:\n shape = list(m.shape)\n shape[self.axis] = 1\n shape = tuple(shape)\n out = xp.empty(shape, dtype=self.ret_dtype)\n if 
xp is numpy:\n # TODO(unno): numpy.matrix is used for scipy.sparse though\n # cupy.ndarray is used for cupyx.scipy.sparse.\n out = xp.asmatrix(out)\n return m.sum(axis=self.axis, dtype=self.ret_dtype, out=out)\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n}))\[email protected](scipy_available, 'requires scipy')\nclass TestCscMatrixScipyCompressed(unittest.TestCase):\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_get_shape(self, xp, sp):\n return _make(xp, sp, self.dtype).get_shape()\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_getnnz(self, xp, sp):\n return _make(xp, sp, self.dtype).getnnz()\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n}))\[email protected](scipy_available, 'requires scipy')\nclass TestCscMatrixData(unittest.TestCase):\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_dtype(self, xp, sp):\n return _make(xp, sp, self.dtype).dtype\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_abs(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return abs(m)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_neg(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return (-m)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_astype(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n if numpy.dtype(self.dtype).kind == 'c':\n t = 'D'\n else:\n t = 'd'\n return m.astype(t)\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_count_nonzero(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m.count_nonzero()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_power(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m.power(2)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_power_with_dtype(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n if numpy.dtype(self.dtype).kind == 'c':\n t = 'D'\n else:\n t = 'd'\n return m.power(2, t)\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n 'ufunc': [\n 'arcsin', 'arcsinh', 'arctan', 'arctanh', 'ceil', 'deg2rad', 'expm1',\n 'floor', 'log1p', 'rad2deg', 'rint', 'sign', 'sin', 'sinh', 'sqrt',\n 'tan', 'tanh', 'trunc',\n ],\n}))\[email protected](scipy_available, 'requires scipy')\nclass TestUfunc(unittest.TestCase):\n\n @testing.numpy_cupy_allclose(sp_name='sp', atol=1e-5)\n def test_ufun(self, xp, sp):\n x = _make(xp, sp, self.dtype)\n x.data *= 0.1\n func = getattr(x, self.ufunc)\n complex_unsupported = {'ceil', 'deg2rad', 'floor', 'rad2deg', 'trunc'}\n if (numpy.dtype(self.dtype).kind == 'c' and\n self.ufunc in complex_unsupported):\n with self.assertRaises(TypeError):\n func()\n return numpy.array(0)\n else:\n return func()\n\n\nclass TestIsspmatrixCsc(unittest.TestCase):\n\n def test_csr(self):\n x = sparse.csr_matrix(\n (cupy.array([], 'f'),\n cupy.array([], 'i'),\n cupy.array([0], 'i')),\n shape=(0, 0), dtype='f')\n self.assertFalse(sparse.isspmatrix_csc(x))\n\n def test_csc(self):\n x = sparse.csc_matrix(\n (cupy.array([], 'f'),\n cupy.array([], 'i'),\n cupy.array([0], 'i')),\n shape=(0, 0), dtype='f')\n self.assertTrue(sparse.isspmatrix_csc(x))\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n}))\[email protected](scipy_available, 'requires scipy')\nclass TestCsrMatrixGetitem(unittest.TestCase):\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_getitem_int_int(self, xp, sp):\n 
self.assertEqual(_make(xp, sp, self.dtype)[0, 1], 1)\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_getitem_int_int_not_found(self, xp, sp):\n self.assertEqual(_make(xp, sp, self.dtype)[1, 1], 0)\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_getitem_int_int_negative(self, xp, sp):\n self.assertEqual(_make(xp, sp, self.dtype)[-1, -2], 3)\n\n @testing.numpy_cupy_raises(sp_name='sp', accept_error=IndexError)\n def test_getitem_int_int_too_small_row(self, xp, sp):\n _make(xp, sp, self.dtype)[-4, 0]\n\n @testing.numpy_cupy_raises(sp_name='sp', accept_error=IndexError)\n def test_getitem_int_int_too_large_row(self, xp, sp):\n _make(xp, sp, self.dtype)[3, 0]\n\n @testing.numpy_cupy_raises(sp_name='sp', accept_error=IndexError)\n def test_getitem_int_int_too_small_col(self, xp, sp):\n _make(xp, sp, self.dtype)[0, -5]\n\n @testing.numpy_cupy_raises(sp_name='sp', accept_error=IndexError)\n def test_getitem_int_int_too_large_col(self, xp, sp):\n _make(xp, sp, self.dtype)[0, 4]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_int(self, xp, sp):\n return _make(xp, sp, self.dtype)[:, 1]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_negative_int(self, xp, sp):\n return _make(xp, sp, self.dtype)[:, -1]\n\n @testing.numpy_cupy_raises(sp_name='sp', accept_error=IndexError)\n def test_getitem_int_too_small(self, xp, sp):\n _make(xp, sp, self.dtype)[:, -5]\n\n @testing.numpy_cupy_raises(sp_name='sp', accept_error=IndexError)\n def test_getitem_int_too_large(self, xp, sp):\n _make(xp, sp, self.dtype)[:, 4]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_slice(self, xp, sp):\n return _make(xp, sp, self.dtype)[:, 1:3]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_slice_negative(self, xp, sp):\n return _make(xp, sp, self.dtype)[:, -2:-1]\n\n @testing.numpy_cupy_raises(sp_name='sp', accept_error=IndexError)\n def test_getitem_slice_start_larger_than_stop(self, xp, sp):\n _make(xp, sp, self.dtype)[:, 3:2]\n\n def test_getitem_slice_step_2(self):\n with self.assertRaises(ValueError):\n _make(cupy, sparse, self.dtype)[:, 0::2]\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n}))\[email protected]_requires('scipy>=1.0')\nclass TestCsrMatrixGetitem2(unittest.TestCase):\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_slice_start_too_small(self, xp, sp):\n return _make(xp, sp, self.dtype)[:, -5:None]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_slice_start_too_large(self, xp, sp):\n return _make(xp, sp, self.dtype)[:, 5:None]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_slice_stop_too_small(self, xp, sp):\n return _make(xp, sp, self.dtype)[:, None:-5]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_slice_stop_too_large(self, xp, sp):\n return _make(xp, sp, self.dtype)[:, None:5]\n",
"import unittest\n\nimport numpy as np\n\nimport cupy\nfrom cupy import testing\n\n\[email protected]\nclass TestArrayUfunc(unittest.TestCase):\n\n @testing.with_requires('numpy>=1.13')\n @testing.for_all_dtypes()\n def test_unary_op(self, dtype):\n a = cupy.array(np.array([0, 1, 2]), dtype=dtype)\n outa = np.sin(a)\n # numpy operation produced a cupy array\n self.assertTrue(isinstance(outa, cupy.ndarray))\n b = a.get()\n outb = np.sin(b)\n self.assertTrue(np.allclose(outa.get(), outb))\n\n @testing.with_requires('numpy>=1.13')\n @testing.for_all_dtypes()\n def test_unary_op_out(self, dtype):\n a = cupy.array(np.array([0, 1, 2]), dtype=dtype)\n b = a.get()\n outb = np.sin(b)\n # pre-make output with same type as input\n outa = cupy.array(np.array([0, 1, 2]), dtype=outb.dtype)\n np.sin(a, out=outa)\n self.assertTrue(np.allclose(outa.get(), outb))\n\n @testing.with_requires('numpy>=1.13')\n @testing.for_all_dtypes()\n def test_binary_op(self, dtype):\n a1 = cupy.array(np.array([0, 1, 2]), dtype=dtype)\n a2 = cupy.array(np.array([0, 1, 2]), dtype=dtype)\n outa = np.add(a1, a2)\n # numpy operation produced a cupy array\n self.assertTrue(isinstance(outa, cupy.ndarray))\n b1 = a1.get()\n b2 = a2.get()\n outb = np.add(b1, b2)\n self.assertTrue(np.allclose(outa.get(), outb))\n\n @testing.with_requires('numpy>=1.13')\n @testing.for_all_dtypes()\n def test_binary_op_out(self, dtype):\n a1 = cupy.array(np.array([0, 1, 2]), dtype=dtype)\n a2 = cupy.array(np.array([0, 1, 2]), dtype=dtype)\n outa = cupy.array(np.array([0, 1, 2]), dtype=dtype)\n np.add(a1, a2, out=outa)\n b1 = a1.get()\n b2 = a2.get()\n outb = np.add(b1, b2)\n self.assertTrue(np.allclose(outa.get(), outb))\n\n @testing.with_requires('numpy>=1.13')\n @testing.for_all_dtypes()\n def test_binary_mixed_op(self, dtype):\n a1 = cupy.array(np.array([0, 1, 2]), dtype=dtype)\n a2 = cupy.array(np.array([0, 1, 2]), dtype=dtype).get()\n with self.assertRaises(TypeError):\n # attempt to add cupy and numpy arrays\n np.add(a1, a2)\n with self.assertRaises(TypeError):\n # check reverse order\n np.add(a2, a1)\n with self.assertRaises(TypeError):\n # reject numpy output from cupy\n np.add(a1, a1, out=a2)\n with self.assertRaises(TypeError):\n # reject cupy output from numpy\n np.add(a2, a2, out=a1)\n with self.assertRaises(ValueError):\n # bad form for out=\n # this is also an error with numpy array\n np.sin(a1, out=())\n with self.assertRaises(ValueError):\n # bad form for out=\n # this is also an error with numpy array\n np.sin(a1, out=(a1, a1))\n\n @testing.numpy_cupy_array_equal()\n def test_indexing(self, xp):\n a = cupy.testing.shaped_arange((3, 1), xp)[:, :, None]\n b = cupy.testing.shaped_arange((3, 2), xp)[:, None, :]\n return a * b\n"
] | [
[
"numpy.array",
"numpy.dtype"
],
[
"numpy.add",
"numpy.array",
"numpy.sin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
darrenf0209/PFNL | [
"e8316b4e2460bac8785a878e0814c3221f47fd8c"
] | [
"modules/videosr_ops.py"
] | [
"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.ops import control_flow_ops\n# re-do slim\nimport tf_slim as slim\n\n# OLD\n#slim = tf.contrib.slim\n\n\ndef im2uint8(x):\n if x.__class__ == tf.Tensor:\n return tf.cast(tf.clip_by_value(x, 0.0, 1.0) * 255.0, tf.uint8)\n else:\n t = np.clip(x, 0.0, 1.0) * 255.0\n return t.astype(np.uint8)\n\n\ndef get_shape(x):\n shape = tf.shape(x)\n check = tf.Assert(tf.reduce_all(shape >= 0), [\"EASYFLOW: Need value.shape >= 0, got \", shape])\n shape = control_flow_ops.with_dependencies([check], shape)\n return [shape[i] for i in range(shape.shape.as_list()[0])]\n\n\ndef zero_upsampling(x, scale_factor):\n dims = x.get_shape().as_list()\n if len(dims) == 5:\n n, t, h, w, c = dims\n y = tf.concat([x] + [tf.zeros_like(x)] * (scale_factor ** 2 - 1), -1)\n y = tf.reshape(y, [n, t, h, w, scale_factor, scale_factor, c])\n y = tf.transpose(y, [0, 1, 2, 4, 3, 5, 6])\n y = tf.reshape(y, [n, t, h * scale_factor, w * scale_factor, c])\n elif len(dims) == 4:\n n, h, w, c = dims\n y = tf.concat([x] + [tf.zeros_like(x)] * (scale_factor ** 2 - 1), -1)\n y = tf.reshape(y, [n, h, w, scale_factor, scale_factor, c])\n y = tf.transpose(y, [0, 1, 3, 2, 4, 5])\n y = tf.reshape(y, [n, h * scale_factor, w * scale_factor, c])\n return y\n\n\ndef leaky_relu(x, alpha=0.1):\n return tf.maximum(x, alpha * x)\n\n\ndef prelu(x):\n alphas = tf.get_variable('alpha', x.get_shape()[-1],\n initializer=tf.constant_initializer(0.0),\n dtype=tf.float32)\n pos = tf.nn.relu(x)\n neg = alphas * (x - tf.abs(x)) * 0.5\n\n return pos + neg\n\n\ndef display_tf_variables(train_vars):\n print('Training Variables: ')\n for var in train_vars:\n print('\\t', var.name)\n\n\ndef resize_images(images, size, method=2, align_corners=False):\n dims = len(images.get_shape())\n if dims == 5:\n n, t, h, w, c = images.get_shape().as_list()\n images = tf.reshape(images, [n * t, h, w, c])\n images = tf.image.resize_images(images, size, method, align_corners)\n if dims == 5:\n images = tf.reshape(images, [n, t, size[0], size[1], c])\n return images\n\n\ndef rgb2y(inputs):\n with tf.name_scope('rgb2y'):\n if inputs.get_shape()[-1].value == 1:\n return inputs\n assert inputs.get_shape()[-1].value == 3, 'Error: rgb2y input should be RGB or grayscale!'\n dims = len(inputs.get_shape())\n if dims == 4:\n scale = tf.reshape([65.481, 128.553, 24.966], [1, 1, 1, 3]) / 255.0\n elif dims == 5:\n scale = tf.reshape([65.481, 128.553, 24.966], [1, 1, 1, 1, 3]) / 255.0\n output = tf.reduce_sum(inputs * scale, reduction_indices=dims - 1, keep_dims=True)\n output = output + 16 / 255.0\n return output\n\n\ndef rgb2ycbcr(inputs):\n with tf.name_scope('rgb2ycbcr'):\n if inputs.get_shape()[-1].value == 1:\n return inputs\n assert inputs.get_shape()[-1].value == 3, 'Error: rgb2ycbcr input should be RGB or grayscale!'\n ndims = len(inputs.get_shape())\n origT = [[65.481, 128.553, 24.966], [-37.797, -74.203, 112], [112, -93.786, -18.214]]\n origOffset = [16.0, 128.0, 128.0]\n if ndims == 4:\n origT = [tf.reshape(origT[i], [1, 1, 1, 3]) / 255.0 for i in range(3)]\n elif ndims == 5:\n origT = [tf.reshape(origT[i], [1, 1, 1, 1, 3]) / 255.0 for i in range(3)]\n output = []\n for i in range(3):\n output.append(tf.reduce_sum(inputs * origT[i], reduction_indices=-1, keep_dims=True) + origOffset[i] / 255.0)\n return tf.concat(output, -1)\n\n\ndef ycbcr2rgb(inputs):\n with tf.name_scope('ycbcr2rgb'):\n if inputs.get_shape()[-1].value == 1:\n return inputs\n assert inputs.get_shape()[-1].value == 3, 'Error: rgb2ycbcr input should 
be RGB or grayscale!'\n ndims = len(inputs.get_shape())\n # origT = np.array([[65.481, 128.553, 24.966], [-37.797 -74.203 112], [112 -93.786 -18.214]])\n # T = tf.inv(origT)\n Tinv = [[0.00456621, 0., 0.00625893], [0.00456621, -0.00153632, -0.00318811], [0.00456621, 0.00791071, 0.]]\n origOffset = [16.0, 128.0, 128.0]\n if ndims == 4:\n origT = [tf.reshape(Tinv[i], [1, 1, 1, 3]) * 255.0 for i in range(3)]\n origOffset = tf.reshape(origOffset, [1, 1, 1, 3]) / 255.0\n elif ndims == 5:\n origT = [tf.reshape(Tinv[i], [1, 1, 1, 1, 3]) * 255.0 for i in range(3)]\n origOffset = tf.reshape(origOffset, [1, 1, 1, 1, 3]) / 255.0\n output = []\n for i in range(3):\n output.append(tf.reduce_sum((inputs - origOffset) * origT[i], reduction_indices=-1, keep_dims=True))\n return tf.concat(output, -1)\n \n\ndef rgb2gray(inputs):\n with tf.name_scope('rgb2gray'):\n if inputs.get_shape()[-1].value == 1:\n return inputs\n assert inputs.get_shape()[-1].value == 3, 'Error: rgb2y input should be RGB or grayscale!'\n dims = len(inputs.get_shape())\n if dims == 4:\n scale = tf.reshape([0.299, 0.587, 0.114], [1, 1, 1, 3])\n elif dims == 5:\n scale = tf.reshape([0.299, 0.587, 0.114], [1, 1, 1, 1, 3])\n output = tf.reduce_sum(inputs * scale, reduction_indices=dims - 1, keep_dims=True)\n return output\n\n\ndef flowToColor(flow, maxflow=None):\n def makeColorwheel():\n RY = 15\n YG = 6\n GC = 4\n CB = 11\n BM = 13\n MR = 6\n\n ncols = RY + YG + GC + CB + BM + MR\n\n colorwheel = np.zeros([ncols, 3], dtype=np.float32) # r g b\n\n col = 0\n # RY\n colorwheel[0:RY, 0] = 255.0\n colorwheel[0:RY, 1] = np.floor(np.multiply(255.0 / RY, range(RY)))\n col = col + RY\n # YG\n colorwheel[col + np.arange(0, YG), 0] = 255.0 - np.floor(np.multiply(255.0 / YG, range(YG)))\n colorwheel[col + np.arange(0, YG), 1] = 255.0\n col = col + YG\n # GC\n colorwheel[col + np.arange(0, GC), 1] = 255.0\n colorwheel[col + np.arange(0, GC), 2] = np.floor(np.multiply(255.0 / GC, range(GC)))\n col = col + GC\n # CB\n colorwheel[col + np.arange(0, CB), 1] = 255.0 - np.floor(np.multiply(255.0 / CB, range(CB)))\n colorwheel[col + np.arange(0, CB), 2] = 255.0\n col = col + CB\n # BM\n colorwheel[col + np.arange(0, BM), 2] = 255.0\n colorwheel[col + np.arange(0, BM), 0] = np.floor(np.multiply(255.0 / BM, range(BM)))\n col = col + BM\n # MR\n colorwheel[col + np.arange(0, MR), 2] = 255.0 - np.floor(np.multiply(255.0 / MR, range(MR)))\n colorwheel[col + np.arange(0, MR), 0] = 255.0\n return colorwheel\n\n def atan2(y, x):\n angle = tf.where(tf.greater(x, 0.0), tf.atan(y / x), tf.zeros_like(x))\n angle = tf.where(tf.logical_and(tf.less(x, 0.0), tf.greater_equal(y, 0.0)), tf.atan(y / x) + np.pi, angle)\n angle = tf.where(tf.logical_and(tf.less(x, 0.0), tf.less(y, 0.0)), tf.atan(y / x) - np.pi, angle)\n angle = tf.where(tf.logical_and(tf.equal(x, 0.0), tf.greater(y, 0.0)), 0.5 * np.pi * tf.ones_like(x), angle)\n angle = tf.where(tf.logical_and(tf.equal(x, 0.0), tf.less(y, 0.0)), -0.5 * np.pi * tf.ones_like(x), angle)\n angle = tf.where(tf.logical_and(tf.equal(x, 0.0), tf.equal(y, 0.0)), np.nan * tf.zeros_like(x), angle)\n return angle\n eps = 2.2204e-16\n\n u = flow[:, :, :, 0]\n v = flow[:, :, :, 1]\n\n if maxflow is not None:\n maxrad = maxflow\n else:\n rad = tf.sqrt(u ** 2 + v ** 2)\n maxrad = tf.reduce_max(rad)\n\n u /= (maxrad + eps)\n v /= (maxrad + eps)\n rad = tf.sqrt(u ** 2 + v ** 2)\n \n colorwheel = makeColorwheel()\n ncols = colorwheel.shape[0]\n\n a = atan2(-v, -u) / np.pi\n fk = (a + 1) / 2 * (ncols - 1) # -1~1 maped to 0 ~ ncols-1\n k0 = 
tf.floor(fk)\n k1 = (k0 + 1) % ncols\n # k1[k1 == ncols] = 0\n f = fk - k0\n k0 = tf.cast(k0, tf.int32)\n k1 = tf.cast(k1, tf.int32)\n\n \n col0 = tf.gather(colorwheel, k0) / 255.0\n col1 = tf.gather(colorwheel, k1) / 255.0\n f = tf.expand_dims(f, dim=-1)\n col = (1 - f) * col0 + f * col1\n\n idx = tf.tile(tf.expand_dims(rad <= 1, dim=-1), [1, 1, 1, 3])\n rad = tf.expand_dims(rad, dim=-1)\n col = tf.where(idx, 1 - rad * (1 - col), col * 0.75)\n\n img = tf.cast(tf.floor(255.0 * col), tf.uint8)\n return img\n\n\ndef channel2sub(x, scale_factor):\n dims = len(x.get_shape())\n if dims == 5:\n num_batch, num_frame, height, width, num_channels = map(lambda x: x.value, x.get_shape())\n out_height = height * scale_factor\n out_width = width * scale_factor\n out_channels = num_channels / scale_factor / scale_factor\n x = tf.reshape(x, [num_batch, num_frame, height, width, scale_factor, scale_factor, out_channels])\n x = tf.transpose(x, perm=[0, 1, 2, 4, 3, 5, 6])\n x = tf.reshape(x, [num_batch, num_frame, out_height, out_width, out_channels])\n else:\n num_batch, height, width, num_channels = map(lambda x: x.value, x.get_shape())\n out_height = height * scale_factor\n out_width = width * scale_factor\n out_channels = num_channels / scale_factor / scale_factor\n x = tf.reshape(x, [num_batch, height, width, scale_factor, scale_factor, out_channels])\n x = tf.transpose(x, perm=[0, 1, 3, 2, 4, 5])\n x = tf.reshape(x, [num_batch, out_height, out_width, out_channels])\n return x\n\n\ndef sub2channel(x, scale_factor):\n dims = len(x.get_shape())\n if dims == 5:\n num_batch, num_frame, out_height, out_width, num_channels = map(lambda x: x.value, x.get_shape())\n height = out_height / scale_factor\n width = out_width / scale_factor\n x = tf.reshape(x, [num_batch, num_frame, height, scale_factor, width, scale_factor, num_channels])\n x = tf.transpose(x, perm=[0, 1, 2, 4, 3, 5, 6])\n x = tf.reshape(x, [num_batch, num_frame, height, width, scale_factor * scale_factor * num_channels])\n else:\n num_batch, out_height, out_width, num_channels = map(lambda x: x.value, x.get_shape())\n height = out_height / scale_factor\n width = out_width / scale_factor\n x = tf.reshape(x, [num_batch, height, scale_factor, width, scale_factor, num_channels])\n x = tf.transpose(x, perm=[0, 1, 3, 2, 4, 5])\n x = tf.reshape(x, [num_batch, height, width, scale_factor * scale_factor * num_channels])\n return x\n\n\ndef _repeat(x, n_repeats):\n with tf.variable_scope('_repeat'):\n # rep = tf.transpose(\n # tf.expand_dims(tf.ones(shape=tf.pack([n_repeats, ])), 1), [1, 0])\n # rep = tf.cast(rep, 'int32')\n # with tf.device('/cpu:0'):\n # x = tf.matmul(tf.reshape(x, (-1, 1)), rep)\n # return tf.reshape(x, [-1])\n x = tf.reshape(x, [-1, 1])\n with tf.device('/cpu:0'):\n res = tf.tile(x, [1, n_repeats])\n res = tf.reshape(res, [-1])\n res = tf.cast(res, 'int32')\n return res\n\n\ndef meshgrid(height, width):\n with tf.variable_scope('_meshgrid'):\n # This should be equivalent to:\n # x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),\n # np.linspace(-1, 1, height))\n # ones = np.ones(np.prod(x_t.shape))\n # grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])\n\n # with tf.device('/cpu:0'):\n # x_t = tf.matmul(tf.ones(shape=tf.pack([height, 1])),\n # tf.transpose(tf.expand_dims(tf.linspace(0.0, -1.0 + width, width), 1), [1, 0]))\n # y_t = tf.matmul(tf.expand_dims(tf.linspace(0.0, -1.0 + height, height), 1),\n # tf.ones(shape=tf.pack([1, width])))\n # x_t = tf.expand_dims(x_t, 2)\n # y_t = tf.expand_dims(y_t, 2)\n # grid = tf.concat(2, [x_t, 
y_t])\n with tf.device('/cpu:0'):\n grid = tf.meshgrid(list(range(height)), list(range(width)), indexing='ij')\n grid = tf.cast(tf.stack(grid, axis=2)[:, :, ::-1], tf.float32)\n return grid\n\n\ndef imwarp_backward(uv, input_dim, out_size):\n def _interpolate_backward(im, x, y, out_size):\n with tf.variable_scope('_interp_b', reuse=False):\n # constants\n num_batch, height, width, channels = map(lambda x: x.value, im.get_shape())\n out_height = out_size[0]\n out_width = out_size[1]\n\n x = tf.cast(x, 'float32') * (out_height / height)\n y = tf.cast(y, 'float32') * (out_width / width)\n zero = tf.zeros([], dtype='int32')\n max_y = tf.cast(out_height - 1, 'int32')\n max_x = tf.cast(out_width - 1, 'int32')\n\n # do sampling\n x0 = tf.cast(tf.floor(x), 'int32')\n x1 = x0 + 1\n y0 = tf.cast(tf.floor(y), 'int32')\n y1 = y0 + 1\n x0_f = tf.cast(x0, 'float32')\n x1_f = tf.cast(x1, 'float32')\n y0_f = tf.cast(y0, 'float32')\n y1_f = tf.cast(y1, 'float32')\n\n x0 = tf.clip_by_value(x0, zero, max_x)\n x1 = tf.clip_by_value(x1, zero, max_x)\n y0 = tf.clip_by_value(y0, zero, max_y)\n y1 = tf.clip_by_value(y1, zero, max_y)\n dim2 = out_width\n dim1 = out_width * out_height\n\n base = _repeat(tf.range(num_batch) * dim1, height * width)\n base_y0 = base + y0 * dim2\n base_y1 = base + y1 * dim2\n idx_a = base_y0 + x0\n idx_b = base_y1 + x0\n idx_c = base_y0 + x1\n idx_d = base_y1 + x1\n\n # use indices to lookup pixels in the flat image and restore\n # channels dim\n im_flat = tf.reshape(im, tf.stack([-1, channels]))\n im_flat = tf.cast(im_flat, 'float32')\n Ia = tf.gather(im_flat, idx_a)\n Ib = tf.gather(im_flat, idx_b)\n Ic = tf.gather(im_flat, idx_c)\n Id = tf.gather(im_flat, idx_d)\n\n # and finally calculate interpolated values\n wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1)\n wb = tf.expand_dims(((x1_f - x) * (y - y0_f)), 1)\n wc = tf.expand_dims(((x - x0_f) * (y1_f - y)), 1)\n wd = tf.expand_dims(((x - x0_f) * (y - y0_f)), 1)\n output = tf.add_n([wa * Ia, wb * Ib, wc * Ic, wd * Id])\n return output\n\n with tf.variable_scope('imwarp_b'):\n dims = len(input_dim.get_shape())\n if dims == 5:\n n, num_frame, height, width, num_channels = input_dim.get_shape().as_list()\n input_dim = tf.reshape(input_dim, [n * num_frame, height, width, num_channels])\n dims_uv = len(uv.get_shape())\n if dims_uv == 5:\n n_uv, num_frame_uv, height_uv, width_uv, num_channels_uv = uv.get_shape().as_list()\n uv = tf.reshape(uv, [n_uv * num_frame_uv, height_uv, width_uv, num_channels_uv])\n\n num_batch, height, width, num_channels = map(lambda x: x.value, input_dim.get_shape())\n uv = tf.cast(uv, 'float32')\n\n # grid of (x_t, y_t, 1), eq (1) in ref [1]\n out_height = out_size[0]\n out_width = out_size[1]\n grid = meshgrid(height, width)\n grid = tf.expand_dims(grid, 0)\n grid = tf.tile(grid, tf.stack([num_batch, 1, 1, 1]))\n\n T_g = grid + uv\n\n x_s = T_g[:, :, :, 0]\n y_s = T_g[:, :, :, 1]\n x_s_flat = tf.reshape(x_s, [-1])\n y_s_flat = tf.reshape(y_s, [-1])\n\n input_transformed = _interpolate_backward(input_dim, x_s_flat, y_s_flat, out_size)\n # output: (n * h * w * c), output_w: (n * h * w * 1)\n input_transformed = tf.clip_by_value(input_transformed, 0.0, 1.0)\n output = tf.reshape(\n input_transformed, tf.stack([num_batch, out_height, out_width, num_channels]))\n if dims == 5:\n output = tf.reshape(output, [n, num_frame, height, width, num_channels])\n return output\n\n\ndef imwarp_forward(uv, input_dim, out_size):\n def _interpolate_forward(im, x, y, out_size):\n with tf.variable_scope('_interp_f', reuse=False):\n 
# constants\n num_batch, height, width, channels = map(lambda x: x.value, im.get_shape())\n out_height = out_size[0]\n out_width = out_size[1]\n\n x = tf.cast(x, 'float32') * (out_height / height)\n y = tf.cast(y, 'float32') * (out_width / width)\n zero = tf.zeros([], dtype='int32')\n max_y = tf.cast(out_height - 1, 'int32')\n max_x = tf.cast(out_width - 1, 'int32')\n\n # do sampling\n x0 = tf.cast(tf.floor(x), 'int32')\n x1 = x0 + 1\n y0 = tf.cast(tf.floor(y), 'int32')\n y1 = y0 + 1\n x0_f = tf.cast(x0, 'float32')\n x1_f = tf.cast(x1, 'float32')\n y0_f = tf.cast(y0, 'float32')\n y1_f = tf.cast(y1, 'float32')\n\n x0 = tf.clip_by_value(x0, zero, max_x)\n x1 = tf.clip_by_value(x1, zero, max_x)\n y0 = tf.clip_by_value(y0, zero, max_y)\n y1 = tf.clip_by_value(y1, zero, max_y)\n dim2 = out_width\n dim1 = out_width * out_height\n\n base = _repeat(tf.range(num_batch) * dim1, height * width)\n base_y0 = base + y0 * dim2\n base_y1 = base + y1 * dim2\n idx_a = base_y0 + x0\n idx_b = base_y1 + x0\n idx_c = base_y0 + x1\n idx_d = base_y1 + x1\n\n # use indices to lookup pixels in the flat image and restore\n # channels dim\n im_flat = tf.reshape(im, tf.stack([-1, channels]))\n im_flat = tf.cast(im_flat, 'float32')\n\n # and finally calculate interpolated values\n wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1)\n wb = tf.expand_dims(((x1_f - x) * (y - y0_f)), 1)\n wc = tf.expand_dims(((x - x0_f) * (y1_f - y)), 1)\n wd = tf.expand_dims(((x - x0_f) * (y - y0_f)), 1)\n\n # try:\n # tf.get_variable_scope()._reuse = False\n # warp_img = tf.get_variable('warp_img', [num_batch * out_height * out_width, channels],\n # initializer=tf.constant_initializer(0.0), trainable=False)\n # tf.get_variable_scope()._reuse = True\n # except ValueError:\n # tf.get_variable_scope().reuse_variables()\n # warp_img = tf.get_variable('warp_img', [num_batch * out_height * out_width, channels],\n # initializer=tf.constant_initializer(0.0), trainable=False)\n # init0 = tf.group(\n # tf.assign(warp_img, tf.zeros([num_batch * out_height * out_width, channels], dtype=tf.float32)))\n # with tf.control_dependencies([init0]):\n # warp_img = tf.scatter_add(warp_img, idx_a, wa * im_flat, name='interp_sa1')\n # warp_img = tf.scatter_add(warp_img, idx_b, wb * im_flat, name='interp_sa2')\n # warp_img = tf.scatter_add(warp_img, idx_c, wc * im_flat, name='interp_sa3')\n # warp_img = tf.scatter_add(warp_img, idx_d, wd * im_flat, name='interp_sa4')\n\n num_segments = num_batch * out_height * out_width\n with tf.device('/cpu:0'):\n warp_img_a = tf.unsorted_segment_sum(data=wa * im_flat, segment_ids=idx_a, num_segments=num_segments)\n warp_img_b = tf.unsorted_segment_sum(data=wb * im_flat, segment_ids=idx_b, num_segments=num_segments)\n warp_img_c = tf.unsorted_segment_sum(data=wc * im_flat, segment_ids=idx_c, num_segments=num_segments)\n warp_img_d = tf.unsorted_segment_sum(data=wd * im_flat, segment_ids=idx_d, num_segments=num_segments)\n warp_img = warp_img_a + warp_img_b + warp_img_c + warp_img_d\n return warp_img\n\n with tf.variable_scope('imwarp_f'):\n dims = len(input_dim.get_shape())\n if dims == 5:\n n, num_frame, height, width, num_channels = map(lambda x: x.value, input_dim.get_shape())\n input_dim = tf.reshape(input_dim, [n * num_frame, height, width, num_channels])\n\n num_batch, height, width, num_channels = map(lambda x: x.value, input_dim.get_shape())\n uv = tf.cast(uv, 'float32')\n\n # grid of (x_t, y_t, 1), eq (1) in ref [1]\n out_height = out_size[0]\n out_width = out_size[1]\n grid = meshgrid(height, width)\n grid = 
tf.expand_dims(grid, 0)\n grid = tf.tile(grid, tf.stack([num_batch, 1, 1, 1]))\n\n T_g = grid + uv\n\n x_s = T_g[:, :, :, 0]\n y_s = T_g[:, :, :, 1]\n x_s_flat = tf.reshape(x_s, [-1])\n y_s_flat = tf.reshape(y_s, [-1])\n\n input_transformed = _interpolate_forward(input_dim, x_s_flat, y_s_flat, out_size)\n # output: n * h * w * c\n output = tf.reshape(input_transformed, tf.stack([num_batch, out_height, out_width, num_channels]))\n if dims == 5:\n output = tf.reshape(output, [n, num_frame, out_height, out_width, num_channels])\n return output\n\nif __name__ == '__main__':\n import os\n os.environ['CUDA_VISIBLE_DEVICES'] = '1'\n uv = -tf.ones([2, 100, 100, 2], tf.float32) * 0.125\n sess = tf.Session()\n uv_val = sess.run(flowToColor(uv, 0.1))\n \n import scipy.misc\n scipy.misc.imshow(uv_val[0, :, :, :])"
] | [
[
"tensorflow.device",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.unsorted_segment_sum",
"tensorflow.where",
"tensorflow.add_n",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"numpy.clip",
"tensorflow.greater",
"numpy.arange",
"tensorflow.floor",
"tensorflow.gather",
"tensorflow.name_scope",
"tensorflow.Session",
"numpy.zeros",
"tensorflow.tile",
"tensorflow.atan",
"tensorflow.shape",
"tensorflow.less",
"tensorflow.image.resize_images",
"tensorflow.zeros_like",
"tensorflow.clip_by_value",
"tensorflow.nn.relu",
"tensorflow.reduce_max",
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.ones",
"tensorflow.constant_initializer",
"tensorflow.variable_scope",
"tensorflow.sqrt",
"tensorflow.greater_equal",
"tensorflow.reduce_all",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
rodrigcd/NB3_robot | [
"d68e8de4a169c7bbaa949ebd09307ff0557fa61d"
] | [
"neural_network_lecture/demo_model.py"
] | [
"import numpy as np\nimport demo_dl as dl\nimport data_funcs as df\n\nbatch_size = 20\nnum_epochs = 500\nsamples_per_class = 100\nnum_classes = 4\nhidden_units = 100\n\ndata, target = df.gen_spiral_data(samples_per_class, num_classes, 0.2, 'double')\nraw_data = df.plot_scatter(data, target)\nraw_data.figure.savefig(\"demo_raw.png\")\n\nmodel = dl.Model()\nmodel.add(dl.Linear(2, hidden_units))\nmodel.add(dl.ReLU())\nmodel.add(dl.Linear(hidden_units, num_classes))\noptimiser = dl.SGD(model.parameters, lr=1, weight_decay=0.001, momentum=0.9)\nloss = dl.sigmoid()\nmodel.fit(data, target, batch_size, num_epochs, optimiser, loss, df.data_generator)\npre_arg = model.predict(data)\n\npred_labels = np.argmax(pre_arg, axis=1)\ngood_labels = pred_labels == target\naccuracy = np.mean(good_labels)\nprint(\"model Accuracy = {:.2f}%\".format(accuracy*100))\nclassified_data = df.plot_decision(data, target, model)\nclassified_data.figure.savefig(\"demo_classified.png\")\n"
] | [
[
"numpy.argmax",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
de-code/tensorflow | [
"988c3e6d21f1791b9485d0ec448dfbcecad0754d"
] | [
"tensorflow/python/data/ops/dataset_ops.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Python wrappers for Datasets.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport functools\nimport sys\nimport threading\nimport warnings\nimport weakref\n\nimport numpy as np\nimport six\nfrom six.moves import queue as Queue # pylint: disable=redefined-builtin\n\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.python import tf2\nfrom tensorflow.python.data.experimental.ops import distribute_options\nfrom tensorflow.python.data.experimental.ops import optimization_options\nfrom tensorflow.python.data.experimental.ops import stats_options\nfrom tensorflow.python.data.experimental.ops import threading_options\nfrom tensorflow.python.data.ops import iterator_ops\nfrom tensorflow.python.data.util import convert\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.data.util import options as options_lib\nfrom tensorflow.python.data.util import random_seed\nfrom tensorflow.python.data.util import structure\nfrom tensorflow.python.data.util import traverse\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import function as eager_function\nfrom tensorflow.python.framework import auto_control_deps\nfrom tensorflow.python.framework import auto_control_deps_utils as acd_utils\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed as core_random_seed\nfrom tensorflow.python.framework import smart_cond\nfrom tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.framework import type_spec\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_dataset_ops\nfrom tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops\nfrom tensorflow.python.ops import gen_io_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import script_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.training.tracking import base as tracking_base\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import function_utils\nfrom tensorflow.python.util import lazy_loader\nfrom tensorflow.python.util import nest as tf_nest\nfrom 
tensorflow.python.util.compat import collections_abc\nfrom tensorflow.python.util.tf_export import tf_export\n\n# Loaded lazily due to a circular dependency (roughly\n# tf.function->wrap_function->dataset->autograph->tf.function).\n# TODO(b/133251390): Use a regular import.\nwrap_function = lazy_loader.LazyLoader(\n \"wrap_function\", globals(),\n \"tensorflow.python.eager.wrap_function\")\n# TODO(mdan): Create a public API for this.\nautograph_ctx = lazy_loader.LazyLoader(\n \"autograph_ctx\", globals(),\n \"tensorflow.python.autograph.core.ag_ctx\")\nautograph = lazy_loader.LazyLoader(\n \"autograph\", globals(),\n \"tensorflow.python.autograph.impl.api\")\n\nops.NotDifferentiable(\"ReduceDataset\")\n\n# A constant that can be used to enable auto-tuning.\nAUTOTUNE = -1\ntf_export(\"data.AUTOTUNE\").export_constant(__name__, \"AUTOTUNE\")\n# TODO(b/168128531): Deprecate and remove this symbol.\ntf_export(\"data.experimental.AUTOTUNE\").export_constant(__name__, \"AUTOTUNE\")\n\n# Constants representing infinite and unknown cardinalities.\nINFINITE = -1\nUNKNOWN = -2\ntf_export(\"data.INFINITE_CARDINALITY\").export_constant(__name__, \"INFINITE\")\ntf_export(\"data.UNKNOWN_CARDINALITY\").export_constant(__name__, \"UNKNOWN\")\n\n\n@tf_export(\"data.Dataset\", v1=[])\[email protected]_metaclass(abc.ABCMeta)\nclass DatasetV2(collections_abc.Iterable, tracking_base.Trackable,\n composite_tensor.CompositeTensor):\n \"\"\"Represents a potentially large set of elements.\n\n The `tf.data.Dataset` API supports writing descriptive and efficient input\n pipelines. `Dataset` usage follows a common pattern:\n\n 1. Create a source dataset from your input data.\n 2. Apply dataset transformations to preprocess the data.\n 3. Iterate over the dataset and process the elements.\n\n Iteration happens in a streaming fashion, so the full dataset does not need to\n fit into memory.\n\n Source Datasets:\n\n The simplest way to create a dataset is to create it from a python `list`:\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> for element in dataset:\n ... print(element)\n tf.Tensor(1, shape=(), dtype=int32)\n tf.Tensor(2, shape=(), dtype=int32)\n tf.Tensor(3, shape=(), dtype=int32)\n\n To process lines from files, use `tf.data.TextLineDataset`:\n\n >>> dataset = tf.data.TextLineDataset([\"file1.txt\", \"file2.txt\"])\n\n To process records written in the `TFRecord` format, use `TFRecordDataset`:\n\n >>> dataset = tf.data.TFRecordDataset([\"file1.tfrecords\", \"file2.tfrecords\"])\n\n To create a dataset of all files matching a pattern, use\n `tf.data.Dataset.list_files`:\n\n >>> dataset = tf.data.Dataset.list_files(\"/path/*.txt\") # doctest: +SKIP\n\n See `tf.data.FixedLengthRecordDataset` and `tf.data.Dataset.from_generator`\n for more ways to create datasets.\n\n Transformations:\n\n Once you have a dataset, you can apply transformations to prepare the data for\n your model:\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> dataset = dataset.map(lambda x: x*2)\n >>> list(dataset.as_numpy_iterator())\n [2, 4, 6]\n\n Common Terms:\n\n **Element**: A single output from calling `next()` on a dataset iterator.\n Elements may be nested structures containing multiple components. For\n example, the element `(1, (3, \"apple\"))` has one tuple nested in another\n tuple. 
The components are `1`, `3`, and `\"apple\"`.\n\n **Component**: The leaf in the nested structure of an element.\n\n Supported types:\n\n Elements can be nested structures of tuples, named tuples, and dictionaries.\n Note that Python lists are *not* treated as nested structures of components.\n Instead, lists are converted to tensors and treated as components. For\n example, the element `(1, [1, 2, 3])` has only two components; the tensor `1`\n and the tensor `[1, 2, 3]`. Element components can be of any type\n representable by `tf.TypeSpec`, including `tf.Tensor`, `tf.data.Dataset`,\n `tf.sparse.SparseTensor`, `tf.RaggedTensor`, and `tf.TensorArray`.\n\n >>> a = 1 # Integer element\n >>> b = 2.0 # Float element\n >>> c = (1, 2) # Tuple element with 2 components\n >>> d = {\"a\": (2, 2), \"b\": 3} # Dict element with 3 components\n >>> Point = collections.namedtuple(\"Point\", [\"x\", \"y\"]) # doctest: +SKIP\n >>> e = Point(1, 2) # Named tuple # doctest: +SKIP\n >>> f = tf.data.Dataset.range(10) # Dataset element\n\n \"\"\"\n\n def __init__(self, variant_tensor):\n \"\"\"Creates a DatasetV2 object.\n\n This is a difference between DatasetV1 and DatasetV2. DatasetV1 does not\n take anything in its constructor whereas in the DatasetV2, we expect\n subclasses to create a variant_tensor and pass it in to the super() call.\n\n Args:\n variant_tensor: A DT_VARIANT tensor that represents the dataset.\n \"\"\"\n self._variant_tensor_attr = variant_tensor\n weak_self = weakref.proxy(self)\n self._variant_tracker = self._track_trackable(\n _VariantTracker(\n self._variant_tensor,\n # _trace_variant_creation only works when executing eagerly, so we\n # don't want to run it immediately. We also want the _VariantTracker\n # to have a weak reference to the Dataset to avoid creating\n # reference cycles and making work for the garbage collector.\n lambda: weak_self._trace_variant_creation()()), # pylint: disable=unnecessary-lambda,protected-access\n name=\"_variant_tracker\")\n self._graph_attr = ops.get_default_graph()\n\n # Initialize the options for this dataset and its inputs.\n self._options_attr = Options()\n for input_dataset in self._inputs():\n input_options = input_dataset.options()\n if input_options is not None:\n self._options_attr = self._options_attr.merge(input_options)\n\n @property\n def _variant_tensor(self):\n return self._variant_tensor_attr\n\n @_variant_tensor.setter\n def _variant_tensor(self, _):\n raise ValueError(\"The _variant_tensor property is read-only\")\n\n @deprecation.deprecated_args(None, \"Use external_state_policy instead\",\n \"allow_stateful\")\n def _as_serialized_graph(\n self,\n allow_stateful=None,\n strip_device_assignment=None,\n external_state_policy=distribute_options.ExternalStatePolicy.WARN):\n \"\"\"Produces serialized graph representation of the dataset.\n\n Args:\n allow_stateful: If true, we allow stateful ops to be present in the graph\n def. In that case, the state in these ops would be thrown away.\n strip_device_assignment: If true, non-local (i.e. job and task) device\n assignment is stripped from ops in the serialized graph.\n external_state_policy: The ExternalStatePolicy enum that determines how we\n handle input pipelines that depend on external state. 
By default, its\n set to WARN.\n\n Returns:\n A scalar `tf.Tensor` of `tf.string` type, representing this dataset as a\n serialized graph.\n \"\"\"\n if external_state_policy:\n policy = external_state_policy.value\n return gen_dataset_ops.dataset_to_graph_v2(\n self._variant_tensor,\n external_state_policy=policy,\n strip_device_assignment=strip_device_assignment)\n if strip_device_assignment:\n return gen_dataset_ops.dataset_to_graph(\n self._variant_tensor,\n allow_stateful=allow_stateful,\n strip_device_assignment=strip_device_assignment)\n return gen_dataset_ops.dataset_to_graph(\n self._variant_tensor, allow_stateful=allow_stateful)\n\n def _trace_variant_creation(self):\n \"\"\"Traces a function which outputs a variant `tf.Tensor` for this dataset.\n\n Note that creating this function involves evaluating an op, and is currently\n only supported when executing eagerly.\n\n Returns:\n A zero-argument `ConcreteFunction` which outputs a variant `tf.Tensor`.\n \"\"\"\n variant = self._variant_tensor\n if not isinstance(variant, ops.EagerTensor):\n raise NotImplementedError(\n \"Can only export Datasets which were created executing eagerly. \"\n \"Please file a feature request if this is important to you.\")\n with context.eager_mode(), ops.device(\"CPU\"):\n # pylint: disable=protected-access\n graph_def = graph_pb2.GraphDef().FromString(\n self._as_serialized_graph(external_state_policy=distribute_options\n .ExternalStatePolicy.FAIL).numpy())\n output_node_name = None\n for node in graph_def.node:\n if node.op == \"_Retval\":\n if output_node_name is not None:\n raise AssertionError(\n \"Found multiple return values from the dataset's graph, expected \"\n \"only one.\")\n output_node_name, = node.input\n if output_node_name is None:\n raise AssertionError(\"Could not find the dataset's output node.\")\n # Add functions used in this Dataset to the function's graph, since they\n # need to follow it around (and for example be added to a SavedModel which\n # references the dataset).\n variant_function = wrap_function.function_from_graph_def(\n graph_def, inputs=[], outputs=output_node_name + \":0\")\n for used_function in self._functions():\n used_function.function.add_to_graph(variant_function.graph)\n return variant_function\n\n @abc.abstractmethod\n def _inputs(self):\n \"\"\"Returns a list of the input datasets of the dataset.\"\"\"\n\n raise NotImplementedError(\"Dataset._inputs\")\n\n @property\n def _graph(self):\n return self._graph_attr\n\n @_graph.setter\n def _graph(self, _):\n raise ValueError(\"The _graph property is read-only\")\n\n def _has_captured_ref(self):\n \"\"\"Whether this dataset uses a function that captures ref variables.\n\n Returns:\n A boolean, which if true indicates that the dataset or one of its inputs\n uses a function that captures ref variables.\n \"\"\"\n if context.executing_eagerly():\n # RefVariables are not supported in eager mode\n return False\n\n def is_tensor_or_parent_ref(tensor):\n if tensor.dtype._is_ref_dtype: # pylint: disable=protected-access\n return True\n # If the captured tensor is an eager tensor, we cannot trace its inputs.\n if isinstance(tensor, ops._EagerTensorBase): # pylint: disable=protected-access\n return False\n return any(is_tensor_or_parent_ref(x) for x in tensor.op.inputs)\n\n for fn in self._functions():\n if any(is_tensor_or_parent_ref(t) for t in fn.function.captured_inputs):\n return True\n\n return any(\n [input_dataset._has_captured_ref() for input_dataset in self._inputs()]) # pylint: disable=protected-access\n\n # 
TODO(jsimsa): Change this to be the transitive closure of functions used\n # by this dataset and its inputs.\n def _functions(self):\n \"\"\"Returns a list of functions associated with this dataset.\n\n Returns:\n A list of `StructuredFunctionWrapper` objects.\n \"\"\"\n return []\n\n def options(self):\n \"\"\"Returns the options for this dataset and its inputs.\n\n Returns:\n A `tf.data.Options` object representing the dataset options.\n \"\"\"\n return self._options_attr\n\n def _apply_options(self):\n \"\"\"Apply options, such as optimization configuration, to the dataset.\"\"\"\n\n dataset = self\n options = self.options()\n\n # (1) Apply threading options\n if options.experimental_threading is not None:\n t_options = options.experimental_threading\n if t_options.max_intra_op_parallelism is not None:\n dataset = _MaxIntraOpParallelismDataset(\n dataset, t_options.max_intra_op_parallelism)\n if t_options.private_threadpool_size is not None:\n dataset = _PrivateThreadPoolDataset(dataset,\n t_options.private_threadpool_size)\n\n # (2) Apply autotune options\n autotune, algorithm, cpu_budget, ram_budget = options._autotune_settings() # pylint: disable=protected-access\n if autotune:\n dataset = _ModelDataset(dataset, algorithm, cpu_budget, ram_budget)\n\n # (3) Apply graph rewrite options\n # pylint: disable=protected-access\n graph_rewrites = options._graph_rewrites()\n graph_rewrite_configs = options._graph_rewrite_configs(autotune)\n # pylint: enable=protected-access\n if self._has_captured_ref():\n if graph_rewrites.enabled or graph_rewrites.default:\n warnings.warn(\n \"tf.data graph rewrites are not compatible with tf.Variable. \"\n \"The following rewrites will be disabled: %s. To enable \"\n \"rewrites, use resource variables instead by calling \"\n \"`tf.enable_resource_variables()` at the start of the program.\" %\n \", \".join(graph_rewrites.enabled + graph_rewrites.default))\n elif (graph_rewrites.enabled or graph_rewrites.default or\n (options.experimental_optimization.apply_default_optimizations # pylint: disable=g-bool-id-comparison\n is not False)):\n dataset = _OptimizeDataset(dataset, graph_rewrites.enabled,\n graph_rewrites.disabled,\n graph_rewrites.default, graph_rewrite_configs)\n\n # (4) Apply stats aggregator options\n if options.experimental_stats and options.experimental_stats.aggregator: # pylint: disable=line-too-long\n dataset = _SetStatsAggregatorDataset( # pylint: disable=protected-access\n dataset, options.experimental_stats.aggregator,\n options.experimental_stats.prefix,\n options.experimental_stats.counter_prefix)\n return dataset\n\n def __iter__(self):\n \"\"\"Creates an iterator for elements of this dataset.\n\n The returned iterator implements the Python Iterator protocol.\n\n Returns:\n An `tf.data.Iterator` for the elements of this dataset.\n\n Raises:\n RuntimeError: If not inside of tf.function and not executing eagerly.\n \"\"\"\n if context.executing_eagerly() or ops.inside_function():\n with ops.colocate_with(self._variant_tensor):\n return iterator_ops.OwnedIterator(self)\n else:\n raise RuntimeError(\"__iter__() is only supported inside of tf.function \"\n \"or when eager execution is enabled.\")\n\n def __bool__(self):\n return True # Required as __len__ is defined\n\n __nonzero__ = __bool__ # Python 2 backward compatibility\n\n def __len__(self):\n \"\"\"Returns the length of the dataset if it is known and finite.\n\n This method requires that you are running in eager mode, and that the\n length of the dataset is known and non-infinite. 
When the length may be\n unknown or infinite, or if you are running in graph mode, use\n `tf.data.Dataset.cardinality` instead.\n\n Returns:\n An integer representing the length of the dataset.\n\n Raises:\n RuntimeError: If the dataset length is unknown or infinite, or if eager\n execution is not enabled.\n \"\"\"\n if not context.executing_eagerly():\n raise TypeError(\"__len__() is not supported while tracing functions. \"\n \"Use `tf.data.Dataset.cardinality` instead.\")\n length = self.cardinality()\n if length.numpy() == INFINITE:\n raise TypeError(\"dataset length is infinite.\")\n if length.numpy() == UNKNOWN:\n raise TypeError(\"dataset length is unknown.\")\n return length\n\n @abc.abstractproperty\n def element_spec(self):\n \"\"\"The type specification of an element of this dataset.\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> dataset.element_spec\n TensorSpec(shape=(), dtype=tf.int32, name=None)\n\n Returns:\n A nested structure of `tf.TypeSpec` objects matching the structure of an\n element of this dataset and specifying the type of individual components.\n \"\"\"\n raise NotImplementedError(\"Dataset.element_spec\")\n\n def __repr__(self):\n output_shapes = nest.map_structure(str, get_legacy_output_shapes(self))\n output_shapes = str(output_shapes).replace(\"'\", \"\")\n output_types = nest.map_structure(repr, get_legacy_output_types(self))\n output_types = str(output_types).replace(\"'\", \"\")\n return (\"<%s shapes: %s, types: %s>\" % (type(self).__name__, output_shapes,\n output_types))\n\n def as_numpy_iterator(self):\n \"\"\"Returns an iterator which converts all elements of the dataset to numpy.\n\n Use `as_numpy_iterator` to inspect the content of your dataset. To see\n element shapes and types, print dataset elements directly instead of using\n `as_numpy_iterator`.\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> for element in dataset:\n ... print(element)\n tf.Tensor(1, shape=(), dtype=int32)\n tf.Tensor(2, shape=(), dtype=int32)\n tf.Tensor(3, shape=(), dtype=int32)\n\n This method requires that you are running in eager mode and the dataset's\n element_spec contains only `TensorSpec` components.\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> for element in dataset.as_numpy_iterator():\n ... print(element)\n 1\n 2\n 3\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> print(list(dataset.as_numpy_iterator()))\n [1, 2, 3]\n\n `as_numpy_iterator()` will preserve the nested structure of dataset\n elements.\n\n >>> dataset = tf.data.Dataset.from_tensor_slices({'a': ([1, 2], [3, 4]),\n ... 'b': [5, 6]})\n >>> list(dataset.as_numpy_iterator()) == [{'a': (1, 3), 'b': 5},\n ... 
{'a': (2, 4), 'b': 6}]\n True\n\n Returns:\n An iterable over the elements of the dataset, with their tensors converted\n to numpy arrays.\n\n Raises:\n TypeError: if an element contains a non-`Tensor` value.\n RuntimeError: if eager execution is not enabled.\n \"\"\"\n if not context.executing_eagerly():\n raise RuntimeError(\"as_numpy_iterator() is not supported while tracing \"\n \"functions\")\n for component_spec in nest.flatten(self.element_spec):\n if not isinstance(\n component_spec,\n (tensor_spec.TensorSpec, ragged_tensor.RaggedTensorSpec)):\n raise TypeError(\n \"Dataset.as_numpy_iterator() does not support datasets containing \"\n + str(component_spec.value_type))\n\n return _NumpyIterator(self)\n\n @property\n def _flat_shapes(self):\n \"\"\"Returns a list `tf.TensorShapes`s for the element tensor representation.\n\n Returns:\n A list `tf.TensorShapes`s for the element tensor representation.\n \"\"\"\n return structure.get_flat_tensor_shapes(self.element_spec)\n\n @property\n def _flat_types(self):\n \"\"\"Returns a list `tf.DType`s for the element tensor representation.\n\n Returns:\n A list `tf.DType`s for the element tensor representation.\n \"\"\"\n return structure.get_flat_tensor_types(self.element_spec)\n\n @property\n def _flat_structure(self):\n \"\"\"Helper for setting `output_shapes` and `output_types` attrs of an op.\n\n Most dataset op constructors expect `output_shapes` and `output_types`\n arguments that represent the flattened structure of an element. This helper\n function generates these attrs as a keyword argument dictionary, allowing\n `Dataset._variant_tensor` implementations to pass `**self._flat_structure`\n to the op constructor.\n\n Returns:\n A dictionary of keyword arguments that can be passed to a dataset op\n constructor.\n \"\"\"\n return {\n \"output_shapes\": self._flat_shapes,\n \"output_types\": self._flat_types,\n }\n\n @property\n def _type_spec(self):\n return DatasetSpec(self.element_spec)\n\n @staticmethod\n def from_tensors(tensors):\n \"\"\"Creates a `Dataset` with a single element, comprising the given tensors.\n\n `from_tensors` produces a dataset containing only a single element. To slice\n the input tensor into multiple elements, use `from_tensor_slices` instead.\n\n >>> dataset = tf.data.Dataset.from_tensors([1, 2, 3])\n >>> list(dataset.as_numpy_iterator())\n [array([1, 2, 3], dtype=int32)]\n >>> dataset = tf.data.Dataset.from_tensors(([1, 2, 3], 'A'))\n >>> list(dataset.as_numpy_iterator())\n [(array([1, 2, 3], dtype=int32), b'A')]\n\n >>> # You can use `from_tensors` to produce a dataset which repeats\n >>> # the same example many times.\n >>> example = tf.constant([1,2,3])\n >>> dataset = tf.data.Dataset.from_tensors(example).repeat(2)\n >>> list(dataset.as_numpy_iterator())\n [array([1, 2, 3], dtype=int32), array([1, 2, 3], dtype=int32)]\n\n Note that if `tensors` contains a NumPy array, and eager execution is not\n enabled, the values will be embedded in the graph as one or more\n `tf.constant` operations. For large datasets (> 1 GB), this can waste\n memory and run into byte limits of graph serialization. 
If `tensors`\n contains one or more large NumPy arrays, consider the alternative described\n in [this\n guide](https://tensorflow.org/guide/data#consuming_numpy_arrays).\n\n Args:\n tensors: A dataset element.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return TensorDataset(tensors)\n\n @staticmethod\n def from_tensor_slices(tensors):\n \"\"\"Creates a `Dataset` whose elements are slices of the given tensors.\n\n The given tensors are sliced along their first dimension. This operation\n preserves the structure of the input tensors, removing the first dimension\n of each tensor and using it as the dataset dimension. All input tensors\n must have the same size in their first dimensions.\n\n >>> # Slicing a 1D tensor produces scalar tensor elements.\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> list(dataset.as_numpy_iterator())\n [1, 2, 3]\n\n >>> # Slicing a 2D tensor produces 1D tensor elements.\n >>> dataset = tf.data.Dataset.from_tensor_slices([[1, 2], [3, 4]])\n >>> list(dataset.as_numpy_iterator())\n [array([1, 2], dtype=int32), array([3, 4], dtype=int32)]\n\n >>> # Slicing a tuple of 1D tensors produces tuple elements containing\n >>> # scalar tensors.\n >>> dataset = tf.data.Dataset.from_tensor_slices(([1, 2], [3, 4], [5, 6]))\n >>> list(dataset.as_numpy_iterator())\n [(1, 3, 5), (2, 4, 6)]\n\n >>> # Dictionary structure is also preserved.\n >>> dataset = tf.data.Dataset.from_tensor_slices({\"a\": [1, 2], \"b\": [3, 4]})\n >>> list(dataset.as_numpy_iterator()) == [{'a': 1, 'b': 3},\n ... {'a': 2, 'b': 4}]\n True\n\n >>> # Two tensors can be combined into one Dataset object.\n >>> features = tf.constant([[1, 3], [2, 1], [3, 3]]) # ==> 3x2 tensor\n >>> labels = tf.constant(['A', 'B', 'A']) # ==> 3x1 tensor\n >>> dataset = Dataset.from_tensor_slices((features, labels))\n >>> # Both the features and the labels tensors can be converted\n >>> # to a Dataset object separately and combined after.\n >>> features_dataset = Dataset.from_tensor_slices(features)\n >>> labels_dataset = Dataset.from_tensor_slices(labels)\n >>> dataset = Dataset.zip((features_dataset, labels_dataset))\n >>> # A batched feature and label set can be converted to a Dataset\n >>> # in similar fashion.\n >>> batched_features = tf.constant([[[1, 3], [2, 3]],\n ... [[2, 1], [1, 2]],\n ... [[3, 3], [3, 2]]], shape=(3, 2, 2))\n >>> batched_labels = tf.constant([['A', 'A'],\n ... ['B', 'B'],\n ... ['A', 'B']], shape=(3, 2, 1))\n >>> dataset = Dataset.from_tensor_slices((batched_features, batched_labels))\n >>> for element in dataset.as_numpy_iterator():\n ... print(element)\n (array([[1, 3],\n [2, 3]], dtype=int32), array([[b'A'],\n [b'A']], dtype=object))\n (array([[2, 1],\n [1, 2]], dtype=int32), array([[b'B'],\n [b'B']], dtype=object))\n (array([[3, 3],\n [3, 2]], dtype=int32), array([[b'A'],\n [b'B']], dtype=object))\n\n Note that if `tensors` contains a NumPy array, and eager execution is not\n enabled, the values will be embedded in the graph as one or more\n `tf.constant` operations. For large datasets (> 1 GB), this can waste\n memory and run into byte limits of graph serialization. 
If `tensors`\n contains one or more large NumPy arrays, consider the alternative described\n in [this guide](\n https://tensorflow.org/guide/data#consuming_numpy_arrays).\n\n Args:\n tensors: A dataset element, with each component having the same size in\n the first dimension.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return TensorSliceDataset(tensors)\n\n class _GeneratorState(object):\n \"\"\"Stores outstanding iterators created from a Python generator.\n\n This class keeps track of potentially multiple iterators that may have\n been created from a generator, e.g. in the case that the dataset is\n repeated, or nested within a parallel computation.\n \"\"\"\n\n def __init__(self, generator):\n self._generator = generator\n self._lock = threading.Lock()\n self._next_id = 0 # GUARDED_BY(self._lock)\n self._args = {}\n self._iterators = {}\n\n def get_next_id(self, *args):\n with self._lock:\n ret = self._next_id\n self._next_id += 1\n self._args[ret] = args\n # NOTE(mrry): Explicitly create an array of `np.int64` because implicit\n # casting in `py_func()` will create an array of `np.int32` on Windows,\n # leading to a runtime error.\n return np.array(ret, dtype=np.int64)\n\n def get_iterator(self, iterator_id):\n try:\n return self._iterators[iterator_id]\n except KeyError:\n iterator = iter(self._generator(*self._args.pop(iterator_id)))\n self._iterators[iterator_id] = iterator\n return iterator\n\n def iterator_completed(self, iterator_id):\n del self._iterators[iterator_id]\n\n @staticmethod\n @deprecation.deprecated_args(None, \"Use output_signature instead\",\n \"output_types\", \"output_shapes\")\n def from_generator(generator,\n output_types=None,\n output_shapes=None,\n args=None,\n output_signature=None):\n \"\"\"Creates a `Dataset` whose elements are generated by `generator`.\n\n The `generator` argument must be a callable object that returns\n an object that supports the `iter()` protocol (e.g. a generator function).\n\n The elements generated by `generator` must be compatible with either the\n given `output_signature` argument or with the given `output_types` and\n (optionally) `output_shapes` arguments, whichever was specified.\n\n The recommended way to call `from_generator` is to use the\n `output_signature` argument. In this case the output will be assumed to\n consist of objects with the classes, shapes and types defined by\n `tf.TypeSpec` objects from `output_signature` argument:\n\n >>> def gen():\n ... ragged_tensor = tf.ragged.constant([[1, 2], [3]])\n ... yield 42, ragged_tensor\n >>>\n >>> dataset = tf.data.Dataset.from_generator(\n ... gen,\n ... output_signature=(\n ... tf.TensorSpec(shape=(), dtype=tf.int32),\n ... tf.RaggedTensorSpec(shape=(2, None), dtype=tf.int32)))\n >>>\n >>> list(dataset.take(1))\n [(<tf.Tensor: shape=(), dtype=int32, numpy=42>,\n <tf.RaggedTensor [[1, 2], [3]]>)]\n\n There is also a deprecated way to call `from_generator` by either with\n `output_types` argument alone or together with `output_shapes` argument.\n In this case the output of the function will be assumed to consist of\n `tf.Tensor` objects with the types defined by `output_types` and with the\n shapes which are either unknown or defined by `output_shapes`.\n\n Note: The current implementation of `Dataset.from_generator()` uses\n `tf.numpy_function` and inherits the same constraints. In particular, it\n requires the dataset and iterator related operations to be placed\n on a device in the same process as the Python program that called\n `Dataset.from_generator()`. 
The body of `generator` will not be\n serialized in a `GraphDef`, and you should not use this method if you\n need to serialize your model and restore it in a different environment.\n\n Note: If `generator` depends on mutable global variables or other external\n state, be aware that the runtime may invoke `generator` multiple times\n (in order to support repeating the `Dataset`) and at any time\n between the call to `Dataset.from_generator()` and the production of the\n first element from the generator. Mutating global variables or external\n state can cause undefined behavior, and we recommend that you explicitly\n cache any external state in `generator` before calling\n `Dataset.from_generator()`.\n\n Args:\n generator: A callable object that returns an object that supports the\n `iter()` protocol. If `args` is not specified, `generator` must take no\n arguments; otherwise it must take as many arguments as there are values\n in `args`.\n output_types: (Optional.) A nested structure of `tf.DType` objects\n corresponding to each component of an element yielded by `generator`.\n output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects\n corresponding to each component of an element yielded by `generator`.\n args: (Optional.) A tuple of `tf.Tensor` objects that will be evaluated\n and passed to `generator` as NumPy-array arguments.\n output_signature: (Optional.) A nested structure of `tf.TypeSpec` objects\n corresponding to each component of an element yielded by `generator`.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n if not callable(generator):\n raise TypeError(\"`generator` must be callable.\")\n\n if output_signature is not None:\n if output_types is not None:\n raise TypeError(\"`output_types` can not be used together with \"\n \"`output_signature`\")\n if output_shapes is not None:\n raise TypeError(\"`output_shapes` can not be used together with \"\n \"`output_signature`\")\n if not all(\n isinstance(_, type_spec.TypeSpec)\n for _ in nest.flatten(output_signature)):\n raise TypeError(\"All the elements of `output_signature` must be \"\n \"`tf.TypeSpec` objects.\")\n else:\n if output_types is None:\n raise TypeError(\"Either `output_signature` or `output_types` must \"\n \"be specified\")\n\n if output_signature is None:\n if output_shapes is None:\n output_shapes = nest.map_structure(\n lambda _: tensor_shape.TensorShape(None), output_types)\n else:\n output_shapes = nest.map_structure_up_to(output_types,\n tensor_shape.as_shape,\n output_shapes)\n output_signature = nest.map_structure_up_to(output_types,\n tensor_spec.TensorSpec,\n output_shapes, output_types)\n if all([\n isinstance(x, tensor_spec.TensorSpec)\n for x in nest.flatten(output_signature)\n ]):\n output_types = nest.pack_sequence_as(\n output_signature, [x.dtype for x in nest.flatten(output_signature)])\n output_shapes = nest.pack_sequence_as(\n output_signature, [x.shape for x in nest.flatten(output_signature)])\n\n if args is None:\n args = ()\n else:\n args = tuple(ops.convert_n_to_tensor(args, name=\"args\"))\n\n generator_state = DatasetV2._GeneratorState(generator)\n\n def get_iterator_id_fn(unused_dummy):\n \"\"\"Creates a unique `iterator_id` for each pass over the dataset.\n\n The returned `iterator_id` disambiguates between multiple concurrently\n existing iterators.\n\n Args:\n unused_dummy: Ignored value.\n\n Returns:\n A `tf.int64` tensor whose value uniquely identifies an iterator in\n `generator_state`.\n \"\"\"\n return script_ops.numpy_function(generator_state.get_next_id, args,\n 
dtypes.int64)\n\n def generator_next_fn(iterator_id_t):\n \"\"\"Generates the next element from iterator with ID `iterator_id_t`.\n\n We map this function across an infinite repetition of the\n `iterator_id_t`, and raise `StopIteration` to terminate the iteration.\n\n Args:\n iterator_id_t: A `tf.int64` tensor whose value uniquely identifies the\n iterator in `generator_state` from which to generate an element.\n\n Returns:\n The next element to generate from the iterator.\n \"\"\"\n if output_types and output_shapes:\n flattened_types = [\n dtypes.as_dtype(dt) for dt in nest.flatten(output_types)\n ]\n flattened_shapes = nest.flatten(output_shapes)\n\n def generator_py_func(iterator_id):\n \"\"\"A `py_func` that will be called to invoke the iterator.\"\"\"\n # `next()` raises `StopIteration` when there are no more\n # elements remaining to be generated.\n values = next(generator_state.get_iterator(iterator_id))\n\n # Use the same _convert function from the py_func() implementation to\n # convert the returned values to arrays early, so that we can inspect\n # their values.\n try:\n flattened_values = nest.flatten_up_to(output_types, values)\n except (TypeError, ValueError):\n six.reraise(\n TypeError,\n TypeError(\n \"`generator` yielded an element that did not match the \"\n \"expected structure. The expected structure was %s, but \"\n \"the yielded element was %s.\" % (output_types, values)),\n sys.exc_info()[2])\n ret_arrays = []\n for ret, dtype in zip(flattened_values, flattened_types):\n try:\n ret_arrays.append(\n script_ops.FuncRegistry._convert( # pylint: disable=protected-access\n ret,\n dtype=dtype.as_numpy_dtype))\n except (TypeError, ValueError):\n six.reraise(\n TypeError,\n TypeError(\n \"`generator` yielded an element that could not be \"\n \"converted to the expected type. 
The expected type was \"\n \"%s, but the yielded element was %s.\" %\n (dtype.name, ret)),\n sys.exc_info()[2])\n\n # Additional type and shape checking to ensure that the components of\n # the generated element match the `output_types` and `output_shapes`\n # arguments.\n for (ret_array, expected_dtype,\n expected_shape) in zip(ret_arrays, flattened_types,\n flattened_shapes):\n if ret_array.dtype != expected_dtype.as_numpy_dtype:\n raise TypeError(\n \"`generator` yielded an element of type %s where an element \"\n \"of type %s was expected.\" %\n (ret_array.dtype, expected_dtype.as_numpy_dtype))\n if not expected_shape.is_compatible_with(ret_array.shape):\n raise ValueError(\n \"`generator` yielded an element of shape %s where an element \"\n \"of shape %s was expected.\" %\n (ret_array.shape, expected_shape))\n\n return ret_arrays\n\n flat_values = script_ops.numpy_function(generator_py_func,\n [iterator_id_t],\n flattened_types)\n\n # The `py_func()` op drops the inferred shapes, so we add them back in\n # here.\n if output_shapes is not None:\n for ret_t, shape in zip(flat_values, flattened_shapes):\n ret_t.set_shape(shape)\n\n return nest.pack_sequence_as(output_types, flat_values)\n else:\n flat_output_types = structure.get_flat_tensor_types(output_signature)\n\n def generator_py_func(iterator_id):\n \"\"\"A `py_func` that will be called to invoke the iterator.\"\"\"\n # `next()` raises `StopIteration` when there are no more\n # elements remaining to be generated.\n values = next(generator_state.get_iterator(iterator_id.numpy()))\n\n try:\n values = structure.normalize_element(values, output_signature)\n except (TypeError, ValueError):\n six.reraise(\n TypeError,\n TypeError(\n \"`generator` yielded an element that did not match the \"\n \"expected structure. 
The expected structure was %s, but \"\n \"the yielded element was %s.\" % (output_signature, values)),\n sys.exc_info()[2])\n\n values_spec = structure.type_spec_from_value(values)\n\n if not structure.are_compatible(values_spec, output_signature):\n raise TypeError(\n \"`generator` yielded an element of %s where an element \"\n \"of %s was expected.\" % (values_spec, output_signature))\n\n return structure.to_tensor_list(output_signature, values)\n\n return script_ops._eager_py_func( # pylint: disable=protected-access\n generator_py_func,\n inp=[iterator_id_t],\n Tout=flat_output_types,\n use_tape_cache=False)\n\n def finalize_fn(iterator_id_t):\n \"\"\"Releases host-side state for the iterator with ID `iterator_id_t`.\"\"\"\n\n def finalize_py_func(iterator_id):\n generator_state.iterator_completed(iterator_id)\n # We return a dummy value so that the `finalize_fn` has a valid\n # signature.\n # NOTE(mrry): Explicitly create an array of `np.int64` because implicit\n # casting in `py_func()` will create an array of `np.int32` on Windows,\n # leading to a runtime error.\n return np.array(0, dtype=np.int64)\n\n return script_ops.numpy_function(finalize_py_func, [iterator_id_t],\n dtypes.int64)\n\n # This function associates each traversal of `generator` with a unique\n # iterator ID.\n def flat_map_fn(dummy_arg):\n # The `get_iterator_id_fn` gets a unique ID for the current instance of\n # of the generator.\n # The `generator_next_fn` gets the next element from the iterator with the\n # given ID, and raises StopIteration when that iterator contains no\n # more elements.\n return _GeneratorDataset(dummy_arg, get_iterator_id_fn, generator_next_fn,\n finalize_fn, output_signature)\n\n # A single-element dataset that, each time it is evaluated, contains a\n # freshly-generated and unique (for the returned dataset) int64\n # ID that will be used to identify the appropriate Python state, which\n # is encapsulated in `generator_state`, and captured in\n # `get_iterator_id_map_fn`.\n dummy = 0\n id_dataset = Dataset.from_tensors(dummy)\n\n # A dataset that contains all of the elements generated by a\n # single iterator created from `generator`, identified by the\n # iterator ID contained in `id_dataset`. Lifting the iteration\n # into a flat_map here enables multiple repetitions and/or nested\n # versions of the returned dataset to be created, because it forces\n # the generation of a new ID for each version.\n return id_dataset.flat_map(flat_map_fn)\n\n @staticmethod\n def range(*args, **kwargs):\n \"\"\"Creates a `Dataset` of a step-separated range of values.\n\n >>> list(Dataset.range(5).as_numpy_iterator())\n [0, 1, 2, 3, 4]\n >>> list(Dataset.range(2, 5).as_numpy_iterator())\n [2, 3, 4]\n >>> list(Dataset.range(1, 5, 2).as_numpy_iterator())\n [1, 3]\n >>> list(Dataset.range(1, 5, -2).as_numpy_iterator())\n []\n >>> list(Dataset.range(5, 1).as_numpy_iterator())\n []\n >>> list(Dataset.range(5, 1, -2).as_numpy_iterator())\n [5, 3]\n >>> list(Dataset.range(2, 5, output_type=tf.int32).as_numpy_iterator())\n [2, 3, 4]\n >>> list(Dataset.range(1, 5, 2, output_type=tf.float32).as_numpy_iterator())\n [1.0, 3.0]\n\n Args:\n *args: follows the same semantics as python's xrange.\n len(args) == 1 -> start = 0, stop = args[0], step = 1.\n len(args) == 2 -> start = args[0], stop = args[1], step = 1.\n len(args) == 3 -> start = args[0], stop = args[1], step = args[2].\n **kwargs:\n - output_type: Its expected dtype. 
(Optional, default: `tf.int64`).\n\n Returns:\n Dataset: A `RangeDataset`.\n\n Raises:\n ValueError: if len(args) == 0.\n \"\"\"\n return RangeDataset(*args, **kwargs)\n\n @staticmethod\n def zip(datasets):\n \"\"\"Creates a `Dataset` by zipping together the given datasets.\n\n This method has similar semantics to the built-in `zip()` function\n in Python, with the main difference being that the `datasets`\n argument can be an arbitrary nested structure of `Dataset` objects.\n\n >>> # The nested structure of the `datasets` argument determines the\n >>> # structure of elements in the resulting dataset.\n >>> a = tf.data.Dataset.range(1, 4) # ==> [ 1, 2, 3 ]\n >>> b = tf.data.Dataset.range(4, 7) # ==> [ 4, 5, 6 ]\n >>> ds = tf.data.Dataset.zip((a, b))\n >>> list(ds.as_numpy_iterator())\n [(1, 4), (2, 5), (3, 6)]\n >>> ds = tf.data.Dataset.zip((b, a))\n >>> list(ds.as_numpy_iterator())\n [(4, 1), (5, 2), (6, 3)]\n >>>\n >>> # The `datasets` argument may contain an arbitrary number of datasets.\n >>> c = tf.data.Dataset.range(7, 13).batch(2) # ==> [ [7, 8],\n ... # [9, 10],\n ... # [11, 12] ]\n >>> ds = tf.data.Dataset.zip((a, b, c))\n >>> for element in ds.as_numpy_iterator():\n ... print(element)\n (1, 4, array([7, 8]))\n (2, 5, array([ 9, 10]))\n (3, 6, array([11, 12]))\n >>>\n >>> # The number of elements in the resulting dataset is the same as\n >>> # the size of the smallest dataset in `datasets`.\n >>> d = tf.data.Dataset.range(13, 15) # ==> [ 13, 14 ]\n >>> ds = tf.data.Dataset.zip((a, d))\n >>> list(ds.as_numpy_iterator())\n [(1, 13), (2, 14)]\n\n Args:\n datasets: A nested structure of datasets.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return ZipDataset(datasets)\n\n def concatenate(self, dataset):\n \"\"\"Creates a `Dataset` by concatenating the given dataset with this dataset.\n\n >>> a = tf.data.Dataset.range(1, 4) # ==> [ 1, 2, 3 ]\n >>> b = tf.data.Dataset.range(4, 8) # ==> [ 4, 5, 6, 7 ]\n >>> ds = a.concatenate(b)\n >>> list(ds.as_numpy_iterator())\n [1, 2, 3, 4, 5, 6, 7]\n >>> # The input dataset and dataset to be concatenated should have the same\n >>> # nested structures and output types.\n >>> c = tf.data.Dataset.zip((a, b))\n >>> a.concatenate(c)\n Traceback (most recent call last):\n TypeError: Two datasets to concatenate have different types\n <dtype: 'int64'> and (tf.int64, tf.int64)\n >>> d = tf.data.Dataset.from_tensor_slices([\"a\", \"b\", \"c\"])\n >>> a.concatenate(d)\n Traceback (most recent call last):\n TypeError: Two datasets to concatenate have different types\n <dtype: 'int64'> and <dtype: 'string'>\n\n Args:\n dataset: `Dataset` to be concatenated.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return ConcatenateDataset(self, dataset)\n\n def prefetch(self, buffer_size):\n \"\"\"Creates a `Dataset` that prefetches elements from this dataset.\n\n Most dataset input pipelines should end with a call to `prefetch`. This\n allows later elements to be prepared while the current element is being\n processed. This often improves latency and throughput, at the cost of\n using additional memory to store prefetched elements.\n\n Note: Like other `Dataset` methods, prefetch operates on the\n elements of the input dataset. It has no concept of examples vs. 
batches.\n `examples.prefetch(2)` will prefetch two elements (2 examples),\n while `examples.batch(20).prefetch(2)` will prefetch 2 elements\n (2 batches, of 20 examples each).\n\n >>> dataset = tf.data.Dataset.range(3)\n >>> dataset = dataset.prefetch(2)\n >>> list(dataset.as_numpy_iterator())\n [0, 1, 2]\n\n Args:\n buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the maximum\n number of elements that will be buffered when prefetching. If the value\n `tf.data.AUTOTUNE` is used, then the buffer size is dynamically tuned.\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return PrefetchDataset(self, buffer_size)\n\n @staticmethod\n def list_files(file_pattern, shuffle=None, seed=None):\n \"\"\"A dataset of all files matching one or more glob patterns.\n\n The `file_pattern` argument should be a small number of glob patterns.\n If your filenames have already been globbed, use\n `Dataset.from_tensor_slices(filenames)` instead, as re-globbing every\n filename with `list_files` may result in poor performance with remote\n storage systems.\n\n Note: The default behavior of this method is to return filenames in\n a non-deterministic random shuffled order. Pass a `seed` or `shuffle=False`\n to get results in a deterministic order.\n\n Example:\n If we had the following files on our filesystem:\n\n - /path/to/dir/a.txt\n - /path/to/dir/b.py\n - /path/to/dir/c.py\n\n If we pass \"/path/to/dir/*.py\" as the directory, the dataset\n would produce:\n\n - /path/to/dir/b.py\n - /path/to/dir/c.py\n\n Args:\n file_pattern: A string, a list of strings, or a `tf.Tensor` of string type\n (scalar or vector), representing the filename glob (i.e. shell wildcard)\n pattern(s) that will be matched.\n shuffle: (Optional.) If `True`, the file names will be shuffled randomly.\n Defaults to `True`.\n seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random\n seed that will be used to create the distribution. See\n `tf.random.set_seed` for behavior.\n\n Returns:\n Dataset: A `Dataset` of strings corresponding to file names.\n \"\"\"\n with ops.name_scope(\"list_files\"):\n if shuffle is None:\n shuffle = True\n file_pattern = ops.convert_to_tensor(\n file_pattern, dtype=dtypes.string, name=\"file_pattern\")\n matching_files = gen_io_ops.matching_files(file_pattern)\n\n # Raise an exception if `file_pattern` does not match any files.\n condition = math_ops.greater(array_ops.shape(matching_files)[0], 0,\n name=\"match_not_empty\")\n\n message = math_ops.add(\n \"No files matched pattern: \",\n string_ops.reduce_join(file_pattern, separator=\", \"), name=\"message\")\n\n assert_not_empty = control_flow_ops.Assert(\n condition, [message], summarize=1, name=\"assert_not_empty\")\n with ops.control_dependencies([assert_not_empty]):\n matching_files = array_ops.identity(matching_files)\n\n dataset = Dataset.from_tensor_slices(matching_files)\n if shuffle:\n # NOTE(mrry): The shuffle buffer size must be greater than zero, but the\n # list of files might be empty.\n buffer_size = math_ops.maximum(\n array_ops.shape(matching_files, out_type=dtypes.int64)[0], 1)\n dataset = dataset.shuffle(buffer_size, seed=seed)\n return dataset\n\n def repeat(self, count=None):\n \"\"\"Repeats this dataset so each original value is seen `count` times.\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> dataset = dataset.repeat(3)\n >>> list(dataset.as_numpy_iterator())\n [1, 2, 3, 1, 2, 3, 1, 2, 3]\n\n Note: If this dataset is a function of global state (e.g. 
a random number\n generator), then different repetitions may produce different elements.\n\n Args:\n count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the\n number of times the dataset should be repeated. The default behavior (if\n `count` is `None` or `-1`) is for the dataset be repeated indefinitely.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return RepeatDataset(self, count)\n\n def enumerate(self, start=0):\n \"\"\"Enumerates the elements of this dataset.\n\n It is similar to python's `enumerate`.\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> dataset = dataset.enumerate(start=5)\n >>> for element in dataset.as_numpy_iterator():\n ... print(element)\n (5, 1)\n (6, 2)\n (7, 3)\n\n >>> # The nested structure of the input dataset determines the structure of\n >>> # elements in the resulting dataset.\n >>> dataset = tf.data.Dataset.from_tensor_slices([(7, 8), (9, 10)])\n >>> dataset = dataset.enumerate()\n >>> for element in dataset.as_numpy_iterator():\n ... print(element)\n (0, array([7, 8], dtype=int32))\n (1, array([ 9, 10], dtype=int32))\n\n Args:\n start: A `tf.int64` scalar `tf.Tensor`, representing the start value for\n enumeration.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n\n max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max\n return Dataset.zip((Dataset.range(start, max_value), self))\n\n def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):\n \"\"\"Randomly shuffles the elements of this dataset.\n\n This dataset fills a buffer with `buffer_size` elements, then randomly\n samples elements from this buffer, replacing the selected elements with new\n elements. For perfect shuffling, a buffer size greater than or equal to the\n full size of the dataset is required.\n\n For instance, if your dataset contains 10,000 elements but `buffer_size` is\n set to 1,000, then `shuffle` will initially select a random element from\n only the first 1,000 elements in the buffer. Once an element is selected,\n its space in the buffer is replaced by the next (i.e. 1,001-st) element,\n maintaining the 1,000 element buffer.\n\n `reshuffle_each_iteration` controls whether the shuffle order should be\n different for each epoch. In TF 1.X, the idiomatic way to create epochs\n was through the `repeat` transformation:\n\n >>> dataset = tf.data.Dataset.range(3)\n >>> dataset = dataset.shuffle(3, reshuffle_each_iteration=True)\n >>> dataset = dataset.repeat(2) # doctest: +SKIP\n [1, 0, 2, 1, 2, 0]\n\n >>> dataset = tf.data.Dataset.range(3)\n >>> dataset = dataset.shuffle(3, reshuffle_each_iteration=False)\n >>> dataset = dataset.repeat(2) # doctest: +SKIP\n [1, 0, 2, 1, 0, 2]\n\n In TF 2.0, `tf.data.Dataset` objects are Python iterables which makes it\n possible to also create epochs through Python iteration:\n\n >>> dataset = tf.data.Dataset.range(3)\n >>> dataset = dataset.shuffle(3, reshuffle_each_iteration=True)\n >>> list(dataset.as_numpy_iterator()) # doctest: +SKIP\n [1, 0, 2]\n >>> list(dataset.as_numpy_iterator()) # doctest: +SKIP\n [1, 2, 0]\n\n >>> dataset = tf.data.Dataset.range(3)\n >>> dataset = dataset.shuffle(3, reshuffle_each_iteration=False)\n >>> list(dataset.as_numpy_iterator()) # doctest: +SKIP\n [1, 0, 2]\n >>> list(dataset.as_numpy_iterator()) # doctest: +SKIP\n [1, 0, 2]\n\n Args:\n buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of\n elements from this dataset from which the new dataset will sample.\n seed: (Optional.) 
A `tf.int64` scalar `tf.Tensor`, representing the random\n seed that will be used to create the distribution. See\n `tf.random.set_seed` for behavior.\n reshuffle_each_iteration: (Optional.) A boolean, which if true indicates\n that the dataset should be pseudorandomly reshuffled each time it is\n iterated over. (Defaults to `True`.)\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return ShuffleDataset(self, buffer_size, seed, reshuffle_each_iteration)\n\n def cache(self, filename=\"\"):\n \"\"\"Caches the elements in this dataset.\n\n The first time the dataset is iterated over, its elements will be cached\n either in the specified file or in memory. Subsequent iterations will\n use the cached data.\n\n Note: For the cache to be finalized, the input dataset must be iterated\n through in its entirety. Otherwise, subsequent iterations will not use\n cached data.\n\n >>> dataset = tf.data.Dataset.range(5)\n >>> dataset = dataset.map(lambda x: x**2)\n >>> dataset = dataset.cache()\n >>> # The first time reading through the data will generate the data using\n >>> # `range` and `map`.\n >>> list(dataset.as_numpy_iterator())\n [0, 1, 4, 9, 16]\n >>> # Subsequent iterations read from the cache.\n >>> list(dataset.as_numpy_iterator())\n [0, 1, 4, 9, 16]\n\n When caching to a file, the cached data will persist across runs. Even the\n first iteration through the data will read from the cache file. Changing\n the input pipeline before the call to `.cache()` will have no effect until\n the cache file is removed or the filename is changed.\n\n >>> dataset = tf.data.Dataset.range(5)\n >>> dataset = dataset.cache(\"/path/to/file\") # doctest: +SKIP\n >>> list(dataset.as_numpy_iterator()) # doctest: +SKIP\n [0, 1, 2, 3, 4]\n >>> dataset = tf.data.Dataset.range(10)\n >>> dataset = dataset.cache(\"/path/to/file\") # Same file! # doctest: +SKIP\n >>> list(dataset.as_numpy_iterator()) # doctest: +SKIP\n [0, 1, 2, 3, 4]\n\n Note: `cache` will produce exactly the same elements during each iteration\n through the dataset. If you wish to randomize the iteration order, make sure\n to call `shuffle` *after* calling `cache`.\n\n Args:\n filename: A `tf.string` scalar `tf.Tensor`, representing the name of a\n directory on the filesystem to use for caching elements in this Dataset.\n If a filename is not provided, the dataset will be cached in memory.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return CacheDataset(self, filename)\n\n def take(self, count):\n \"\"\"Creates a `Dataset` with at most `count` elements from this dataset.\n\n >>> dataset = tf.data.Dataset.range(10)\n >>> dataset = dataset.take(3)\n >>> list(dataset.as_numpy_iterator())\n [0, 1, 2]\n\n Args:\n count: A `tf.int64` scalar `tf.Tensor`, representing the number of\n elements of this dataset that should be taken to form the new dataset.\n If `count` is -1, or if `count` is greater than the size of this\n dataset, the new dataset will contain all elements of this dataset.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return TakeDataset(self, count)\n\n def skip(self, count):\n \"\"\"Creates a `Dataset` that skips `count` elements from this dataset.\n\n >>> dataset = tf.data.Dataset.range(10)\n >>> dataset = dataset.skip(7)\n >>> list(dataset.as_numpy_iterator())\n [7, 8, 9]\n\n Args:\n count: A `tf.int64` scalar `tf.Tensor`, representing the number of\n elements of this dataset that should be skipped to form the new dataset.\n If `count` is greater than the size of this dataset, the new dataset\n will contain no elements. 
If `count` is -1, skips the entire dataset.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return SkipDataset(self, count)\n\n def shard(self, num_shards, index):\n \"\"\"Creates a `Dataset` that includes only 1/`num_shards` of this dataset.\n\n `shard` is deterministic. The Dataset produced by `A.shard(n, i)` will\n contain all elements of A whose index mod n = i.\n\n >>> A = tf.data.Dataset.range(10)\n >>> B = A.shard(num_shards=3, index=0)\n >>> list(B.as_numpy_iterator())\n [0, 3, 6, 9]\n >>> C = A.shard(num_shards=3, index=1)\n >>> list(C.as_numpy_iterator())\n [1, 4, 7]\n >>> D = A.shard(num_shards=3, index=2)\n >>> list(D.as_numpy_iterator())\n [2, 5, 8]\n\n This dataset operator is very useful when running distributed training, as\n it allows each worker to read a unique subset.\n\n When reading a single input file, you can shard elements as follows:\n\n ```python\n d = tf.data.TFRecordDataset(input_file)\n d = d.shard(num_workers, worker_index)\n d = d.repeat(num_epochs)\n d = d.shuffle(shuffle_buffer_size)\n d = d.map(parser_fn, num_parallel_calls=num_map_threads)\n ```\n\n Important caveats:\n\n - Be sure to shard before you use any randomizing operator (such as\n shuffle).\n - Generally it is best if the shard operator is used early in the dataset\n pipeline. For example, when reading from a set of TFRecord files, shard\n before converting the dataset to input samples. This avoids reading every\n file on every worker. The following is an example of an efficient\n sharding strategy within a complete pipeline:\n\n ```python\n d = Dataset.list_files(pattern)\n d = d.shard(num_workers, worker_index)\n d = d.repeat(num_epochs)\n d = d.shuffle(shuffle_buffer_size)\n d = d.interleave(tf.data.TFRecordDataset,\n cycle_length=num_readers, block_length=1)\n d = d.map(parser_fn, num_parallel_calls=num_map_threads)\n ```\n\n Args:\n num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of\n shards operating in parallel.\n index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.\n\n Returns:\n Dataset: A `Dataset`.\n\n Raises:\n InvalidArgumentError: if `num_shards` or `index` are illegal values.\n\n Note: error checking is done on a best-effort basis, and errors aren't\n guaranteed to be caught upon dataset creation. (e.g. providing in a\n placeholder tensor bypasses the early checking, and will instead result\n in an error during a session.run call.)\n \"\"\"\n return ShardDataset(self, num_shards, index)\n\n def batch(self, batch_size, drop_remainder=False):\n \"\"\"Combines consecutive elements of this dataset into batches.\n\n >>> dataset = tf.data.Dataset.range(8)\n >>> dataset = dataset.batch(3)\n >>> list(dataset.as_numpy_iterator())\n [array([0, 1, 2]), array([3, 4, 5]), array([6, 7])]\n\n >>> dataset = tf.data.Dataset.range(8)\n >>> dataset = dataset.batch(3, drop_remainder=True)\n >>> list(dataset.as_numpy_iterator())\n [array([0, 1, 2]), array([3, 4, 5])]\n\n The components of the resulting element will have an additional outer\n dimension, which will be `batch_size` (or `N % batch_size` for the last\n element if `batch_size` does not divide the number of input elements `N`\n evenly and `drop_remainder` is `False`). 
If your program depends on the\n batches having the same outer dimension, you should set the `drop_remainder`\n argument to `True` to prevent the smaller batch from being produced.\n\n Args:\n batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of\n consecutive elements of this dataset to combine in a single batch.\n drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing\n whether the last batch should be dropped in the case it has fewer than\n `batch_size` elements; the default behavior is not to drop the smaller\n batch.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return BatchDataset(self, batch_size, drop_remainder)\n\n def padded_batch(self,\n batch_size,\n padded_shapes=None,\n padding_values=None,\n drop_remainder=False):\n \"\"\"Combines consecutive elements of this dataset into padded batches.\n\n This transformation combines multiple consecutive elements of the input\n dataset into a single element.\n\n Like `tf.data.Dataset.batch`, the components of the resulting element will\n have an additional outer dimension, which will be `batch_size` (or\n `N % batch_size` for the last element if `batch_size` does not divide the\n number of input elements `N` evenly and `drop_remainder` is `False`). If\n your program depends on the batches having the same outer dimension, you\n should set the `drop_remainder` argument to `True` to prevent the smaller\n batch from being produced.\n\n Unlike `tf.data.Dataset.batch`, the input elements to be batched may have\n different shapes, and this transformation will pad each component to the\n respective shape in `padded_shapes`. The `padded_shapes` argument\n determines the resulting shape for each dimension of each component in an\n output element:\n\n * If the dimension is a constant, the component will be padded out to that\n length in that dimension.\n * If the dimension is unknown, the component will be padded out to the\n maximum length of all elements in that dimension.\n\n >>> A = (tf.data.Dataset\n ... .range(1, 5, output_type=tf.int32)\n ... .map(lambda x: tf.fill([x], x)))\n >>> # Pad to the smallest per-batch size that fits all elements.\n >>> B = A.padded_batch(2)\n >>> for element in B.as_numpy_iterator():\n ... print(element)\n [[1 0]\n [2 2]]\n [[3 3 3 0]\n [4 4 4 4]]\n >>> # Pad to a fixed size.\n >>> C = A.padded_batch(2, padded_shapes=5)\n >>> for element in C.as_numpy_iterator():\n ... print(element)\n [[1 0 0 0 0]\n [2 2 0 0 0]]\n [[3 3 3 0 0]\n [4 4 4 4 0]]\n >>> # Pad with a custom value.\n >>> D = A.padded_batch(2, padded_shapes=5, padding_values=-1)\n >>> for element in D.as_numpy_iterator():\n ... print(element)\n [[ 1 -1 -1 -1 -1]\n [ 2 2 -1 -1 -1]]\n [[ 3 3 3 -1 -1]\n [ 4 4 4 4 -1]]\n >>> # Components of nested elements can be padded independently.\n >>> elements = [([1, 2, 3], [10]),\n ... ([4, 5], [11, 12])]\n >>> dataset = tf.data.Dataset.from_generator(\n ... lambda: iter(elements), (tf.int32, tf.int32))\n >>> # Pad the first component of the tuple to length 4, and the second\n >>> # component to the smallest size that fits.\n >>> dataset = dataset.padded_batch(2,\n ... padded_shapes=([4], [None]),\n ... padding_values=(-1, 100))\n >>> list(dataset.as_numpy_iterator())\n [(array([[ 1, 2, 3, -1], [ 4, 5, -1, -1]], dtype=int32),\n array([[ 10, 100], [ 11, 12]], dtype=int32))]\n >>> # Pad with a single value and multiple components.\n >>> E = tf.data.Dataset.zip((A, A)).padded_batch(2, padding_values=-1)\n >>> for element in E.as_numpy_iterator():\n ... 
print(element)\n (array([[ 1, -1],\n [ 2, 2]], dtype=int32), array([[ 1, -1],\n [ 2, 2]], dtype=int32))\n (array([[ 3, 3, 3, -1],\n [ 4, 4, 4, 4]], dtype=int32), array([[ 3, 3, 3, -1],\n [ 4, 4, 4, 4]], dtype=int32))\n\n See also `tf.data.experimental.dense_to_sparse_batch`, which combines\n elements that may have different shapes into a `tf.sparse.SparseTensor`.\n\n Args:\n batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of\n consecutive elements of this dataset to combine in a single batch.\n padded_shapes: (Optional.) A nested structure of `tf.TensorShape` or\n `tf.int64` vector tensor-like objects representing the shape to which\n the respective component of each input element should be padded prior\n to batching. Any unknown dimensions will be padded to the maximum size\n of that dimension in each batch. If unset, all dimensions of all\n components are padded to the maximum size in the batch. `padded_shapes`\n must be set if any component has an unknown rank.\n padding_values: (Optional.) A nested structure of scalar-shaped\n `tf.Tensor`, representing the padding values to use for the respective\n components. None represents that the nested structure should be padded\n with default values. Defaults are `0` for numeric types and the empty\n string for string types. The `padding_values` should have the\n same structure as the input dataset. If `padding_values` is a single\n element and the input dataset has multiple components, then the same\n `padding_values` will be used to pad every component of the dataset.\n If `padding_values` is a scalar, then its value will be broadcasted\n to match the shape of each component.\n drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing\n whether the last batch should be dropped in the case it has fewer than\n `batch_size` elements; the default behavior is not to drop the smaller\n batch.\n\n Returns:\n Dataset: A `Dataset`.\n\n Raises:\n ValueError: If a component has an unknown rank, and the `padded_shapes`\n argument is not set.\n \"\"\"\n if padded_shapes is None:\n padded_shapes = get_legacy_output_shapes(self)\n # A `tf.TensorShape` is only false if its *rank* is unknown:\n # bool(tf.TensorShape(None)) is False\n if not all(nest.flatten(padded_shapes)):\n raise ValueError(\"You must set the `padded_shapes` argument to \"\n \"`Dataset.padded_batch` if any component of its \"\n \"input has an unknown rank\")\n return PaddedBatchDataset(self, batch_size, padded_shapes, padding_values,\n drop_remainder)\n\n def map(self, map_func, num_parallel_calls=None, deterministic=None):\n \"\"\"Maps `map_func` across the elements of this dataset.\n\n This transformation applies `map_func` to each element of this dataset, and\n returns a new dataset containing the transformed elements, in the same\n order as they appeared in the input. `map_func` can be used to change both\n the values and the structure of a dataset's elements. 
For example, adding 1\n to each element, or projecting a subset of element components.\n\n >>> dataset = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]\n >>> dataset = dataset.map(lambda x: x + 1)\n >>> list(dataset.as_numpy_iterator())\n [2, 3, 4, 5, 6]\n\n The input signature of `map_func` is determined by the structure of each\n element in this dataset.\n\n >>> dataset = Dataset.range(5)\n >>> # `map_func` takes a single argument of type `tf.Tensor` with the same\n >>> # shape and dtype.\n >>> result = dataset.map(lambda x: x + 1)\n\n >>> # Each element is a tuple containing two `tf.Tensor` objects.\n >>> elements = [(1, \"foo\"), (2, \"bar\"), (3, \"baz\")]\n >>> dataset = tf.data.Dataset.from_generator(\n ... lambda: elements, (tf.int32, tf.string))\n >>> # `map_func` takes two arguments of type `tf.Tensor`. This function\n >>> # projects out just the first component.\n >>> result = dataset.map(lambda x_int, y_str: x_int)\n >>> list(result.as_numpy_iterator())\n [1, 2, 3]\n\n >>> # Each element is a dictionary mapping strings to `tf.Tensor` objects.\n >>> elements = ([{\"a\": 1, \"b\": \"foo\"},\n ... {\"a\": 2, \"b\": \"bar\"},\n ... {\"a\": 3, \"b\": \"baz\"}])\n >>> dataset = tf.data.Dataset.from_generator(\n ... lambda: elements, {\"a\": tf.int32, \"b\": tf.string})\n >>> # `map_func` takes a single argument of type `dict` with the same keys\n >>> # as the elements.\n >>> result = dataset.map(lambda d: str(d[\"a\"]) + d[\"b\"])\n\n The value or values returned by `map_func` determine the structure of each\n element in the returned dataset.\n\n >>> dataset = tf.data.Dataset.range(3)\n >>> # `map_func` returns two `tf.Tensor` objects.\n >>> def g(x):\n ... return tf.constant(37.0), tf.constant([\"Foo\", \"Bar\", \"Baz\"])\n >>> result = dataset.map(g)\n >>> result.element_spec\n (TensorSpec(shape=(), dtype=tf.float32, name=None), TensorSpec(shape=(3,), \\\ndtype=tf.string, name=None))\n >>> # Python primitives, lists, and NumPy arrays are implicitly converted to\n >>> # `tf.Tensor`.\n >>> def h(x):\n ... return 37.0, [\"Foo\", \"Bar\"], np.array([1.0, 2.0], dtype=np.float64)\n >>> result = dataset.map(h)\n >>> result.element_spec\n (TensorSpec(shape=(), dtype=tf.float32, name=None), TensorSpec(shape=(2,), \\\ndtype=tf.string, name=None), TensorSpec(shape=(2,), dtype=tf.float64, \\\nname=None))\n >>> # `map_func` can return nested structures.\n >>> def i(x):\n ... return (37.0, [42, 16]), \"foo\"\n >>> result = dataset.map(i)\n >>> result.element_spec\n ((TensorSpec(shape=(), dtype=tf.float32, name=None),\n TensorSpec(shape=(2,), dtype=tf.int32, name=None)),\n TensorSpec(shape=(), dtype=tf.string, name=None))\n\n `map_func` can accept as arguments and return any type of dataset element.\n\n Note that irrespective of the context in which `map_func` is defined (eager\n vs. graph), tf.data traces the function and executes it as a graph. To use\n Python code inside of the function you have a few options:\n\n 1) Rely on AutoGraph to convert Python code into an equivalent graph\n computation. The downside of this approach is that AutoGraph can convert\n some but not all Python code.\n\n 2) Use `tf.py_function`, which allows you to write arbitrary Python code but\n will generally result in worse performance than 1). For example:\n\n >>> d = tf.data.Dataset.from_tensor_slices(['hello', 'world'])\n >>> # transform a string tensor to upper case string using a Python function\n >>> def upper_case_fn(t: tf.Tensor):\n ... 
return t.numpy().decode('utf-8').upper()\n >>> d = d.map(lambda x: tf.py_function(func=upper_case_fn,\n ... inp=[x], Tout=tf.string))\n >>> list(d.as_numpy_iterator())\n [b'HELLO', b'WORLD']\n\n 3) Use `tf.numpy_function`, which also allows you to write arbitrary\n Python code. Note that `tf.py_function` accepts `tf.Tensor` whereas\n `tf.numpy_function` accepts numpy arrays and returns only numpy arrays.\n For example:\n\n >>> d = tf.data.Dataset.from_tensor_slices(['hello', 'world'])\n >>> def upper_case_fn(t: np.ndarray):\n ... return t.decode('utf-8').upper()\n >>> d = d.map(lambda x: tf.numpy_function(func=upper_case_fn,\n ... inp=[x], Tout=tf.string))\n >>> list(d.as_numpy_iterator())\n [b'HELLO', b'WORLD']\n\n Note that the use of `tf.numpy_function` and `tf.py_function`\n in general precludes the possibility of executing user-defined\n transformations in parallel (because of Python GIL).\n\n Performance can often be improved by setting `num_parallel_calls` so that\n `map` will use multiple threads to process elements. If deterministic order\n isn't required, it can also improve performance to set\n `deterministic=False`.\n\n >>> dataset = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]\n >>> dataset = dataset.map(lambda x: x + 1,\n ... num_parallel_calls=tf.data.AUTOTUNE,\n ... deterministic=False)\n\n The order of elements yielded by this transformation is deterministic if\n `deterministic=True`. If `map_func` contains stateful operations and\n `num_parallel_calls > 1`, the order in which that state is accessed is\n undefined, so the values of output elements may not be deterministic\n regardless of the `deterministic` flag value.\n\n Args:\n map_func: A function mapping a dataset element to another dataset element.\n num_parallel_calls: (Optional.) A `tf.int64` scalar `tf.Tensor`,\n representing the number elements to process asynchronously in parallel.\n If not specified, elements will be processed sequentially. If the value\n `tf.data.AUTOTUNE` is used, then the number of parallel\n calls is set dynamically based on available CPU.\n deterministic: (Optional.) A boolean controlling whether determinism\n should be traded for performance by allowing elements to be yielded out\n of order. If `deterministic` is `None`, the\n `tf.data.Options.experimental_deterministic` dataset option (`True` by\n default) is used to decide whether to run deterministically.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n if num_parallel_calls is None:\n return MapDataset(self, map_func, preserve_cardinality=True)\n else:\n return ParallelMapDataset(\n self,\n map_func,\n num_parallel_calls,\n deterministic,\n preserve_cardinality=True)\n\n def flat_map(self, map_func):\n \"\"\"Maps `map_func` across this dataset and flattens the result.\n\n Use `flat_map` if you want to make sure that the order of your dataset\n stays the same. For example, to flatten a dataset of batches into a\n dataset of their elements:\n\n >>> dataset = tf.data.Dataset.from_tensor_slices(\n ... 
[[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n >>> dataset = dataset.flat_map(lambda x: Dataset.from_tensor_slices(x))\n >>> list(dataset.as_numpy_iterator())\n [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n `tf.data.Dataset.interleave()` is a generalization of `flat_map`, since\n `flat_map` produces the same output as\n `tf.data.Dataset.interleave(cycle_length=1)`\n\n Args:\n map_func: A function mapping a dataset element to a dataset.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return FlatMapDataset(self, map_func)\n\n def interleave(self,\n map_func,\n cycle_length=None,\n block_length=None,\n num_parallel_calls=None,\n deterministic=None):\n \"\"\"Maps `map_func` across this dataset, and interleaves the results.\n\n For example, you can use `Dataset.interleave()` to process many input files\n concurrently:\n\n >>> # Preprocess 4 files concurrently, and interleave blocks of 16 records\n >>> # from each file.\n >>> filenames = [\"/var/data/file1.txt\", \"/var/data/file2.txt\",\n ... \"/var/data/file3.txt\", \"/var/data/file4.txt\"]\n >>> dataset = tf.data.Dataset.from_tensor_slices(filenames)\n >>> def parse_fn(filename):\n ... return tf.data.Dataset.range(10)\n >>> dataset = dataset.interleave(lambda x:\n ... tf.data.TextLineDataset(x).map(parse_fn, num_parallel_calls=1),\n ... cycle_length=4, block_length=16)\n\n The `cycle_length` and `block_length` arguments control the order in which\n elements are produced. `cycle_length` controls the number of input elements\n that are processed concurrently. If you set `cycle_length` to 1, this\n transformation will handle one input element at a time, and will produce\n identical results to `tf.data.Dataset.flat_map`. In general,\n this transformation will apply `map_func` to `cycle_length` input elements,\n open iterators on the returned `Dataset` objects, and cycle through them\n producing `block_length` consecutive elements from each iterator, and\n consuming the next input element each time it reaches the end of an\n iterator.\n\n For example:\n\n >>> dataset = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]\n >>> # NOTE: New lines indicate \"block\" boundaries.\n >>> dataset = dataset.interleave(\n ... lambda x: Dataset.from_tensors(x).repeat(6),\n ... cycle_length=2, block_length=4)\n >>> list(dataset.as_numpy_iterator())\n [1, 1, 1, 1,\n 2, 2, 2, 2,\n 1, 1,\n 2, 2,\n 3, 3, 3, 3,\n 4, 4, 4, 4,\n 3, 3,\n 4, 4,\n 5, 5, 5, 5,\n 5, 5]\n\n Note: The order of elements yielded by this transformation is\n deterministic, as long as `map_func` is a pure function and\n `deterministic=True`. If `map_func` contains any stateful operations, the\n order in which that state is accessed is undefined.\n\n Performance can often be improved by setting `num_parallel_calls` so that\n `interleave` will use multiple threads to fetch elements. If determinism\n isn't required, it can also improve performance to set\n `deterministic=False`.\n\n >>> filenames = [\"/var/data/file1.txt\", \"/var/data/file2.txt\",\n ... \"/var/data/file3.txt\", \"/var/data/file4.txt\"]\n >>> dataset = tf.data.Dataset.from_tensor_slices(filenames)\n >>> dataset = dataset.interleave(lambda x: tf.data.TFRecordDataset(x),\n ... cycle_length=4, num_parallel_calls=tf.data.AUTOTUNE,\n ... deterministic=False)\n\n Args:\n map_func: A function mapping a dataset element to a dataset.\n cycle_length: (Optional.) The number of input elements that will be\n processed concurrently. If not set, the tf.data runtime decides what it\n should be based on available CPU. 
If `num_parallel_calls` is set to\n `tf.data.AUTOTUNE`, the `cycle_length` argument identifies\n the maximum degree of parallelism.\n block_length: (Optional.) The number of consecutive elements to produce\n from each input element before cycling to another input element. If not\n set, defaults to 1.\n num_parallel_calls: (Optional.) If specified, the implementation creates a\n threadpool, which is used to fetch inputs from cycle elements\n asynchronously and in parallel. The default behavior is to fetch inputs\n from cycle elements synchronously with no parallelism. If the value\n `tf.data.AUTOTUNE` is used, then the number of parallel\n calls is set dynamically based on available CPU.\n deterministic: (Optional.) A boolean controlling whether determinism\n should be traded for performance by allowing elements to be produced out\n of order. If `deterministic` is `None`, the\n `tf.data.Options.experimental_deterministic` dataset option (`True` by\n default) is used to decide whether to run deterministically.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n if block_length is None:\n block_length = 1\n\n if cycle_length is None:\n cycle_length = AUTOTUNE\n\n if num_parallel_calls is None:\n return InterleaveDataset(self, map_func, cycle_length, block_length)\n else:\n return ParallelInterleaveDataset(\n self,\n map_func,\n cycle_length,\n block_length,\n num_parallel_calls,\n deterministic=deterministic)\n\n def filter(self, predicate):\n \"\"\"Filters this dataset according to `predicate`.\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> dataset = dataset.filter(lambda x: x < 3)\n >>> list(dataset.as_numpy_iterator())\n [1, 2]\n >>> # `tf.math.equal(x, y)` is required for equality comparison\n >>> def filter_fn(x):\n ... return tf.math.equal(x, 1)\n >>> dataset = dataset.filter(filter_fn)\n >>> list(dataset.as_numpy_iterator())\n [1]\n\n Args:\n predicate: A function mapping a dataset element to a boolean.\n\n Returns:\n Dataset: The `Dataset` containing the elements of this dataset for which\n `predicate` is `True`.\n \"\"\"\n return FilterDataset(self, predicate)\n\n def apply(self, transformation_func):\n \"\"\"Applies a transformation function to this dataset.\n\n `apply` enables chaining of custom `Dataset` transformations, which are\n represented as functions that take one `Dataset` argument and return a\n transformed `Dataset`.\n\n >>> dataset = tf.data.Dataset.range(100)\n >>> def dataset_fn(ds):\n ... return ds.filter(lambda x: x < 5)\n >>> dataset = dataset.apply(dataset_fn)\n >>> list(dataset.as_numpy_iterator())\n [0, 1, 2, 3, 4]\n\n Args:\n transformation_func: A function that takes one `Dataset` argument and\n returns a `Dataset`.\n\n Returns:\n Dataset: The `Dataset` returned by applying `transformation_func` to this\n dataset.\n \"\"\"\n dataset = transformation_func(self)\n if not isinstance(dataset, DatasetV2):\n raise TypeError(\n \"`transformation_func` must return a Dataset. Got {}.\".format(\n dataset))\n dataset._input_datasets = [self] # pylint: disable=protected-access\n return dataset\n\n def window(self, size, shift=None, stride=1, drop_remainder=False):\n \"\"\"Combines (nests of) input elements into a dataset of (nests of) windows.\n\n A \"window\" is a finite dataset of flat elements of size `size` (or possibly\n fewer if there are not enough input elements to fill the window and\n `drop_remainder` evaluates to `False`).\n\n The `shift` argument determines the number of input elements by which the\n window moves on each iteration. 
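    For instance, a small sketch using `shift` alone (size 2, shift 3, other
    arguments left at their defaults):

    >>> dataset = tf.data.Dataset.range(7).window(2, shift=3)
    >>> for window in dataset:
    ...   print(list(window.as_numpy_iterator()))
    [0, 1]
    [3, 4]
    [6]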
If windows and elements are both numbered\n starting at 0, the first element in window `k` will be element `k * shift`\n of the input dataset. In particular, the first element of the first window\n will always be the first element of the input dataset.\n\n The `stride` argument determines the stride of the input elements, and the\n `shift` argument determines the shift of the window.\n\n For example:\n\n >>> dataset = tf.data.Dataset.range(7).window(2)\n >>> for window in dataset:\n ... print(list(window.as_numpy_iterator()))\n [0, 1]\n [2, 3]\n [4, 5]\n [6]\n >>> dataset = tf.data.Dataset.range(7).window(3, 2, 1, True)\n >>> for window in dataset:\n ... print(list(window.as_numpy_iterator()))\n [0, 1, 2]\n [2, 3, 4]\n [4, 5, 6]\n >>> dataset = tf.data.Dataset.range(7).window(3, 1, 2, True)\n >>> for window in dataset:\n ... print(list(window.as_numpy_iterator()))\n [0, 2, 4]\n [1, 3, 5]\n [2, 4, 6]\n\n Note that when the `window` transformation is applied to a dataset of\n nested elements, it produces a dataset of nested windows.\n\n >>> nested = ([1, 2, 3, 4], [5, 6, 7, 8])\n >>> dataset = tf.data.Dataset.from_tensor_slices(nested).window(2)\n >>> for window in dataset:\n ... def to_numpy(ds):\n ... return list(ds.as_numpy_iterator())\n ... print(tuple(to_numpy(component) for component in window))\n ([1, 2], [5, 6])\n ([3, 4], [7, 8])\n\n >>> dataset = tf.data.Dataset.from_tensor_slices({'a': [1, 2, 3, 4]})\n >>> dataset = dataset.window(2)\n >>> for window in dataset:\n ... def to_numpy(ds):\n ... return list(ds.as_numpy_iterator())\n ... print({'a': to_numpy(window['a'])})\n {'a': [1, 2]}\n {'a': [3, 4]}\n\n Args:\n size: A `tf.int64` scalar `tf.Tensor`, representing the number of elements\n of the input dataset to combine into a window. Must be positive.\n shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the\n number of input elements by which the window moves in each iteration.\n Defaults to `size`. Must be positive.\n stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the\n stride of the input elements in the sliding window. Must be positive.\n The default value of 1 means \"retain every input element\".\n drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing\n whether the last windows should be dropped if their size is smaller than\n `size`.\n\n Returns:\n Dataset: A `Dataset` of (nests of) windows -- a finite datasets of flat\n elements created from the (nests of) input elements.\n\n \"\"\"\n if shift is None:\n shift = size\n return WindowDataset(self, size, shift, stride, drop_remainder)\n\n def reduce(self, initial_state, reduce_func):\n \"\"\"Reduces the input dataset to a single element.\n\n The transformation calls `reduce_func` successively on every element of\n the input dataset until the dataset is exhausted, aggregating information in\n its internal state. The `initial_state` argument is used for the initial\n state and the final state is returned as the result.\n\n >>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, _: x + 1).numpy()\n 5\n >>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, y: x + y).numpy()\n 10\n\n Args:\n initial_state: An element representing the initial state of the\n transformation.\n reduce_func: A function that maps `(old_state, input_element)` to\n `new_state`. 
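    The state may be a nested structure; as a rough sketch, a `(count, sum)`
    pair of `np.int64` values:

    >>> count, total = tf.data.Dataset.range(5).reduce(
    ...     (np.int64(0), np.int64(0)),
    ...     lambda state, x: (state[0] + 1, state[1] + x))
    >>> (count.numpy(), total.numpy())
    (5, 10)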
It must take two arguments and return a new element\n The structure of `new_state` must match the structure of\n `initial_state`.\n\n Returns:\n A dataset element corresponding to the final state of the transformation.\n\n \"\"\"\n\n with ops.name_scope(\"initial_state\"):\n initial_state = structure.normalize_element(initial_state)\n state_structure = structure.type_spec_from_value(initial_state)\n\n # Iteratively rerun the reduce function until reaching a fixed point on\n # `state_structure`.\n need_to_rerun = True\n while need_to_rerun:\n\n wrapped_func = StructuredFunctionWrapper(\n reduce_func,\n \"reduce()\",\n input_structure=(state_structure, self.element_spec),\n add_to_graph=False)\n\n # Extract and validate class information from the returned values.\n output_classes = wrapped_func.output_classes\n state_classes = nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access\n state_structure)\n for new_state_class, state_class in zip(\n nest.flatten(output_classes), nest.flatten(state_classes)):\n if not issubclass(new_state_class, state_class):\n raise TypeError(\n \"The element classes for the new state must match the initial \"\n \"state. Expected %s; got %s.\" %\n (state_classes, wrapped_func.output_classes))\n\n # Extract and validate type information from the returned values.\n output_types = wrapped_func.output_types\n state_types = nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access\n state_structure)\n for new_state_type, state_type in zip(\n nest.flatten(output_types), nest.flatten(state_types)):\n if new_state_type != state_type:\n raise TypeError(\n \"The element types for the new state must match the initial \"\n \"state. 
Expected %s; got %s.\" %\n (state_types, wrapped_func.output_types))\n\n # Extract shape information from the returned values.\n output_shapes = wrapped_func.output_shapes\n state_shapes = nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access\n state_structure)\n flat_state_shapes = nest.flatten(state_shapes)\n flat_new_state_shapes = nest.flatten(output_shapes)\n weakened_state_shapes = [\n original.most_specific_compatible_shape(new)\n for original, new in zip(flat_state_shapes, flat_new_state_shapes)\n ]\n\n need_to_rerun = False\n for original_shape, weakened_shape in zip(flat_state_shapes,\n weakened_state_shapes):\n if original_shape.ndims is not None and (\n weakened_shape.ndims is None or\n original_shape.as_list() != weakened_shape.as_list()):\n need_to_rerun = True\n break\n\n if need_to_rerun:\n # TODO(b/110122868): Support a \"most specific compatible structure\"\n # method for combining structures, to avoid using legacy structures\n # here.\n state_structure = structure.convert_legacy_structure(\n state_types,\n nest.pack_sequence_as(state_shapes, weakened_state_shapes),\n state_classes)\n\n reduce_func = wrapped_func.function\n reduce_func.add_to_graph(ops.get_default_graph())\n\n dataset = self._apply_options()\n\n # pylint: disable=protected-access\n return structure.from_compatible_tensor_list(\n state_structure,\n gen_dataset_ops.reduce_dataset(\n dataset._variant_tensor,\n structure.to_tensor_list(state_structure, initial_state),\n reduce_func.captured_inputs,\n f=reduce_func,\n output_shapes=structure.get_flat_tensor_shapes(state_structure),\n output_types=structure.get_flat_tensor_types(state_structure)))\n\n def unbatch(self):\n \"\"\"Splits elements of a dataset into multiple elements.\n\n For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,\n where `B` may vary for each input element, then for each element in the\n dataset, the unbatched dataset will contain `B` consecutive elements\n of shape `[a0, a1, ...]`.\n\n >>> elements = [ [1, 2, 3], [1, 2], [1, 2, 3, 4] ]\n >>> dataset = tf.data.Dataset.from_generator(lambda: elements, tf.int64)\n >>> dataset = dataset.unbatch()\n >>> list(dataset.as_numpy_iterator())\n [1, 2, 3, 1, 2, 1, 2, 3, 4]\n\n Note: `unbatch` requires a data copy to slice up the batched tensor into\n smaller, unbatched tensors. When optimizing performance, try to avoid\n unnecessary usage of `unbatch`.\n\n Returns:\n A `Dataset`.\n \"\"\"\n normalized_dataset = normalize_to_dense(self)\n return _UnbatchDataset(normalized_dataset)\n\n def with_options(self, options):\n \"\"\"Returns a new `tf.data.Dataset` with the given options set.\n\n The options are \"global\" in the sense they apply to the entire dataset.\n If options are set multiple times, they are merged as long as different\n options do not use different non-default values.\n\n >>> ds = tf.data.Dataset.range(5)\n >>> ds = ds.interleave(lambda x: tf.data.Dataset.range(5),\n ... cycle_length=3,\n ... 
num_parallel_calls=3)\n >>> options = tf.data.Options()\n >>> # This will make the interleave order non-deterministic.\n >>> options.experimental_deterministic = False\n >>> ds = ds.with_options(options)\n\n Args:\n options: A `tf.data.Options` that identifies the options the use.\n\n Returns:\n Dataset: A `Dataset` with the given options.\n\n Raises:\n ValueError: when an option is set more than once to a non-default value\n \"\"\"\n return _OptionsDataset(self, options)\n\n def cardinality(self):\n \"\"\"Returns the cardinality of the dataset, if known.\n\n `cardinality` may return `tf.data.INFINITE_CARDINALITY` if the dataset\n contains an infinite number of elements or `tf.data.UNKNOWN_CARDINALITY` if\n the analysis fails to determine the number of elements in the dataset\n (e.g. when the dataset source is a file).\n\n >>> dataset = tf.data.Dataset.range(42)\n >>> print(dataset.cardinality().numpy())\n 42\n >>> dataset = dataset.repeat()\n >>> cardinality = dataset.cardinality()\n >>> print((cardinality == tf.data.INFINITE_CARDINALITY).numpy())\n True\n >>> dataset = dataset.filter(lambda x: True)\n >>> cardinality = dataset.cardinality()\n >>> print((cardinality == tf.data.UNKNOWN_CARDINALITY).numpy())\n True\n\n Returns:\n A scalar `tf.int64` `Tensor` representing the cardinality of the dataset.\n If the cardinality is infinite or unknown, `cardinality` returns the\n named constants `tf.data.INFINITE_CARDINALITY` and\n `tf.data.UNKNOWN_CARDINALITY` respectively.\n \"\"\"\n return gen_dataset_ops.dataset_cardinality(self._variant_tensor)\n\n\n@tf_export(v1=[\"data.Dataset\"])\nclass DatasetV1(DatasetV2):\n \"\"\"Represents a potentially large set of elements.\n\n A `Dataset` can be used to represent an input pipeline as a\n collection of elements and a \"logical plan\" of transformations that act on\n those elements.\n \"\"\"\n\n def __init__(self):\n try:\n variant_tensor = self._as_variant_tensor()\n except AttributeError as e:\n if \"_as_variant_tensor\" in str(e):\n raise AttributeError(\"Please use _variant_tensor instead of \"\n \"_as_variant_tensor() to obtain the variant \"\n \"associated with a dataset\")\n raise AttributeError(\"{}: A likely cause of this error is that the super \"\n \"call for this dataset is not the last line of the \"\n \"__init__ method. The base class causes the \"\n \"_as_variant_tensor call in its constructor and \"\n \"if that uses attributes defined in the __init__ \"\n \"method, those attrs need to be defined before the \"\n \"super call.\".format(e))\n super(DatasetV1, self).__init__(variant_tensor)\n\n @abc.abstractmethod\n def _as_variant_tensor(self):\n \"\"\"Creates a scalar `tf.Tensor` of `tf.variant` representing this dataset.\n\n Returns:\n A scalar `tf.Tensor` of `tf.variant` type, which represents this dataset.\n \"\"\"\n raise NotImplementedError(\"Dataset._as_variant_tensor\")\n\n @deprecation.deprecated(\n None, \"This is a deprecated API that should only be used in TF 1 graph \"\n \"mode and legacy TF 2 graph mode available through `tf.compat.v1`. In \"\n \"all other situations -- namely, eager mode and inside `tf.function` -- \"\n \"you can consume dataset elements using `for elem in dataset: ...` or \"\n \"by explicitly creating iterator via `iterator = iter(dataset)` and \"\n \"fetching its elements via `values = next(iterator)`. Furthermore, \"\n \"this API is not available in TF 2. 
During the transition from TF 1 \"\n \"to TF 2 you can use `tf.compat.v1.data.make_one_shot_iterator(dataset)` \"\n \"to create a TF 1 graph mode style iterator for a dataset created \"\n \"through TF 2 APIs. Note that this should be a transient state of your \"\n \"code base as there are in general no guarantees about the \"\n \"interoperability of TF 1 and TF 2 code.\")\n def make_one_shot_iterator(self):\n \"\"\"Creates an iterator for elements of this dataset.\n\n Note: The returned iterator will be initialized automatically.\n A \"one-shot\" iterator does not currently support re-initialization. For\n that see `make_initializable_iterator`.\n\n Example:\n\n ```python\n # Building graph ...\n dataset = ...\n next_value = dataset.make_one_shot_iterator().get_next()\n\n # ... from within a session ...\n try:\n while True:\n value = sess.run(next_value)\n ...\n except tf.errors.OutOfRangeError:\n pass\n ```\n\n Returns:\n An `tf.data.Iterator` for elements of this dataset.\n \"\"\"\n return self._make_one_shot_iterator()\n\n def _make_one_shot_iterator(self): # pylint: disable=missing-docstring\n if context.executing_eagerly():\n with ops.colocate_with(self._variant_tensor):\n return iterator_ops.OwnedIterator(self)\n\n _ensure_same_dataset_graph(self)\n # Now that we create datasets at python object creation time, the capture\n # by value _make_dataset() function would try to capture these variant\n # tensor dataset inputs, which are marked as stateful ops and would throw\n # an error if we try and capture them. We therefore traverse the graph\n # to find all these ops and allowlist them so that the capturing\n # logic instead of throwing an error recreates these ops which is what was\n # happening before.\n all_ds_ops = traverse.obtain_all_variant_tensor_ops(self)\n graph_level_seed, op_level_seed = core_random_seed.get_seed(None)\n\n # NOTE(mrry): We capture by value here to ensure that `_make_dataset()` is\n # a 0-argument function.\n @function.Defun(capture_by_value=True, allowlisted_stateful_ops=all_ds_ops)\n def _make_dataset():\n \"\"\"Factory function for a dataset.\"\"\"\n # NOTE(mrry): `Defun` does not capture the graph-level seed from the\n # enclosing graph, so if a graph-level seed is present we set the local\n # graph seed based on a combination of the graph- and op-level seeds.\n if graph_level_seed is not None:\n assert op_level_seed is not None\n core_random_seed.set_random_seed(\n (graph_level_seed + 87654321 * op_level_seed) % (2 ** 63 - 1))\n\n dataset = self._apply_options()\n return dataset._variant_tensor # pylint: disable=protected-access\n\n try:\n _make_dataset.add_to_graph(ops.get_default_graph())\n except ValueError as err:\n if \"Cannot capture a stateful node\" in str(err):\n raise ValueError(\n \"Failed to create a one-shot iterator for a dataset. \"\n \"`Dataset.make_one_shot_iterator()` does not support datasets that \"\n \"capture stateful objects, such as a `Variable` or `LookupTable`. \"\n \"In these cases, use `Dataset.make_initializable_iterator()`. 
\"\n \"(Original error: %s)\" % err)\n else:\n six.reraise(ValueError, err)\n\n with ops.colocate_with(self._variant_tensor):\n # pylint: disable=protected-access\n return iterator_ops.Iterator(\n gen_dataset_ops.one_shot_iterator(\n dataset_factory=_make_dataset, **self._flat_structure), None,\n get_legacy_output_types(self), get_legacy_output_shapes(self),\n get_legacy_output_classes(self))\n\n @deprecation.deprecated(\n None, \"This is a deprecated API that should only be used in TF 1 graph \"\n \"mode and legacy TF 2 graph mode available through `tf.compat.v1`. \"\n \"In all other situations -- namely, eager mode and inside `tf.function` \"\n \"-- you can consume dataset elements using `for elem in dataset: ...` \"\n \"or by explicitly creating iterator via `iterator = iter(dataset)` \"\n \"and fetching its elements via `values = next(iterator)`. \"\n \"Furthermore, this API is not available in TF 2. During the transition \"\n \"from TF 1 to TF 2 you can use \"\n \"`tf.compat.v1.data.make_initializable_iterator(dataset)` to create a TF \"\n \"1 graph mode style iterator for a dataset created through TF 2 APIs. \"\n \"Note that this should be a transient state of your code base as there \"\n \"are in general no guarantees about the interoperability of TF 1 and TF \"\n \"2 code.\")\n def make_initializable_iterator(self, shared_name=None):\n \"\"\"Creates an iterator for elements of this dataset.\n\n Note: The returned iterator will be in an uninitialized state,\n and you must run the `iterator.initializer` operation before using it:\n\n ```python\n # Building graph ...\n dataset = ...\n iterator = dataset.make_initializable_iterator()\n next_value = iterator.get_next() # This is a Tensor.\n\n # ... from within a session ...\n sess.run(iterator.initializer)\n try:\n while True:\n value = sess.run(next_value)\n ...\n except tf.errors.OutOfRangeError:\n pass\n ```\n\n Args:\n shared_name: (Optional.) If non-empty, the returned iterator will be\n shared under the given name across multiple sessions that share the same\n devices (e.g. when using a remote server).\n\n Returns:\n A `tf.data.Iterator` for elements of this dataset.\n\n Raises:\n RuntimeError: If eager execution is enabled.\n \"\"\"\n return self._make_initializable_iterator(shared_name)\n\n def _make_initializable_iterator(self, shared_name=None): # pylint: disable=missing-docstring\n if context.executing_eagerly():\n raise RuntimeError(\n \"dataset.make_initializable_iterator is not supported when eager \"\n \"execution is enabled. 
Use `for element in dataset` instead.\")\n _ensure_same_dataset_graph(self)\n dataset = self._apply_options()\n if shared_name is None:\n shared_name = \"\"\n\n with ops.colocate_with(self._variant_tensor):\n iterator_resource = gen_dataset_ops.iterator_v2(\n container=\"\", shared_name=shared_name, **self._flat_structure)\n\n initializer = gen_dataset_ops.make_iterator(\n dataset._variant_tensor, # pylint: disable=protected-access\n iterator_resource)\n\n # pylint: disable=protected-access\n return iterator_ops.Iterator(iterator_resource, initializer,\n get_legacy_output_types(dataset),\n get_legacy_output_shapes(dataset),\n get_legacy_output_classes(dataset))\n\n @property\n @deprecation.deprecated(\n None, \"Use `tf.compat.v1.data.get_output_classes(dataset)`.\")\n def output_classes(self):\n \"\"\"Returns the class of each component of an element of this dataset.\n\n Returns:\n A nested structure of Python `type` objects corresponding to each\n component of an element of this dataset.\n \"\"\"\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access\n self.element_spec)\n\n @property\n @deprecation.deprecated(\n None, \"Use `tf.compat.v1.data.get_output_shapes(dataset)`.\")\n def output_shapes(self):\n \"\"\"Returns the shape of each component of an element of this dataset.\n\n Returns:\n A nested structure of `tf.TensorShape` objects corresponding to each\n component of an element of this dataset.\n \"\"\"\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access\n self.element_spec)\n\n @property\n @deprecation.deprecated(\n None, \"Use `tf.compat.v1.data.get_output_types(dataset)`.\")\n def output_types(self):\n \"\"\"Returns the type of each component of an element of this dataset.\n\n Returns:\n A nested structure of `tf.DType` objects corresponding to each component\n of an element of this dataset.\n \"\"\"\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access\n self.element_spec)\n\n @property\n def element_spec(self):\n # TODO(b/110122868): Remove this override once all `Dataset` instances\n # implement `element_structure`.\n return structure.convert_legacy_structure(\n self.output_types, self.output_shapes, self.output_classes)\n\n @staticmethod\n @functools.wraps(DatasetV2.from_tensors)\n def from_tensors(tensors):\n return DatasetV1Adapter(DatasetV2.from_tensors(tensors))\n\n @staticmethod\n @functools.wraps(DatasetV2.from_tensor_slices)\n def from_tensor_slices(tensors):\n return DatasetV1Adapter(DatasetV2.from_tensor_slices(tensors))\n\n @staticmethod\n @deprecation.deprecated(None, \"Use `tf.data.Dataset.from_tensor_slices()`.\")\n def from_sparse_tensor_slices(sparse_tensor):\n \"\"\"Splits each rank-N `tf.sparse.SparseTensor` in this dataset row-wise.\n\n Args:\n sparse_tensor: A `tf.sparse.SparseTensor`.\n\n Returns:\n Dataset: A `Dataset` of rank-(N-1) sparse tensors.\n \"\"\"\n return DatasetV1Adapter(SparseTensorSliceDataset(sparse_tensor))\n\n @staticmethod\n @functools.wraps(DatasetV2.from_generator)\n def from_generator(generator,\n output_types=None,\n output_shapes=None,\n args=None,\n output_signature=None):\n return DatasetV1Adapter(\n DatasetV2.from_generator(generator, output_types, output_shapes, args,\n output_signature))\n\n @staticmethod\n @functools.wraps(DatasetV2.range)\n def range(*args, **kwargs):\n return 
DatasetV1Adapter(DatasetV2.range(*args, **kwargs))\n\n @staticmethod\n @functools.wraps(DatasetV2.zip)\n def zip(datasets):\n return DatasetV1Adapter(DatasetV2.zip(datasets))\n\n @functools.wraps(DatasetV2.concatenate)\n def concatenate(self, dataset):\n return DatasetV1Adapter(super(DatasetV1, self).concatenate(dataset))\n\n @functools.wraps(DatasetV2.prefetch)\n def prefetch(self, buffer_size):\n return DatasetV1Adapter(super(DatasetV1, self).prefetch(buffer_size))\n\n @staticmethod\n @functools.wraps(DatasetV2.list_files)\n def list_files(file_pattern, shuffle=None, seed=None):\n return DatasetV1Adapter(DatasetV2.list_files(file_pattern, shuffle, seed))\n\n @functools.wraps(DatasetV2.repeat)\n def repeat(self, count=None):\n return DatasetV1Adapter(super(DatasetV1, self).repeat(count))\n\n @functools.wraps(DatasetV2.shuffle)\n def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):\n return DatasetV1Adapter(super(DatasetV1, self).shuffle(\n buffer_size, seed, reshuffle_each_iteration))\n\n @functools.wraps(DatasetV2.cache)\n def cache(self, filename=\"\"):\n return DatasetV1Adapter(super(DatasetV1, self).cache(filename))\n\n @functools.wraps(DatasetV2.take)\n def take(self, count):\n return DatasetV1Adapter(super(DatasetV1, self).take(count))\n\n @functools.wraps(DatasetV2.skip)\n def skip(self, count):\n return DatasetV1Adapter(super(DatasetV1, self).skip(count))\n\n @functools.wraps(DatasetV2.shard)\n def shard(self, num_shards, index):\n return DatasetV1Adapter(super(DatasetV1, self).shard(num_shards, index))\n\n @functools.wraps(DatasetV2.batch)\n def batch(self, batch_size, drop_remainder=False):\n return DatasetV1Adapter(super(DatasetV1, self).batch(\n batch_size, drop_remainder))\n\n @functools.wraps(DatasetV2.padded_batch)\n def padded_batch(self,\n batch_size,\n padded_shapes=None,\n padding_values=None,\n drop_remainder=False):\n return DatasetV1Adapter(\n super(DatasetV1, self).padded_batch(batch_size, padded_shapes,\n padding_values, drop_remainder))\n\n @functools.wraps(DatasetV2.map)\n def map(self, map_func, num_parallel_calls=None, deterministic=None):\n if num_parallel_calls is None:\n return DatasetV1Adapter(\n MapDataset(self, map_func, preserve_cardinality=False))\n else:\n return DatasetV1Adapter(\n ParallelMapDataset(\n self,\n map_func,\n num_parallel_calls,\n deterministic,\n preserve_cardinality=False))\n\n @deprecation.deprecated(None, \"Use `tf.data.Dataset.map()\")\n def map_with_legacy_function(self,\n map_func,\n num_parallel_calls=None,\n deterministic=None):\n \"\"\"Maps `map_func` across the elements of this dataset.\n\n Note: This is an escape hatch for existing uses of `map` that do not work\n with V2 functions. New uses are strongly discouraged and existing uses\n should migrate to `map` as this method will be removed in V2.\n\n Args:\n map_func: A function mapping a nested structure of tensors (having shapes\n and types defined by `self.output_shapes` and `self.output_types`) to\n another nested structure of tensors.\n num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,\n representing the number elements to process asynchronously in parallel.\n If not specified, elements will be processed sequentially. If the value\n `tf.data.AUTOTUNE` is used, then the number of parallel\n calls is set dynamically based on available CPU.\n deterministic: (Optional.) A boolean controlling whether determinism\n should be traded for performance by allowing elements to be produced out\n of order. 
If `deterministic` is `None`, the\n `tf.data.Options.experimental_deterministic` dataset option (`True` by\n default) is used to decide whether to produce elements\n deterministically.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n if num_parallel_calls is None:\n return DatasetV1Adapter(\n MapDataset(\n self,\n map_func,\n preserve_cardinality=False,\n use_legacy_function=True))\n else:\n return DatasetV1Adapter(\n ParallelMapDataset(\n self,\n map_func,\n num_parallel_calls,\n deterministic,\n preserve_cardinality=False,\n use_legacy_function=True))\n\n @functools.wraps(DatasetV2.flat_map)\n def flat_map(self, map_func):\n return DatasetV1Adapter(super(DatasetV1, self).flat_map(map_func))\n\n @functools.wraps(DatasetV2.interleave)\n def interleave(self,\n map_func,\n cycle_length=None,\n block_length=None,\n num_parallel_calls=None,\n deterministic=None):\n return DatasetV1Adapter(\n super(DatasetV1, self).interleave(map_func, cycle_length, block_length,\n num_parallel_calls, deterministic))\n\n @functools.wraps(DatasetV2.filter)\n def filter(self, predicate):\n return DatasetV1Adapter(super(DatasetV1, self).filter(predicate))\n\n @deprecation.deprecated(None, \"Use `tf.data.Dataset.filter()\")\n def filter_with_legacy_function(self, predicate):\n \"\"\"Filters this dataset according to `predicate`.\n\n Note: This is an escape hatch for existing uses of `filter` that do not work\n with V2 functions. New uses are strongly discouraged and existing uses\n should migrate to `filter` as this method will be removed in V2.\n\n Args:\n predicate: A function mapping a nested structure of tensors (having shapes\n and types defined by `self.output_shapes` and `self.output_types`) to a\n scalar `tf.bool` tensor.\n\n Returns:\n Dataset: The `Dataset` containing the elements of this dataset for which\n `predicate` is `True`.\n \"\"\"\n return FilterDataset(self, predicate, use_legacy_function=True)\n\n @functools.wraps(DatasetV2.apply)\n def apply(self, transformation_func):\n return DatasetV1Adapter(super(DatasetV1, self).apply(transformation_func))\n\n @functools.wraps(DatasetV2.window)\n def window(self, size, shift=None, stride=1, drop_remainder=False):\n return DatasetV1Adapter(super(DatasetV1, self).window(\n size, shift, stride, drop_remainder))\n\n @functools.wraps(DatasetV2.unbatch)\n def unbatch(self):\n return DatasetV1Adapter(super(DatasetV1, self).unbatch())\n\n @functools.wraps(DatasetV2.with_options)\n def with_options(self, options):\n return DatasetV1Adapter(super(DatasetV1, self).with_options(options))\n\n\nif tf2.enabled():\n Dataset = DatasetV2\nelse:\n Dataset = DatasetV1\n\n\nclass DatasetV1Adapter(DatasetV1):\n \"\"\"Wraps a V2 `Dataset` object in the `tf.compat.v1.data.Dataset` API.\"\"\"\n\n def __init__(self, dataset):\n self._dataset = dataset\n super(DatasetV1Adapter, self).__init__()\n\n def _as_variant_tensor(self):\n return self._dataset._variant_tensor # pylint: disable=protected-access\n\n def _has_captured_ref(self):\n return self._dataset._has_captured_ref() # pylint: disable=protected-access\n\n def _inputs(self):\n return self._dataset._inputs() # pylint: disable=protected-access\n\n def _functions(self):\n return self._dataset._functions() # pylint: disable=protected-access\n\n def options(self):\n return self._dataset.options()\n\n @property\n def element_spec(self):\n return self._dataset.element_spec # pylint: disable=protected-access\n\n def __iter__(self):\n return iter(self._dataset)\n\n\ndef _ensure_same_dataset_graph(dataset):\n \"\"\"Walks the dataset 
graph to ensure all datasets come from the same graph.\"\"\"\n # pylint: disable=protected-access\n current_graph = ops.get_default_graph()\n bfs_q = Queue.Queue()\n bfs_q.put(dataset)\n visited = []\n while not bfs_q.empty():\n ds = bfs_q.get()\n visited.append(ds)\n ds_graph = ds._graph\n if current_graph != ds_graph:\n raise ValueError(\n \"The graph (\" + str(current_graph) + \") of the iterator is different \"\n \"from the graph (\" + str(ds_graph) + \") the dataset: \" +\n str(ds._variant_tensor) + \" was created in. If you are using the \"\n \"Estimator API, make sure that no part of the dataset returned by \"\n \"the `input_fn` function is defined outside the `input_fn` function. \"\n \"Please ensure that all datasets in the pipeline are created in the \"\n \"same graph as the iterator.\")\n for input_ds in ds._inputs():\n if input_ds not in visited:\n bfs_q.put(input_ds)\n\n\n@tf_export(v1=[\"data.make_one_shot_iterator\"])\ndef make_one_shot_iterator(dataset):\n \"\"\"Creates an iterator for elements of `dataset`.\n\n Note: The returned iterator will be initialized automatically.\n A \"one-shot\" iterator does not support re-initialization.\n\n Args:\n dataset: A `tf.data.Dataset`.\n\n Returns:\n A `tf.data.Iterator` for elements of `dataset`.\n \"\"\"\n try:\n # Call the defined `_make_one_shot_iterator()` if there is one, because some\n # datasets (e.g. for prefetching) override its behavior.\n return dataset._make_one_shot_iterator() # pylint: disable=protected-access\n except AttributeError:\n return DatasetV1Adapter(dataset)._make_one_shot_iterator() # pylint: disable=protected-access\n\n\n@tf_export(v1=[\"data.make_initializable_iterator\"])\ndef make_initializable_iterator(dataset, shared_name=None):\n \"\"\"Creates an iterator for elements of `dataset`.\n\n Note: The returned iterator will be in an uninitialized state,\n and you must run the `iterator.initializer` operation before using it:\n\n ```python\n dataset = ...\n iterator = tf.compat.v1.data.make_initializable_iterator(dataset)\n # ...\n sess.run(iterator.initializer)\n ```\n\n Args:\n dataset: A `tf.data.Dataset`.\n shared_name: (Optional.) If non-empty, the returned iterator will be shared\n under the given name across multiple sessions that share the same devices\n (e.g. when using a remote server).\n\n Returns:\n A `tf.data.Iterator` for elements of `dataset`.\n\n Raises:\n RuntimeError: If eager execution is enabled.\n \"\"\"\n try:\n # Call the defined `_make_initializable_iterator()` if there is one, because\n # some datasets (e.g. 
for prefetching) override its behavior.\n return dataset._make_initializable_iterator(shared_name) # pylint: disable=protected-access\n except AttributeError:\n return DatasetV1Adapter(dataset)._make_initializable_iterator(shared_name) # pylint: disable=protected-access\n\n\n@tf_export(\"data.experimental.get_structure\")\ndef get_structure(dataset_or_iterator):\n \"\"\"Returns the type signature for elements of the input dataset / iterator.\n\n Args:\n dataset_or_iterator: A `tf.data.Dataset` or an `tf.data.Iterator`.\n\n Returns:\n A nested structure of `tf.TypeSpec` objects matching the structure of an\n element of `dataset_or_iterator` and specifying the type of individual\n components.\n\n Raises:\n TypeError: If input is not a `tf.data.Dataset` or an `tf.data.Iterator`\n object.\n \"\"\"\n try:\n return dataset_or_iterator.element_spec # pylint: disable=protected-access\n except AttributeError:\n raise TypeError(\"`dataset_or_iterator` must be a `tf.data.Dataset` or \"\n \"tf.data.Iterator object, but got %s.\" %\n type(dataset_or_iterator))\n\n\n@tf_export(v1=[\"data.get_output_classes\"])\ndef get_legacy_output_classes(dataset_or_iterator):\n \"\"\"Returns the output classes for elements of the input dataset / iterator.\n\n Args:\n dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.\n\n Returns:\n A nested structure of Python `type` objects matching the structure of the\n dataset / iterator elements and specifying the class of the individual\n components.\n \"\"\"\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access\n get_structure(dataset_or_iterator))\n\n\n@tf_export(v1=[\"data.get_output_shapes\"])\ndef get_legacy_output_shapes(dataset_or_iterator):\n \"\"\"Returns the output shapes for elements of the input dataset / iterator.\n\n Args:\n dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.\n\n Returns:\n A nested structure of `tf.TensorShape` objects matching the structure of\n the dataset / iterator elements and specifying the shape of the individual\n components.\n \"\"\"\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access\n get_structure(dataset_or_iterator))\n\n\n@tf_export(v1=[\"data.get_output_types\"])\ndef get_legacy_output_types(dataset_or_iterator):\n \"\"\"Returns the output shapes for elements of the input dataset / iterator.\n\n Args:\n dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.\n\n Returns:\n A nested structure of `tf.DType` objects matching the structure of\n dataset / iterator elements and specifying the shape of the individual\n components.\n \"\"\"\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access\n get_structure(dataset_or_iterator))\n\n\n@tf_export(\"data.Options\")\nclass Options(options_lib.OptionsBase):\n \"\"\"Represents options for `tf.data.Dataset`.\n\n A `tf.data.Options` object can be, for instance, used to control which static\n optimizations to apply to the input pipeline graph or whether to use\n performance modeling to dynamically tune the parallelism of operations such as\n `tf.data.Dataset.map` or `tf.data.Dataset.interleave`.\n\n The options are set for the entire dataset and are carried over to datasets\n created through tf.data transformations.\n\n The options can be set either by mutating the object returned by\n `tf.data.Dataset.options()` or by constructing 
an `Options` object and using\n the `tf.data.Dataset.with_options(options)` transformation, which returns a\n dataset with the options set.\n\n >>> dataset = tf.data.Dataset.range(42)\n >>> dataset.options().experimental_deterministic = False\n >>> print(dataset.options().experimental_deterministic)\n False\n\n >>> dataset = tf.data.Dataset.range(42)\n >>> options = tf.data.Options()\n >>> options.experimental_deterministic = False\n >>> dataset = dataset.with_options(options)\n >>> print(dataset.options().experimental_deterministic)\n False\n\n Note: A known limitation of the `tf.data.Options` implementation is that the\n options are not preserved across tf.function boundaries. In particular, to\n set options for a dataset that is iterated within a tf.function, the options\n need to be set within the same tf.function.\n \"\"\"\n\n experimental_deterministic = options_lib.create_option(\n name=\"experimental_deterministic\",\n ty=bool,\n docstring=\n \"Whether the outputs need to be produced in deterministic order. If None,\"\n \" defaults to True.\")\n\n experimental_distribute = options_lib.create_option(\n name=\"experimental_distribute\",\n ty=distribute_options.DistributeOptions,\n docstring=\n \"The distribution strategy options associated with the dataset. See \"\n \"`tf.data.experimental.DistributeOptions` for more details.\",\n default_factory=distribute_options.DistributeOptions)\n\n experimental_optimization = options_lib.create_option(\n name=\"experimental_optimization\",\n ty=optimization_options.OptimizationOptions,\n docstring=\n \"The optimization options associated with the dataset. See \"\n \"`tf.data.experimental.OptimizationOptions` for more details.\",\n default_factory=optimization_options.OptimizationOptions)\n\n experimental_slack = options_lib.create_option(\n name=\"experimental_slack\",\n ty=bool,\n docstring=\"Whether to introduce 'slack' in the last `prefetch` of the \"\n \"input pipeline, if it exists. This may reduce CPU contention with \"\n \"accelerator host-side activity at the start of a step. The slack \"\n \"frequency is determined by the number of devices attached to this \"\n \"input pipeline. If None, defaults to False.\")\n\n experimental_stats = options_lib.create_option(\n name=\"experimental_stats\",\n ty=stats_options.StatsOptions,\n docstring=\n \"The statistics options associated with the dataset. See \"\n \"`tf.data.experimental.StatsOptions` for more details.\",\n default_factory=stats_options.StatsOptions)\n\n experimental_threading = options_lib.create_option(\n name=\"experimental_threading\",\n ty=threading_options.ThreadingOptions,\n docstring=\n \"The threading options associated with the dataset. See \"\n \"`tf.data.experimental.ThreadingOptions` for more details.\",\n default_factory=threading_options.ThreadingOptions)\n\n experimental_external_state_policy = options_lib.create_option(\n name=\"experimental_external_state_policy\",\n ty=distribute_options.ExternalStatePolicy,\n docstring=\"This option can be used to override the default policy for \"\n \"how to handle external state when serializing a dataset or \"\n \"checkpointing its iterator. There are three settings available - \"\n \"IGNORE: External state is ignored without a warning; WARN: External \"\n \"state is ignored and a warning is logged; FAIL: External state results \"\n \"in an error.\")\n\n def _graph_rewrites(self):\n \"\"\"Produces lists of enabled, disabled, default static graph rewrites.\n\n Returns:\n result: a namedtuple with three attributes. 
`result.enabled` is the list\n of user enabled graph rewrites. `result.disabled` is the list of user\n disabled graph rewrites. `result.default` is the list of graph\n rewrites that are enabled by default (the user has not explicitly\n enabled or disabled them).\n \"\"\"\n if self.experimental_optimization is not None:\n result = self.experimental_optimization._graph_rewrites() # pylint: disable=protected-access\n else:\n # Apply default options\n result = optimization_options.OptimizationOptions()._graph_rewrites() # pylint: disable=protected-access\n\n if self.experimental_deterministic is False: # pylint: disable=g-bool-id-comparison\n result.enabled.append(\"make_sloppy\")\n elif self.experimental_deterministic is True: # pylint: disable=g-bool-id-comparison\n result.disabled.append(\"make_sloppy\")\n if self.experimental_stats:\n if self.experimental_stats.latency_all_edges is True: # pylint: disable=g-bool-id-comparison\n result.enabled.append(\"latency_all_edges\")\n elif self.experimental_stats.latency_all_edges is False: # pylint: disable=g-bool-id-comparison\n result.disabled.append(\"latency_all_edges\")\n if self.experimental_slack is True: # pylint: disable=g-bool-id-comparison\n result.enabled.append(\"slack\")\n elif self.experimental_slack is False: # pylint: disable=g-bool-id-comparison\n result.disabled.append(\"slack\")\n\n graph_rewrites = options_lib.graph_rewrites()\n return graph_rewrites(enabled=list(set(result.enabled)),\n disabled=list(set(result.disabled)),\n default=list(set(result.default)))\n\n def _graph_rewrite_configs(self, autotune):\n \"\"\"Produces the list of configurations for enabled graph optimizations.\"\"\"\n result = []\n if self.experimental_optimization:\n result.extend(\n self.experimental_optimization._graph_rewrite_configs(autotune)) # pylint: disable=protected-access\n\n if self.experimental_slack:\n num_devices = self.experimental_distribute.num_devices\n if num_devices is None:\n num_devices = 1\n result.append(\"slack:slack_period:%d\" % num_devices)\n return result\n\n def _autotune_settings(self):\n if self.experimental_optimization is not None:\n return self.experimental_optimization._autotune_settings() # pylint: disable=protected-access\n\n # Return default autotune options\n return optimization_options.OptimizationOptions()._autotune_settings() # pylint: disable=protected-access\n\n def merge(self, options):\n \"\"\"Merges itself with the given `tf.data.Options`.\n\n If this object and the `options` to merge set an option differently, a\n warning is generated and this object's value is updated with the `options`\n object's value.\n\n Args:\n options: a `tf.data.Options` to merge with\n\n Returns:\n New `tf.data.Options` object which is the result of merging self with\n the input `tf.data.Options`.\n \"\"\"\n return options_lib.merge_options(self, options)\n\n\nclass DatasetSource(DatasetV2):\n \"\"\"Abstract class representing a dataset with no inputs.\"\"\"\n\n def _inputs(self):\n return []\n\n\nclass UnaryDataset(DatasetV2):\n \"\"\"Abstract class representing a dataset with one input.\"\"\"\n\n def __init__(self, input_dataset, variant_tensor):\n self._input_dataset = input_dataset\n super(UnaryDataset, self).__init__(variant_tensor)\n\n def _inputs(self):\n return [self._input_dataset]\n\n\nclass UnaryUnchangedStructureDataset(UnaryDataset):\n \"\"\"Represents a unary dataset with the same input and output structure.\"\"\"\n\n def __init__(self, input_dataset, variant_tensor):\n self._input_dataset = input_dataset\n 
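# The element structure is inherited from the input dataset (see\n    # `element_spec` below); subclasses only provide their own variant tensor.\n    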
super(UnaryUnchangedStructureDataset, self).__init__(\n input_dataset, variant_tensor)\n\n @property\n def element_spec(self):\n return self._input_dataset.element_spec\n\n\nclass TensorDataset(DatasetSource):\n \"\"\"A `Dataset` with a single element.\"\"\"\n\n def __init__(self, element):\n \"\"\"See `Dataset.from_tensors()` for details.\"\"\"\n element = structure.normalize_element(element)\n self._structure = structure.type_spec_from_value(element)\n self._tensors = structure.to_tensor_list(self._structure, element)\n\n variant_tensor = gen_dataset_ops.tensor_dataset(\n self._tensors,\n output_shapes=structure.get_flat_tensor_shapes(self._structure))\n super(TensorDataset, self).__init__(variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass TensorSliceDataset(DatasetSource):\n \"\"\"A `Dataset` of slices from a dataset element.\"\"\"\n\n def __init__(self, element):\n \"\"\"See `Dataset.from_tensor_slices()` for details.\"\"\"\n element = structure.normalize_element(element)\n batched_spec = structure.type_spec_from_value(element)\n self._tensors = structure.to_batched_tensor_list(batched_spec, element)\n self._structure = nest.map_structure(\n lambda component_spec: component_spec._unbatch(), batched_spec) # pylint: disable=protected-access\n\n batch_dim = tensor_shape.Dimension(tensor_shape.dimension_value(\n self._tensors[0].get_shape()[0]))\n for t in self._tensors[1:]:\n batch_dim.assert_is_compatible_with(tensor_shape.Dimension(\n tensor_shape.dimension_value(t.get_shape()[0])))\n\n variant_tensor = gen_dataset_ops.tensor_slice_dataset(\n self._tensors,\n output_shapes=structure.get_flat_tensor_shapes(self._structure))\n super(TensorSliceDataset, self).__init__(variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass SparseTensorSliceDataset(DatasetSource):\n \"\"\"A `Dataset` that splits a rank-N `tf.sparse.SparseTensor` into its rows.\"\"\"\n\n def __init__(self, sparse_tensor):\n \"\"\"See `Dataset.from_sparse_tensor_slices()` for details.\"\"\"\n if not isinstance(sparse_tensor, sparse_tensor_lib.SparseTensor):\n raise TypeError(\n \"`sparse_tensor` must be a `tf.sparse.SparseTensor` object.\"\n \"Was {}.\".format(sparse_tensor))\n self._sparse_tensor = sparse_tensor\n\n indices_shape = self._sparse_tensor.indices.get_shape()\n shape_shape = self._sparse_tensor.dense_shape.get_shape()\n rank = (indices_shape.dims[1] - 1).merge_with(shape_shape.dims[0] - 1)\n self._structure = (tensor_spec.TensorSpec([None, rank], dtypes.int64),\n tensor_spec.TensorSpec([None],\n self._sparse_tensor.dtype),\n tensor_spec.TensorSpec([rank], dtypes.int64))\n\n variant_tensor = gen_dataset_ops.sparse_tensor_slice_dataset(\n self._sparse_tensor.indices, self._sparse_tensor.values,\n self._sparse_tensor.dense_shape)\n super(SparseTensorSliceDataset, self).__init__(variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass _VariantDataset(DatasetV2):\n \"\"\"A Dataset wrapper around a `tf.variant`-typed function argument.\"\"\"\n\n def __init__(self, dataset_variant, structure):\n self._structure = structure\n super(_VariantDataset, self).__init__(dataset_variant)\n\n def _inputs(self):\n return []\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass _NestedVariant(composite_tensor.CompositeTensor):\n\n def __init__(self, variant_tensor, element_spec, dataset_shape):\n self._variant_tensor = variant_tensor\n self._element_spec = element_spec\n self._dataset_shape = 
dataset_shape\n\n @property\n def _type_spec(self):\n return DatasetSpec(self._element_spec, self._dataset_shape)\n\n\n@tf_export(\"data.experimental.from_variant\")\ndef from_variant(variant, structure):\n \"\"\"Constructs a dataset from the given variant and structure.\n\n Args:\n variant: A scalar `tf.variant` tensor representing a dataset.\n structure: A `tf.data.experimental.Structure` object representing the\n structure of each element in the dataset.\n\n Returns:\n A `tf.data.Dataset` instance.\n \"\"\"\n return _VariantDataset(variant, structure) # pylint: disable=protected-access\n\n\n@tf_export(\"data.experimental.to_variant\")\ndef to_variant(dataset):\n \"\"\"Returns a variant representing the given dataset.\n\n Args:\n dataset: A `tf.data.Dataset`.\n\n Returns:\n A scalar `tf.variant` tensor representing the given dataset.\n \"\"\"\n return dataset._variant_tensor # pylint: disable=protected-access\n\n\n@tf_export(\n \"data.DatasetSpec\",\n v1=[\"data.DatasetSpec\", \"data.experimental.DatasetStructure\"])\nclass DatasetSpec(type_spec.BatchableTypeSpec):\n \"\"\"Type specification for `tf.data.Dataset`.\n\n See `tf.TypeSpec` for more information about TensorFlow type specifications.\n\n >>> dataset = tf.data.Dataset.range(3)\n >>> tf.data.DatasetSpec.from_value(dataset)\n DatasetSpec(TensorSpec(shape=(), dtype=tf.int64, name=None), TensorShape([]))\n \"\"\"\n\n __slots__ = [\"_element_spec\", \"_dataset_shape\"]\n\n def __init__(self, element_spec, dataset_shape=()):\n self._element_spec = element_spec\n self._dataset_shape = tensor_shape.as_shape(dataset_shape)\n\n @property\n def value_type(self):\n return Dataset\n\n @property\n def element_spec(self):\n \"\"\"The inner element spec.\"\"\"\n return self._element_spec\n\n def _serialize(self):\n return (self._element_spec, self._dataset_shape)\n\n @property\n def _component_specs(self):\n return tensor_spec.TensorSpec(self._dataset_shape, dtypes.variant)\n\n def _to_components(self, value):\n return value._variant_tensor # pylint: disable=protected-access\n\n def _from_components(self, components):\n # pylint: disable=protected-access\n if self._dataset_shape.ndims == 0:\n return _VariantDataset(components, self._element_spec)\n else:\n return _NestedVariant(components, self._element_spec, self._dataset_shape)\n\n def _to_tensor_list(self, value):\n return [\n ops.convert_to_tensor(\n tf_nest.map_structure(lambda x: x._variant_tensor, value)) # pylint: disable=protected-access\n ]\n\n @staticmethod\n def from_value(value):\n \"\"\"Creates a `DatasetSpec` for the given `tf.data.Dataset` value.\"\"\"\n return DatasetSpec(value.element_spec) # pylint: disable=protected-access\n\n def _batch(self, batch_size):\n return DatasetSpec(\n self._element_spec,\n tensor_shape.TensorShape([batch_size]).concatenate(self._dataset_shape))\n\n def _unbatch(self):\n if self._dataset_shape.ndims == 0:\n raise ValueError(\"Unbatching a dataset is only supported for rank >= 1\")\n return DatasetSpec(self._element_spec, self._dataset_shape[1:])\n\n def _to_batched_tensor_list(self, value):\n if self._dataset_shape.ndims == 0:\n raise ValueError(\"Unbatching a dataset is only supported for rank >= 1\")\n return self._to_tensor_list(value)\n\n def _to_legacy_output_types(self):\n return self\n\n def _to_legacy_output_shapes(self):\n return self\n\n def _to_legacy_output_classes(self):\n return self\n\n\nclass StructuredFunctionWrapper(object):\n \"\"\"A function wrapper that supports structured arguments and return values.\"\"\"\n\n def 
__init__(self,\n func,\n transformation_name,\n dataset=None,\n input_classes=None,\n input_shapes=None,\n input_types=None,\n input_structure=None,\n add_to_graph=True,\n use_legacy_function=False,\n defun_kwargs=None):\n \"\"\"Creates a new `StructuredFunctionWrapper` for the given function.\n\n Args:\n func: A function from a nested structure to another nested structure.\n transformation_name: Human-readable name of the transformation in which\n this function is being instantiated, for error messages.\n dataset: (Optional.) A `tf.data.Dataset`. If given, the structure of this\n dataset will be assumed as the structure for `func` arguments; otherwise\n `input_classes`, `input_shapes`, and `input_types` must be defined.\n input_classes: (Optional.) A nested structure of `type`. If given, this\n argument defines the Python types for `func` arguments.\n input_shapes: (Optional.) A nested structure of `tf.TensorShape`. If\n given, this argument defines the shapes and structure for `func`\n arguments.\n input_types: (Optional.) A nested structure of `tf.DType`. If given, this\n argument defines the element types and structure for `func` arguments.\n input_structure: (Optional.) A `Structure` object. If given, this argument\n defines the element types and structure for `func` arguments.\n add_to_graph: (Optional.) If `True`, the function will be added to the\n default graph, if it exists.\n use_legacy_function: (Optional.) A boolean that determines whether the\n function be created using `tensorflow.python.eager.function.defun`\n (default behavior) or `tensorflow.python.framework.function.Defun`\n (legacy behavior).\n defun_kwargs: (Optional.) A dictionary mapping string argument names to\n values. If supplied, will be passed to `function` as keyword arguments.\n\n Raises:\n ValueError: If an invalid combination of `dataset`, `input_classes`,\n `input_shapes`, and `input_types` is passed.\n \"\"\"\n # pylint: disable=protected-access\n if input_structure is None:\n if dataset is None:\n if input_classes is None or input_shapes is None or input_types is None:\n raise ValueError(\"Either `dataset`, `input_structure` or all of \"\n \"`input_classes`, `input_shapes`, and `input_types` \"\n \"must be specified.\")\n self._input_structure = structure.convert_legacy_structure(\n input_types, input_shapes, input_classes)\n else:\n if not (input_classes is None and input_shapes is None and\n input_types is None):\n raise ValueError(\"Either `dataset`, `input_structure` or all of \"\n \"`input_classes`, `input_shapes`, and `input_types` \"\n \"must be specified.\")\n self._input_structure = dataset.element_spec\n else:\n if not (dataset is None and input_classes is None and input_shapes is None\n and input_types is None):\n raise ValueError(\"Either `dataset`, `input_structure`, or all of \"\n \"`input_classes`, `input_shapes`, and `input_types` \"\n \"must be specified.\")\n self._input_structure = input_structure\n\n self._func = func\n\n # There is no graph to add in eager mode.\n add_to_graph &= not context.executing_eagerly()\n # There are some lifetime issues when a legacy function is not added to a\n # out-living graph. 
It's already deprecated so de-prioritizing the fix.\n add_to_graph |= use_legacy_function\n\n if defun_kwargs is None:\n defun_kwargs = {}\n\n readable_transformation_name = transformation_name.replace(\n \".\", \"_\")[:-2] if len(transformation_name) > 2 else \"\"\n\n func_name = \"_\".join(\n [readable_transformation_name,\n function_utils.get_func_name(func)])\n # Sanitize function name to remove symbols that interfere with graph\n # construction.\n for symbol in [\"<\", \">\", \"\\\\\", \"'\", \" \"]:\n func_name = func_name.replace(symbol, \"\")\n\n ag_ctx = autograph_ctx.control_status_ctx()\n\n def _warn_if_collections(transformation_name):\n \"\"\"Prints a warning if the given graph uses common graph collections.\n\n NOTE(mrry): Currently a warning is only generated for resources. Any\n variables created will be automatically hoisted out to the outermost scope\n using `init_scope()`. Some collections (such as for control-flow contexts)\n are benign and should not generate a warning.\n\n Args:\n transformation_name: A human-readable name for the transformation.\n \"\"\"\n warnings.warn(\"Creating resources inside a function passed to %s \"\n \"is not supported. Create each resource outside the \"\n \"function, and capture it inside the function to use it.\" %\n transformation_name, stacklevel=5)\n\n def _wrapper_helper(*args):\n \"\"\"Wrapper for passing nested structures to and from tf.data functions.\"\"\"\n nested_args = structure.from_compatible_tensor_list(\n self._input_structure, args)\n if not _should_unpack_args(nested_args):\n nested_args = (nested_args,)\n\n ret = autograph.tf_convert(func, ag_ctx)(*nested_args)\n # If `func` returns a list of tensors, `nest.flatten()` and\n # `ops.convert_to_tensor()` would conspire to attempt to stack\n # those tensors into a single tensor, because the customized\n # version of `nest.flatten()` does not recurse into lists. Since\n # it is more likely that the list arose from returning the\n # result of an operation (such as `tf.numpy_function()`) that returns a\n # list of not-necessarily-stackable tensors, we treat the\n # returned value is a `tuple` instead. A user wishing to pack\n # the return value into a single tensor can use an explicit\n # `tf.stack()` before returning.\n if isinstance(ret, list):\n ret = tuple(ret)\n\n try:\n self._output_structure = structure.type_spec_from_value(ret)\n except (ValueError, TypeError):\n six.reraise(\n TypeError,\n TypeError(\"Unsupported return value from function passed to \"\n \"%s: %s.\" % (transformation_name, ret)),\n sys.exc_info()[2])\n return ret\n\n if use_legacy_function:\n func_name = func_name + \"_\" + str(ops.uid())\n\n @function.Defun(\n *structure.get_flat_tensor_types(self._input_structure),\n func_name=func_name,\n **defun_kwargs)\n def wrapper_fn(*args):\n ret = _wrapper_helper(*args)\n # _warn_if_collections(transformation_name, ops.get_default_graph(), 0)\n return structure.to_tensor_list(self._output_structure, ret)\n\n self._function = wrapper_fn\n resource_tracker = tracking.ResourceTracker()\n with tracking.resource_tracker_scope(resource_tracker):\n if add_to_graph:\n self._function.add_to_graph(ops.get_default_graph())\n else:\n # Use the private method that will execute `wrapper_fn` but delay\n # adding it to the graph in case (e.g.) 
we need to rerun the function.\n self._function._create_definition_if_needed()\n if resource_tracker.resources:\n _warn_if_collections(transformation_name)\n\n else:\n if def_function.functions_run_eagerly():\n warnings.warn(\n \"Even though the tf.config.experimental_run_functions_eagerly \"\n \"option is set, this option does not apply to tf.data functions. \"\n \"tf.data functions are still traced and executed as graphs.\")\n\n defun_kwargs.update({\"func_name\": func_name})\n defun_kwargs.update({\"_tf_data_function\": True})\n\n # Note: _wrapper_helper will apply autograph based on context.\n @eager_function.defun_with_attributes(\n input_signature=structure.get_flat_tensor_specs(\n self._input_structure),\n autograph=False,\n attributes=defun_kwargs)\n def wrapper_fn(*args): # pylint: disable=missing-docstring\n ret = _wrapper_helper(*args)\n ret = structure.to_tensor_list(self._output_structure, ret)\n return [ops.convert_to_tensor(t) for t in ret]\n\n resource_tracker = tracking.ResourceTracker()\n with tracking.resource_tracker_scope(resource_tracker):\n # TODO(b/141462134): Switch to using garbage collection.\n self._function = wrapper_fn.get_concrete_function()\n if add_to_graph:\n self._function.add_to_graph(ops.get_default_graph())\n\n if resource_tracker.resources:\n _warn_if_collections(transformation_name)\n\n outer_graph_seed = ops.get_default_graph().seed\n if outer_graph_seed and self._function.graph.seed == outer_graph_seed:\n if self._function.graph._seed_used:\n warnings.warn(\n \"Seed %s from outer graph might be getting used by function %s, \"\n \"if the random op has not been provided any seed. Explicitly set \"\n \"the seed in the function if this is not the intended behavior.\"\n %(outer_graph_seed, func_name), stacklevel=4)\n\n @property\n def output_structure(self):\n return self._output_structure\n\n @property\n def output_classes(self):\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access\n self._output_structure)\n\n @property\n def output_shapes(self):\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access\n self._output_structure)\n\n @property\n def output_types(self):\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access\n self._output_structure)\n\n @property\n def function(self):\n return self._function\n\n\nclass _GeneratorDataset(DatasetSource):\n \"\"\"A `Dataset` that generates elements by invoking a function.\"\"\"\n\n def __init__(self, init_args, init_func, next_func, finalize_func,\n output_signature):\n \"\"\"Constructs a `_GeneratorDataset`.\n\n Args:\n init_args: A nested structure representing the arguments to `init_func`.\n init_func: A TensorFlow function that will be called on `init_args` each\n time a C++ iterator over this dataset is constructed. Returns a nested\n structure representing the \"state\" of the dataset.\n next_func: A TensorFlow function that will be called on the result of\n `init_func` to produce each element, and that raises `OutOfRangeError`\n to terminate iteration.\n finalize_func: A TensorFlow function that will be called on the result of\n `init_func` immediately before a C++ iterator over this dataset is\n destroyed. 
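This is typically used to release any resources acquired by `init_func`. 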
The return value is ignored.\n output_signature: A nested structure of `tf.TypeSpec` objects describing\n the output of `next_func`.\n \"\"\"\n self._init_args = init_args\n\n self._init_structure = structure.type_spec_from_value(init_args)\n\n self._init_func = StructuredFunctionWrapper(\n init_func,\n self._transformation_name(),\n input_structure=self._init_structure)\n\n self._next_func = StructuredFunctionWrapper(\n next_func,\n self._transformation_name(),\n input_structure=self._init_func.output_structure)\n\n self._finalize_func = StructuredFunctionWrapper(\n finalize_func,\n self._transformation_name(),\n input_structure=self._init_func.output_structure)\n\n self._output_signature = output_signature\n\n variant_tensor = gen_dataset_ops.generator_dataset(\n structure.to_tensor_list(self._init_structure, self._init_args) +\n self._init_func.function.captured_inputs,\n self._next_func.function.captured_inputs,\n self._finalize_func.function.captured_inputs,\n init_func=self._init_func.function,\n next_func=self._next_func.function,\n finalize_func=self._finalize_func.function,\n **self._flat_structure)\n super(_GeneratorDataset, self).__init__(variant_tensor)\n\n @property\n def element_spec(self):\n return self._output_signature\n\n def _transformation_name(self):\n return \"Dataset.from_generator()\"\n\n\nclass ZipDataset(DatasetV2):\n \"\"\"A `Dataset` that zips its inputs together.\"\"\"\n\n def __init__(self, datasets):\n \"\"\"See `Dataset.zip()` for details.\"\"\"\n for ds in nest.flatten(datasets):\n if not isinstance(ds, DatasetV2):\n if isinstance(ds, list):\n message = (\"The argument to `Dataset.zip()` must be a nested \"\n \"structure of `Dataset` objects. Nested structures do not \"\n \"support Python lists; please use a tuple instead.\")\n else:\n message = (\"The argument to `Dataset.zip()` must be a nested \"\n \"structure of `Dataset` objects.\")\n raise TypeError(message)\n self._datasets = datasets\n self._structure = nest.pack_sequence_as(\n self._datasets,\n [ds.element_spec for ds in nest.flatten(self._datasets)])\n variant_tensor = gen_dataset_ops.zip_dataset(\n [ds._variant_tensor for ds in nest.flatten(self._datasets)],\n **self._flat_structure)\n super(ZipDataset, self).__init__(variant_tensor)\n\n def _inputs(self):\n return nest.flatten(self._datasets)\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass ConcatenateDataset(DatasetV2):\n \"\"\"A `Dataset` that concatenates its input with given dataset.\"\"\"\n\n def __init__(self, input_dataset, dataset_to_concatenate):\n \"\"\"See `Dataset.concatenate()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._dataset_to_concatenate = dataset_to_concatenate\n\n output_types = get_legacy_output_types(input_dataset)\n if output_types != get_legacy_output_types(dataset_to_concatenate):\n raise TypeError(\n \"Two datasets to concatenate have different types %s and %s\" %\n (output_types, get_legacy_output_types(dataset_to_concatenate)))\n\n output_classes = get_legacy_output_classes(input_dataset)\n if output_classes != get_legacy_output_classes(dataset_to_concatenate):\n raise TypeError(\n \"Two datasets to concatenate have different classes %s and %s\" %\n (output_classes, get_legacy_output_classes(dataset_to_concatenate)))\n\n input_shapes = get_legacy_output_shapes(self._input_dataset)\n output_shapes = nest.pack_sequence_as(input_shapes, [\n ts1.most_specific_compatible_shape(ts2)\n for (ts1, ts2) in zip(\n nest.flatten(input_shapes),\n 
nest.flatten(get_legacy_output_shapes(\n self._dataset_to_concatenate)))\n ])\n\n self._structure = structure.convert_legacy_structure(\n output_types, output_shapes, output_classes)\n\n self._input_datasets = [input_dataset, dataset_to_concatenate]\n # pylint: disable=protected-access\n variant_tensor = gen_dataset_ops.concatenate_dataset(\n input_dataset._variant_tensor, dataset_to_concatenate._variant_tensor,\n **self._flat_structure)\n # pylint: enable=protected-access\n super(ConcatenateDataset, self).__init__(variant_tensor)\n\n def _inputs(self):\n return self._input_datasets\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass RepeatDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that repeats its input several times.\"\"\"\n\n def __init__(self, input_dataset, count):\n \"\"\"See `Dataset.repeat()` for details.\"\"\"\n self._input_dataset = input_dataset\n if count is None:\n self._count = constant_op.constant(-1, dtype=dtypes.int64, name=\"count\")\n else:\n self._count = ops.convert_to_tensor(\n count, dtype=dtypes.int64, name=\"count\")\n variant_tensor = gen_dataset_ops.repeat_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n count=self._count,\n **self._flat_structure)\n super(RepeatDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass RangeDataset(DatasetSource):\n \"\"\"A `Dataset` of a step separated range of values.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"See `Dataset.range()` for details.\"\"\"\n self._parse_args(*args, **kwargs)\n self._structure = tensor_spec.TensorSpec([], self._output_type)\n variant_tensor = gen_dataset_ops.range_dataset(\n start=self._start,\n stop=self._stop,\n step=self._step,\n **self._flat_structure)\n super(RangeDataset, self).__init__(variant_tensor)\n\n def _parse_args(self, *args, **kwargs):\n \"\"\"Parse arguments according to the same rules as the `range()` builtin.\"\"\"\n if len(args) == 1:\n self._start = self._build_tensor(0, \"start\")\n self._stop = self._build_tensor(args[0], \"stop\")\n self._step = self._build_tensor(1, \"step\")\n elif len(args) == 2:\n self._start = self._build_tensor(args[0], \"start\")\n self._stop = self._build_tensor(args[1], \"stop\")\n self._step = self._build_tensor(1, \"step\")\n elif len(args) == 3:\n self._start = self._build_tensor(args[0], \"start\")\n self._stop = self._build_tensor(args[1], \"stop\")\n self._step = self._build_tensor(args[2], \"step\")\n else:\n raise ValueError(\"Invalid arguments to RangeDataset: %s\" % str(args))\n if \"output_type\" in kwargs:\n self._output_type = kwargs[\"output_type\"]\n else:\n self._output_type = dtypes.int64\n\n def _build_tensor(self, int64_value, name):\n return ops.convert_to_tensor(int64_value, dtype=dtypes.int64, name=name)\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass CacheDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that caches elements of its input.\"\"\"\n\n def __init__(self, input_dataset, filename):\n \"\"\"See `Dataset.cache()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._filename = ops.convert_to_tensor(\n filename, dtype=dtypes.string, name=\"filename\")\n if tf2.enabled() and (context.executing_eagerly() or ops.inside_function()):\n variant_tensor = gen_dataset_ops.cache_dataset_v2(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n filename=self._filename,\n cache=gen_dataset_ops.dummy_memory_cache(),\n **self._flat_structure)\n else:\n variant_tensor = 
gen_dataset_ops.cache_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n filename=self._filename,\n **self._flat_structure)\n super(CacheDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass ShuffleDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that randomly shuffles the elements of its input.\"\"\"\n\n def __init__(self,\n input_dataset,\n buffer_size,\n seed=None,\n reshuffle_each_iteration=None):\n \"\"\"Randomly shuffles the elements of this dataset.\n\n Args:\n input_dataset: The input dataset.\n buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of\n elements from this dataset from which the new dataset will sample.\n seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random\n seed that will be used to create the distribution. See\n `tf.random.set_seed` for behavior.\n reshuffle_each_iteration: (Optional.) A boolean, which if true indicates\n that the dataset should be pseudorandomly reshuffled each time it is\n iterated over. (Defaults to `True`.)\n\n Returns:\n A `Dataset`.\n\n Raises:\n ValueError: if invalid arguments are provided.\n \"\"\"\n self._input_dataset = input_dataset\n self._buffer_size = ops.convert_to_tensor(\n buffer_size, dtype=dtypes.int64, name=\"buffer_size\")\n self._seed, self._seed2 = random_seed.get_seed(seed)\n if reshuffle_each_iteration is None:\n reshuffle_each_iteration = True\n self._reshuffle_each_iteration = reshuffle_each_iteration\n\n if (tf2.enabled() and\n (context.executing_eagerly() or ops.inside_function())):\n variant_tensor = gen_dataset_ops.shuffle_dataset_v3(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n buffer_size=self._buffer_size,\n seed=self._seed,\n seed2=self._seed2,\n seed_generator=gen_dataset_ops.dummy_seed_generator(),\n reshuffle_each_iteration=self._reshuffle_each_iteration,\n **self._flat_structure)\n else:\n variant_tensor = gen_dataset_ops.shuffle_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n buffer_size=self._buffer_size,\n seed=self._seed,\n seed2=self._seed2,\n reshuffle_each_iteration=self._reshuffle_each_iteration,\n **self._flat_structure)\n super(ShuffleDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass TakeDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` containing the first `count` elements from its input.\"\"\"\n\n def __init__(self, input_dataset, count):\n \"\"\"See `Dataset.take()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name=\"count\")\n variant_tensor = gen_dataset_ops.take_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n count=self._count,\n **self._flat_structure)\n super(TakeDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass SkipDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` skipping the first `count` elements from its input.\"\"\"\n\n def __init__(self, input_dataset, count):\n \"\"\"See `Dataset.skip()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name=\"count\")\n variant_tensor = gen_dataset_ops.skip_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n count=self._count,\n **self._flat_structure)\n super(SkipDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass ShardDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` for sharding its input.\"\"\"\n\n def __init__(self, 
input_dataset, num_shards, index):\n \"\"\"See `Dataset.shard()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._num_shards = ops.convert_to_tensor(\n num_shards, dtype=dtypes.int64, name=\"num_shards\")\n self._index = ops.convert_to_tensor(index, dtype=dtypes.int64, name=\"index\")\n variant_tensor = gen_dataset_ops.shard_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n num_shards=self._num_shards,\n index=self._index,\n **self._flat_structure)\n super(ShardDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass BatchDataset(UnaryDataset):\n \"\"\"A `Dataset` that batches contiguous elements from its input.\"\"\"\n\n def __init__(self, input_dataset, batch_size, drop_remainder):\n \"\"\"See `Dataset.batch()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._batch_size = ops.convert_to_tensor(\n batch_size, dtype=dtypes.int64, name=\"batch_size\")\n self._drop_remainder = ops.convert_to_tensor(\n drop_remainder, dtype=dtypes.bool, name=\"drop_remainder\")\n\n constant_drop_remainder = tensor_util.constant_value(self._drop_remainder)\n # pylint: disable=protected-access\n if constant_drop_remainder:\n # NOTE(mrry): `constant_drop_remainder` may be `None` (unknown statically)\n # or `False` (explicitly retaining the remainder).\n # pylint: disable=g-long-lambda\n constant_batch_size = tensor_util.constant_value(self._batch_size)\n self._structure = nest.map_structure(\n lambda component_spec: component_spec._batch(constant_batch_size),\n input_dataset.element_spec)\n else:\n self._structure = nest.map_structure(\n lambda component_spec: component_spec._batch(None),\n input_dataset.element_spec)\n variant_tensor = gen_dataset_ops.batch_dataset_v2(\n input_dataset._variant_tensor,\n batch_size=self._batch_size,\n drop_remainder=self._drop_remainder,\n **self._flat_structure)\n super(BatchDataset, self).__init__(input_dataset, variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass _NumpyIterator(object):\n \"\"\"Iterator over a dataset with elements converted to numpy.\"\"\"\n\n __slots__ = [\"_iterator\"]\n\n def __init__(self, dataset):\n self._iterator = iter(dataset)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return nest.map_structure(lambda x: x.numpy(), next(self._iterator))\n\n def next(self):\n return self.__next__()\n\n\nclass _VariantTracker(tracking.CapturableResource):\n \"\"\"Allows export of functions capturing a Dataset in SavedModels.\n\n When saving a SavedModel, `tf.saved_model.save` traverses the object\n graph. Since Datasets reference _VariantTracker objects, that traversal will\n find a _VariantTracker for each Dataset and so know how to save and restore\n functions which reference the Dataset's variant Tensor.\n \"\"\"\n\n def __init__(self, variant_tensor, resource_creator):\n \"\"\"Record that `variant_tensor` is associated with `resource_creator`.\n\n Args:\n variant_tensor: The variant-dtype Tensor associated with the Dataset. This\n Tensor will be a captured input to functions which use the Dataset, and\n is used by saving code to identify the corresponding _VariantTracker.\n resource_creator: A zero-argument function which creates a new\n variant-dtype Tensor. 
This function will be included in SavedModels and\n run to re-create the Dataset's variant Tensor on restore.\n \"\"\"\n super(_VariantTracker, self).__init__(device=\"CPU\")\n self._resource_handle = variant_tensor\n self._create_resource = resource_creator\n\n\ndef _is_padded_shape_compatible_with(padded_shape, input_component_shape):\n \"\"\"Returns `True` if `input_component_shape` can be padded to `padded_shape`.\n\n Args:\n padded_shape: A `tf.TensorShape`.\n input_component_shape: A `tf.TensorShape`.\n\n Returns:\n `True` if `input_component_shape` can be padded to `padded_shape`, otherwise\n `False`.\n \"\"\"\n\n if padded_shape.dims is None or input_component_shape.dims is None:\n return True\n if len(padded_shape.dims) != len(input_component_shape.dims):\n return False\n for padded_dim, input_dim in zip(\n padded_shape.dims, input_component_shape.dims):\n if (padded_dim.value is not None and input_dim.value is not None\n and padded_dim.value < input_dim.value):\n return False\n return True\n\n\ndef _padded_shape_to_tensor(padded_shape, input_component_shape):\n \"\"\"Converts `padded_shape` to a `tf.Tensor` representing that shape.\n\n Args:\n padded_shape: A shape-like object, which may be a `tf.TensorShape`, a Python\n sequence, or a 1-D `tf.Tensor` of `tf.int64` elements.\n input_component_shape: A `tf.TensorShape`, with which `padded_shape` must\n be compatible.\n\n Returns:\n A 1-D `tf.Tensor` of `tf.int64` elements, representing `padded_shape`.\n\n Raises:\n ValueError: If `padded_shape` is not a shape or not compatible with\n `input_component_shape`.\n TypeError: If `padded_shape` is not convertible to a `tf.int64` tensor.\n \"\"\"\n try:\n # Try to convert the `padded_shape` to a `tf.TensorShape`\n padded_shape_as_shape = tensor_shape.as_shape(padded_shape)\n # We will return the \"canonical\" tensor representation, which uses\n # `-1` in place of `None`.\n ret = ops.convert_to_tensor(\n [dim if dim is not None else -1\n for dim in padded_shape_as_shape.as_list()], dtype=dtypes.int64)\n except (TypeError, ValueError):\n # The argument was not trivially convertible to a\n # `tf.TensorShape`, so fall back on the conversion to tensor\n # machinery.\n ret = ops.convert_to_tensor(padded_shape, preferred_dtype=dtypes.int64)\n if ret.shape.dims is not None and len(ret.shape.dims) != 1:\n six.reraise(ValueError, ValueError(\n \"Padded shape %s must be a 1-D tensor of tf.int64 values, but its \"\n \"shape was %s.\" % (padded_shape, ret.shape)), sys.exc_info()[2])\n if ret.dtype != dtypes.int64:\n six.reraise(\n TypeError,\n TypeError(\n \"Padded shape %s must be a 1-D tensor of tf.int64 values, but \"\n \"its element type was %s.\" % (padded_shape, ret.dtype.name)),\n sys.exc_info()[2])\n padded_shape_as_shape = tensor_util.constant_value_as_shape(ret)\n\n if not _is_padded_shape_compatible_with(padded_shape_as_shape,\n input_component_shape):\n raise ValueError(\"The padded shape %s is not compatible with the \"\n \"corresponding input component shape %s.\"\n % (padded_shape_as_shape, input_component_shape))\n\n return ret\n\n\ndef _padding_value_to_tensor(value, output_type):\n \"\"\"Converts the padding value to a tensor.\n\n Args:\n value: The padding value.\n output_type: Its expected dtype.\n\n Returns:\n A scalar `Tensor`.\n\n Raises:\n ValueError: if the padding value is not a scalar.\n TypeError: if the padding value's type does not match `output_type`.\n \"\"\"\n value = ops.convert_to_tensor(value, name=\"padding_value\")\n if not 
value.shape.is_compatible_with(tensor_shape.TensorShape([])):\n raise ValueError(\"Padding value should be a scalar, but is not: %s\" % value)\n if value.dtype != output_type:\n raise TypeError(\"Padding value tensor (%s) does not match output type: %s\" %\n (value, output_type))\n return value\n\n\ndef _padding_values_or_default(padding_values, input_dataset):\n \"\"\"Returns padding values with None elements replaced with default values.\"\"\"\n\n def make_zero(t):\n if t.base_dtype == dtypes.string:\n return \"\"\n elif t.base_dtype == dtypes.variant:\n error_msg = (\"Unable to create padding for field of type 'variant' \"\n \"because t.base_type == dtypes.variant == \"\n \"{}.\".format(t.base_dtype))\n raise TypeError(error_msg)\n elif t.base_dtype == dtypes.bfloat16:\n # Special case `bfloat16` because it is not supported by NumPy.\n return constant_op.constant(0, dtype=dtypes.bfloat16)\n else:\n return np.zeros_like(t.as_numpy_dtype())\n\n def value_or_default(value, default):\n return default if value is None else value\n\n default_padding = nest.map_structure(\n make_zero,\n get_legacy_output_types(input_dataset))\n return nest.map_structure_up_to(padding_values, value_or_default,\n padding_values, default_padding)\n\n\nclass PaddedBatchDataset(UnaryDataset):\n \"\"\"A `Dataset` that batches and pads contiguous elements from its input.\"\"\"\n\n def __init__(self, input_dataset, batch_size, padded_shapes, padding_values,\n drop_remainder):\n \"\"\"See `Dataset.batch()` for details.\"\"\"\n self._input_dataset = input_dataset\n\n def check_types(component_spec):\n if not isinstance(component_spec, tensor_spec.TensorSpec):\n raise TypeError(\"Padded batching of components of type \",\n type(component_spec), \" is not supported.\")\n\n nest.map_structure(check_types, input_dataset.element_spec)\n self._input_dataset = input_dataset\n self._batch_size = ops.convert_to_tensor(\n batch_size, dtype=dtypes.int64, name=\"batch_size\")\n padding_values = _padding_values_or_default(padding_values, input_dataset)\n\n input_shapes = get_legacy_output_shapes(input_dataset)\n flat_padded_shapes = nest.flatten_up_to(input_shapes, padded_shapes)\n\n flat_padded_shapes_as_tensors = []\n\n for input_component_shape, padded_shape in zip(\n nest.flatten(input_shapes), flat_padded_shapes):\n flat_padded_shapes_as_tensors.append(\n _padded_shape_to_tensor(padded_shape, input_component_shape))\n\n self._padded_shapes = nest.pack_sequence_as(input_shapes,\n flat_padded_shapes_as_tensors)\n\n # If padding_values is a single element and input_shapes is a structure,\n # \"broadcast\" padding_values to the same structure as input_shapes.\n if nest.is_sequence(input_shapes) and not nest.is_sequence(padding_values):\n padding_values = nest.map_structure(lambda _: padding_values,\n input_shapes)\n\n self._padding_values = nest.map_structure_up_to(\n input_shapes, _padding_value_to_tensor, padding_values,\n get_legacy_output_types(input_dataset))\n self._drop_remainder = ops.convert_to_tensor(\n drop_remainder, dtype=dtypes.bool, name=\"drop_remainder\")\n\n def _padded_shape_to_batch_shape(s):\n return tensor_shape.TensorShape([\n tensor_util.constant_value(self._batch_size)\n if smart_cond.smart_constant_value(self._drop_remainder) else None\n ]).concatenate(tensor_util.constant_value_as_shape(s))\n\n output_shapes = nest.map_structure(\n _padded_shape_to_batch_shape, self._padded_shapes)\n self._structure = structure.convert_legacy_structure(\n get_legacy_output_types(self._input_dataset), output_shapes,\n 
get_legacy_output_classes(self._input_dataset))\n\n # pylint: disable=protected-access\n # TODO(jsimsa): Switch to using v2 only any time after 6/30/2018.\n if smart_cond.smart_constant_value(self._drop_remainder) is False:\n variant_tensor = gen_dataset_ops.padded_batch_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n batch_size=self._batch_size,\n padded_shapes=[\n ops.convert_to_tensor(s, dtype=dtypes.int64)\n for s in nest.flatten(self._padded_shapes)\n ],\n padding_values=nest.flatten(self._padding_values),\n output_shapes=structure.get_flat_tensor_shapes(self._structure))\n else:\n variant_tensor = gen_dataset_ops.padded_batch_dataset_v2(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n batch_size=self._batch_size,\n padded_shapes=[\n ops.convert_to_tensor(s, dtype=dtypes.int64)\n for s in nest.flatten(self._padded_shapes)\n ],\n padding_values=nest.flatten(self._padding_values),\n drop_remainder=self._drop_remainder,\n output_shapes=structure.get_flat_tensor_shapes(self._structure))\n super(PaddedBatchDataset, self).__init__(input_dataset, variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure\n\n\ndef _should_unpack_args(args):\n \"\"\"Returns `True` if `args` should be `*args` when passed to a callable.\"\"\"\n return type(args) is tuple # pylint: disable=unidiomatic-typecheck\n\n\nclass MapDataset(UnaryDataset):\n \"\"\"A `Dataset` that maps a function over elements in its input.\"\"\"\n\n def __init__(self,\n input_dataset,\n map_func,\n use_inter_op_parallelism=True,\n preserve_cardinality=False,\n use_legacy_function=False):\n \"\"\"See `Dataset.map()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._use_inter_op_parallelism = use_inter_op_parallelism\n self._preserve_cardinality = preserve_cardinality\n self._map_func = StructuredFunctionWrapper(\n map_func,\n self._transformation_name(),\n dataset=input_dataset,\n use_legacy_function=use_legacy_function)\n variant_tensor = gen_dataset_ops.map_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._map_func.function.captured_inputs,\n f=self._map_func.function,\n use_inter_op_parallelism=self._use_inter_op_parallelism,\n preserve_cardinality=self._preserve_cardinality,\n **self._flat_structure)\n super(MapDataset, self).__init__(input_dataset, variant_tensor)\n\n def _functions(self):\n return [self._map_func]\n\n @property\n def element_spec(self):\n return self._map_func.output_structure\n\n def _transformation_name(self):\n return \"Dataset.map()\"\n\n\nclass ParallelMapDataset(UnaryDataset):\n \"\"\"A `Dataset` that maps a function over elements in its input in parallel.\"\"\"\n\n def __init__(self,\n input_dataset,\n map_func,\n num_parallel_calls,\n deterministic,\n use_inter_op_parallelism=True,\n preserve_cardinality=False,\n use_legacy_function=False):\n \"\"\"See `Dataset.map()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._use_inter_op_parallelism = use_inter_op_parallelism\n self._map_func = StructuredFunctionWrapper(\n map_func,\n self._transformation_name(),\n dataset=input_dataset,\n use_legacy_function=use_legacy_function)\n if deterministic is None:\n self._deterministic = \"default\"\n elif deterministic:\n self._deterministic = \"true\"\n else:\n self._deterministic = \"false\"\n self._preserve_cardinality = preserve_cardinality\n self._num_parallel_calls = ops.convert_to_tensor(\n num_parallel_calls, dtype=dtypes.int64, name=\"num_parallel_calls\")\n variant_tensor = 
gen_dataset_ops.parallel_map_dataset_v2(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._map_func.function.captured_inputs,\n f=self._map_func.function,\n num_parallel_calls=self._num_parallel_calls,\n deterministic=self._deterministic,\n use_inter_op_parallelism=self._use_inter_op_parallelism,\n preserve_cardinality=self._preserve_cardinality,\n **self._flat_structure)\n super(ParallelMapDataset, self).__init__(input_dataset, variant_tensor)\n\n def _functions(self):\n return [self._map_func]\n\n @property\n def element_spec(self):\n return self._map_func.output_structure\n\n def _transformation_name(self):\n return \"Dataset.map()\"\n\n\nclass FlatMapDataset(UnaryDataset):\n \"\"\"A `Dataset` that maps a function over its input and flattens the result.\"\"\"\n\n def __init__(self, input_dataset, map_func):\n \"\"\"See `Dataset.flat_map()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._map_func = StructuredFunctionWrapper(\n map_func, self._transformation_name(), dataset=input_dataset)\n if not isinstance(self._map_func.output_structure, DatasetSpec):\n raise TypeError(\n \"`map_func` must return a `Dataset` object. Got {}\".format(\n type(self._map_func.output_structure)))\n self._structure = self._map_func.output_structure._element_spec # pylint: disable=protected-access\n variant_tensor = gen_dataset_ops.flat_map_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._map_func.function.captured_inputs,\n f=self._map_func.function,\n **self._flat_structure)\n super(FlatMapDataset, self).__init__(input_dataset, variant_tensor)\n\n def _functions(self):\n return [self._map_func]\n\n @property\n def element_spec(self):\n return self._structure\n\n def _transformation_name(self):\n return \"Dataset.flat_map()\"\n\n\nclass InterleaveDataset(UnaryDataset):\n \"\"\"A `Dataset` that interleaves the result of transformed inputs.\"\"\"\n\n def __init__(self, input_dataset, map_func, cycle_length, block_length):\n \"\"\"See `Dataset.interleave()` for details.\"\"\"\n\n self._input_dataset = input_dataset\n self._map_func = StructuredFunctionWrapper(\n map_func, self._transformation_name(), dataset=input_dataset)\n if not isinstance(self._map_func.output_structure, DatasetSpec):\n raise TypeError(\n \"`map_func` must return a `Dataset` object. 
Got {}\".format(\n type(self._map_func.output_structure)))\n self._structure = self._map_func.output_structure._element_spec # pylint: disable=protected-access\n self._cycle_length = ops.convert_to_tensor(\n cycle_length, dtype=dtypes.int64, name=\"cycle_length\")\n self._block_length = ops.convert_to_tensor(\n block_length, dtype=dtypes.int64, name=\"block_length\")\n\n variant_tensor = gen_dataset_ops.interleave_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._map_func.function.captured_inputs, # pylint: disable=protected-access\n self._cycle_length,\n self._block_length,\n f=self._map_func.function,\n **self._flat_structure)\n super(InterleaveDataset, self).__init__(input_dataset, variant_tensor)\n\n def _functions(self):\n return [self._map_func]\n\n @property\n def element_spec(self):\n return self._structure\n\n def _transformation_name(self):\n return \"Dataset.interleave()\"\n\n\nclass ParallelInterleaveDataset(UnaryDataset):\n \"\"\"A `Dataset` that maps a function over its input and interleaves the result.\"\"\"\n\n def __init__(self,\n input_dataset,\n map_func,\n cycle_length,\n block_length,\n num_parallel_calls,\n buffer_output_elements=AUTOTUNE,\n prefetch_input_elements=AUTOTUNE,\n deterministic=None):\n \"\"\"See `Dataset.interleave()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._map_func = StructuredFunctionWrapper(\n map_func, self._transformation_name(), dataset=input_dataset)\n if not isinstance(self._map_func.output_structure, DatasetSpec):\n raise TypeError(\n \"`map_func` must return a `Dataset` object. Got {}\".format(\n type(self._map_func.output_structure)))\n self._structure = self._map_func.output_structure._element_spec # pylint: disable=protected-access\n self._cycle_length = ops.convert_to_tensor(\n cycle_length, dtype=dtypes.int64, name=\"cycle_length\")\n self._block_length = ops.convert_to_tensor(\n block_length, dtype=dtypes.int64, name=\"block_length\")\n self._buffer_output_elements = ops.convert_to_tensor(\n buffer_output_elements,\n dtype=dtypes.int64,\n name=\"buffer_output_elements\")\n self._prefetch_input_elements = ops.convert_to_tensor(\n prefetch_input_elements,\n dtype=dtypes.int64,\n name=\"prefetch_input_elements\")\n\n self._num_parallel_calls = ops.convert_to_tensor(\n num_parallel_calls, dtype=dtypes.int64, name=\"num_parallel_calls\")\n if deterministic is None:\n deterministic_string = \"default\"\n elif deterministic:\n deterministic_string = \"true\"\n else:\n deterministic_string = \"false\"\n\n variant_tensor = gen_dataset_ops.parallel_interleave_dataset_v4(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._map_func.function.captured_inputs, # pylint: disable=protected-access\n self._cycle_length,\n self._block_length,\n self._buffer_output_elements,\n self._prefetch_input_elements,\n self._num_parallel_calls,\n f=self._map_func.function,\n deterministic=deterministic_string,\n **self._flat_structure)\n super(ParallelInterleaveDataset, self).__init__(input_dataset,\n variant_tensor)\n\n def _functions(self):\n return [self._map_func]\n\n @property\n def element_spec(self):\n return self._structure\n\n def _transformation_name(self):\n return \"Dataset.interleave()\"\n\n\nclass FilterDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that filters its input according to a predicate function.\"\"\"\n\n def __init__(self, input_dataset, predicate, use_legacy_function=False):\n \"\"\"See `Dataset.filter()` for details.\"\"\"\n self._input_dataset 
= input_dataset\n wrapped_func = StructuredFunctionWrapper(\n predicate,\n self._transformation_name(),\n dataset=input_dataset,\n use_legacy_function=use_legacy_function)\n if not wrapped_func.output_structure.is_compatible_with(\n tensor_spec.TensorSpec([], dtypes.bool)):\n error_msg = (\"`predicate` return type must be convertible to a scalar \"\n \"boolean tensor. Was {}.\").format(\n wrapped_func.output_structure)\n raise ValueError(error_msg)\n self._predicate = wrapped_func\n variant_tensor = gen_dataset_ops.filter_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n other_arguments=self._predicate.function.captured_inputs,\n predicate=self._predicate.function,\n **self._flat_structure)\n super(FilterDataset, self).__init__(input_dataset, variant_tensor)\n\n def _functions(self):\n return [self._predicate]\n\n def _transformation_name(self):\n return \"Dataset.filter()\"\n\n\nclass PrefetchDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that asynchronously prefetches its input.\"\"\"\n\n def __init__(self, input_dataset, buffer_size, slack_period=None):\n \"\"\"See `Dataset.prefetch()` for details.\n\n Args:\n input_dataset: The input dataset.\n buffer_size: See `Dataset.prefetch()` for details.\n slack_period: (Optional.) An integer. If non-zero, determines the number\n of GetNext calls before injecting slack into the execution. This may\n reduce CPU contention at the start of a step. Note that a tensorflow\n user should not have to set this manually; enable this behavior\n automatically via `tf.data.Options.experimental_slack` instead. Defaults\n to None.\n \"\"\"\n self._input_dataset = input_dataset\n if buffer_size is None:\n buffer_size = AUTOTUNE\n self._buffer_size = ops.convert_to_tensor(\n buffer_size, dtype=dtypes.int64, name=\"buffer_size\")\n # pylint: disable=protected-access\n # We colocate the prefetch dataset with its input as this collocation only\n # happens automatically in graph mode.\n with ops.colocate_with(input_dataset._variant_tensor):\n variant_tensor = gen_dataset_ops.prefetch_dataset(\n input_dataset._variant_tensor,\n buffer_size=self._buffer_size,\n slack_period=slack_period,\n **self._flat_structure)\n super(PrefetchDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass WindowDataset(UnaryDataset):\n \"\"\"A dataset that creates window datasets from the input elements.\"\"\"\n\n def __init__(self, input_dataset, size, shift, stride, drop_remainder):\n \"\"\"See `window_dataset()` for more details.\"\"\"\n self._input_dataset = input_dataset\n self._size = ops.convert_to_tensor(size, dtype=dtypes.int64, name=\"size\")\n self._shift = ops.convert_to_tensor(shift, dtype=dtypes.int64, name=\"shift\")\n self._stride = ops.convert_to_tensor(\n stride, dtype=dtypes.int64, name=\"stride\")\n self._drop_remainder = ops.convert_to_tensor(\n drop_remainder, dtype=dtypes.bool, name=\"drop_remainder\")\n self._structure = nest.pack_sequence_as(\n get_legacy_output_classes(input_dataset), [\n DatasetSpec( # pylint: disable=g-complex-comprehension\n structure.convert_legacy_structure(\n output_type, output_shape, output_class))\n for output_class, output_shape, output_type in zip(\n nest.flatten(get_legacy_output_classes(input_dataset)),\n nest.flatten(get_legacy_output_shapes(input_dataset)),\n nest.flatten(get_legacy_output_types(input_dataset)))\n ])\n variant_tensor = gen_dataset_ops.window_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._size,\n self._shift,\n self._stride,\n 
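# Whether a final window with fewer than `size` elements is dropped.\n        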
self._drop_remainder,\n **self._flat_structure)\n super(WindowDataset, self).__init__(input_dataset, variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass _OptionsDataset(UnaryUnchangedStructureDataset):\n \"\"\"An identity `Dataset` that stores options.\"\"\"\n\n def __init__(self, input_dataset, options):\n self._input_dataset = input_dataset\n variant_tensor = input_dataset._variant_tensor # pylint: disable=protected-access\n super(_OptionsDataset, self).__init__(input_dataset, variant_tensor)\n\n if self._options_attr:\n self._options_attr = self._options_attr.merge(options)\n else:\n self._options_attr = options\n\n def options(self):\n return self._options_attr\n\n\nclass _ModelDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that acts as an identity, and models performance.\"\"\"\n\n def __init__(self, input_dataset, algorithm, cpu_budget, ram_budget):\n self._input_dataset = input_dataset\n variant_tensor = gen_dataset_ops.model_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n algorithm=algorithm.value,\n cpu_budget=cpu_budget,\n ram_budget=ram_budget,\n **self._flat_structure)\n super(_ModelDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass _OptimizeDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that acts as an identity, and applies optimizations.\"\"\"\n\n def __init__(self,\n input_dataset,\n optimizations_enabled,\n optimizations_disabled,\n optimizations_default,\n optimization_configs=None):\n self._input_dataset = input_dataset\n if optimization_configs is None:\n optimization_configs = []\n\n # We sort the options here before embedding as constant tensors to ensure\n # that serialization to NodeDef is determinstic.\n if optimizations_enabled:\n optimizations_enabled.sort()\n if optimizations_disabled:\n optimizations_disabled.sort()\n if optimizations_default:\n optimizations_default.sort()\n\n self._optimizations_enabled = convert.optional_param_to_tensor(\n argument_name=\"optimizations_enabled\",\n argument_value=optimizations_enabled,\n argument_default=[],\n argument_dtype=dtypes.string)\n self._optimizations_disabled = convert.optional_param_to_tensor(\n argument_name=\"optimizations_disabled\",\n argument_value=optimizations_disabled,\n argument_default=[],\n argument_dtype=dtypes.string)\n self._optimizations_default = convert.optional_param_to_tensor(\n argument_name=\"optimizations_default\",\n argument_value=optimizations_default,\n argument_default=[],\n argument_dtype=dtypes.string)\n\n variant_tensor = gen_dataset_ops.optimize_dataset_v2(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._optimizations_enabled,\n self._optimizations_disabled,\n self._optimizations_default,\n optimization_configs=optimization_configs,\n **self._flat_structure)\n\n super(_OptimizeDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass _SetStatsAggregatorDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that acts as an identity, and sets a stats aggregator.\"\"\"\n\n def __init__(self, input_dataset, aggregator, prefix, counter_prefix):\n self._input_dataset = input_dataset\n self._stats_aggregator = aggregator\n self._prefix = prefix\n self._counter_prefix = counter_prefix\n variant_tensor = ged_ops.set_stats_aggregator_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._stats_aggregator._resource, # pylint: disable=protected-access\n self._prefix,\n self._counter_prefix,\n 
**self._flat_structure)\n super(_SetStatsAggregatorDataset, self).__init__(input_dataset,\n variant_tensor)\n\n\nclass _MaxIntraOpParallelismDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that acts as an identity, overriding intra-op parallelism.\"\"\"\n\n def __init__(self, input_dataset, max_intra_op_parallelism):\n self._input_dataset = input_dataset\n self._max_intra_op_parallelism = ops.convert_to_tensor(\n max_intra_op_parallelism,\n dtype=dtypes.int64,\n name=\"max_intra_op_parallelism\")\n variant_tensor = ged_ops.max_intra_op_parallelism_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._max_intra_op_parallelism,\n **self._flat_structure)\n super(_MaxIntraOpParallelismDataset, self).__init__(input_dataset,\n variant_tensor)\n\n\nclass _PrivateThreadPoolDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that acts as an identity, setting a private threadpool.\"\"\"\n\n def __init__(self, input_dataset, num_threads):\n self._input_dataset = input_dataset\n self._num_threads = ops.convert_to_tensor(\n num_threads, dtype=dtypes.int64, name=\"num_threads\")\n variant_tensor = ged_ops.private_thread_pool_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._num_threads,\n **self._flat_structure)\n super(_PrivateThreadPoolDataset, self).__init__(input_dataset,\n variant_tensor)\n\n\ndef normalize_to_dense(dataset):\n \"\"\"Normalizes non-tensor components in a dataset to dense representations.\n\n This is necessary for dataset transformations that slice along the batch\n dimension and are oblivious to non-tensors, e.g. `unbatch`, `rebatch`.\n\n Args:\n dataset: Dataset to normalize.\n\n Returns:\n A dataset whose sparse and ragged tensors have been normalized to their\n dense representations.\n \"\"\"\n\n # NOTE(mrry): This leads to a somewhat inefficient re-encoding step for all\n # non-tensor components.\n #\n # TODO(mrry): Consider optimizing this if it turns out to be a bottleneck.\n if _should_unpack_args(dataset.element_spec):\n def normalize(*args):\n return structure.to_batched_tensor_list(dataset.element_spec, tuple(args))\n else:\n def normalize(arg):\n return structure.to_batched_tensor_list(dataset.element_spec, arg)\n\n normalized_dataset = dataset.map(normalize)\n\n # NOTE(mrry): Our `map()` has lost information about the structure of\n # non-tensor components, so re-apply the structure of the original dataset.\n return _RestructuredDataset(normalized_dataset, dataset.element_spec)\n\n\nclass _RestructuredDataset(UnaryDataset):\n \"\"\"An internal helper for changing the structure and shape of a dataset.\"\"\"\n\n def __init__(self, dataset, structure):\n self._input_dataset = dataset\n self._structure = structure\n\n variant_tensor = self._input_dataset._variant_tensor # pylint: disable=protected-access\n super(_RestructuredDataset, self).__init__(dataset, variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass _UnbatchDataset(UnaryDataset):\n \"\"\"A dataset that splits the elements of its input into multiple elements.\"\"\"\n\n def __init__(self, input_dataset):\n \"\"\"See `unbatch()` for more details.\"\"\"\n flat_shapes = input_dataset._flat_shapes # pylint: disable=protected-access\n if any(s.ndims == 0 for s in flat_shapes):\n raise ValueError(\"Cannot unbatch an input with scalar components.\")\n known_batch_dim = tensor_shape.Dimension(None)\n for s in flat_shapes:\n try:\n known_batch_dim = known_batch_dim.merge_with(s[0])\n except 
ValueError:\n raise ValueError(\"Cannot unbatch an input whose components have \"\n \"different batch sizes.\")\n self._input_dataset = input_dataset\n self._structure = nest.map_structure(\n lambda component_spec: component_spec._unbatch(), # pylint: disable=protected-access\n get_structure(input_dataset))\n variant_tensor = ged_ops.unbatch_dataset(\n self._input_dataset._variant_tensor, # pylint: disable=protected-access\n **self._flat_structure)\n super(_UnbatchDataset, self).__init__(input_dataset, variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure\n\n\ndef _collect_resource_inputs(op):\n \"\"\"Collects resource inputs for the given ops (and its variant inputs).\"\"\"\n\n def _process(op_queue, seen_ops):\n \"\"\"Processes the next element of the op queue.\n\n Args:\n op_queue: Queue of Dataset operations to process.\n seen_ops: Already processed set of Operations.\n\n Returns:\n A 2-tuple containing sets of resource handles. The first tuple entry\n contains read-only handles and the second entry contains read-write\n handles.\n \"\"\"\n\n reads = []\n writes = []\n op = op_queue.pop()\n if op in seen_ops:\n return reads, writes\n seen_ops.add(op)\n # TODO(b/150139257): All resource inputs are in writes right now since we\n # have not updated the functional ops to set the special attribute that ACD\n # uses to figure out which of the op's inputs are read-only.\n reads, writes = acd_utils.get_read_write_resource_inputs(op)\n # Conservatively assume that any variant inputs are datasets.\n op_queue.extend(t.op for t in op.inputs if t.dtype == dtypes.variant)\n return reads, writes\n\n op_queue = [op]\n seen_ops = set()\n all_reads = []\n all_writes = []\n while op_queue:\n reads, writes = _process(op_queue, seen_ops)\n all_reads.extend(reads)\n all_writes.extend(writes)\n\n return all_reads, all_writes\n\n\n@auto_control_deps.register_acd_resource_resolver\ndef _resource_resolver(op, resource_reads, resource_writes):\n \"\"\"Updates resource inputs for tf.data ops with indirect dependencies.\"\"\"\n\n updated = False\n if op.type in [\n \"DatasetToSingleElement\", \"DatasetToTFRecord\", \"ReduceDataset\"\n ]:\n reads, writes = _collect_resource_inputs(op)\n for inp in reads:\n if inp not in resource_reads:\n updated = True\n resource_reads.add(inp)\n for inp in writes:\n if inp not in resource_writes:\n updated = True\n resource_writes.add(inp)\n\n if op.type in [\n \"IteratorGetNext\", \"IteratorGetNextSync\", \"IteratorGetNextAsOptional\"\n ]:\n iterator_resource = op.inputs[0]\n make_iterator_ops = [\n op for op in iterator_resource.consumers() if op.type == \"MakeIterator\"\n ]\n\n if len(make_iterator_ops) == 1:\n reads, writes = _collect_resource_inputs(make_iterator_ops[0])\n for inp in reads:\n if inp not in resource_reads:\n updated = True\n resource_reads.add(inp)\n for inp in writes:\n if inp not in resource_writes:\n updated = True\n resource_writes.add(inp)\n\n return updated\n"
] | [
[
"tensorflow.python.data.util.nest.map_structure_up_to",
"tensorflow.python.ops.gen_dataset_ops.window_dataset",
"tensorflow.python.data.util.structure.type_spec_from_value",
"tensorflow.python.ops.gen_dataset_ops.batch_dataset_v2",
"tensorflow.python.ops.gen_dataset_ops.dataset_cardinality",
"tensorflow.python.ops.gen_experimental_dataset_ops.unbatch_dataset",
"tensorflow.python.ops.gen_dataset_ops.take_dataset",
"tensorflow.python.framework.smart_cond.smart_constant_value",
"tensorflow.python.ops.string_ops.reduce_join",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.data.util.structure.from_compatible_tensor_list",
"tensorflow.python.data.util.nest.pack_sequence_as",
"tensorflow.python.ops.gen_dataset_ops.make_iterator",
"tensorflow.python.data.util.structure.normalize_element",
"tensorflow.python.ops.gen_dataset_ops.iterator_v2",
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.ops.gen_dataset_ops.concatenate_dataset",
"tensorflow.python.data.util.convert.optional_param_to_tensor",
"tensorflow.python.ops.gen_dataset_ops.shard_dataset",
"tensorflow.python.util.deprecation.deprecated_args",
"numpy.array",
"tensorflow.python.framework.random_seed.set_random_seed",
"tensorflow.python.ops.gen_dataset_ops.prefetch_dataset",
"tensorflow.python.data.util.options.merge_options",
"tensorflow.python.ops.gen_dataset_ops.repeat_dataset",
"tensorflow.python.ops.gen_dataset_ops.range_dataset",
"tensorflow.python.ops.gen_experimental_dataset_ops.private_thread_pool_dataset",
"numpy.iinfo",
"tensorflow.python.framework.random_seed.get_seed",
"tensorflow.python.ops.gen_dataset_ops.parallel_interleave_dataset_v4",
"tensorflow.python.ops.gen_experimental_dataset_ops.max_intra_op_parallelism_dataset",
"tensorflow.python.ops.gen_dataset_ops.dummy_seed_generator",
"tensorflow.python.data.util.structure.get_flat_tensor_types",
"tensorflow.python.data.util.structure.get_flat_tensor_specs",
"tensorflow.python.data.util.structure.get_flat_tensor_shapes",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.data.util.structure.convert_legacy_structure",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.data.util.nest.flatten",
"tensorflow.python.ops.gen_dataset_ops.dummy_memory_cache",
"tensorflow.python.ops.gen_experimental_dataset_ops.set_stats_aggregator_dataset",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.framework.tensor_shape.Dimension",
"tensorflow.python.data.util.options.create_option",
"tensorflow.python.framework.ops.convert_n_to_tensor",
"tensorflow.python.ops.script_ops.FuncRegistry._convert",
"tensorflow.python.ops.gen_dataset_ops.flat_map_dataset",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.framework.tensor_util.constant_value_as_shape",
"tensorflow.python.data.ops.iterator_ops.OwnedIterator",
"tensorflow.python.data.util.structure.to_batched_tensor_list",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.gen_dataset_ops.sparse_tensor_slice_dataset",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.framework.ops.NotDifferentiable",
"tensorflow.python.framework.function.Defun",
"tensorflow.python.data.util.traverse.obtain_all_variant_tensor_ops",
"tensorflow.python.training.tracking.tracking.ResourceTracker",
"tensorflow.python.eager.def_function.functions_run_eagerly",
"tensorflow.python.data.util.nest.is_sequence",
"tensorflow.python.data.util.structure.are_compatible",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.ops.gen_dataset_ops.shuffle_dataset",
"tensorflow.python.ops.script_ops._eager_py_func",
"tensorflow.python.data.util.random_seed.get_seed",
"tensorflow.python.training.tracking.tracking.resource_tracker_scope",
"tensorflow.python.data.util.nest.map_structure",
"tensorflow.python.ops.gen_dataset_ops.one_shot_iterator",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.gen_io_ops.matching_files",
"tensorflow.python.framework.auto_control_deps_utils.get_read_write_resource_inputs",
"tensorflow.python.tf2.enabled",
"tensorflow.python.data.experimental.ops.optimization_options.OptimizationOptions",
"tensorflow.python.ops.gen_dataset_ops.interleave_dataset",
"tensorflow.python.ops.script_ops.numpy_function",
"tensorflow.python.ops.gen_dataset_ops.model_dataset",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.gen_dataset_ops.cache_dataset",
"tensorflow.python.ops.gen_dataset_ops.filter_dataset",
"tensorflow.python.ops.gen_dataset_ops.skip_dataset",
"tensorflow.python.ops.gen_dataset_ops.parallel_map_dataset_v2",
"tensorflow.python.ops.gen_dataset_ops.optimize_dataset_v2",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.control_flow_ops.Assert",
"tensorflow.python.util.function_utils.get_func_name",
"tensorflow.python.framework.ops.inside_function",
"tensorflow.python.ops.gen_dataset_ops.dataset_to_graph",
"tensorflow.python.data.util.options.graph_rewrites",
"tensorflow.python.data.util.structure.to_tensor_list",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.data.util.nest.flatten_up_to",
"tensorflow.python.framework.ops.uid",
"tensorflow.python.ops.gen_dataset_ops.dataset_to_graph_v2",
"tensorflow.python.ops.gen_dataset_ops.map_dataset",
"tensorflow.python.framework.tensor_shape.as_shape",
"tensorflow.core.framework.graph_pb2.GraphDef",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
starcksophie/fastmri-reproducible-benchmark | [
"9d1f17011f0be911f2da5300063bfeecea86876d"
] | [
"fastmri_recon/helpers/reconstruction.py"
] | [
"import numpy as np\n\nfrom modopt.opt.linear import Identity\nfrom modopt.opt.proximity import SparseThreshold, LinearCompositionProx\n\nfrom ..data.data_utils import from_test_file_to_mask_and_kspace\nfrom .fourier import FFT2\nfrom .utils import crop_center\n\n\ndef reco_z_filled(kspace, fourier_op):\n x_final = fourier_op.adj_op(kspace)\n x_final = np.abs(x_final)\n x_final = crop_center(x_final, 320)\n return x_final\n\ndef zero_filled_cropped_recon(kspace):\n \"\"\"Perform a fastMRI zero-filled reconstruction on the kspace.\n\n This function performs an inverse fourier transform on the zero filled\n kspace, then takes the modulus of the result and crops it to fastMRI\n proportions.\n\n Parameters:\n kspace (ndarray): the zero-filled kspace\n\n Returns:\n ndarray: the image obtained by zero-filled reconstruction in fastMRI format\n \"\"\"\n fourier_op = FFT2(np.ones_like(kspace))\n x_final = reco_z_filled(kspace, fourier_op)\n return x_final\n\n\ndef zero_filled_recon(kspaces, crop=False):\n \"\"\"Perform a zero-filled reconstruction on a volume\"\"\"\n fourier_op = FFT2(np.ones_like(kspaces[0]))\n x_final = np.empty_like(kspaces)\n for i, kspace in enumerate(kspaces):\n x_final[i] = fourier_op.adj_op(kspace)\n x_final = np.abs(x_final)\n if crop:\n x_final_cropped = np.empty((len(kspaces), 320, 320))\n for i, x in enumerate(x_final):\n x_final_cropped[i] = crop_center(x, 320)\n x_final = x_final_cropped\n return x_final\n\n\n\ndef reco_wav(kspace, gradient_op, mu=1*1e-8, max_iter=10, nb_scales=4, wavelet_name='db4'):\n # for now this is only working with my fork of pysap-fastMRI\n # I will get it changed soon so that we don't need to ask for a specific\n # pysap-mri install\n from ..wavelets import WaveletDecimated\n from mri.numerics.reconstruct import sparse_rec_fista\n\n linear_op = WaveletDecimated(\n nb_scale=nb_scales,\n wavelet_name=wavelet_name,\n padding='periodization',\n )\n\n prox_op = LinearCompositionProx(\n linear_op=linear_op,\n prox_op=SparseThreshold(Identity(), None, thresh_type=\"soft\"),\n )\n gradient_op.obs_data = kspace\n cost_op = None\n x_final, _, _, _ = sparse_rec_fista(\n gradient_op=gradient_op,\n linear_op=Identity(),\n prox_op=prox_op,\n cost_op=cost_op,\n xi_restart=0.96,\n s_greedy=1.1,\n mu=mu,\n restart_strategy='greedy',\n pov='analysis',\n max_nb_of_iter=max_iter,\n metrics=None,\n metric_call_period=1,\n verbose=0,\n progress=False,\n )\n x_final = np.abs(x_final)\n x_final = crop_center(x_final, 320)\n return x_final\n\n\ndef reco_iterative_from_test_file(filename, rec_type='wav', **kwargs):\n mask, kspaces = from_test_file_to_mask_and_kspace(filename)\n # mask handling\n fake_kspace = np.zeros_like(kspaces[0])\n fourier_mask = np.repeat(mask.astype(np.float)[None, :], fake_kspace.shape[0], axis=0)\n # op creation\n fourier_op_masked = FFT2(mask=fourier_mask)\n if rec_type == 'wav':\n from mri.numerics.gradient import GradAnalysis2\n gradient_op = GradAnalysis2(\n data=fake_kspace,\n fourier_op=fourier_op_masked,\n )\n im_recos = np.array([reco_wav(kspace * fourier_mask, gradient_op, **kwargs) for kspace in kspaces])\n elif rec_type == 'z_filled':\n im_recos = np.array([reco_z_filled(kspace * fourier_mask, fourier_op_masked) for kspace in kspaces])\n else:\n raise ValueError('{} not recognized as reconstruction type'.format(rec_type))\n return im_recos\n\ndef reco_and_gt_zfilled_from_val_file(kspace_and_mask_batch, img_batch, crop=True):\n kspaces, _ = kspace_and_mask_batch\n kspaces = np.squeeze(kspaces)\n im_recos = zero_filled_recon(kspaces, 
crop=crop)\n images = np.squeeze(img_batch)\n return im_recos, images\n\n\ndef reco_unet_from_test_file(zero_img_batch, means, stddevs, model):\n im_recos = model.predict_on_batch(zero_img_batch)\n im_recos = np.squeeze(im_recos)\n im_recos *= np.array(stddevs)[:, None, None]\n im_recos += np.array(means)[:, None, None]\n return im_recos\n\ndef reco_and_gt_unet_from_val_file(zero_img_batch, img_batch, means, stddevs, model):\n im_recos = reco_unet_from_test_file(zero_img_batch, means, stddevs, model)\n img_batch = np.squeeze(img_batch)\n img_batch *= np.array(stddevs)[:, None, None]\n img_batch += np.array(means)[:, None, None]\n return im_recos, img_batch\n\ndef reco_and_gt_unet_from_val_file_no_norm(zero_img_batch, img_batch, model):\n im_recos = model.predict_on_batch(zero_img_batch)\n im_recos = np.squeeze(im_recos)\n return im_recos, np.squeeze(img_batch)\n\n\ndef reco_net_from_test_file(kspace_and_mask_batch, model):\n im_recos = model.predict_on_batch(kspace_and_mask_batch)\n im_recos = np.squeeze(im_recos)\n return im_recos\n\ndef reco_and_gt_net_from_val_file(kspace_and_mask_batch, img_batch, model):\n im_recos = reco_net_from_test_file(kspace_and_mask_batch, model)\n img_batch = np.squeeze(img_batch)\n return im_recos, img_batch\n"
] | [
[
"numpy.ones_like",
"numpy.abs",
"numpy.empty_like",
"numpy.squeeze",
"numpy.zeros_like",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
christopher-besch/manim | [
"959af957fee395014bb86bddac7ccb1429428236"
] | [
"manim/mobject/numbers.py"
] | [
"\"\"\"Mobjects representing numbers.\"\"\"\n\n__all__ = [\"DecimalNumber\", \"Integer\", \"Variable\"]\n\nfrom typing import Optional, Sequence\n\nimport numpy as np\n\nfrom .. import config\nfrom ..constants import *\nfrom ..mobject.svg.tex_mobject import MathTex, SingleStringMathTex\nfrom ..mobject.types.vectorized_mobject import VMobject\nfrom ..mobject.value_tracker import ValueTracker\nfrom .opengl_compatibility import ConvertToOpenGL\n\nstring_to_mob_map = {}\n\n\nclass DecimalNumber(VMobject, metaclass=ConvertToOpenGL):\n \"\"\"An mobject representing a decimal number.\n\n Examples\n --------\n\n .. manim:: MovingSquareWithUpdaters\n\n class MovingSquareWithUpdaters(Scene):\n def construct(self):\n decimal = DecimalNumber(\n 0,\n show_ellipsis=True,\n num_decimal_places=3,\n include_sign=True,\n )\n square = Square().to_edge(UP)\n\n decimal.add_updater(lambda d: d.next_to(square, RIGHT))\n decimal.add_updater(lambda d: d.set_value(square.get_center()[1]))\n self.add(square, decimal)\n self.play(\n square.animate.to_edge(DOWN),\n rate_func=there_and_back,\n run_time=5,\n )\n self.wait()\n\n \"\"\"\n\n def __init__(\n self,\n number: float = 0,\n num_decimal_places: int = 2,\n mob_class: VMobject = MathTex,\n include_sign: bool = False,\n group_with_commas: bool = True,\n digit_buff_per_font_unit: float = 0.001,\n show_ellipsis: bool = False,\n unit: Optional[str] = None, # Aligned to bottom unless it starts with \"^\"\n include_background_rectangle: bool = False,\n edge_to_fix: Sequence[float] = LEFT,\n font_size: float = DEFAULT_FONT_SIZE,\n stroke_width: float = 0,\n fill_opacity: float = 1.0,\n **kwargs\n ):\n super().__init__(**kwargs)\n self.number = number\n self.num_decimal_places = num_decimal_places\n self.include_sign = include_sign\n self.mob_class = mob_class\n self.group_with_commas = group_with_commas\n self.digit_buff_per_font_unit = digit_buff_per_font_unit\n self.show_ellipsis = show_ellipsis\n self.unit = unit\n self.include_background_rectangle = include_background_rectangle\n self.edge_to_fix = edge_to_fix\n self._font_size = font_size\n self.stroke_width = stroke_width\n self.fill_opacity = fill_opacity\n\n self.initial_config = kwargs.copy()\n self.initial_config.update(\n {\n \"num_decimal_places\": num_decimal_places,\n \"include_sign\": include_sign,\n \"group_with_commas\": group_with_commas,\n \"digit_buff_per_font_unit\": digit_buff_per_font_unit,\n \"show_ellipsis\": show_ellipsis,\n \"unit\": unit,\n \"include_background_rectangle\": include_background_rectangle,\n \"edge_to_fix\": edge_to_fix,\n \"font_size\": font_size,\n \"stroke_width\": stroke_width,\n \"fill_opacity\": fill_opacity,\n },\n )\n\n self._set_submobjects_from_number(number)\n self.init_colors()\n\n @property\n def font_size(self):\n \"\"\"The font size of the tex mobject.\"\"\"\n return self.height / self.initial_height * self._font_size\n\n @font_size.setter\n def font_size(self, font_val):\n if font_val <= 0:\n raise ValueError(\"font_size must be greater than 0.\")\n elif self.height > 0:\n # sometimes manim generates a SingleStringMathex mobject with 0 height.\n # can't be scaled regardless and will error without the elif.\n\n # scale to a factor of the initial height so that setting\n # font_size does not depend on current size.\n self.scale(font_val / self.font_size)\n\n def _set_submobjects_from_number(self, number):\n self.number = number\n self.submobjects = []\n\n num_string = self._get_num_string(number)\n self.add(*(map(self._string_to_mob, num_string)))\n\n # Add 
non-numerical bits\n if self.show_ellipsis:\n self.add(\n self._string_to_mob(\"\\\\dots\", SingleStringMathTex, color=self.color),\n )\n\n if self.unit is not None:\n self.unit_sign = self._string_to_mob(self.unit, SingleStringMathTex)\n self.add(self.unit_sign)\n\n self.arrange(\n buff=self.digit_buff_per_font_unit * self._font_size,\n aligned_edge=DOWN,\n )\n\n # Handle alignment of parts that should be aligned\n # to the bottom\n for i, c in enumerate(num_string):\n if c == \"-\" and len(num_string) > i + 1:\n self[i].align_to(self[i + 1], UP)\n self[i].shift(self[i + 1].height * DOWN / 2)\n elif c == \",\":\n self[i].shift(self[i].height * DOWN / 2)\n if self.unit and self.unit.startswith(\"^\"):\n self.unit_sign.align_to(self, UP)\n\n # track the initial height to enable scaling via font_size\n self.initial_height = self.height\n\n if self.include_background_rectangle:\n self.add_background_rectangle()\n\n def _get_num_string(self, number):\n if isinstance(number, complex):\n formatter = self._get_complex_formatter()\n else:\n formatter = self._get_formatter()\n num_string = formatter.format(number)\n\n rounded_num = np.round(number, self.num_decimal_places)\n if num_string.startswith(\"-\") and rounded_num == 0:\n if self.include_sign:\n num_string = \"+\" + num_string[1:]\n else:\n num_string = num_string[1:]\n\n return num_string\n\n def _string_to_mob(\n self, string: str, mob_class: Optional[VMobject] = None, **kwargs\n ):\n if mob_class is None:\n mob_class = self.mob_class\n\n if string not in string_to_mob_map:\n string_to_mob_map[string] = mob_class(string, **kwargs)\n mob = string_to_mob_map[string].copy()\n mob.font_size = self._font_size\n return mob\n\n def _get_formatter(self, **kwargs):\n \"\"\"\n Configuration is based first off instance attributes,\n but overwritten by any kew word argument. Relevant\n key words:\n - include_sign\n - group_with_commas\n - num_decimal_places\n - field_name (e.g. 
0 or 0.real)\n \"\"\"\n config = {\n attr: getattr(self, attr)\n for attr in [\n \"include_sign\",\n \"group_with_commas\",\n \"num_decimal_places\",\n ]\n }\n config.update(kwargs)\n return \"\".join(\n [\n \"{\",\n config.get(\"field_name\", \"\"),\n \":\",\n \"+\" if config[\"include_sign\"] else \"\",\n \",\" if config[\"group_with_commas\"] else \"\",\n \".\",\n str(config[\"num_decimal_places\"]),\n \"f\",\n \"}\",\n ],\n )\n\n def _get_complex_formatter(self):\n return \"\".join(\n [\n self._get_formatter(field_name=\"0.real\"),\n self._get_formatter(field_name=\"0.imag\", include_sign=True),\n \"i\",\n ],\n )\n\n def set_value(self, number: float):\n \"\"\"Set the value of the :class:`~.DecimalNumber` to a new number.\n\n Parameters\n ----------\n number\n The value that will overwrite the current number of the :class:`~.DecimalNumber`.\n\n \"\"\"\n # creates a new number mob via `set_submobjects_from_number`\n # then matches the properties (color, font_size, etc...)\n # of the previous mobject to the new one\n\n # old_family needed with cairo\n old_family = self.get_family()\n\n old_font_size = self.font_size\n move_to_point = self.get_edge_center(self.edge_to_fix)\n old_submobjects = self.submobjects\n\n self._set_submobjects_from_number(number)\n self.font_size = old_font_size\n self.move_to(move_to_point, self.edge_to_fix)\n for sm1, sm2 in zip(self.submobjects, old_submobjects):\n sm1.match_style(sm2)\n\n if config.renderer != \"opengl\":\n for mob in old_family:\n # Dumb hack...due to how scene handles families\n # of animated mobjects\n # for compatibility with updaters to not leave first number in place while updating,\n # not needed with opengl renderer\n mob.points[:] = 0\n\n self.init_colors()\n return self\n\n def get_value(self):\n return self.number\n\n def increment_value(self, delta_t=1):\n self.set_value(self.get_value() + delta_t)\n\n\nclass Integer(DecimalNumber):\n \"\"\"A class for displaying Integers.\n\n Examples\n --------\n\n .. manim:: IntegerExample\n :save_last_frame:\n\n class IntegerExample(Scene):\n def construct(self):\n self.add(Integer(number=2.5).set_color(ORANGE).scale(2.5).set_x(-0.5).set_y(0.8))\n self.add(Integer(number=3.14159, show_ellipsis=True).set_x(3).set_y(3.3).scale(3.14159))\n self.add(Integer(number=42).set_x(2.5).set_y(-2.3).set_color_by_gradient(BLUE, TEAL).scale(1.7))\n self.add(Integer(number=6.28).set_x(-1.5).set_y(-2).set_color(YELLOW).scale(1.4))\n \"\"\"\n\n def __init__(self, number=0, num_decimal_places=0, **kwargs):\n super().__init__(number=number, num_decimal_places=num_decimal_places, **kwargs)\n\n def get_value(self):\n return int(np.round(super().get_value()))\n\n\nclass Variable(VMobject, metaclass=ConvertToOpenGL):\n \"\"\"A class for displaying text that shows \"label = value\" with\n the value continuously updated from a :class:`~.ValueTracker`.\n\n Parameters\n ----------\n var : Union[:class:`int`, :class:`float`]\n The initial value you need to keep track of and display.\n label : Union[:class:`str`, :class:`~.Tex`, :class:`~.MathTex`, :class:`~.Text`, :class:`~.TexSymbol`, :class:`~.SingleStringMathTex`]\n The label for your variable. Raw strings are convertex to :class:`~.MathTex` objects.\n var_type : Union[:class:`DecimalNumber`, :class:`Integer`], optional\n The class used for displaying the number. Defaults to :class:`DecimalNumber`.\n num_decimal_places : :class:`int`, optional\n The number of decimal places to display in your variable. 
Defaults to 2.\n If `var_type` is an :class:`Integer`, this parameter is ignored.\n kwargs : Any\n Other arguments to be passed to `~.Mobject`.\n\n Attributes\n ----------\n label : Union[:class:`str`, :class:`~.Tex`, :class:`~.MathTex`, :class:`~.Text`, :class:`~.TexSymbol`, :class:`~.SingleStringMathTex`]\n The label for your variable, for example ``x = ...``.\n tracker : :class:`~.ValueTracker`\n Useful in updating the value of your variable on-screen.\n value : Union[:class:`DecimalNumber`, :class:`Integer`]\n The tex for the value of your variable.\n\n Examples\n --------\n Normal usage::\n\n # DecimalNumber type\n var = 0.5\n on_screen_var = Variable(var, Text(\"var\"), num_decimal_places=3)\n # Integer type\n int_var = 0\n on_screen_int_var = Variable(int_var, Text(\"int_var\"), var_type=Integer)\n # Using math mode for the label\n on_screen_int_var = Variable(int_var, \"{a}_{i}\", var_type=Integer)\n\n .. manim:: VariablesWithValueTracker\n\n class VariablesWithValueTracker(Scene):\n def construct(self):\n var = 0.5\n on_screen_var = Variable(var, Text(\"var\"), num_decimal_places=3)\n\n # You can also change the colours for the label and value\n on_screen_var.label.set_color(RED)\n on_screen_var.value.set_color(GREEN)\n\n self.play(Write(on_screen_var))\n # The above line will just display the variable with\n # its initial value on the screen. If you also wish to\n # update it, you can do so by accessing the `tracker` attribute\n self.wait()\n var_tracker = on_screen_var.tracker\n var = 10.5\n self.play(var_tracker.animate.set_value(var))\n self.wait()\n\n int_var = 0\n on_screen_int_var = Variable(\n int_var, Text(\"int_var\"), var_type=Integer\n ).next_to(on_screen_var, DOWN)\n on_screen_int_var.label.set_color(RED)\n on_screen_int_var.value.set_color(GREEN)\n\n self.play(Write(on_screen_int_var))\n self.wait()\n var_tracker = on_screen_int_var.tracker\n var = 10.5\n self.play(var_tracker.animate.set_value(var))\n self.wait()\n\n # If you wish to have a somewhat more complicated label for your\n # variable with subscripts, superscripts, etc. the default class\n # for the label is MathTex\n subscript_label_var = 10\n on_screen_subscript_var = Variable(subscript_label_var, \"{a}_{i}\").next_to(\n on_screen_int_var, DOWN\n )\n self.play(Write(on_screen_subscript_var))\n self.wait()\n\n .. manim:: VariableExample\n\n class VariableExample(Scene):\n def construct(self):\n start = 2.0\n\n x_var = Variable(start, 'x', num_decimal_places=3)\n sqr_var = Variable(start**2, 'x^2', num_decimal_places=3)\n Group(x_var, sqr_var).arrange(DOWN)\n\n sqr_var.add_updater(lambda v: v.tracker.set_value(x_var.tracker.get_value()**2))\n\n self.add(x_var, sqr_var)\n self.play(x_var.tracker.animate.set_value(5), run_time=2, rate_func=linear)\n self.wait(0.1)\n\n \"\"\"\n\n def __init__(\n self, var, label, var_type=DecimalNumber, num_decimal_places=2, **kwargs\n ):\n\n self.label = MathTex(label) if isinstance(label, str) else label\n equals = MathTex(\"=\").next_to(self.label, RIGHT)\n self.label.add(equals)\n\n self.tracker = ValueTracker(var)\n\n if var_type == DecimalNumber:\n self.value = DecimalNumber(\n self.tracker.get_value(),\n num_decimal_places=num_decimal_places,\n )\n elif var_type == Integer:\n self.value = Integer(self.tracker.get_value())\n\n self.value.add_updater(lambda v: v.set_value(self.tracker.get_value())).next_to(\n self.label,\n RIGHT,\n )\n\n super().__init__(**kwargs)\n self.add(self.label, self.value)\n"
] | [
[
"numpy.round"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
guobinufl/PianoTimerPy | [
"81e8d0ae1bc966f68f0a183b8a7fccaae3bedca4"
] | [
"PianoKey.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy.fft import fft\nfrom numpy import hanning\nimport scipy.io.wavfile as wavfile\nimport pyaudio\nfrom datetime import datetime\nimport re\nimport os.path\n#import scipy.signal as signal\n\nclass PianoKeyFreq(object):\n def __init__(self):\n self.KeyFreqList_std = np.zeros(88)\n self.KeyFreqList_rcd = np.zeros(88)\n self.SingleKeyFFT = np.zeros((88, 8192*2), dtype=np.complex)\n self.SingleKeyWav = np.zeros((88, 8192))\n self.SingleKeyXcorrScale = np.ones(88)\n self.SingleKeyXcorrScale[80:88] = 4\n self.SingleKeyXcorrScale[76:80] = 2\n \n def StandardFreq(self):\n PianoKeyFreq = []\n for n in range(1, 89):\n KeyFreq = 2**((n-49.0)/12.0) * 440.0\n PianoKeyFreq.append(KeyFreq)\n self.KeyFreqList_std = np.asarray(PianoKeyFreq)\n \n return self.KeyFreqList_std\n \n \n def RecordFreq(self, fwave):\n rate, pianowav_tmp = wavfile.read(fwave)\n if(pianowav_tmp.dtype == 'int16'):\n if(len(pianowav_tmp.shape)>1):\n pianowav = pianowav_tmp[:,0].astype('float') / (2**15)\n else:\n pianowav = pianowav_tmp.astype('float') / (2**15)\n else:\n if(len(pianowav_tmp.shape)>1):\n pianowav = pianowav_tmp[:,0]\n else:\n pianowav = pianowav_tmp.copy()\n del pianowav_tmp\n pianowav = np.delete(pianowav, range(np.round(rate/2.0).astype('int')))\n fs = 1.0*rate\n \n pianokeyind = self.KeySegment(pianowav, dT=rate)\n PianoKeyFreq = []\n for ii, i in enumerate(pianokeyind):\n yt = pianowav[i:i+np.round(0.5*rate).astype('int')]\n if(np.mod(ii, 10) == 15):\n KeyFreq = self.KeyFreqFind(yt, fs, ikey=ii, isplot=True)\n else:\n KeyFreq = self.KeyFreqFind(yt, fs, ikey=ii)\n PianoKeyFreq.append(KeyFreq)\n self.KeyFreqList_rcd = np.asarray(PianoKeyFreq)\n \n return self.KeyFreqList_rcd\n \n \n def CalSingleKeyFFT(self, fwave, DeltaT=0.5):\n rate, pianowav_tmp = wavfile.read(fwave)\n if(pianowav_tmp.dtype == 'int16'):\n if(len(pianowav_tmp.shape)>1):\n pianowav = pianowav_tmp[:,0].astype('float') / (2**15)\n else:\n pianowav = pianowav_tmp.astype('float') / (2**15)\n else:\n if(len(pianowav_tmp.shape)>1):\n pianowav = pianowav_tmp[:,0]\n else:\n pianowav = pianowav_tmp.copy()\n del pianowav_tmp\n pianowav = np.delete(pianowav, range(np.round(rate/2.0).astype('int')))\n \n pianokeyind = self.KeySegment(pianowav, dT=rate)\n Nt = np.round(DeltaT*rate).astype('int')\n self.SingleKeyWav = np.zeros((88, Nt))\n# Nf = np.round(rate*(1.0+DeltaT)).astype('int')\n Nf = 2 * Nt\n self.SingleKeyFFT = np.zeros((88, Nf), dtype=np.complex)\n# plt.figure()\n# plt.plot(pianowav)\n# plt.show()\n# print(pianokeyind)\n for ii, i in enumerate(pianokeyind):\n yt = pianowav[i:i+Nt]\n self.SingleKeyWav[ii, :] = yt\n yf = np.fft.fft(yt, n=Nf)\n self.SingleKeyFFT[ii,:] = yf.conj()\n if(np.mod(ii, 10) == 15):\n fs = 1.0 * rate\n dt = 1.0 / fs\n Nt = len(yt)\n tt = np.arange(0, Nt*dt, dt)\n df = fs / Nf\n ff = np.arange(0, fs, df)\n plt.figure()\n plt.subplot(211)\n plt.plot(tt, yt)\n plt.subplot(212)\n plt.plot(ff, np.abs(yf))\n plt.xlim((0, fs/2.0))\n plt.show()\n \n \n def KeySegment(self, yt, dT=44100):\n yind = (np.abs(yt) > 0.90).nonzero()\n yind1 = (np.diff(yind[0])>dT).nonzero()\n yind2 = np.hstack((0, yind1[0]+1))\n pianokeyind = yind[0][yind2]\n \n return pianokeyind\n \n\n def KeyFreqFind(self, yt, fs, ikey=0, isplot=False): \n Nt = len(yt)\n Nf = np.int(fs)\n df = fs / Nf\n ff = np.arange(0,fs,df)\n if(Nt>=Nf):\n yt1 = yt[0:Nf]\n else:\n yt1 = np.zeros(Nf)\n tleft = np.floor((Nf-Nt)/2.0).astype('int')\n tright = tleft+Nt\n yt1[tleft:tright] = yt\n \n yf = 
np.abs(fft(yt1*hanning(Nf)))\n stdfreq = self.StandardFreq()[ikey]\n istdfreq = np.round(stdfreq/df).astype('int')\n fband = 0.1 * stdfreq\n ifband = np.max((np.round(fband/df).astype('int'), np.round(10.0/df).astype('int')))\n ifreqmax = np.argmax(yf[istdfreq-ifband : istdfreq+ifband])\n if((ifreqmax <= 1) or (ifreqmax >= 2*ifband-2)):\n freqmax = ff[istdfreq]\n else:\n ff1 = ff[istdfreq-ifband : istdfreq+ifband]\n freqmax = ff1[ifreqmax]\n \n if(isplot):\n dt = 1.0 / fs\n tt = np.arange(Nt) * dt\n plt.figure()\n plt.subplot(211)\n plt.plot(tt, yt)\n plt.subplot(212)\n plt.plot(ff, yf)\n ylims = plt.ylim()\n plt.plot([stdfreq, stdfreq], ylims, 'r:')\n plt.xlim((0, 4000))\n plt.show()\n \n return freqmax\n \n \n#piano = PianoKeyFreq()\n#pianokey_std = piano.StandardFreq()\n#fwave = 'pianokeys.wav'\n#pianokey_rcd = piano.RecordFreq(fwave)\n#plt.figure()\n#plt.subplot(211)\n#plt.plot(pianokey_std, '.b-')\n#plt.plot(pianokey_rcd, 'or:')\n#plt.subplot(212)\n#plt.plot(pianokey_rcd - pianokey_std, '.k-')\n\n\n\ndef PianoFFT(yt, fs):\n# dt = 1.0/fs\n# Nt = len(yt)\n# tt = np.arange(0, Nt*dt, dt)\n Nf = len(yt)\n df = fs / Nf\n ff = np.arange(0,fs,df)\n yf = np.abs(fft(yt*hanning(Nf)))\n \n return yf, ff, df\n\n\ndef pianofind(yf, ff):\n ifreqmax = np.argmax(yf)\n freqmax = ff[ifreqmax]\n yfmax = yf[ifreqmax]\n \n yfleft = yf[0:ifreqmax]\n ffleft = ff[0:ifreqmax]\n ifleft = (yfleft<0.75*yfmax).nonzero()\n \n yfright = yf[ifreqmax:]\n ffright = ff[ifreqmax:]\n ifright = (yfright<0.75*yfmax).nonzero()\n \n if((len(ifleft[0])==0) or (len(ifright[0])==0)):\n return 0.0, 0.0\n else:\n return freqmax, ffright[ifright[0][0]]-ffleft[ifleft[0][-1]]\n\n\ndef pianofind_xcorr_t(yt, PianoKeyWav, isplot=False):\n Nkey = PianoKeyWav.shape[0]\n Nt = len(yt) + PianoKeyWav.shape[1] - 1\n xcorr_key = np.zeros(Nkey)\n xcorr2d = np.zeros((Nkey, Nt))\n \n for i in range(Nkey):\n xcorr_tmp = np.correlate(yt, PianoKeyWav[i,:], mode='full')\n xcorr_key[i] = np.max(np.abs(xcorr_tmp))\n xcorr2d[i, :] = xcorr_tmp\n \n if(isplot):\n plt.figure()\n plt.subplot(211)\n plt.plot(xcorr_key)\n plt.subplot(212)\n plt.imshow(xcorr2d.T, aspect='auto')\n plt.show()\n \n return xcorr_key\n\n\ndef pianofind_xcorr_f(yt, PianoKeyFFT, isplot=False):\n Nkey = PianoKeyFFT.shape[0]\n Nf = PianoKeyFFT.shape[1]\n xcorr_key = np.zeros(Nkey)\n xcorr2d = np.zeros((Nkey, Nf))\n\n yf = np.fft.fft(yt, Nf)\n for i in range(Nkey):\n xcorrf = yf * PianoKeyFFT[i, :]\n xcorr_tmp = np.real(np.fft.fftshift(np.fft.ifft(xcorrf)))\n xcorr_key[i] = np.max(np.abs(xcorr_tmp))\n xcorr2d[i, :] = xcorr_tmp\n \n if(isplot):\n plt.figure()\n plt.subplot(211)\n plt.plot(xcorr_key)\n plt.subplot(212)\n plt.imshow(xcorr2d.T, aspect='auto')\n plt.show()\n \n return xcorr_key\n\n\ndef main_offline_xcorr():\n import time\n\n# rate, pianowav_tmp = wavfile.read('PianoKeys_16K_1.wav')\n# rate, pianowav_tmp = wavfile.read('PianoKeys_16K_2.wav')\n# rate, pianowav_tmp = wavfile.read('RecordWav/2016-01-24-16-18-17-725882.wav')\n# rate, pianowav_tmp = wavfile.read('RecordWav/2016-01-24-16-22-21-510476.wav')\n# rate, pianowav_tmp = wavfile.read('RecordWav/2016-01-24-16-28-48-364498.wav')\n rate, pianowav_tmp = wavfile.read('RecordWav/2016-01-24-16-37-04-235849.wav')\n if(pianowav_tmp.dtype == 'int16'):\n if(len(pianowav_tmp.shape)>1):\n pianowav = pianowav_tmp[:,0].astype('float') / (2**15)\n else:\n pianowav = pianowav_tmp.astype('float') / (2**15)\n else:\n if(len(pianowav_tmp.shape)>1):\n pianowav = pianowav_tmp[:,0]\n else:\n pianowav = pianowav_tmp.copy()\n del pianowav_tmp\n\n 
fs = 1.0*rate\n Nsec = np.floor(pianowav.shape[0]/rate).astype('int')\n\n pianokeyfreq = PianoKeyFreq()\n fwave = 'PianoKeys.wav'\n pianokeyfreq.CalSingleKeyFFT(fwave, DeltaT=0.5)\n \n ispiano = [False]\n xcorr_key_all = np.zeros((Nsec, 88))\n for isec in range(1, Nsec-1):\n ileft = np.round((isec-1.0)*rate).astype('int')\n iright = np.round((isec+0.0)*rate).astype('int')\n y = pianowav[ileft:iright]\n \n ymax = np.max(np.abs(y))\n if(ymax > 0.2):\n ispiano_amp = True\n else:\n ispiano_amp = False\n y = y / ymax\n \n# xcorr_key_t = pianofind_xcorr_t(y, pianokeyfreq.SingleKeyWav, isplot=True)\n# xcorr_key_f = pianofind_xcorr_f(y, pianokeyfreq.SingleKeyFFT, isplot=True)\n# plt.figure()\n# plt.plot(xcorr_key_t)\n# plt.plot(xcorr_key_f)\n \n now = time.time()\n xcorr_key = pianofind_xcorr_f(y, pianokeyfreq.SingleKeyFFT, isplot=False)\n xcorr_key = xcorr_key * pianokeyfreq.SingleKeyXcorrScale\n xcorr_key_all[isec, :] = xcorr_key\n if(np.max(xcorr_key) > 80):\n ispiano_xcorr = True\n else:\n ispiano_xcorr = False\n usedtime = time.time() - now\n \n if(ispiano_amp and ispiano_xcorr):\n ispiano.append(True)\n print('{0:} - piano key find. isamp={1:}, isxcorr={2:}. Used {3:8.5f} sec'.\\\n format(isec, ispiano_amp, ispiano_xcorr, usedtime))\n else:\n ispiano.append(False)\n print('{0:} - piano key not find. isamp={1:}, isxcorr={2:}. Used {3:8.5f} sec'.\\\n format(isec, ispiano_amp, ispiano_xcorr, usedtime))\n \n ispiano.append(False) \n print(ispiano)\n \n plt.figure(figsize=(12,12))\n plt.subplot(211)\n plt.imshow(xcorr_key_all.T, aspect='auto')\n ax = plt.gca()\n ax.invert_yaxis()\n plt.colorbar()\n plt.subplot(212)\n plt.plot(np.max(xcorr_key_all, axis=1))\n plt.show()\n \n plt.figure(figsize=(12,9))\n dt = 1.0/fs\n for isec in range(1, Nsec):\n ileft = np.round((isec-1.0)*rate).astype('int')\n iright = np.round((isec+0.0)*rate).astype('int')\n y = pianowav[ileft:iright].astype('float')\n tt = np.arange(isec-1.0, isec, dt)\n if(ispiano[isec]):\n plt.plot(tt[0::10], y[0::10], 'r')\n else:\n plt.plot(tt[0::10], y[0::10], 'b')\n plt.show()\n \n \n \n\ndef main_offline():\n rate, pianowav_tmp = wavfile.read('piano.wav')\n pianowav = np.delete(pianowav_tmp, range(4000), axis=0)\n Nsec = np.floor(pianowav.shape[0]/rate).astype('int')\n #Nsec = 10\n fs = 1.0*rate\n \n pianokeyfreq = PianoKeyFreq()\n# pianokey_std = pianokeyfreq.StandardFreq()\n fwave = 'pianokeys.wav'\n pianokey_rcd = pianokeyfreq.RecordFreq(fwave)\n print(pianokey_rcd)\n\n ispiano = [False]\n for isec in range(1, Nsec-1):\n ileft = np.round((isec-1.0)*rate).astype('int')\n iright = np.round((isec+0.0)*rate).astype('int')\n y = pianowav[ileft:iright, 0].astype('float')\n y = y / 2**15\n \n ymax = np.max(np.abs(y))\n if(ymax > 0.5):\n ispiano_amp = True\n else:\n ispiano_amp = False\n ispiano.append(ispiano_amp)\n print(isec, 'amp=false')\n continue\n \n yf, ff, df = PianoFFT(y, fs)\n fleft = 0\n fright = np.round(5000/df).astype('int')\n piano_fmax, piano_bw = pianofind(yf[fleft:fright], ff[fleft:fright])\n if(isec == 18):\n print(' piano_fmax={0:}, piano_bw={1:}'.format(piano_fmax, piano_bw))\n plt.figure()\n plt.plot(ff, yf)\n plt.show()\n \n if(piano_bw <6):\n ispiano_bw = True\n else:\n ispiano_bw = False\n \n# if piano_fmax < 500:\n# gap = 3\n# elif piano_fmax>=500 and piano_fmax<1500:\n# gap = 4\n# else:\n# gap = 5\n gap = 3 + np.floor(piano_fmax / 500.0)\n pianokeyfind = (np.abs(pianokey_rcd - piano_fmax) < gap).nonzero()\n if(len(pianokeyfind[0]) > 0):\n ispiano_key = True\n else:\n ispiano_key = False\n \n if(ispiano_bw and 
ispiano_key):\n ispiano.append(True)\n print('{0:} - piano key find. isbw={1:}, bw={2:}, iskey={3:}, gap={4:}, keyfind={5:}'.\\\n format(isec, ispiano_bw, piano_bw, ispiano_key, gap, pianokeyfind))\n else:\n ispiano.append(False)\n print('{0:} - isbw={1:}, bw={2:}, iskey={3:}, gap={4:}, keyfind={5:}'.\\\n format(isec, ispiano_bw, piano_bw, ispiano_key, gap, pianokeyfind))\n \n ispiano.append(False) \n #print(ispiano)\n \n \n plt.figure(figsize=(12,9))\n dt = 1.0/fs\n for isec in range(1, Nsec):\n ileft = np.round((isec-1.0)*rate).astype('int')\n iright = np.round((isec+0.0)*rate).astype('int')\n y = pianowav[ileft:iright, 0].astype('float')\n y = y / 2**15\n tt = np.arange(isec-1.0, isec, dt)\n if(ispiano[isec]):\n plt.plot(tt[0::10], y[0::10], 'r')\n else:\n plt.plot(tt[0::10], y[0::10], 'b')\n\n\ndef main_online():\n CHUNK = 1024\n FORMAT = pyaudio.paInt16\n CHANNELS = 1\n RATE = 1024*16 #44100\n max_sec = 50\n play_sec = 30\n fs = 1.0*RATE\n \n pianokeyfreq = PianoKeyFreq()\n# pianokey_std = pianokeyfreq.StandardFreq()\n fwave = 'pianokeys.wav'\n pianokey_rcd = pianokeyfreq.RecordFreq(fwave)\n \n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK)\n\n wav_total = np.zeros((max_sec, RATE))\n ispiano = []\n play_sec_count = 0\n all_sec_count = 0\n \n print(\"* Piano Timer Start.\")\n for isec in range(max_sec+1):\n frames = []\n for j in range(0, int(RATE / CHUNK)):\n chunk_wave = stream.read(CHUNK)\n data = np.fromstring(chunk_wave, dtype=np.int16)\n frames.append(data)\n wav_sec = np.hstack(frames)\n wav_total[isec, :] = wav_sec\n del frames\n\n y = wav_sec.astype('float') / 2**15\n ymax = np.max(np.abs(y))\n if(ymax > 0.5):\n ispiano_amp = True\n else:\n ispiano_amp = False\n \n yf, ff, df = PianoFFT(y, fs)\n fleft = 0\n fright = np.round(5000/df).astype('int')\n piano_fmax, piano_bw = pianofind(yf[fleft:fright], ff[fleft:fright])\n# if(isec == 18):\n# print(' piano_fmax={0:}, piano_bw={1:}'.format(piano_fmax, piano_bw))\n# plt.figure()\n# plt.plot(ff, yf)\n# plt.show()\n \n if(piano_bw <6):\n ispiano_bw = True\n else:\n ispiano_bw = False\n \n gap = 3 + np.floor(piano_fmax / 500.0)\n pianokeyfind = (np.abs(pianokey_rcd - piano_fmax) < gap).nonzero()\n if(len(pianokeyfind[0]) > 0):\n ispiano_key = True\n else:\n ispiano_key = False\n \n if(ispiano_amp and ispiano_bw and ispiano_key):\n all_sec_count += 1\n play_sec_count += 1\n ispiano.append(True)\n# print('{0:} - piano key find. 
isbw={1:}, bw={2:}, iskey={3:}, gap={4:}, keyfind={5:}'.\\\n# format(isec, ispiano_bw, piano_bw, ispiano_key, gap, pianokeyfind))\n else:\n all_sec_count += 1\n ispiano.append(False)\n# print('{0:} - isbw={1:}, bw={2:}, iskey={3:}, gap={4:}, keyfind={5:}'.\\\n# format(isec, ispiano_bw, piano_bw, ispiano_key, gap, pianokeyfind))\n \n print('isec={0:}, play_sec={1:}'.format(isec, play_sec_count))\n \n if(play_sec_count > play_sec):\n break\n\n print(\"* Piano Timer Stop.\")\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n \n wav_data = np.hstack(wav_total)\n cur_dt = str(datetime.now())\n wav_file = cur_dt.replace(':', '-').replace(' ', '-').replace('.', '-')\n wavfile.write(wav_file+'.wav', RATE, wav_data)\n \n plt.figure(figsize=(12,9))\n dt = 1.0/fs\n for isec in range(0, all_sec_count-1):\n y = wav_total[isec, :].astype('float') / 2**15\n tt = np.arange(isec, isec+1.0, dt)\n if(ispiano[isec]):\n plt.plot(tt[0::10], y[0::10], 'r')\n else:\n plt.plot(tt[0::10], y[0::10], 'b')\n plt.show()\n\n\nif __name__ == \"__main__\":\n# main_offline()\n# main_online()\n main_offline_xcorr()\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.asarray",
"matplotlib.pyplot.plot",
"numpy.int",
"numpy.max",
"numpy.round",
"numpy.hanning",
"scipy.io.wavfile.read",
"matplotlib.pyplot.gca",
"numpy.hstack",
"numpy.arange",
"numpy.argmax",
"matplotlib.pyplot.subplot",
"numpy.diff",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"numpy.fft.ifft",
"numpy.floor",
"numpy.correlate",
"matplotlib.pyplot.show",
"scipy.io.wavfile.write",
"numpy.abs",
"numpy.fft.fft",
"numpy.ones",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlim",
"numpy.fromstring",
"numpy.mod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
chawins/aml | [
"f62254ff92ff67bab427cb44fe470a5f39975cbe"
] | [
"train_adv.py"
] | [
"\"\"\"\nA run script to start adversarial training.\n\"\"\"\n\nimport os\nfrom os.path import basename\n\nimport keras\nfrom keras import backend as K\nfrom keras.models import save_model\nfrom lib.attacks import symb_iter_fgs, symbolic_fgs\nfrom lib.keras_utils import *\nfrom lib.tf_utils import tf_test_error_rate, tf_train\nfrom lib.utils import *\nfrom parameters import *\nfrom tensorflow.python.platform import flags\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n# Specify training set to load from ithin DATA_DIR\nTRAIN_FILE_NAME = 'train_extended_75.p'\nFLAGS = flags.FLAGS\n\n\ndef main():\n np.random.seed(0)\n assert keras.backend.backend() == \"tensorflow\"\n\n flags.DEFINE_bool('NUM_EPOCHS', args.epochs, 'Number of epochs')\n\n # Get MNIST test data\n x_train, y_train, _, _, x_test, y_test = load_dataset_GTSRB(\n n_channel=N_CHANNEL, train_file_name=TRAIN_FILE_NAME)\n\n # Convert to one-hot encoding\n y_train = keras.utils.to_categorical(y_train, NUM_LABELS)\n y_test = keras.utils.to_categorical(y_test, NUM_LABELS)\n\n x = K.placeholder(shape=(None, HEIGHT, WIDTH, N_CHANNEL))\n y = K.placeholder(shape=(BATCH_SIZE, NUM_LABELS))\n\n eps = args.eps\n x_advs = [None]\n\n model = build_mltscl()\n\n if args.iter == 0:\n logits = model(x)\n grad = gen_grad(x, logits, y, loss='training')\n x_advs = symbolic_fgs(x, grad, eps=eps)\n elif args.iter == 1:\n x_advs = symb_iter_fgs(model, x, y, steps=40, alpha=0.01, eps=args.eps)\n\n # Train an MNIST model\n tf_train(x, y, model, x_train, y_train, x_advs=x_advs, benign=args.ben)\n\n # Finally print the result!\n test_error = tf_test_error_rate(model, x, x_test, y_test)\n print(test_error)\n\n # Specify model name\n model_name = './tmp/multiscale_adv'\n save_model(model, model_name)\n json_string = model.to_json()\n with open(model_name + '.json', 'wr') as f:\n f.write(json_string)\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--epochs\", type=int, default=30,\n help=\"number of epochs\")\n parser.add_argument(\"--eps\", type=float, default=0.3,\n help=\"FGS attack scale\")\n parser.add_argument(\"--norm\", type=str, default='linf',\n help=\"norm used to constrain perturbation\")\n parser.add_argument(\"--iter\", type=int, default=0,\n help=\"whether an iterative training method is to be used\")\n parser.add_argument(\"--ben\", type=int, default=1,\n help=\"whether benign data is to be used while performing adversarial training\")\n\n args = parser.parse_args()\n main()\n"
] | [
[
"tensorflow.python.platform.flags.DEFINE_bool"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"0.12",
"1.0",
"1.2",
"1.4"
]
}
] |
TLouf/pandas | [
"e045034e5c89b932a526d6c9e691d3031784c377"
] | [
"pandas/core/indexes/base.py"
] | [
"from __future__ import annotations\n\nfrom datetime import datetime\nimport functools\nfrom itertools import zip_longest\nimport operator\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Hashable,\n Literal,\n Sequence,\n TypeVar,\n cast,\n final,\n overload,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import (\n algos as libalgos,\n index as libindex,\n lib,\n)\nimport pandas._libs.join as libjoin\nfrom pandas._libs.lib import (\n is_datetime_array,\n no_default,\n)\nfrom pandas._libs.tslibs import (\n IncompatibleFrequency,\n NaTType,\n OutOfBoundsDatetime,\n Timestamp,\n tz_compare,\n)\nfrom pandas._typing import (\n AnyArrayLike,\n ArrayLike,\n Dtype,\n DtypeObj,\n F,\n Shape,\n npt,\n)\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import (\n DuplicateLabelError,\n InvalidIndexError,\n)\nfrom pandas.util._decorators import (\n Appender,\n cache_readonly,\n deprecate_nonkeyword_arguments,\n doc,\n)\n\nfrom pandas.core.dtypes.cast import (\n can_hold_element,\n find_common_type,\n infer_dtype_from,\n validate_numeric_casting,\n)\nfrom pandas.core.dtypes.common import (\n ensure_int64,\n ensure_object,\n ensure_platform_int,\n is_bool_dtype,\n is_categorical_dtype,\n is_dtype_equal,\n is_ea_or_datetimelike_dtype,\n is_extension_array_dtype,\n is_float,\n is_float_dtype,\n is_hashable,\n is_integer,\n is_interval_dtype,\n is_iterator,\n is_list_like,\n is_object_dtype,\n is_scalar,\n is_signed_integer_dtype,\n is_unsigned_integer_dtype,\n needs_i8_conversion,\n pandas_dtype,\n validate_all_hashable,\n)\nfrom pandas.core.dtypes.concat import concat_compat\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n DatetimeTZDtype,\n ExtensionDtype,\n IntervalDtype,\n PandasDtype,\n PeriodDtype,\n)\nfrom pandas.core.dtypes.generic import (\n ABCDatetimeIndex,\n ABCMultiIndex,\n ABCPeriodIndex,\n ABCSeries,\n ABCTimedeltaIndex,\n)\nfrom pandas.core.dtypes.inference import is_dict_like\nfrom pandas.core.dtypes.missing import (\n array_equivalent,\n is_valid_na_for_dtype,\n isna,\n)\n\nfrom pandas.core import (\n missing,\n ops,\n)\nfrom pandas.core.accessor import CachedAccessor\nimport pandas.core.algorithms as algos\nfrom pandas.core.array_algos.putmask import (\n setitem_datetimelike_compat,\n validate_putmask,\n)\nfrom pandas.core.arrays import (\n Categorical,\n ExtensionArray,\n)\nfrom pandas.core.arrays.datetimes import (\n tz_to_dtype,\n validate_tz_from_dtype,\n)\nfrom pandas.core.arrays.sparse import SparseDtype\nfrom pandas.core.base import (\n IndexOpsMixin,\n PandasObject,\n)\nimport pandas.core.common as com\nfrom pandas.core.construction import (\n ensure_wrapped_if_datetimelike,\n extract_array,\n sanitize_array,\n)\nfrom pandas.core.indexers import deprecate_ndim_indexing\nfrom pandas.core.indexes.frozen import FrozenList\nfrom pandas.core.ops import get_op_result_name\nfrom pandas.core.ops.invalid import make_invalid_op\nfrom pandas.core.sorting import (\n ensure_key_mapped,\n get_group_index_sorter,\n nargsort,\n)\nfrom pandas.core.strings import StringMethods\n\nfrom pandas.io.formats.printing import (\n PrettyDict,\n default_pprint,\n format_object_attrs,\n format_object_summary,\n pprint_thing,\n)\n\nif TYPE_CHECKING:\n\n from pandas import (\n CategoricalIndex,\n DataFrame,\n IntervalIndex,\n MultiIndex,\n RangeIndex,\n Series,\n )\n\n\n__all__ = [\"Index\"]\n\n_unsortable_types = frozenset((\"mixed\", \"mixed-integer\"))\n\n_index_doc_kwargs: dict[str, str] = {\n \"klass\": \"Index\",\n \"inplace\": \"\",\n 
\"target_klass\": \"Index\",\n \"raises_section\": \"\",\n \"unique\": \"Index\",\n \"duplicated\": \"np.ndarray\",\n}\n_index_shared_docs: dict[str, str] = {}\nstr_t = str\n\n\n_o_dtype = np.dtype(\"object\")\n\n\ndef _maybe_return_indexers(meth: F) -> F:\n \"\"\"\n Decorator to simplify 'return_indexers' checks in Index.join.\n \"\"\"\n\n @functools.wraps(meth)\n def join(\n self,\n other,\n how: str_t = \"left\",\n level=None,\n return_indexers: bool = False,\n sort: bool = False,\n ):\n join_index, lidx, ridx = meth(self, other, how=how, level=level, sort=sort)\n if not return_indexers:\n return join_index\n\n if lidx is not None:\n lidx = ensure_platform_int(lidx)\n if ridx is not None:\n ridx = ensure_platform_int(ridx)\n return join_index, lidx, ridx\n\n return cast(F, join)\n\n\ndef disallow_kwargs(kwargs: dict[str, Any]) -> None:\n if kwargs:\n raise TypeError(f\"Unexpected keyword arguments {repr(set(kwargs))}\")\n\n\ndef _new_Index(cls, d):\n \"\"\"\n This is called upon unpickling, rather than the default which doesn't\n have arguments and breaks __new__.\n \"\"\"\n # required for backward compat, because PI can't be instantiated with\n # ordinals through __new__ GH #13277\n if issubclass(cls, ABCPeriodIndex):\n from pandas.core.indexes.period import _new_PeriodIndex\n\n return _new_PeriodIndex(cls, **d)\n\n if issubclass(cls, ABCMultiIndex):\n if \"labels\" in d and \"codes\" not in d:\n # GH#23752 \"labels\" kwarg has been replaced with \"codes\"\n d[\"codes\"] = d.pop(\"labels\")\n\n return cls.__new__(cls, **d)\n\n\n_IndexT = TypeVar(\"_IndexT\", bound=\"Index\")\n\n\nclass Index(IndexOpsMixin, PandasObject):\n \"\"\"\n Immutable sequence used for indexing and alignment. The basic object\n storing axis labels for all pandas objects.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype (default: object)\n If dtype is None, we find the dtype that best fits the data.\n If an actual dtype is provided, we coerce to that dtype if it's safe.\n Otherwise, an error will be raised.\n copy : bool\n Make a copy of input ndarray.\n name : object\n Name to be stored in the index.\n tupleize_cols : bool (default: True)\n When True, attempt to create a MultiIndex if possible.\n\n See Also\n --------\n RangeIndex : Index implementing a monotonic integer range.\n CategoricalIndex : Index of :class:`Categorical` s.\n MultiIndex : A multi-level, or hierarchical Index.\n IntervalIndex : An Index of :class:`Interval` s.\n DatetimeIndex : Index of datetime64 data.\n TimedeltaIndex : Index of timedelta64 data.\n PeriodIndex : Index of Period data.\n Int64Index : A special case of :class:`Index` with purely integer labels.\n UInt64Index : A special case of :class:`Index` with purely unsigned integer labels.\n Float64Index : A special case of :class:`Index` with purely float labels.\n\n Notes\n -----\n An Index instance can **only** contain hashable objects\n\n Examples\n --------\n >>> pd.Index([1, 2, 3])\n Int64Index([1, 2, 3], dtype='int64')\n\n >>> pd.Index(list('abc'))\n Index(['a', 'b', 'c'], dtype='object')\n \"\"\"\n\n # tolist is not actually deprecated, just suppressed in the __dir__\n _hidden_attrs: frozenset[str] = (\n PandasObject._hidden_attrs\n | IndexOpsMixin._hidden_attrs\n | frozenset([\"contains\", \"set_value\"])\n )\n\n # To hand over control to subclasses\n _join_precedence = 1\n\n # Cython methods; see github.com/cython/cython/issues/2647\n # for why we need to wrap these instead of making them class attributes\n # Moreover, cython will choose 
the appropriate-dtyped sub-function\n # given the dtypes of the passed arguments\n\n @final\n def _left_indexer_unique(self: _IndexT, other: _IndexT) -> npt.NDArray[np.intp]:\n # Caller is responsible for ensuring other.dtype == self.dtype\n sv = self._get_join_target()\n ov = other._get_join_target()\n return libjoin.left_join_indexer_unique(sv, ov)\n\n @final\n def _left_indexer(\n self: _IndexT, other: _IndexT\n ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n # Caller is responsible for ensuring other.dtype == self.dtype\n sv = self._get_join_target()\n ov = other._get_join_target()\n joined_ndarray, lidx, ridx = libjoin.left_join_indexer(sv, ov)\n joined = self._from_join_target(joined_ndarray)\n return joined, lidx, ridx\n\n @final\n def _inner_indexer(\n self: _IndexT, other: _IndexT\n ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n # Caller is responsible for ensuring other.dtype == self.dtype\n sv = self._get_join_target()\n ov = other._get_join_target()\n joined_ndarray, lidx, ridx = libjoin.inner_join_indexer(sv, ov)\n joined = self._from_join_target(joined_ndarray)\n return joined, lidx, ridx\n\n @final\n def _outer_indexer(\n self: _IndexT, other: _IndexT\n ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n # Caller is responsible for ensuring other.dtype == self.dtype\n sv = self._get_join_target()\n ov = other._get_join_target()\n joined_ndarray, lidx, ridx = libjoin.outer_join_indexer(sv, ov)\n joined = self._from_join_target(joined_ndarray)\n return joined, lidx, ridx\n\n _typ: str = \"index\"\n _data: ExtensionArray | np.ndarray\n _id: object | None = None\n _name: Hashable = None\n # MultiIndex.levels previously allowed setting the index name. We\n # don't allow this anymore, and raise if it happens rather than\n # failing silently.\n _no_setting_name: bool = False\n _comparables: list[str] = [\"name\"]\n _attributes: list[str] = [\"name\"]\n _is_numeric_dtype: bool = False\n _can_hold_na: bool = True\n _can_hold_strings: bool = True\n\n _engine_type: type[libindex.IndexEngine] = libindex.ObjectEngine\n # whether we support partial string indexing. Overridden\n # in DatetimeIndex and PeriodIndex\n _supports_partial_string_indexing = False\n\n _accessors = {\"str\"}\n\n str = CachedAccessor(\"str\", StringMethods)\n\n # --------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls, data=None, dtype=None, copy=False, name=None, tupleize_cols=True, **kwargs\n ) -> Index:\n\n if kwargs:\n warnings.warn(\n \"Passing keywords other than 'data', 'dtype', 'copy', 'name', \"\n \"'tupleize_cols' is deprecated and will raise TypeError in a \"\n \"future version. 
Use the specific Index subclass directly instead\",\n FutureWarning,\n stacklevel=2,\n )\n\n from pandas.core.arrays import PandasArray\n from pandas.core.indexes.range import RangeIndex\n\n name = maybe_extract_name(name, data, cls)\n\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n if \"tz\" in kwargs:\n tz = kwargs.pop(\"tz\")\n validate_tz_from_dtype(dtype, tz)\n dtype = tz_to_dtype(tz)\n\n if isinstance(data, PandasArray):\n # ensure users don't accidentally put a PandasArray in an index.\n data = data.to_numpy()\n if isinstance(dtype, PandasDtype):\n dtype = dtype.numpy_dtype\n\n data_dtype = getattr(data, \"dtype\", None)\n\n # range\n if isinstance(data, (range, RangeIndex)):\n result = RangeIndex(start=data, copy=copy, name=name)\n if dtype is not None:\n return result.astype(dtype, copy=False)\n return result\n\n elif is_ea_or_datetimelike_dtype(dtype):\n # non-EA dtype indexes have special casting logic, so we punt here\n klass = cls._dtype_to_subclass(dtype)\n if klass is not Index:\n return klass(data, dtype=dtype, copy=copy, name=name, **kwargs)\n\n ea_cls = dtype.construct_array_type()\n data = ea_cls._from_sequence(data, dtype=dtype, copy=copy)\n data = np.asarray(data, dtype=object)\n disallow_kwargs(kwargs)\n return Index._simple_new(data, name=name)\n\n elif is_ea_or_datetimelike_dtype(data_dtype):\n klass = cls._dtype_to_subclass(data_dtype)\n if klass is not Index:\n result = klass(data, copy=copy, name=name, **kwargs)\n if dtype is not None:\n return result.astype(dtype, copy=False)\n return result\n\n data = np.array(data, dtype=object, copy=copy)\n disallow_kwargs(kwargs)\n return Index._simple_new(data, name=name)\n\n # index-like\n elif isinstance(data, (np.ndarray, Index, ABCSeries)):\n\n if isinstance(data, ABCMultiIndex):\n data = data._values\n\n if dtype is not None:\n # we need to avoid having numpy coerce\n # things that look like ints/floats to ints unless\n # they are actually ints, e.g. 
'0' and 0.0\n # should not be coerced\n # GH 11836\n data = sanitize_array(data, None, dtype=dtype, copy=copy)\n\n dtype = data.dtype\n\n if data.dtype.kind in [\"i\", \"u\", \"f\"]:\n # maybe coerce to a sub-class\n arr = data\n else:\n arr = com.asarray_tuplesafe(data, dtype=np.dtype(\"object\"))\n\n if dtype is None:\n arr = _maybe_cast_data_without_dtype(arr)\n dtype = arr.dtype\n\n if kwargs:\n return cls(arr, dtype, copy=copy, name=name, **kwargs)\n\n klass = cls._dtype_to_subclass(arr.dtype)\n arr = klass._ensure_array(arr, dtype, copy)\n disallow_kwargs(kwargs)\n return klass._simple_new(arr, name)\n\n elif is_scalar(data):\n raise cls._scalar_data_error(data)\n elif hasattr(data, \"__array__\"):\n return Index(np.asarray(data), dtype=dtype, copy=copy, name=name, **kwargs)\n else:\n\n if tupleize_cols and is_list_like(data):\n # GH21470: convert iterable to list before determining if empty\n if is_iterator(data):\n data = list(data)\n\n if data and all(isinstance(e, tuple) for e in data):\n # we must be all tuples, otherwise don't construct\n # 10697\n from pandas.core.indexes.multi import MultiIndex\n\n return MultiIndex.from_tuples(\n data, names=name or kwargs.get(\"names\")\n )\n # other iterable of some kind\n\n subarr = com.asarray_tuplesafe(data, dtype=np.dtype(\"object\"))\n return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)\n\n @classmethod\n def _ensure_array(cls, data, dtype, copy: bool):\n \"\"\"\n Ensure we have a valid array to pass to _simple_new.\n \"\"\"\n if data.ndim > 1:\n # GH#13601, GH#20285, GH#27125\n raise ValueError(\"Index data must be 1-dimensional\")\n if copy:\n # asarray_tuplesafe does not always copy underlying data,\n # so need to make sure that this happens\n data = data.copy()\n return data\n\n @final\n @classmethod\n def _dtype_to_subclass(cls, dtype: DtypeObj):\n # Delay import for perf. 
https://github.com/pandas-dev/pandas/pull/31423\n\n if isinstance(dtype, ExtensionDtype):\n if isinstance(dtype, DatetimeTZDtype):\n from pandas import DatetimeIndex\n\n return DatetimeIndex\n elif isinstance(dtype, CategoricalDtype):\n from pandas import CategoricalIndex\n\n return CategoricalIndex\n elif isinstance(dtype, IntervalDtype):\n from pandas import IntervalIndex\n\n return IntervalIndex\n elif isinstance(dtype, PeriodDtype):\n from pandas import PeriodIndex\n\n return PeriodIndex\n\n elif isinstance(dtype, SparseDtype):\n return cls._dtype_to_subclass(dtype.subtype)\n\n return Index\n\n if dtype.kind == \"M\":\n from pandas import DatetimeIndex\n\n return DatetimeIndex\n\n elif dtype.kind == \"m\":\n from pandas import TimedeltaIndex\n\n return TimedeltaIndex\n\n elif is_float_dtype(dtype):\n from pandas import Float64Index\n\n return Float64Index\n elif is_unsigned_integer_dtype(dtype):\n from pandas import UInt64Index\n\n return UInt64Index\n elif is_signed_integer_dtype(dtype):\n from pandas import Int64Index\n\n return Int64Index\n\n # error: Non-overlapping equality check (left operand type: \"dtype[Any]\", right\n # operand type: \"Type[object]\")\n elif dtype == object: # type: ignore[comparison-overlap]\n # NB: assuming away MultiIndex\n return Index\n\n elif issubclass(dtype.type, (str, bool, np.bool_)):\n return Index\n\n raise NotImplementedError(dtype)\n\n \"\"\"\n NOTE for new Index creation:\n\n - _simple_new: It returns new Index with the same type as the caller.\n All metadata (such as name) must be provided by caller's responsibility.\n Using _shallow_copy is recommended because it fills these metadata\n otherwise specified.\n\n - _shallow_copy: It returns new Index with the same type (using\n _simple_new), but fills caller's metadata otherwise specified. Passed\n kwargs will overwrite corresponding metadata.\n\n See each method's docstring.\n \"\"\"\n\n @property\n def asi8(self):\n \"\"\"\n Integer representation of the values.\n\n Returns\n -------\n ndarray\n An ndarray with int64 dtype.\n \"\"\"\n warnings.warn(\n \"Index.asi8 is deprecated and will be removed in a future version\",\n FutureWarning,\n stacklevel=2,\n )\n return None\n\n @classmethod\n def _simple_new(cls: type[_IndexT], values, name: Hashable = None) -> _IndexT:\n \"\"\"\n We require that we have a dtype compat for the values. If we are passed\n a non-dtype compat, then coerce using the constructor.\n\n Must be careful not to recurse.\n \"\"\"\n assert isinstance(values, np.ndarray), type(values)\n\n result = object.__new__(cls)\n result._data = values\n # _index_data is a (temporary?) 
fix to ensure that the direct data\n # manipulation we do in `_libs/reduction.pyx` continues to work.\n # We need access to the actual ndarray, since we're messing with\n # data buffers and strides.\n result._index_data = values\n result._name = name\n result._cache = {}\n result._reset_identity()\n\n return result\n\n @cache_readonly\n def _constructor(self: _IndexT) -> type[_IndexT]:\n return type(self)\n\n @final\n def _maybe_check_unique(self) -> None:\n \"\"\"\n Check that an Index has no duplicates.\n\n This is typically only called via\n `NDFrame.flags.allows_duplicate_labels.setter` when it's set to\n True (duplicates aren't allowed).\n\n Raises\n ------\n DuplicateLabelError\n When the index is not unique.\n \"\"\"\n if not self.is_unique:\n msg = \"\"\"Index has duplicates.\"\"\"\n duplicates = self._format_duplicate_message()\n msg += f\"\\n{duplicates}\"\n\n raise DuplicateLabelError(msg)\n\n @final\n def _format_duplicate_message(self) -> DataFrame:\n \"\"\"\n Construct the DataFrame for a DuplicateLabelError.\n\n This returns a DataFrame indicating the labels and positions\n of duplicates in an index. This should only be called when it's\n already known that duplicates are present.\n\n Examples\n --------\n >>> idx = pd.Index(['a', 'b', 'a'])\n >>> idx._format_duplicate_message()\n positions\n label\n a [0, 2]\n \"\"\"\n from pandas import Series\n\n duplicates = self[self.duplicated(keep=\"first\")].unique()\n assert len(duplicates)\n\n out = Series(np.arange(len(self))).groupby(self).agg(list)[duplicates]\n if self._is_multi:\n # test_format_duplicate_labels_message_multi\n # error: \"Type[Index]\" has no attribute \"from_tuples\" [attr-defined]\n out.index = type(self).from_tuples(out.index) # type: ignore[attr-defined]\n\n if self.nlevels == 1:\n out = out.rename_axis(\"label\")\n return out.to_frame(name=\"positions\")\n\n # --------------------------------------------------------------------\n # Index Internals Methods\n\n @final\n def _get_attributes_dict(self) -> dict[str_t, Any]:\n \"\"\"\n Return an attributes dict for my class.\n \"\"\"\n return {k: getattr(self, k, None) for k in self._attributes}\n\n def _shallow_copy(self: _IndexT, values, name: Hashable = no_default) -> _IndexT:\n \"\"\"\n Create a new Index with the same class as the caller, don't copy the\n data, use the same object attributes with passed in attributes taking\n precedence.\n\n *this is an internal non-public method*\n\n Parameters\n ----------\n values : the values to create the new Index, optional\n name : Label, defaults to self.name\n \"\"\"\n name = self._name if name is no_default else name\n\n return self._simple_new(values, name=name)\n\n def _view(self: _IndexT) -> _IndexT:\n \"\"\"\n fastpath to make a shallow copy, i.e. 
new object with same data.\n \"\"\"\n result = self._simple_new(self._values, name=self._name)\n\n result._cache = self._cache\n return result\n\n @final\n def _rename(self: _IndexT, name: Hashable) -> _IndexT:\n \"\"\"\n fastpath for rename if new name is already validated.\n \"\"\"\n result = self._view()\n result._name = name\n return result\n\n @final\n def is_(self, other) -> bool:\n \"\"\"\n More flexible, faster check like ``is`` but that works through views.\n\n Note: this is *not* the same as ``Index.identical()``, which checks\n that metadata is also the same.\n\n Parameters\n ----------\n other : object\n Other object to compare against.\n\n Returns\n -------\n bool\n True if both have same underlying data, False otherwise.\n\n See Also\n --------\n Index.identical : Works like ``Index.is_`` but also checks metadata.\n \"\"\"\n if self is other:\n return True\n elif not hasattr(other, \"_id\"):\n return False\n elif self._id is None or other._id is None:\n return False\n else:\n return self._id is other._id\n\n @final\n def _reset_identity(self) -> None:\n \"\"\"\n Initializes or resets ``_id`` attribute with new object.\n \"\"\"\n self._id = object()\n\n @final\n def _cleanup(self) -> None:\n self._engine.clear_mapping()\n\n @cache_readonly\n def _engine(self) -> libindex.IndexEngine:\n # For base class (object dtype) we get ObjectEngine\n\n # to avoid a reference cycle, bind `target_values` to a local variable, so\n # `self` is not passed into the lambda.\n target_values = self._get_engine_target()\n return self._engine_type(lambda: target_values, len(self))\n\n @final\n @cache_readonly\n def _dir_additions_for_owner(self) -> set[str_t]:\n \"\"\"\n Add the string-like labels to the owner dataframe/series dir output.\n\n If this is a MultiIndex, it's first level values are used.\n \"\"\"\n return {\n c\n for c in self.unique(level=0)[:100]\n if isinstance(c, str) and c.isidentifier()\n }\n\n # --------------------------------------------------------------------\n # Array-Like Methods\n\n # ndarray compat\n def __len__(self) -> int:\n \"\"\"\n Return the length of the Index.\n \"\"\"\n return len(self._data)\n\n def __array__(self, dtype=None) -> np.ndarray:\n \"\"\"\n The array interface, return my values.\n \"\"\"\n return np.asarray(self._data, dtype=dtype)\n\n def __array_wrap__(self, result, context=None):\n \"\"\"\n Gets called after a ufunc and other functions.\n \"\"\"\n result = lib.item_from_zerodim(result)\n if is_bool_dtype(result) or lib.is_scalar(result) or np.ndim(result) > 1:\n return result\n\n attrs = self._get_attributes_dict()\n attrs.pop(\"freq\", None) # For DatetimeIndex/TimedeltaIndex\n return Index(result, **attrs)\n\n @cache_readonly\n def dtype(self) -> DtypeObj:\n \"\"\"\n Return the dtype object of the underlying data.\n \"\"\"\n return self._data.dtype\n\n @final\n def ravel(self, order=\"C\"):\n \"\"\"\n Return an ndarray of the flattened values of the underlying data.\n\n Returns\n -------\n numpy.ndarray\n Flattened array.\n\n See Also\n --------\n numpy.ndarray.ravel : Return a flattened array.\n \"\"\"\n warnings.warn(\n \"Index.ravel returning ndarray is deprecated; in a future version \"\n \"this will return a view on self.\",\n FutureWarning,\n stacklevel=2,\n )\n values = self._get_engine_target()\n return values.ravel(order=order)\n\n def view(self, cls=None):\n\n # we need to see if we are subclassing an\n # index type here\n if cls is not None and not hasattr(cls, \"_typ\"):\n dtype = cls\n if isinstance(cls, str):\n dtype = 
pandas_dtype(cls)\n\n if isinstance(dtype, (np.dtype, ExtensionDtype)) and needs_i8_conversion(\n dtype\n ):\n if dtype.kind == \"m\" and dtype != \"m8[ns]\":\n # e.g. m8[s]\n return self._data.view(cls)\n\n arr = self._data.view(\"i8\")\n idx_cls = self._dtype_to_subclass(dtype)\n arr_cls = idx_cls._data_cls\n arr = arr_cls(self._data.view(\"i8\"), dtype=dtype)\n return idx_cls._simple_new(arr, name=self.name)\n\n result = self._data.view(cls)\n else:\n result = self._view()\n if isinstance(result, Index):\n result._id = self._id\n return result\n\n def astype(self, dtype, copy=True):\n \"\"\"\n Create an Index with values cast to dtypes.\n\n The class of a new Index is determined by dtype. When conversion is\n impossible, a TypeError exception is raised.\n\n Parameters\n ----------\n dtype : numpy dtype or pandas type\n Note that any signed integer `dtype` is treated as ``'int64'``,\n and any unsigned integer `dtype` is treated as ``'uint64'``,\n regardless of the size.\n copy : bool, default True\n By default, astype always returns a newly allocated object.\n If copy is set to False and internal requirements on dtype are\n satisfied, the original data is used to create a new Index\n or the original Index is returned.\n\n Returns\n -------\n Index\n Index with values cast to specified dtype.\n \"\"\"\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n if is_dtype_equal(self.dtype, dtype):\n return self.copy() if copy else self\n\n elif isinstance(dtype, ExtensionDtype):\n cls = dtype.construct_array_type()\n new_values = cls._from_sequence(self, dtype=dtype, copy=False)\n return Index(new_values, dtype=dtype, copy=copy, name=self.name)\n\n try:\n casted = self._values.astype(dtype, copy=copy)\n except (TypeError, ValueError) as err:\n raise TypeError(\n f\"Cannot cast {type(self).__name__} to dtype {dtype}\"\n ) from err\n return Index(casted, name=self.name, dtype=dtype)\n\n _index_shared_docs[\n \"take\"\n ] = \"\"\"\n Return a new %(klass)s of the values selected by the indices.\n\n For internal compatibility with numpy arrays.\n\n Parameters\n ----------\n indices : array-like\n Indices to be taken.\n axis : int, optional\n The axis over which to select values, always 0.\n allow_fill : bool, default True\n fill_value : scalar, default None\n If allow_fill=True and fill_value is not None, indices specified by\n -1 are regarded as NA. If Index doesn't hold NA, raise ValueError.\n\n Returns\n -------\n Index\n An index formed of elements at the given indices. 
Will be the same\n type as self, except for RangeIndex.\n\n See Also\n --------\n numpy.ndarray.take: Return an array formed from the\n elements of a at the given indices.\n \"\"\"\n\n @Appender(_index_shared_docs[\"take\"] % _index_doc_kwargs)\n def take(\n self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs\n ):\n if kwargs:\n nv.validate_take((), kwargs)\n indices = ensure_platform_int(indices)\n allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices)\n\n # Note: we discard fill_value and use self._na_value, only relevant\n # in the case where allow_fill is True and fill_value is not None\n taken = algos.take(\n self._values, indices, allow_fill=allow_fill, fill_value=self._na_value\n )\n return type(self)._simple_new(taken, name=self.name)\n\n @final\n def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool:\n \"\"\"\n We only use pandas-style take when allow_fill is True _and_\n fill_value is not None.\n \"\"\"\n if allow_fill and fill_value is not None:\n # only fill if we are passing a non-None fill_value\n if self._can_hold_na:\n if (indices < -1).any():\n raise ValueError(\n \"When allow_fill=True and fill_value is not None, \"\n \"all indices must be >= -1\"\n )\n else:\n cls_name = type(self).__name__\n raise ValueError(\n f\"Unable to fill values because {cls_name} cannot contain NA\"\n )\n else:\n allow_fill = False\n return allow_fill\n\n _index_shared_docs[\n \"repeat\"\n ] = \"\"\"\n Repeat elements of a %(klass)s.\n\n Returns a new %(klass)s where each element of the current %(klass)s\n is repeated consecutively a given number of times.\n\n Parameters\n ----------\n repeats : int or array of ints\n The number of repetitions for each element. This should be a\n non-negative integer. Repeating 0 times will return an empty\n %(klass)s.\n axis : None\n Must be ``None``. Has no effect but is accepted for compatibility\n with numpy.\n\n Returns\n -------\n repeated_index : %(klass)s\n Newly created %(klass)s with repeated elements.\n\n See Also\n --------\n Series.repeat : Equivalent function for Series.\n numpy.repeat : Similar method for :class:`numpy.ndarray`.\n\n Examples\n --------\n >>> idx = pd.Index(['a', 'b', 'c'])\n >>> idx\n Index(['a', 'b', 'c'], dtype='object')\n >>> idx.repeat(2)\n Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object')\n >>> idx.repeat([1, 2, 3])\n Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object')\n \"\"\"\n\n @Appender(_index_shared_docs[\"repeat\"] % _index_doc_kwargs)\n def repeat(self, repeats, axis=None):\n repeats = ensure_platform_int(repeats)\n nv.validate_repeat((), {\"axis\": axis})\n res_values = self._values.repeat(repeats)\n\n return type(self)._simple_new(res_values, name=self.name)\n\n # --------------------------------------------------------------------\n # Copying Methods\n\n def copy(\n self: _IndexT,\n name: Hashable | None = None,\n deep: bool = False,\n dtype: Dtype | None = None,\n names: Sequence[Hashable] | None = None,\n ) -> _IndexT:\n \"\"\"\n Make a copy of this object.\n\n Name and dtype sets those attributes on the new object.\n\n Parameters\n ----------\n name : Label, optional\n Set name for new object.\n deep : bool, default False\n dtype : numpy dtype or pandas type, optional\n Set dtype for new object.\n\n .. deprecated:: 1.2.0\n use ``astype`` method instead.\n names : list-like, optional\n Kept for compatibility with MultiIndex. 
Should not be used.\n\n Returns\n -------\n Index\n Index refer to new object which is a copy of this object.\n\n Notes\n -----\n In most cases, there should be no functional difference from using\n ``deep``, but if ``deep`` is passed it will attempt to deepcopy.\n \"\"\"\n name = self._validate_names(name=name, names=names, deep=deep)[0]\n if deep:\n new_data = self._data.copy()\n new_index = type(self)._simple_new(new_data, name=name)\n else:\n new_index = self._rename(name=name)\n\n if dtype:\n warnings.warn(\n \"parameter dtype is deprecated and will be removed in a future \"\n \"version. Use the astype method instead.\",\n FutureWarning,\n stacklevel=2,\n )\n new_index = new_index.astype(dtype)\n return new_index\n\n @final\n def __copy__(self: _IndexT, **kwargs) -> _IndexT:\n return self.copy(**kwargs)\n\n @final\n def __deepcopy__(self: _IndexT, memo=None) -> _IndexT:\n \"\"\"\n Parameters\n ----------\n memo, default None\n Standard signature. Unused\n \"\"\"\n return self.copy(deep=True)\n\n # --------------------------------------------------------------------\n # Rendering Methods\n\n @final\n def __repr__(self) -> str_t:\n \"\"\"\n Return a string representation for this object.\n \"\"\"\n klass_name = type(self).__name__\n data = self._format_data()\n attrs = self._format_attrs()\n space = self._format_space()\n attrs_str = [f\"{k}={v}\" for k, v in attrs]\n prepr = f\",{space}\".join(attrs_str)\n\n # no data provided, just attributes\n if data is None:\n data = \"\"\n\n return f\"{klass_name}({data}{prepr})\"\n\n def _format_space(self) -> str_t:\n\n # using space here controls if the attributes\n # are line separated or not (the default)\n\n # max_seq_items = get_option('display.max_seq_items')\n # if len(self) > max_seq_items:\n # space = \"\\n%s\" % (' ' * (len(klass) + 1))\n return \" \"\n\n @property\n def _formatter_func(self):\n \"\"\"\n Return the formatter function.\n \"\"\"\n return default_pprint\n\n def _format_data(self, name=None) -> str_t:\n \"\"\"\n Return the formatted data as a unicode string.\n \"\"\"\n # do we want to justify (only do so for non-objects)\n is_justify = True\n\n if self.inferred_type == \"string\":\n is_justify = False\n elif self.inferred_type == \"categorical\":\n self = cast(\"CategoricalIndex\", self)\n if is_object_dtype(self.categories):\n is_justify = False\n\n return format_object_summary(\n self,\n self._formatter_func,\n is_justify=is_justify,\n name=name,\n line_break_each_value=self._is_multi,\n )\n\n def _format_attrs(self) -> list[tuple[str_t, str_t | int]]:\n \"\"\"\n Return a list of tuples of the (attr,formatted_value).\n \"\"\"\n return format_object_attrs(self, include_dtype=not self._is_multi)\n\n @final\n def _mpl_repr(self) -> np.ndarray:\n # how to represent ourselves to matplotlib\n if isinstance(self.dtype, np.dtype) and self.dtype.kind != \"M\":\n return cast(np.ndarray, self.values)\n return self.astype(object, copy=False)._values\n\n def format(\n self,\n name: bool = False,\n formatter: Callable | None = None,\n na_rep: str_t = \"NaN\",\n ) -> list[str_t]:\n \"\"\"\n Render a string representation of the Index.\n \"\"\"\n header = []\n if name:\n header.append(\n pprint_thing(self.name, escape_chars=(\"\\t\", \"\\r\", \"\\n\"))\n if self.name is not None\n else \"\"\n )\n\n if formatter is not None:\n return header + list(self.map(formatter))\n\n return self._format_with_header(header, na_rep=na_rep)\n\n def _format_with_header(\n self, header: list[str_t], na_rep: str_t = \"NaN\"\n ) -> list[str_t]:\n from 
pandas.io.formats.format import format_array\n\n values = self._values\n\n if is_object_dtype(values.dtype):\n values = cast(np.ndarray, values)\n values = lib.maybe_convert_objects(values, safe=True)\n\n result = [pprint_thing(x, escape_chars=(\"\\t\", \"\\r\", \"\\n\")) for x in values]\n\n # could have nans\n mask = isna(values)\n if mask.any():\n result_arr = np.array(result)\n result_arr[mask] = na_rep\n result = result_arr.tolist()\n else:\n result = trim_front(format_array(values, None, justify=\"left\"))\n return header + result\n\n @final\n def to_native_types(self, slicer=None, **kwargs) -> np.ndarray:\n \"\"\"\n Format specified values of `self` and return them.\n\n .. deprecated:: 1.2.0\n\n Parameters\n ----------\n slicer : int, array-like\n An indexer into `self` that specifies which values\n are used in the formatting process.\n kwargs : dict\n Options for specifying how the values should be formatted.\n These options include the following:\n\n 1) na_rep : str\n The value that serves as a placeholder for NULL values\n 2) quoting : bool or None\n Whether or not there are quoted values in `self`\n 3) date_format : str\n The format used to represent date-like values.\n\n Returns\n -------\n numpy.ndarray\n Formatted values.\n \"\"\"\n warnings.warn(\n \"The 'to_native_types' method is deprecated and will be removed in \"\n \"a future version. Use 'astype(str)' instead.\",\n FutureWarning,\n stacklevel=2,\n )\n values = self\n if slicer is not None:\n values = values[slicer]\n return values._format_native_types(**kwargs)\n\n def _format_native_types(self, na_rep=\"\", quoting=None, **kwargs):\n \"\"\"\n Actually format specific types of the index.\n \"\"\"\n mask = isna(self)\n if not self.is_object() and not quoting:\n values = np.asarray(self).astype(str)\n else:\n values = np.array(self, dtype=object, copy=True)\n\n values[mask] = na_rep\n return values\n\n def _summary(self, name=None) -> str_t:\n \"\"\"\n Return a summarized representation.\n\n Parameters\n ----------\n name : str\n name to use in the summary representation\n\n Returns\n -------\n String with a summarized representation of the index\n \"\"\"\n if len(self) > 0:\n head = self[0]\n if hasattr(head, \"format\") and not isinstance(head, str):\n head = head.format()\n tail = self[-1]\n if hasattr(tail, \"format\") and not isinstance(tail, str):\n tail = tail.format()\n index_summary = f\", {head} to {tail}\"\n else:\n index_summary = \"\"\n\n if name is None:\n name = type(self).__name__\n return f\"{name}: {len(self)} entries{index_summary}\"\n\n # --------------------------------------------------------------------\n # Conversion Methods\n\n def to_flat_index(self):\n \"\"\"\n Identity method.\n\n This is implemented for compatibility with subclass implementations\n when chaining.\n\n Returns\n -------\n pd.Index\n Caller.\n\n See Also\n --------\n MultiIndex.to_flat_index : Subclass implementation.\n \"\"\"\n return self\n\n def to_series(self, index=None, name: Hashable = None) -> Series:\n \"\"\"\n Create a Series with both index and values equal to the index keys.\n\n Useful with map for returning an indexer based on an index.\n\n Parameters\n ----------\n index : Index, optional\n Index of resulting Series. If None, defaults to original index.\n name : str, optional\n Name of resulting Series. 
If None, defaults to name of original\n index.\n\n Returns\n -------\n Series\n The dtype will be based on the type of the Index values.\n\n See Also\n --------\n Index.to_frame : Convert an Index to a DataFrame.\n Series.to_frame : Convert Series to DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')\n\n By default, the original Index and original name is reused.\n\n >>> idx.to_series()\n animal\n Ant Ant\n Bear Bear\n Cow Cow\n Name: animal, dtype: object\n\n To enforce a new Index, specify new labels to ``index``:\n\n >>> idx.to_series(index=[0, 1, 2])\n 0 Ant\n 1 Bear\n 2 Cow\n Name: animal, dtype: object\n\n To override the name of the resulting column, specify `name`:\n\n >>> idx.to_series(name='zoo')\n animal\n Ant Ant\n Bear Bear\n Cow Cow\n Name: zoo, dtype: object\n \"\"\"\n from pandas import Series\n\n if index is None:\n index = self._view()\n if name is None:\n name = self.name\n\n return Series(self._values.copy(), index=index, name=name)\n\n def to_frame(self, index: bool = True, name: Hashable = None) -> DataFrame:\n \"\"\"\n Create a DataFrame with a column containing the Index.\n\n Parameters\n ----------\n index : bool, default True\n Set the index of the returned DataFrame as the original Index.\n\n name : object, default None\n The passed name should substitute for the index name (if it has\n one).\n\n Returns\n -------\n DataFrame\n DataFrame containing the original Index data.\n\n See Also\n --------\n Index.to_series : Convert an Index to a Series.\n Series.to_frame : Convert Series to DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')\n >>> idx.to_frame()\n animal\n animal\n Ant Ant\n Bear Bear\n Cow Cow\n\n By default, the original Index is reused. To enforce a new Index:\n\n >>> idx.to_frame(index=False)\n animal\n 0 Ant\n 1 Bear\n 2 Cow\n\n To override the name of the resulting column, specify `name`:\n\n >>> idx.to_frame(index=False, name='zoo')\n zoo\n 0 Ant\n 1 Bear\n 2 Cow\n \"\"\"\n from pandas import DataFrame\n\n if name is None:\n name = self.name or 0\n result = DataFrame({name: self._values.copy()})\n\n if index:\n result.index = self\n return result\n\n # --------------------------------------------------------------------\n # Name-Centric Methods\n\n @property\n def name(self):\n \"\"\"\n Return Index or MultiIndex name.\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, value: Hashable):\n if self._no_setting_name:\n # Used in MultiIndex.levels to avoid silently ignoring name updates.\n raise RuntimeError(\n \"Cannot set name on a level of a MultiIndex. 
Use \"\n \"'MultiIndex.set_names' instead.\"\n )\n maybe_extract_name(value, None, type(self))\n self._name = value\n\n @final\n def _validate_names(\n self, name=None, names=None, deep: bool = False\n ) -> list[Hashable]:\n \"\"\"\n Handles the quirks of having a singular 'name' parameter for general\n Index and plural 'names' parameter for MultiIndex.\n \"\"\"\n from copy import deepcopy\n\n if names is not None and name is not None:\n raise TypeError(\"Can only provide one of `names` and `name`\")\n elif names is None and name is None:\n new_names = deepcopy(self.names) if deep else self.names\n elif names is not None:\n if not is_list_like(names):\n raise TypeError(\"Must pass list-like as `names`.\")\n new_names = names\n elif not is_list_like(name):\n new_names = [name]\n else:\n new_names = name\n\n if len(new_names) != len(self.names):\n raise ValueError(\n f\"Length of new names must be {len(self.names)}, got {len(new_names)}\"\n )\n\n # All items in 'new_names' need to be hashable\n validate_all_hashable(*new_names, error_name=f\"{type(self).__name__}.name\")\n\n return new_names\n\n def _get_names(self) -> FrozenList:\n return FrozenList((self.name,))\n\n def _set_names(self, values, *, level=None) -> None:\n \"\"\"\n Set new names on index. Each name has to be a hashable type.\n\n Parameters\n ----------\n values : str or sequence\n name(s) to set\n level : int, level name, or sequence of int/level names (default None)\n If the index is a MultiIndex (hierarchical), level(s) to set (None\n for all levels). Otherwise level must be None\n\n Raises\n ------\n TypeError if each name is not hashable.\n \"\"\"\n if not is_list_like(values):\n raise ValueError(\"Names must be a list-like\")\n if len(values) != 1:\n raise ValueError(f\"Length of new names must be 1, got {len(values)}\")\n\n # GH 20527\n # All items in 'name' need to be hashable:\n validate_all_hashable(*values, error_name=f\"{type(self).__name__}.name\")\n\n self._name = values[0]\n\n names = property(fset=_set_names, fget=_get_names)\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\", \"names\"])\n def set_names(self, names, level=None, inplace: bool = False):\n \"\"\"\n Set Index or MultiIndex name.\n\n Able to set new names partially and by level.\n\n Parameters\n ----------\n\n names : label or list of label or dict-like for MultiIndex\n Name(s) to set.\n\n .. versionchanged:: 1.3.0\n\n level : int, label or list of int or label, optional\n If the index is a MultiIndex and names is not dict-like, level(s) to set\n (None for all levels). Otherwise level must be None.\n\n .. versionchanged:: 1.3.0\n\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Index or\n MultiIndex.\n\n Returns\n -------\n Index or None\n The same type as the caller or None if ``inplace=True``.\n\n See Also\n --------\n Index.rename : Able to set new names without level.\n\n Examples\n --------\n >>> idx = pd.Index([1, 2, 3, 4])\n >>> idx\n Int64Index([1, 2, 3, 4], dtype='int64')\n >>> idx.set_names('quarter')\n Int64Index([1, 2, 3, 4], dtype='int64', name='quarter')\n\n >>> idx = pd.MultiIndex.from_product([['python', 'cobra'],\n ... 
[2018, 2019]])\n >>> idx\n MultiIndex([('python', 2018),\n ('python', 2019),\n ( 'cobra', 2018),\n ( 'cobra', 2019)],\n )\n >>> idx.set_names(['kind', 'year'], inplace=True)\n >>> idx\n MultiIndex([('python', 2018),\n ('python', 2019),\n ( 'cobra', 2018),\n ( 'cobra', 2019)],\n names=['kind', 'year'])\n >>> idx.set_names('species', level=0)\n MultiIndex([('python', 2018),\n ('python', 2019),\n ( 'cobra', 2018),\n ( 'cobra', 2019)],\n names=['species', 'year'])\n\n When renaming levels with a dict, levels can not be passed.\n\n >>> idx.set_names({'kind': 'snake'})\n MultiIndex([('python', 2018),\n ('python', 2019),\n ( 'cobra', 2018),\n ( 'cobra', 2019)],\n names=['snake', 'year'])\n \"\"\"\n if level is not None and not isinstance(self, ABCMultiIndex):\n raise ValueError(\"Level must be None for non-MultiIndex\")\n\n elif level is not None and not is_list_like(level) and is_list_like(names):\n raise TypeError(\"Names must be a string when a single level is provided.\")\n\n elif not is_list_like(names) and level is None and self.nlevels > 1:\n raise TypeError(\"Must pass list-like as `names`.\")\n\n elif is_dict_like(names) and not isinstance(self, ABCMultiIndex):\n raise TypeError(\"Can only pass dict-like as `names` for MultiIndex.\")\n\n elif is_dict_like(names) and level is not None:\n raise TypeError(\"Can not pass level for dictlike `names`.\")\n\n if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None:\n # Transform dict to list of new names and corresponding levels\n level, names_adjusted = [], []\n for i, name in enumerate(self.names):\n if name in names.keys():\n level.append(i)\n names_adjusted.append(names[name])\n names = names_adjusted\n\n if not is_list_like(names):\n names = [names]\n if level is not None and not is_list_like(level):\n level = [level]\n\n if inplace:\n idx = self\n else:\n idx = self._view()\n\n idx._set_names(names, level=level)\n if not inplace:\n return idx\n\n def rename(self, name, inplace=False):\n \"\"\"\n Alter Index or MultiIndex name.\n\n Able to set new names without level. Defaults to returning new index.\n Length of names must match number of levels in MultiIndex.\n\n Parameters\n ----------\n name : label or list of labels\n Name(s) to set.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Index or\n MultiIndex.\n\n Returns\n -------\n Index or None\n The same type as the caller or None if ``inplace=True``.\n\n See Also\n --------\n Index.set_names : Able to set new names partially and by level.\n\n Examples\n --------\n >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score')\n >>> idx.rename('grade')\n Index(['A', 'C', 'A', 'B'], dtype='object', name='grade')\n\n >>> idx = pd.MultiIndex.from_product([['python', 'cobra'],\n ... [2018, 2019]],\n ... 
names=['kind', 'year'])\n >>> idx\n MultiIndex([('python', 2018),\n ('python', 2019),\n ( 'cobra', 2018),\n ( 'cobra', 2019)],\n names=['kind', 'year'])\n >>> idx.rename(['species', 'year'])\n MultiIndex([('python', 2018),\n ('python', 2019),\n ( 'cobra', 2018),\n ( 'cobra', 2019)],\n names=['species', 'year'])\n >>> idx.rename('species')\n Traceback (most recent call last):\n TypeError: Must pass list-like as `names`.\n \"\"\"\n return self.set_names([name], inplace=inplace)\n\n # --------------------------------------------------------------------\n # Level-Centric Methods\n\n @property\n def nlevels(self) -> int:\n \"\"\"\n Number of levels.\n \"\"\"\n return 1\n\n def _sort_levels_monotonic(self: _IndexT) -> _IndexT:\n \"\"\"\n Compat with MultiIndex.\n \"\"\"\n return self\n\n @final\n def _validate_index_level(self, level) -> None:\n \"\"\"\n Validate index level.\n\n For single-level Index getting level number is a no-op, but some\n verification must be done like in MultiIndex.\n\n \"\"\"\n if isinstance(level, int):\n if level < 0 and level != -1:\n raise IndexError(\n \"Too many levels: Index has only 1 level, \"\n f\"{level} is not a valid level number\"\n )\n elif level > 0:\n raise IndexError(\n f\"Too many levels: Index has only 1 level, not {level + 1}\"\n )\n elif level != self.name:\n raise KeyError(\n f\"Requested level ({level}) does not match index name ({self.name})\"\n )\n\n def _get_level_number(self, level) -> int:\n self._validate_index_level(level)\n return 0\n\n def sortlevel(self, level=None, ascending=True, sort_remaining=None):\n \"\"\"\n For internal compatibility with the Index API.\n\n Sort the Index. This is for compat with MultiIndex\n\n Parameters\n ----------\n ascending : bool, default True\n False to sort in descending order\n\n level, sort_remaining are compat parameters\n\n Returns\n -------\n Index\n \"\"\"\n if not isinstance(ascending, (list, bool)):\n raise TypeError(\n \"ascending must be a single bool value or\"\n \"a list of bool values of length 1\"\n )\n\n if isinstance(ascending, list):\n if len(ascending) != 1:\n raise TypeError(\"ascending must be a list of bool values of length 1\")\n ascending = ascending[0]\n\n if not isinstance(ascending, bool):\n raise TypeError(\"ascending must be a bool value\")\n\n return self.sort_values(return_indexer=True, ascending=ascending)\n\n def _get_level_values(self, level) -> Index:\n \"\"\"\n Return an Index of values for requested level.\n\n This is primarily useful to get an individual level of values from a\n MultiIndex, but is provided on Index as well for compatibility.\n\n Parameters\n ----------\n level : int or str\n It is either the integer position or the name of the level.\n\n Returns\n -------\n Index\n Calling object, as there is only one level in the Index.\n\n See Also\n --------\n MultiIndex.get_level_values : Get values for a level of a MultiIndex.\n\n Notes\n -----\n For Index, level should be 0, since there are no multiple levels.\n\n Examples\n --------\n >>> idx = pd.Index(list('abc'))\n >>> idx\n Index(['a', 'b', 'c'], dtype='object')\n\n Get level values by supplying `level` as integer:\n\n >>> idx.get_level_values(0)\n Index(['a', 'b', 'c'], dtype='object')\n \"\"\"\n self._validate_index_level(level)\n return self\n\n get_level_values = _get_level_values\n\n @final\n def droplevel(self, level=0):\n \"\"\"\n Return index with requested level(s) removed.\n\n If resulting index has only 1 level left, the result will be\n of Index type, not MultiIndex.\n\n Parameters\n 
----------\n level : int, str, or list-like, default 0\n If a string is given, must be the name of a level\n If list-like, elements must be names or indexes of levels.\n\n Returns\n -------\n Index or MultiIndex\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays(\n ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z'])\n >>> mi\n MultiIndex([(1, 3, 5),\n (2, 4, 6)],\n names=['x', 'y', 'z'])\n\n >>> mi.droplevel()\n MultiIndex([(3, 5),\n (4, 6)],\n names=['y', 'z'])\n\n >>> mi.droplevel(2)\n MultiIndex([(1, 3),\n (2, 4)],\n names=['x', 'y'])\n\n >>> mi.droplevel('z')\n MultiIndex([(1, 3),\n (2, 4)],\n names=['x', 'y'])\n\n >>> mi.droplevel(['x', 'y'])\n Int64Index([5, 6], dtype='int64', name='z')\n \"\"\"\n if not isinstance(level, (tuple, list)):\n level = [level]\n\n levnums = sorted(self._get_level_number(lev) for lev in level)[::-1]\n\n return self._drop_level_numbers(levnums)\n\n @final\n def _drop_level_numbers(self, levnums: list[int]):\n \"\"\"\n Drop MultiIndex levels by level _number_, not name.\n \"\"\"\n\n if not levnums and not isinstance(self, ABCMultiIndex):\n return self\n if len(levnums) >= self.nlevels:\n raise ValueError(\n f\"Cannot remove {len(levnums)} levels from an index with \"\n f\"{self.nlevels} levels: at least one level must be left.\"\n )\n # The two checks above guarantee that here self is a MultiIndex\n self = cast(\"MultiIndex\", self)\n\n new_levels = list(self.levels)\n new_codes = list(self.codes)\n new_names = list(self.names)\n\n for i in levnums:\n new_levels.pop(i)\n new_codes.pop(i)\n new_names.pop(i)\n\n if len(new_levels) == 1:\n lev = new_levels[0]\n\n if len(lev) == 0:\n # If lev is empty, lev.take will fail GH#42055\n res_values = algos.take(lev._values, new_codes[0], allow_fill=True)\n result = type(lev)._simple_new(res_values, name=new_names[0])\n else:\n # set nan if needed\n mask = new_codes[0] == -1\n result = new_levels[0].take(new_codes[0])\n if mask.any():\n result = result.putmask(mask, np.nan)\n\n result._name = new_names[0]\n\n return result\n else:\n from pandas.core.indexes.multi import MultiIndex\n\n return MultiIndex(\n levels=new_levels,\n codes=new_codes,\n names=new_names,\n verify_integrity=False,\n )\n\n def _get_grouper_for_level(self, mapper, *, level=None):\n \"\"\"\n Get index grouper corresponding to an index level\n\n Parameters\n ----------\n mapper: Group mapping function or None\n Function mapping index values to groups\n level : int or None\n Index level, positional\n\n Returns\n -------\n grouper : Index\n Index of values to group on.\n labels : ndarray of int or None\n Array of locations in level_index.\n uniques : Index or None\n Index of unique values for level.\n \"\"\"\n assert level is None or level == 0\n if mapper is None:\n grouper = self\n else:\n grouper = self.map(mapper)\n\n return grouper, None, None\n\n # --------------------------------------------------------------------\n # Introspection Methods\n\n @final\n @property\n def is_monotonic(self) -> bool:\n \"\"\"\n Alias for is_monotonic_increasing.\n \"\"\"\n return self.is_monotonic_increasing\n\n @property\n def is_monotonic_increasing(self) -> bool:\n \"\"\"\n Return if the index is monotonic increasing (only equal or\n increasing) values.\n\n Examples\n --------\n >>> Index([1, 2, 3]).is_monotonic_increasing\n True\n >>> Index([1, 2, 2]).is_monotonic_increasing\n True\n >>> Index([1, 3, 2]).is_monotonic_increasing\n False\n \"\"\"\n return self._engine.is_monotonic_increasing\n\n @property\n def is_monotonic_decreasing(self) -> bool:\n 
\"\"\"\n Return if the index is monotonic decreasing (only equal or\n decreasing) values.\n\n Examples\n --------\n >>> Index([3, 2, 1]).is_monotonic_decreasing\n True\n >>> Index([3, 2, 2]).is_monotonic_decreasing\n True\n >>> Index([3, 1, 2]).is_monotonic_decreasing\n False\n \"\"\"\n return self._engine.is_monotonic_decreasing\n\n @final\n @property\n def _is_strictly_monotonic_increasing(self) -> bool:\n \"\"\"\n Return if the index is strictly monotonic increasing\n (only increasing) values.\n\n Examples\n --------\n >>> Index([1, 2, 3])._is_strictly_monotonic_increasing\n True\n >>> Index([1, 2, 2])._is_strictly_monotonic_increasing\n False\n >>> Index([1, 3, 2])._is_strictly_monotonic_increasing\n False\n \"\"\"\n return self.is_unique and self.is_monotonic_increasing\n\n @final\n @property\n def _is_strictly_monotonic_decreasing(self) -> bool:\n \"\"\"\n Return if the index is strictly monotonic decreasing\n (only decreasing) values.\n\n Examples\n --------\n >>> Index([3, 2, 1])._is_strictly_monotonic_decreasing\n True\n >>> Index([3, 2, 2])._is_strictly_monotonic_decreasing\n False\n >>> Index([3, 1, 2])._is_strictly_monotonic_decreasing\n False\n \"\"\"\n return self.is_unique and self.is_monotonic_decreasing\n\n @cache_readonly\n def is_unique(self) -> bool:\n \"\"\"\n Return if the index has unique values.\n \"\"\"\n return self._engine.is_unique\n\n @final\n @property\n def has_duplicates(self) -> bool:\n \"\"\"\n Check if the Index has duplicate values.\n\n Returns\n -------\n bool\n Whether or not the Index has duplicate values.\n\n Examples\n --------\n >>> idx = pd.Index([1, 5, 7, 7])\n >>> idx.has_duplicates\n True\n\n >>> idx = pd.Index([1, 5, 7])\n >>> idx.has_duplicates\n False\n\n >>> idx = pd.Index([\"Watermelon\", \"Orange\", \"Apple\",\n ... \"Watermelon\"]).astype(\"category\")\n >>> idx.has_duplicates\n True\n\n >>> idx = pd.Index([\"Orange\", \"Apple\",\n ... 
\"Watermelon\"]).astype(\"category\")\n >>> idx.has_duplicates\n False\n \"\"\"\n return not self.is_unique\n\n @final\n def is_boolean(self) -> bool:\n \"\"\"\n Check if the Index only consists of booleans.\n\n Returns\n -------\n bool\n Whether or not the Index only consists of booleans.\n\n See Also\n --------\n is_integer : Check if the Index only consists of integers.\n is_floating : Check if the Index is a floating type.\n is_numeric : Check if the Index only consists of numeric data.\n is_object : Check if the Index is of the object dtype.\n is_categorical : Check if the Index holds categorical data.\n is_interval : Check if the Index holds Interval objects.\n is_mixed : Check if the Index holds data with mixed data types.\n\n Examples\n --------\n >>> idx = pd.Index([True, False, True])\n >>> idx.is_boolean()\n True\n\n >>> idx = pd.Index([\"True\", \"False\", \"True\"])\n >>> idx.is_boolean()\n False\n\n >>> idx = pd.Index([True, False, \"True\"])\n >>> idx.is_boolean()\n False\n \"\"\"\n return self.inferred_type in [\"boolean\"]\n\n @final\n def is_integer(self) -> bool:\n \"\"\"\n Check if the Index only consists of integers.\n\n Returns\n -------\n bool\n Whether or not the Index only consists of integers.\n\n See Also\n --------\n is_boolean : Check if the Index only consists of booleans.\n is_floating : Check if the Index is a floating type.\n is_numeric : Check if the Index only consists of numeric data.\n is_object : Check if the Index is of the object dtype.\n is_categorical : Check if the Index holds categorical data.\n is_interval : Check if the Index holds Interval objects.\n is_mixed : Check if the Index holds data with mixed data types.\n\n Examples\n --------\n >>> idx = pd.Index([1, 2, 3, 4])\n >>> idx.is_integer()\n True\n\n >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])\n >>> idx.is_integer()\n False\n\n >>> idx = pd.Index([\"Apple\", \"Mango\", \"Watermelon\"])\n >>> idx.is_integer()\n False\n \"\"\"\n return self.inferred_type in [\"integer\"]\n\n @final\n def is_floating(self) -> bool:\n \"\"\"\n Check if the Index is a floating type.\n\n The Index may consist of only floats, NaNs, or a mix of floats,\n integers, or NaNs.\n\n Returns\n -------\n bool\n Whether or not the Index only consists of only consists of floats, NaNs, or\n a mix of floats, integers, or NaNs.\n\n See Also\n --------\n is_boolean : Check if the Index only consists of booleans.\n is_integer : Check if the Index only consists of integers.\n is_numeric : Check if the Index only consists of numeric data.\n is_object : Check if the Index is of the object dtype.\n is_categorical : Check if the Index holds categorical data.\n is_interval : Check if the Index holds Interval objects.\n is_mixed : Check if the Index holds data with mixed data types.\n\n Examples\n --------\n >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])\n >>> idx.is_floating()\n True\n\n >>> idx = pd.Index([1.0, 2.0, np.nan, 4.0])\n >>> idx.is_floating()\n True\n\n >>> idx = pd.Index([1, 2, 3, 4, np.nan])\n >>> idx.is_floating()\n True\n\n >>> idx = pd.Index([1, 2, 3, 4])\n >>> idx.is_floating()\n False\n \"\"\"\n return self.inferred_type in [\"floating\", \"mixed-integer-float\", \"integer-na\"]\n\n @final\n def is_numeric(self) -> bool:\n \"\"\"\n Check if the Index only consists of numeric data.\n\n Returns\n -------\n bool\n Whether or not the Index only consists of numeric data.\n\n See Also\n --------\n is_boolean : Check if the Index only consists of booleans.\n is_integer : Check if the Index only consists of integers.\n is_floating : 
Check if the Index is a floating type.\n is_object : Check if the Index is of the object dtype.\n is_categorical : Check if the Index holds categorical data.\n is_interval : Check if the Index holds Interval objects.\n is_mixed : Check if the Index holds data with mixed data types.\n\n Examples\n --------\n >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])\n >>> idx.is_numeric()\n True\n\n >>> idx = pd.Index([1, 2, 3, 4.0])\n >>> idx.is_numeric()\n True\n\n >>> idx = pd.Index([1, 2, 3, 4])\n >>> idx.is_numeric()\n True\n\n >>> idx = pd.Index([1, 2, 3, 4.0, np.nan])\n >>> idx.is_numeric()\n True\n\n >>> idx = pd.Index([1, 2, 3, 4.0, np.nan, \"Apple\"])\n >>> idx.is_numeric()\n False\n \"\"\"\n return self.inferred_type in [\"integer\", \"floating\"]\n\n @final\n def is_object(self) -> bool:\n \"\"\"\n Check if the Index is of the object dtype.\n\n Returns\n -------\n bool\n Whether or not the Index is of the object dtype.\n\n See Also\n --------\n is_boolean : Check if the Index only consists of booleans.\n is_integer : Check if the Index only consists of integers.\n is_floating : Check if the Index is a floating type.\n is_numeric : Check if the Index only consists of numeric data.\n is_categorical : Check if the Index holds categorical data.\n is_interval : Check if the Index holds Interval objects.\n is_mixed : Check if the Index holds data with mixed data types.\n\n Examples\n --------\n >>> idx = pd.Index([\"Apple\", \"Mango\", \"Watermelon\"])\n >>> idx.is_object()\n True\n\n >>> idx = pd.Index([\"Apple\", \"Mango\", 2.0])\n >>> idx.is_object()\n True\n\n >>> idx = pd.Index([\"Watermelon\", \"Orange\", \"Apple\",\n ... \"Watermelon\"]).astype(\"category\")\n >>> idx.is_object()\n False\n\n >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])\n >>> idx.is_object()\n False\n \"\"\"\n return is_object_dtype(self.dtype)\n\n @final\n def is_categorical(self) -> bool:\n \"\"\"\n Check if the Index holds categorical data.\n\n Returns\n -------\n bool\n True if the Index is categorical.\n\n See Also\n --------\n CategoricalIndex : Index for categorical data.\n is_boolean : Check if the Index only consists of booleans.\n is_integer : Check if the Index only consists of integers.\n is_floating : Check if the Index is a floating type.\n is_numeric : Check if the Index only consists of numeric data.\n is_object : Check if the Index is of the object dtype.\n is_interval : Check if the Index holds Interval objects.\n is_mixed : Check if the Index holds data with mixed data types.\n\n Examples\n --------\n >>> idx = pd.Index([\"Watermelon\", \"Orange\", \"Apple\",\n ... 
\"Watermelon\"]).astype(\"category\")\n >>> idx.is_categorical()\n True\n\n >>> idx = pd.Index([1, 3, 5, 7])\n >>> idx.is_categorical()\n False\n\n >>> s = pd.Series([\"Peter\", \"Victor\", \"Elisabeth\", \"Mar\"])\n >>> s\n 0 Peter\n 1 Victor\n 2 Elisabeth\n 3 Mar\n dtype: object\n >>> s.index.is_categorical()\n False\n \"\"\"\n return self.inferred_type in [\"categorical\"]\n\n @final\n def is_interval(self) -> bool:\n \"\"\"\n Check if the Index holds Interval objects.\n\n Returns\n -------\n bool\n Whether or not the Index holds Interval objects.\n\n See Also\n --------\n IntervalIndex : Index for Interval objects.\n is_boolean : Check if the Index only consists of booleans.\n is_integer : Check if the Index only consists of integers.\n is_floating : Check if the Index is a floating type.\n is_numeric : Check if the Index only consists of numeric data.\n is_object : Check if the Index is of the object dtype.\n is_categorical : Check if the Index holds categorical data.\n is_mixed : Check if the Index holds data with mixed data types.\n\n Examples\n --------\n >>> idx = pd.Index([pd.Interval(left=0, right=5),\n ... pd.Interval(left=5, right=10)])\n >>> idx.is_interval()\n True\n\n >>> idx = pd.Index([1, 3, 5, 7])\n >>> idx.is_interval()\n False\n \"\"\"\n return self.inferred_type in [\"interval\"]\n\n @final\n def is_mixed(self) -> bool:\n \"\"\"\n Check if the Index holds data with mixed data types.\n\n Returns\n -------\n bool\n Whether or not the Index holds data with mixed data types.\n\n See Also\n --------\n is_boolean : Check if the Index only consists of booleans.\n is_integer : Check if the Index only consists of integers.\n is_floating : Check if the Index is a floating type.\n is_numeric : Check if the Index only consists of numeric data.\n is_object : Check if the Index is of the object dtype.\n is_categorical : Check if the Index holds categorical data.\n is_interval : Check if the Index holds Interval objects.\n\n Examples\n --------\n >>> idx = pd.Index(['a', np.nan, 'b'])\n >>> idx.is_mixed()\n True\n\n >>> idx = pd.Index([1.0, 2.0, 3.0, 5.0])\n >>> idx.is_mixed()\n False\n \"\"\"\n warnings.warn(\n \"Index.is_mixed is deprecated and will be removed in a future version. \"\n \"Check index.inferred_type directly instead.\",\n FutureWarning,\n stacklevel=2,\n )\n return self.inferred_type in [\"mixed\"]\n\n @final\n def holds_integer(self) -> bool:\n \"\"\"\n Whether the type is an integer type.\n \"\"\"\n return self.inferred_type in [\"integer\", \"mixed-integer\"]\n\n @cache_readonly\n def inferred_type(self) -> str_t:\n \"\"\"\n Return a string of the type inferred from the values.\n \"\"\"\n return lib.infer_dtype(self._values, skipna=False)\n\n @cache_readonly\n def _is_all_dates(self) -> bool:\n \"\"\"\n Whether or not the index values only consist of dates.\n \"\"\"\n return is_datetime_array(ensure_object(self._values))\n\n @cache_readonly\n @final\n def is_all_dates(self) -> bool:\n \"\"\"\n Whether or not the index values only consist of dates.\n \"\"\"\n warnings.warn(\n \"Index.is_all_dates is deprecated, will be removed in a future version. 
\"\n \"check index.inferred_type instead\",\n FutureWarning,\n stacklevel=2,\n )\n return self._is_all_dates\n\n @cache_readonly\n def _is_multi(self) -> bool:\n \"\"\"\n Cached check equivalent to isinstance(self, MultiIndex)\n \"\"\"\n return isinstance(self, ABCMultiIndex)\n\n # --------------------------------------------------------------------\n # Pickle Methods\n\n def __reduce__(self):\n d = {\"data\": self._data}\n d.update(self._get_attributes_dict())\n return _new_Index, (type(self), d), None\n\n # --------------------------------------------------------------------\n # Null Handling Methods\n\n _na_value: float | NaTType = np.nan\n \"\"\"The expected NA value to use with this index.\"\"\"\n\n @cache_readonly\n def _isnan(self) -> np.ndarray:\n \"\"\"\n Return if each value is NaN.\n \"\"\"\n if self._can_hold_na:\n return isna(self)\n else:\n # shouldn't reach to this condition by checking hasnans beforehand\n values = np.empty(len(self), dtype=np.bool_)\n values.fill(False)\n return values\n\n @cache_readonly\n def hasnans(self) -> bool:\n \"\"\"\n Return if I have any nans; enables various perf speedups.\n \"\"\"\n if self._can_hold_na:\n return bool(self._isnan.any())\n else:\n return False\n\n @final\n def isna(self) -> np.ndarray:\n \"\"\"\n Detect missing values.\n\n Return a boolean same-sized object indicating if the values are NA.\n NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get\n mapped to ``True`` values.\n Everything else get mapped to ``False`` values. Characters such as\n empty strings `''` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n\n Returns\n -------\n numpy.ndarray[bool]\n A boolean array of whether my values are NA.\n\n See Also\n --------\n Index.notna : Boolean inverse of isna.\n Index.dropna : Omit entries with missing values.\n isna : Top-level isna.\n Series.isna : Detect missing values in Series object.\n\n Examples\n --------\n Show which entries in a pandas.Index are NA. The result is an\n array.\n\n >>> idx = pd.Index([5.2, 6.0, np.NaN])\n >>> idx\n Float64Index([5.2, 6.0, nan], dtype='float64')\n >>> idx.isna()\n array([False, False, True])\n\n Empty strings are not considered NA values. None is considered an NA\n value.\n\n >>> idx = pd.Index(['black', '', 'red', None])\n >>> idx\n Index(['black', '', 'red', None], dtype='object')\n >>> idx.isna()\n array([False, False, False, True])\n\n For datetimes, `NaT` (Not a Time) is considered as an NA value.\n\n >>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'),\n ... pd.Timestamp(''), None, pd.NaT])\n >>> idx\n DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],\n dtype='datetime64[ns]', freq=None)\n >>> idx.isna()\n array([False, True, True, True])\n \"\"\"\n return self._isnan\n\n isnull = isna\n\n @final\n def notna(self) -> np.ndarray:\n \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to ``True``. 
Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False``\n values.\n\n Returns\n -------\n numpy.ndarray[bool]\n Boolean array to indicate which entries are not NA.\n\n See Also\n --------\n Index.notnull : Alias of notna.\n Index.isna: Inverse of notna.\n notna : Top-level notna.\n\n Examples\n --------\n Show which entries in an Index are not NA. The result is an\n array.\n\n >>> idx = pd.Index([5.2, 6.0, np.NaN])\n >>> idx\n Float64Index([5.2, 6.0, nan], dtype='float64')\n >>> idx.notna()\n array([ True, True, False])\n\n Empty strings are not considered NA values. None is considered a NA\n value.\n\n >>> idx = pd.Index(['black', '', 'red', None])\n >>> idx\n Index(['black', '', 'red', None], dtype='object')\n >>> idx.notna()\n array([ True, True, True, False])\n \"\"\"\n return ~self.isna()\n\n notnull = notna\n\n def fillna(self, value=None, downcast=None):\n \"\"\"\n Fill NA/NaN values with the specified value.\n\n Parameters\n ----------\n value : scalar\n Scalar value to use to fill holes (e.g. 0).\n This value cannot be a list-likes.\n downcast : dict, default is None\n A dict of item->dtype of what to downcast if possible,\n or the string 'infer' which will try to downcast to an appropriate\n equal type (e.g. float64 to int64 if possible).\n\n Returns\n -------\n Index\n\n See Also\n --------\n DataFrame.fillna : Fill NaN values of a DataFrame.\n Series.fillna : Fill NaN Values of a Series.\n \"\"\"\n value = self._require_scalar(value)\n if self.hasnans:\n result = self.putmask(self._isnan, value)\n if downcast is None:\n # no need to care metadata other than name\n # because it can't have freq if\n return Index(result, name=self.name)\n return self._view()\n\n def dropna(self: _IndexT, how: str_t = \"any\") -> _IndexT:\n \"\"\"\n Return Index without NA/NaN values.\n\n Parameters\n ----------\n how : {'any', 'all'}, default 'any'\n If the Index is a MultiIndex, drop the value when any or all levels\n are NaN.\n\n Returns\n -------\n Index\n \"\"\"\n if how not in (\"any\", \"all\"):\n raise ValueError(f\"invalid how option: {how}\")\n\n if self.hasnans:\n res_values = self._values[~self._isnan]\n return type(self)._simple_new(res_values, name=self.name)\n return self._view()\n\n # --------------------------------------------------------------------\n # Uniqueness Methods\n\n def unique(self: _IndexT, level: Hashable | None = None) -> _IndexT:\n \"\"\"\n Return unique values in the index.\n\n Unique values are returned in order of appearance, this does NOT sort.\n\n Parameters\n ----------\n level : int or hashable, optional\n Only return values from specified level (for MultiIndex).\n If int, gets the level by integer position, else by level name.\n\n Returns\n -------\n Index\n\n See Also\n --------\n unique : Numpy array of unique values in that column.\n Series.unique : Return unique values of Series object.\n \"\"\"\n if level is not None:\n self._validate_index_level(level)\n\n if self.is_unique:\n return self._view()\n\n result = super().unique()\n return self._shallow_copy(result)\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\"])\n def drop_duplicates(self: _IndexT, keep: str_t | bool = \"first\") -> _IndexT:\n \"\"\"\n Return Index with duplicate values removed.\n\n Parameters\n ----------\n keep : {'first', 'last', ``False``}, default 'first'\n - 'first' : Drop duplicates 
except for the first occurrence.\n - 'last' : Drop duplicates except for the last occurrence.\n - ``False`` : Drop all duplicates.\n\n Returns\n -------\n deduplicated : Index\n\n See Also\n --------\n Series.drop_duplicates : Equivalent method on Series.\n DataFrame.drop_duplicates : Equivalent method on DataFrame.\n Index.duplicated : Related method on Index, indicating duplicate\n Index values.\n\n Examples\n --------\n Generate an pandas.Index with duplicate values.\n\n >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])\n\n The `keep` parameter controls which duplicate values are removed.\n The value 'first' keeps the first occurrence for each\n set of duplicated entries. The default value of keep is 'first'.\n\n >>> idx.drop_duplicates(keep='first')\n Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object')\n\n The value 'last' keeps the last occurrence for each set of duplicated\n entries.\n\n >>> idx.drop_duplicates(keep='last')\n Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object')\n\n The value ``False`` discards all sets of duplicated entries.\n\n >>> idx.drop_duplicates(keep=False)\n Index(['cow', 'beetle', 'hippo'], dtype='object')\n \"\"\"\n if self.is_unique:\n return self._view()\n\n return super().drop_duplicates(keep=keep)\n\n def duplicated(self, keep: Literal[\"first\", \"last\", False] = \"first\") -> np.ndarray:\n \"\"\"\n Indicate duplicate index values.\n\n Duplicated values are indicated as ``True`` values in the resulting\n array. Either all duplicates, all except the first, or all except the\n last occurrence of duplicates can be indicated.\n\n Parameters\n ----------\n keep : {'first', 'last', False}, default 'first'\n The value or values in a set of duplicates to mark as missing.\n\n - 'first' : Mark duplicates as ``True`` except for the first\n occurrence.\n - 'last' : Mark duplicates as ``True`` except for the last\n occurrence.\n - ``False`` : Mark all duplicates as ``True``.\n\n Returns\n -------\n np.ndarray[bool]\n\n See Also\n --------\n Series.duplicated : Equivalent method on pandas.Series.\n DataFrame.duplicated : Equivalent method on pandas.DataFrame.\n Index.drop_duplicates : Remove duplicate values from Index.\n\n Examples\n --------\n By default, for each set of duplicated values, the first occurrence is\n set to False and all others to True:\n\n >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama'])\n >>> idx.duplicated()\n array([False, False, True, False, True])\n\n which is equivalent to\n\n >>> idx.duplicated(keep='first')\n array([False, False, True, False, True])\n\n By using 'last', the last occurrence of each set of duplicated values\n is set on False and all others on True:\n\n >>> idx.duplicated(keep='last')\n array([ True, False, True, False, False])\n\n By setting keep on ``False``, all duplicates are True:\n\n >>> idx.duplicated(keep=False)\n array([ True, False, True, False, True])\n \"\"\"\n if self.is_unique:\n # fastpath available bc we are immutable\n return np.zeros(len(self), dtype=bool)\n return self._duplicated(keep=keep)\n\n # --------------------------------------------------------------------\n # Arithmetic & Logical Methods\n\n def __iadd__(self, other):\n # alias for __add__\n return self + other\n\n @final\n def __and__(self, other):\n warnings.warn(\n \"Index.__and__ operating as a set operation is deprecated, \"\n \"in the future this will be a logical operation matching \"\n \"Series.__and__. 
Use index.intersection(other) instead\",\n FutureWarning,\n stacklevel=2,\n )\n return self.intersection(other)\n\n @final\n def __or__(self, other):\n warnings.warn(\n \"Index.__or__ operating as a set operation is deprecated, \"\n \"in the future this will be a logical operation matching \"\n \"Series.__or__. Use index.union(other) instead\",\n FutureWarning,\n stacklevel=2,\n )\n return self.union(other)\n\n @final\n def __xor__(self, other):\n warnings.warn(\n \"Index.__xor__ operating as a set operation is deprecated, \"\n \"in the future this will be a logical operation matching \"\n \"Series.__xor__. Use index.symmetric_difference(other) instead\",\n FutureWarning,\n stacklevel=2,\n )\n return self.symmetric_difference(other)\n\n @final\n def __nonzero__(self):\n raise ValueError(\n f\"The truth value of a {type(self).__name__} is ambiguous. \"\n \"Use a.empty, a.bool(), a.item(), a.any() or a.all().\"\n )\n\n __bool__ = __nonzero__\n\n # --------------------------------------------------------------------\n # Set Operation Methods\n\n def _get_reconciled_name_object(self, other):\n \"\"\"\n If the result of a set operation will be self,\n return self, unless the name changes, in which\n case make a shallow copy of self.\n \"\"\"\n name = get_op_result_name(self, other)\n if self.name != name:\n return self.rename(name)\n return self\n\n @final\n def _validate_sort_keyword(self, sort):\n if sort not in [None, False]:\n raise ValueError(\n \"The 'sort' keyword only takes the values of \"\n f\"None or False; {sort} was passed.\"\n )\n\n @final\n def union(self, other, sort=None):\n \"\"\"\n Form the union of two Index objects.\n\n If the Index objects are incompatible, both Index objects will be\n cast to dtype('object') first.\n\n .. versionchanged:: 0.25.0\n\n Parameters\n ----------\n other : Index or array-like\n sort : bool or None, default None\n Whether to sort the resulting Index.\n\n * None : Sort the result, except when\n\n 1. `self` and `other` are equal.\n 2. `self` or `other` has length 0.\n 3. Some values in `self` or `other` cannot be compared.\n A RuntimeWarning is issued in this case.\n\n * False : do not sort the result.\n\n Returns\n -------\n union : Index\n\n Examples\n --------\n Union matching dtypes\n\n >>> idx1 = pd.Index([1, 2, 3, 4])\n >>> idx2 = pd.Index([3, 4, 5, 6])\n >>> idx1.union(idx2)\n Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')\n\n Union mismatched dtypes\n\n >>> idx1 = pd.Index(['a', 'b', 'c', 'd'])\n >>> idx2 = pd.Index([1, 2, 3, 4])\n >>> idx1.union(idx2)\n Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object')\n\n MultiIndex case\n\n >>> idx1 = pd.MultiIndex.from_arrays(\n ... [[1, 1, 2, 2], [\"Red\", \"Blue\", \"Red\", \"Blue\"]]\n ... )\n >>> idx1\n MultiIndex([(1, 'Red'),\n (1, 'Blue'),\n (2, 'Red'),\n (2, 'Blue')],\n )\n >>> idx2 = pd.MultiIndex.from_arrays(\n ... [[3, 3, 2, 2], [\"Red\", \"Green\", \"Red\", \"Green\"]]\n ... 
)\n >>> idx2\n MultiIndex([(3, 'Red'),\n (3, 'Green'),\n (2, 'Red'),\n (2, 'Green')],\n )\n >>> idx1.union(idx2)\n MultiIndex([(1, 'Blue'),\n (1, 'Red'),\n (2, 'Blue'),\n (2, 'Green'),\n (2, 'Red'),\n (3, 'Green'),\n (3, 'Red')],\n )\n >>> idx1.union(idx2, sort=False)\n MultiIndex([(1, 'Red'),\n (1, 'Blue'),\n (2, 'Red'),\n (2, 'Blue'),\n (3, 'Red'),\n (3, 'Green'),\n (2, 'Green')],\n )\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other, result_name = self._convert_can_do_setop(other)\n\n if not is_dtype_equal(self.dtype, other.dtype):\n if (\n isinstance(self, ABCMultiIndex)\n and not is_object_dtype(unpack_nested_dtype(other))\n and len(other) > 0\n ):\n raise NotImplementedError(\n \"Can only union MultiIndex with MultiIndex or Index of tuples, \"\n \"try mi.to_flat_index().union(other) instead.\"\n )\n if (\n isinstance(self, ABCDatetimeIndex)\n and isinstance(other, ABCDatetimeIndex)\n and self.tz is not None\n and other.tz is not None\n ):\n # GH#39328\n warnings.warn(\n \"In a future version, the union of DatetimeIndex objects \"\n \"with mismatched timezones will cast both to UTC instead of \"\n \"object dtype. To retain the old behavior, \"\n \"use `index.astype(object).union(other)`\",\n FutureWarning,\n stacklevel=2,\n )\n\n dtype = self._find_common_type_compat(other)\n left = self.astype(dtype, copy=False)\n right = other.astype(dtype, copy=False)\n return left.union(right, sort=sort)\n\n elif not len(other) or self.equals(other):\n # NB: whether this (and the `if not len(self)` check below) come before\n # or after the is_dtype_equal check above affects the returned dtype\n return self._get_reconciled_name_object(other)\n\n elif not len(self):\n return other._get_reconciled_name_object(self)\n\n result = self._union(other, sort=sort)\n\n return self._wrap_setop_result(other, result)\n\n def _union(self, other: Index, sort):\n \"\"\"\n Specific union logic should go here. In subclasses, union behavior\n should be overwritten here rather than in `self.union`.\n\n Parameters\n ----------\n other : Index or array-like\n sort : False or None, default False\n Whether to sort the resulting index.\n\n * False : do not sort the result.\n * None : sort the result, except when `self` and `other` are equal\n or when the values cannot be compared.\n\n Returns\n -------\n Index\n \"\"\"\n # TODO(EA): setops-refactor, clean all this up\n lvals = self._values\n rvals = other._values\n\n if (\n sort is None\n and self.is_monotonic\n and other.is_monotonic\n and not (self.has_duplicates and other.has_duplicates)\n ):\n # Both are unique and monotonic, so can use outer join\n try:\n return self._outer_indexer(other)[0]\n except (TypeError, IncompatibleFrequency):\n # incomparable objects\n value_list = list(lvals)\n\n # worth making this faster? 
a very unusual case\n value_set = set(lvals)\n value_list.extend([x for x in rvals if x not in value_set])\n # If objects are unorderable, we must have object dtype.\n return np.array(value_list, dtype=object)\n\n elif not other.is_unique:\n # other has duplicates\n result = algos.union_with_duplicates(lvals, rvals)\n return _maybe_try_sort(result, sort)\n\n # Self may have duplicates\n # find indexes of things in \"other\" that are not in \"self\"\n if self._index_as_unique:\n indexer = self.get_indexer(other)\n missing = (indexer == -1).nonzero()[0]\n else:\n missing = algos.unique1d(self.get_indexer_non_unique(other)[1])\n\n if len(missing) > 0:\n other_diff = rvals.take(missing)\n result = concat_compat((lvals, other_diff))\n else:\n result = lvals\n\n if not self.is_monotonic or not other.is_monotonic:\n result = _maybe_try_sort(result, sort)\n\n return result\n\n @final\n def _wrap_setop_result(self, other: Index, result) -> Index:\n name = get_op_result_name(self, other)\n if isinstance(result, Index):\n if result.name != name:\n return result.rename(name)\n return result\n else:\n return self._shallow_copy(result, name=name)\n\n # TODO: standardize return type of non-union setops type(self vs other)\n @final\n def intersection(self, other, sort=False):\n \"\"\"\n Form the intersection of two Index objects.\n\n This returns a new Index with elements common to the index and `other`.\n\n Parameters\n ----------\n other : Index or array-like\n sort : False or None, default False\n Whether to sort the resulting index.\n\n * False : do not sort the result.\n * None : sort the result, except when `self` and `other` are equal\n or when the values cannot be compared.\n\n Returns\n -------\n intersection : Index\n\n Examples\n --------\n >>> idx1 = pd.Index([1, 2, 3, 4])\n >>> idx2 = pd.Index([3, 4, 5, 6])\n >>> idx1.intersection(idx2)\n Int64Index([3, 4], dtype='int64')\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other, result_name = self._convert_can_do_setop(other)\n\n if self.equals(other):\n if self.has_duplicates:\n return self.unique()._get_reconciled_name_object(other)\n return self._get_reconciled_name_object(other)\n\n if len(self) == 0 or len(other) == 0:\n # fastpath; we need to be careful about having commutativity\n\n if self._is_multi or other._is_multi:\n # _convert_can_do_setop ensures that we have both or neither\n # We retain self.levels\n return self[:0].rename(result_name)\n\n dtype = self._find_common_type_compat(other)\n if is_dtype_equal(self.dtype, dtype):\n # Slicing allows us to retain DTI/TDI.freq, RangeIndex\n\n # Note: self[:0] vs other[:0] affects\n # 1) which index's `freq` we get in DTI/TDI cases\n # This may be a historical artifact, i.e. 
no documented\n # reason for this choice.\n # 2) The `step` we get in RangeIndex cases\n if len(self) == 0:\n return self[:0].rename(result_name)\n else:\n return other[:0].rename(result_name)\n\n return Index([], dtype=dtype, name=result_name)\n\n elif not self._should_compare(other):\n # We can infer that the intersection is empty.\n if isinstance(self, ABCMultiIndex):\n return self[:0].rename(result_name)\n return Index([], name=result_name)\n\n elif not is_dtype_equal(self.dtype, other.dtype):\n dtype = self._find_common_type_compat(other)\n this = self.astype(dtype, copy=False)\n other = other.astype(dtype, copy=False)\n return this.intersection(other, sort=sort)\n\n result = self._intersection(other, sort=sort)\n return self._wrap_intersection_result(other, result)\n\n def _intersection(self, other: Index, sort=False):\n \"\"\"\n intersection specialized to the case with matching dtypes.\n \"\"\"\n if (\n self.is_monotonic\n and other.is_monotonic\n and not is_interval_dtype(self.dtype)\n ):\n # For IntervalIndex _inner_indexer is not more performant than get_indexer,\n # so don't take this fastpath\n try:\n result = self._inner_indexer(other)[0]\n except TypeError:\n pass\n else:\n # TODO: algos.unique1d should preserve DTA/TDA\n res = algos.unique1d(result)\n return ensure_wrapped_if_datetimelike(res)\n\n res_values = self._intersection_via_get_indexer(other, sort=sort)\n res_values = _maybe_try_sort(res_values, sort)\n return res_values\n\n def _wrap_intersection_result(self, other, result):\n # We will override for MultiIndex to handle empty results\n return self._wrap_setop_result(other, result)\n\n @final\n def _intersection_via_get_indexer(self, other: Index, sort) -> ArrayLike:\n \"\"\"\n Find the intersection of two Indexes using get_indexer.\n\n Returns\n -------\n np.ndarray or ExtensionArray\n The returned array will be unique.\n \"\"\"\n left_unique = self.unique()\n right_unique = other.unique()\n\n # even though we are unique, we need get_indexer_for for IntervalIndex\n indexer = left_unique.get_indexer_for(right_unique)\n\n mask = indexer != -1\n\n taker = indexer.take(mask.nonzero()[0])\n if sort is False:\n # sort bc we want the elements in the same order they are in self\n # unnecessary in the case with sort=None bc we will sort later\n taker = np.sort(taker)\n\n result = left_unique.take(taker)._values\n return result\n\n @final\n def difference(self, other, sort=None):\n \"\"\"\n Return a new Index with elements of index not in `other`.\n\n This is the set difference of two Index objects.\n\n Parameters\n ----------\n other : Index or array-like\n sort : False or None, default None\n Whether to sort the resulting index. 
By default, the\n values are attempted to be sorted, but any TypeError from\n incomparable elements is caught by pandas.\n\n * None : Attempt to sort the result, but catch any TypeErrors\n from comparing incomparable elements.\n * False : Do not sort the result.\n\n Returns\n -------\n difference : Index\n\n Examples\n --------\n >>> idx1 = pd.Index([2, 1, 3, 4])\n >>> idx2 = pd.Index([3, 4, 5, 6])\n >>> idx1.difference(idx2)\n Int64Index([1, 2], dtype='int64')\n >>> idx1.difference(idx2, sort=False)\n Int64Index([2, 1], dtype='int64')\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other, result_name = self._convert_can_do_setop(other)\n\n if self.equals(other):\n # Note: we do not (yet) sort even if sort=None GH#24959\n return self[:0].rename(result_name)\n\n if len(other) == 0:\n # Note: we do not (yet) sort even if sort=None GH#24959\n return self.rename(result_name)\n\n if not self._should_compare(other):\n # Nothing matches -> difference is everything\n return self.rename(result_name)\n\n result = self._difference(other, sort=sort)\n return self._wrap_difference_result(other, result)\n\n def _difference(self, other, sort):\n # overridden by RangeIndex\n\n this = self.unique()\n\n indexer = this.get_indexer_for(other)\n indexer = indexer.take((indexer != -1).nonzero()[0])\n\n label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True)\n the_diff = this._values.take(label_diff)\n the_diff = _maybe_try_sort(the_diff, sort)\n\n return the_diff\n\n def _wrap_difference_result(self, other, result):\n # We will override for MultiIndex to handle empty results\n return self._wrap_setop_result(other, result)\n\n def symmetric_difference(self, other, result_name=None, sort=None):\n \"\"\"\n Compute the symmetric difference of two Index objects.\n\n Parameters\n ----------\n other : Index or array-like\n result_name : str\n sort : False or None, default None\n Whether to sort the resulting index. By default, the\n values are attempted to be sorted, but any TypeError from\n incomparable elements is caught by pandas.\n\n * None : Attempt to sort the result, but catch any TypeErrors\n from comparing incomparable elements.\n * False : Do not sort the result.\n\n Returns\n -------\n symmetric_difference : Index\n\n Notes\n -----\n ``symmetric_difference`` contains elements that appear in either\n ``idx1`` or ``idx2`` but not both. 
Equivalent to the Index created by\n ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates\n dropped.\n\n Examples\n --------\n >>> idx1 = pd.Index([1, 2, 3, 4])\n >>> idx2 = pd.Index([2, 3, 4, 5])\n >>> idx1.symmetric_difference(idx2)\n Int64Index([1, 5], dtype='int64')\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other, result_name_update = self._convert_can_do_setop(other)\n if result_name is None:\n result_name = result_name_update\n\n if not self._should_compare(other):\n return self.union(other, sort=sort).rename(result_name)\n\n elif not is_dtype_equal(self.dtype, other.dtype):\n dtype = self._find_common_type_compat(other)\n this = self.astype(dtype, copy=False)\n that = other.astype(dtype, copy=False)\n return this.symmetric_difference(that, sort=sort).rename(result_name)\n\n this = self.unique()\n other = other.unique()\n indexer = this.get_indexer_for(other)\n\n # {this} minus {other}\n common_indexer = indexer.take((indexer != -1).nonzero()[0])\n left_indexer = np.setdiff1d(\n np.arange(this.size), common_indexer, assume_unique=True\n )\n left_diff = this._values.take(left_indexer)\n\n # {other} minus {this}\n right_indexer = (indexer == -1).nonzero()[0]\n right_diff = other._values.take(right_indexer)\n\n res_values = concat_compat([left_diff, right_diff])\n res_values = _maybe_try_sort(res_values, sort)\n\n result = Index(res_values, name=result_name)\n\n if self._is_multi:\n self = cast(\"MultiIndex\", self)\n if len(result) == 0:\n # On equal symmetric_difference MultiIndexes the difference is empty.\n # Therefore, an empty MultiIndex is returned GH#13490\n return type(self)(\n levels=[[] for _ in range(self.nlevels)],\n codes=[[] for _ in range(self.nlevels)],\n names=result.name,\n )\n return type(self).from_tuples(result, names=result.name)\n\n return result\n\n @final\n def _assert_can_do_setop(self, other) -> bool:\n if not is_list_like(other):\n raise TypeError(\"Input must be Index or array-like\")\n return True\n\n def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]:\n if not isinstance(other, Index):\n other = Index(other, name=self.name)\n result_name = self.name\n else:\n result_name = get_op_result_name(self, other)\n return other, result_name\n\n # --------------------------------------------------------------------\n # Indexing Methods\n\n def get_loc(self, key, method=None, tolerance=None):\n \"\"\"\n Get integer location, slice or boolean mask for requested label.\n\n Parameters\n ----------\n key : label\n method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional\n * default: exact matches only.\n * pad / ffill: find the PREVIOUS index value if no exact match.\n * backfill / bfill: use NEXT index value if no exact match\n * nearest: use the NEAREST index value if no exact match. Tied\n distances are broken by preferring the larger index value.\n tolerance : int or float, optional\n Maximum distance from index value for inexact matches. 
The value of\n the index at the matching location must satisfy the equation\n ``abs(index[loc] - key) <= tolerance``.\n\n Returns\n -------\n loc : int if unique index, slice if monotonic index, else mask\n\n Examples\n --------\n >>> unique_index = pd.Index(list('abc'))\n >>> unique_index.get_loc('b')\n 1\n\n >>> monotonic_index = pd.Index(list('abbc'))\n >>> monotonic_index.get_loc('b')\n slice(1, 3, None)\n\n >>> non_monotonic_index = pd.Index(list('abcb'))\n >>> non_monotonic_index.get_loc('b')\n array([False, True, False, True])\n \"\"\"\n if method is None:\n if tolerance is not None:\n raise ValueError(\n \"tolerance argument only valid if using pad, \"\n \"backfill or nearest lookups\"\n )\n casted_key = self._maybe_cast_indexer(key)\n try:\n return self._engine.get_loc(casted_key)\n except KeyError as err:\n raise KeyError(key) from err\n\n # GH#42269\n warnings.warn(\n f\"Passing method to {type(self).__name__}.get_loc is deprecated \"\n \"and will raise in a future version. Use \"\n \"index.get_indexer([item], method=...) instead\",\n FutureWarning,\n stacklevel=2,\n )\n\n if is_scalar(key) and isna(key) and not self.hasnans:\n raise KeyError(key)\n\n if tolerance is not None:\n tolerance = self._convert_tolerance(tolerance, np.asarray(key))\n\n indexer = self.get_indexer([key], method=method, tolerance=tolerance)\n if indexer.ndim > 1 or indexer.size > 1:\n raise TypeError(\"get_loc requires scalar valued input\")\n loc = indexer.item()\n if loc == -1:\n raise KeyError(key)\n return loc\n\n _index_shared_docs[\n \"get_indexer\"\n ] = \"\"\"\n Compute indexer and mask for new index given the current index. The\n indexer should be then used as an input to ndarray.take to align the\n current data to the new index.\n\n Parameters\n ----------\n target : %(target_klass)s\n method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional\n * default: exact matches only.\n * pad / ffill: find the PREVIOUS index value if no exact match.\n * backfill / bfill: use NEXT index value if no exact match\n * nearest: use the NEAREST index value if no exact match. Tied\n distances are broken by preferring the larger index value.\n limit : int, optional\n Maximum number of consecutive labels in ``target`` to match for\n inexact matches.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n Returns\n -------\n indexer : np.ndarray[np.intp]\n Integers from 0 to n - 1 indicating that the index at these\n positions matches the corresponding target values. 
Missing values\n in the target are marked by -1.\n %(raises_section)s\n Examples\n --------\n >>> index = pd.Index(['c', 'a', 'b'])\n >>> index.get_indexer(['a', 'b', 'x'])\n array([ 1, 2, -1])\n\n Notice that the return value is an array of locations in ``index``\n and ``x`` is marked by -1, as it is not in ``index``.\n \"\"\"\n\n @Appender(_index_shared_docs[\"get_indexer\"] % _index_doc_kwargs)\n @final\n def get_indexer(\n self,\n target,\n method: str_t | None = None,\n limit: int | None = None,\n tolerance=None,\n ) -> npt.NDArray[np.intp]:\n method = missing.clean_reindex_fill_method(method)\n target = self._maybe_cast_listlike_indexer(target)\n\n self._check_indexing_method(method, limit, tolerance)\n\n if not self._index_as_unique:\n raise InvalidIndexError(self._requires_unique_msg)\n\n if len(target) == 0:\n return np.array([], dtype=np.intp)\n\n if not self._should_compare(target) and not self._should_partial_index(target):\n # IntervalIndex get special treatment bc numeric scalars can be\n # matched to Interval scalars\n return self._get_indexer_non_comparable(target, method=method, unique=True)\n\n if is_categorical_dtype(self.dtype):\n # _maybe_cast_listlike_indexer ensures target has our dtype\n # (could improve perf by doing _should_compare check earlier?)\n assert is_dtype_equal(self.dtype, target.dtype)\n\n indexer = self._engine.get_indexer(target.codes)\n if self.hasnans and target.hasnans:\n loc = self.get_loc(np.nan)\n mask = target.isna()\n indexer[mask] = loc\n return indexer\n\n if is_categorical_dtype(target.dtype):\n # potential fastpath\n # get an indexer for unique categories then propagate to codes via take_nd\n # get_indexer instead of _get_indexer needed for MultiIndex cases\n # e.g. test_append_different_columns_types\n categories_indexer = self.get_indexer(target.categories)\n\n indexer = algos.take_nd(categories_indexer, target.codes, fill_value=-1)\n\n if (not self._is_multi and self.hasnans) and target.hasnans:\n # Exclude MultiIndex because hasnans raises NotImplementedError\n # we should only get here if we are unique, so loc is an integer\n # GH#41934\n loc = self.get_loc(np.nan)\n mask = target.isna()\n indexer[mask] = loc\n\n return ensure_platform_int(indexer)\n\n pself, ptarget = self._maybe_promote(target)\n if pself is not self or ptarget is not target:\n return pself.get_indexer(\n ptarget, method=method, limit=limit, tolerance=tolerance\n )\n\n if is_dtype_equal(self.dtype, target.dtype) and self.equals(target):\n # Only call equals if we have same dtype to avoid inference/casting\n return np.arange(len(target), dtype=np.intp)\n\n if not is_dtype_equal(self.dtype, target.dtype) and not is_interval_dtype(\n self.dtype\n ):\n # IntervalIndex gets special treatment for partial-indexing\n dtype = self._find_common_type_compat(target)\n\n this = self.astype(dtype, copy=False)\n target = target.astype(dtype, copy=False)\n return this._get_indexer(\n target, method=method, limit=limit, tolerance=tolerance\n )\n\n return self._get_indexer(target, method, limit, tolerance)\n\n def _get_indexer(\n self,\n target: Index,\n method: str_t | None = None,\n limit: int | None = None,\n tolerance=None,\n ) -> np.ndarray:\n if tolerance is not None:\n tolerance = self._convert_tolerance(tolerance, target)\n\n if method in [\"pad\", \"backfill\"]:\n indexer = self._get_fill_indexer(target, method, limit, tolerance)\n elif method == \"nearest\":\n indexer = self._get_nearest_indexer(target, limit, tolerance)\n else:\n indexer = 
self._engine.get_indexer(target._get_engine_target())\n\n return ensure_platform_int(indexer)\n\n @final\n def _should_partial_index(self, target: Index) -> bool:\n \"\"\"\n Should we attempt partial-matching indexing?\n \"\"\"\n if is_interval_dtype(self.dtype):\n # \"Index\" has no attribute \"left\"\n return self.left._should_compare(target) # type: ignore[attr-defined]\n return False\n\n @final\n def _check_indexing_method(\n self,\n method: str_t | None,\n limit: int | None = None,\n tolerance=None,\n ) -> None:\n \"\"\"\n Raise if we have a get_indexer `method` that is not supported or valid.\n \"\"\"\n if method not in [None, \"bfill\", \"backfill\", \"pad\", \"ffill\", \"nearest\"]:\n # in practice the clean_reindex_fill_method call would raise\n # before we get here\n raise ValueError(\"Invalid fill method\") # pragma: no cover\n\n if self._is_multi:\n if method == \"nearest\":\n raise NotImplementedError(\n \"method='nearest' not implemented yet \"\n \"for MultiIndex; see GitHub issue 9365\"\n )\n elif method == \"pad\" or method == \"backfill\":\n if tolerance is not None:\n raise NotImplementedError(\n \"tolerance not implemented yet for MultiIndex\"\n )\n\n if is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype):\n # GH#37871 for now this is only for IntervalIndex and CategoricalIndex\n if method is not None:\n raise NotImplementedError(\n f\"method {method} not yet implemented for {type(self).__name__}\"\n )\n\n if method is None:\n if tolerance is not None:\n raise ValueError(\n \"tolerance argument only valid if doing pad, \"\n \"backfill or nearest reindexing\"\n )\n if limit is not None:\n raise ValueError(\n \"limit argument only valid if doing pad, \"\n \"backfill or nearest reindexing\"\n )\n\n def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarray:\n # override this method on subclasses\n tolerance = np.asarray(tolerance)\n if target.size != tolerance.size and tolerance.size > 1:\n raise ValueError(\"list-like tolerance size must match target index size\")\n return tolerance\n\n @final\n def _get_fill_indexer(\n self, target: Index, method: str_t, limit: int | None = None, tolerance=None\n ) -> np.ndarray:\n\n if self._is_multi:\n # TODO: get_indexer_with_fill docstring says values must be _sorted_\n # but that doesn't appear to be enforced\n return self._engine.get_indexer_with_fill(\n target=target._values, values=self._values, method=method, limit=limit\n )\n\n target_values = target._get_engine_target()\n\n if self.is_monotonic_increasing and target.is_monotonic_increasing:\n engine_method = (\n self._engine.get_pad_indexer\n if method == \"pad\"\n else self._engine.get_backfill_indexer\n )\n indexer = engine_method(target_values, limit)\n else:\n indexer = self._get_fill_indexer_searchsorted(target, method, limit)\n if tolerance is not None and len(self):\n indexer = self._filter_indexer_tolerance(target_values, indexer, tolerance)\n return indexer\n\n @final\n def _get_fill_indexer_searchsorted(\n self, target: Index, method: str_t, limit: int | None = None\n ) -> np.ndarray:\n \"\"\"\n Fallback pad/backfill get_indexer that works for monotonic decreasing\n indexes and non-monotonic targets.\n \"\"\"\n if limit is not None:\n raise ValueError(\n f\"limit argument for {repr(method)} method only well-defined \"\n \"if index and target are monotonic\"\n )\n\n side = \"left\" if method == \"pad\" else \"right\"\n\n # find exact matches first (this simplifies the algorithm)\n indexer = self.get_indexer(target)\n nonexact = 
indexer == -1\n indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side)\n if side == \"left\":\n # searchsorted returns \"indices into a sorted array such that,\n # if the corresponding elements in v were inserted before the\n # indices, the order of a would be preserved\".\n # Thus, we need to subtract 1 to find values to the left.\n indexer[nonexact] -= 1\n # This also mapped not found values (values of 0 from\n # np.searchsorted) to -1, which conveniently is also our\n # sentinel for missing values\n else:\n # Mark indices to the right of the largest value as not found\n indexer[indexer == len(self)] = -1\n return indexer\n\n @final\n def _get_nearest_indexer(\n self, target: Index, limit: int | None, tolerance\n ) -> np.ndarray:\n \"\"\"\n Get the indexer for the nearest index labels; requires an index with\n values that can be subtracted from each other (e.g., not strings or\n tuples).\n \"\"\"\n if not len(self):\n return self._get_fill_indexer(target, \"pad\")\n\n left_indexer = self.get_indexer(target, \"pad\", limit=limit)\n right_indexer = self.get_indexer(target, \"backfill\", limit=limit)\n\n target_values = target._get_engine_target()\n own_values = self._get_engine_target()\n left_distances = np.abs(own_values[left_indexer] - target_values)\n right_distances = np.abs(own_values[right_indexer] - target_values)\n\n op = operator.lt if self.is_monotonic_increasing else operator.le\n indexer = np.where(\n op(left_distances, right_distances) | (right_indexer == -1),\n left_indexer,\n right_indexer,\n )\n if tolerance is not None:\n indexer = self._filter_indexer_tolerance(target_values, indexer, tolerance)\n return indexer\n\n @final\n def _filter_indexer_tolerance(\n self,\n target: Index | np.ndarray | ExtensionArray,\n indexer: np.ndarray,\n tolerance,\n ) -> np.ndarray:\n own_values = self._get_engine_target()\n distance = abs(own_values[indexer] - target)\n return np.where(distance <= tolerance, indexer, -1)\n\n # --------------------------------------------------------------------\n # Indexer Conversion Methods\n\n @final\n def _validate_positional_slice(self, key: slice) -> None:\n \"\"\"\n For positional indexing, a slice must have either int or None\n for each of start, stop, and step.\n \"\"\"\n self._validate_indexer(\"positional\", key.start, \"iloc\")\n self._validate_indexer(\"positional\", key.stop, \"iloc\")\n self._validate_indexer(\"positional\", key.step, \"iloc\")\n\n def _convert_slice_indexer(self, key: slice, kind: str_t):\n \"\"\"\n Convert a slice indexer.\n\n By definition, these are labels unless 'iloc' is passed in.\n Floats are not allowed as the start, step, or stop of the slice.\n\n Parameters\n ----------\n key : label of the slice bound\n kind : {'loc', 'getitem'}\n \"\"\"\n assert kind in [\"loc\", \"getitem\"], kind\n\n # potentially cast the bounds to integers\n start, stop, step = key.start, key.stop, key.step\n\n # figure out if this is a positional indexer\n def is_int(v):\n return v is None or is_integer(v)\n\n is_index_slice = is_int(start) and is_int(stop) and is_int(step)\n is_positional = is_index_slice and not (\n self.is_integer() or self.is_categorical()\n )\n\n if kind == \"getitem\":\n \"\"\"\n called from the getitem slicers, validate that we are in fact\n integers\n \"\"\"\n if self.is_integer() or is_index_slice:\n self._validate_indexer(\"slice\", key.start, \"getitem\")\n self._validate_indexer(\"slice\", key.stop, \"getitem\")\n self._validate_indexer(\"slice\", key.step, \"getitem\")\n return key\n\n # 
convert the slice to an indexer here\n\n # if we are mixed and have integers\n if is_positional:\n try:\n # Validate start & stop\n if start is not None:\n self.get_loc(start)\n if stop is not None:\n self.get_loc(stop)\n is_positional = False\n except KeyError:\n pass\n\n if com.is_null_slice(key):\n # It doesn't matter if we are positional or label based\n indexer = key\n elif is_positional:\n if kind == \"loc\":\n # GH#16121, GH#24612, GH#31810\n warnings.warn(\n \"Slicing a positional slice with .loc is not supported, \"\n \"and will raise TypeError in a future version. \"\n \"Use .loc with labels or .iloc with positions instead.\",\n FutureWarning,\n stacklevel=5,\n )\n indexer = key\n else:\n indexer = self.slice_indexer(start, stop, step)\n\n return indexer\n\n @final\n def _invalid_indexer(self, form: str_t, key) -> TypeError:\n \"\"\"\n Consistent invalid indexer message.\n \"\"\"\n return TypeError(\n f\"cannot do {form} indexing on {type(self).__name__} with these \"\n f\"indexers [{key}] of type {type(key).__name__}\"\n )\n\n # --------------------------------------------------------------------\n # Reindex Methods\n\n @final\n def _validate_can_reindex(self, indexer: np.ndarray) -> None:\n \"\"\"\n Check if we are allowing reindexing with this particular indexer.\n\n Parameters\n ----------\n indexer : an integer ndarray\n\n Raises\n ------\n ValueError if its a duplicate axis\n \"\"\"\n # trying to reindex on an axis with duplicates\n if not self._index_as_unique and len(indexer):\n raise ValueError(\"cannot reindex on an axis with duplicate labels\")\n\n def reindex(\n self, target, method=None, level=None, limit=None, tolerance=None\n ) -> tuple[Index, npt.NDArray[np.intp] | None]:\n \"\"\"\n Create index with target's values.\n\n Parameters\n ----------\n target : an iterable\n\n Returns\n -------\n new_index : pd.Index\n Resulting index.\n indexer : np.ndarray[np.intp] or None\n Indices of output values in original index.\n \"\"\"\n # GH6552: preserve names when reindexing to non-named target\n # (i.e. 
neither Index nor Series).\n preserve_names = not hasattr(target, \"name\")\n\n # GH7774: preserve dtype/tz if target is empty and not an Index.\n target = ensure_has_len(target) # target may be an iterator\n\n if not isinstance(target, Index) and len(target) == 0:\n if level is not None and self._is_multi:\n # \"Index\" has no attribute \"levels\"; maybe \"nlevels\"?\n idx = self.levels[level] # type: ignore[attr-defined]\n else:\n idx = self\n target = idx[:0]\n else:\n target = ensure_index(target)\n\n if level is not None:\n if method is not None:\n raise TypeError(\"Fill method not supported if level passed\")\n\n # TODO: tests where passing `keep_order=not self._is_multi`\n # makes a difference for non-MultiIndex case\n target, indexer, _ = self._join_level(\n target, level, how=\"right\", keep_order=not self._is_multi\n )\n\n else:\n if self.equals(target):\n indexer = None\n else:\n if self._index_as_unique:\n indexer = self.get_indexer(\n target, method=method, limit=limit, tolerance=tolerance\n )\n elif self._is_multi:\n raise ValueError(\"cannot handle a non-unique multi-index!\")\n else:\n if method is not None or limit is not None:\n raise ValueError(\n \"cannot reindex a non-unique index \"\n \"with a method or limit\"\n )\n indexer, _ = self.get_indexer_non_unique(target)\n\n target = self._wrap_reindex_result(target, indexer, preserve_names)\n return target, indexer\n\n def _wrap_reindex_result(self, target, indexer, preserve_names: bool):\n target = self._maybe_preserve_names(target, preserve_names)\n return target\n\n def _maybe_preserve_names(self, target: Index, preserve_names: bool):\n if preserve_names and target.nlevels == 1 and target.name != self.name:\n target = target.copy(deep=False)\n target.name = self.name\n return target\n\n @final\n def _reindex_non_unique(\n self, target: Index\n ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]:\n \"\"\"\n Create a new index with target's values (move/add/delete values as\n necessary) use with non-unique Index and a possibly non-unique target.\n\n Parameters\n ----------\n target : an iterable\n\n Returns\n -------\n new_index : pd.Index\n Resulting index.\n indexer : np.ndarray[np.intp]\n Indices of output values in original index.\n new_indexer : np.ndarray[np.intp] or None\n\n \"\"\"\n target = ensure_index(target)\n if len(target) == 0:\n # GH#13691\n return self[:0], np.array([], dtype=np.intp), None\n\n indexer, missing = self.get_indexer_non_unique(target)\n check = indexer != -1\n new_labels = self.take(indexer[check])\n new_indexer = None\n\n if len(missing):\n length = np.arange(len(indexer), dtype=np.intp)\n\n missing = ensure_platform_int(missing)\n missing_labels = target.take(missing)\n missing_indexer = length[~check]\n cur_labels = self.take(indexer[check]).values\n cur_indexer = length[check]\n\n # Index constructor below will do inference\n new_labels = np.empty((len(indexer),), dtype=object)\n new_labels[cur_indexer] = cur_labels\n new_labels[missing_indexer] = missing_labels\n\n # GH#38906\n if not len(self):\n\n new_indexer = np.arange(0, dtype=np.intp)\n\n # a unique indexer\n elif target.is_unique:\n\n # see GH5553, make sure we use the right indexer\n new_indexer = np.arange(len(indexer), dtype=np.intp)\n new_indexer[cur_indexer] = np.arange(len(cur_labels))\n new_indexer[missing_indexer] = -1\n\n # we have a non_unique selector, need to use the original\n # indexer here\n else:\n\n # need to retake to have the same size as the indexer\n indexer[~check] = -1\n\n # reset the new 
indexer to account for the new size\n new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp)\n new_indexer[~check] = -1\n\n if isinstance(self, ABCMultiIndex):\n new_index = type(self).from_tuples(new_labels, names=self.names)\n else:\n new_index = Index(new_labels, name=self.name)\n return new_index, indexer, new_indexer\n\n # --------------------------------------------------------------------\n # Join Methods\n\n @final\n @_maybe_return_indexers\n def join(\n self,\n other,\n how: str_t = \"left\",\n level=None,\n return_indexers: bool = False,\n sort: bool = False,\n ):\n \"\"\"\n Compute join_index and indexers to conform data\n structures to the new index.\n\n Parameters\n ----------\n other : Index\n how : {'left', 'right', 'inner', 'outer'}\n level : int or level name, default None\n return_indexers : bool, default False\n sort : bool, default False\n Sort the join keys lexicographically in the result Index. If False,\n the order of the join keys depends on the join type (how keyword).\n\n Returns\n -------\n join_index, (left_indexer, right_indexer)\n \"\"\"\n other = ensure_index(other)\n self_is_mi = isinstance(self, ABCMultiIndex)\n other_is_mi = isinstance(other, ABCMultiIndex)\n\n if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex):\n if (self.tz is None) ^ (other.tz is None):\n # Raise instead of casting to object below.\n raise TypeError(\"Cannot join tz-naive with tz-aware DatetimeIndex\")\n\n if not self._is_multi and not other._is_multi:\n # We have specific handling for MultiIndex below\n pself, pother = self._maybe_promote(other)\n if pself is not self or pother is not other:\n return pself.join(\n pother, how=how, level=level, return_indexers=True, sort=sort\n )\n\n lindexer: np.ndarray | None\n rindexer: np.ndarray | None\n\n # try to figure out the join level\n # GH3662\n if level is None and (self_is_mi or other_is_mi):\n\n # have the same levels/names so a simple join\n if self.names == other.names:\n pass\n else:\n return self._join_multi(other, how=how)\n\n # join on the level\n if level is not None and (self_is_mi or other_is_mi):\n return self._join_level(other, level, how=how)\n\n if len(other) == 0 and how in (\"left\", \"outer\"):\n join_index = self._view()\n rindexer = np.repeat(np.intp(-1), len(join_index))\n return join_index, None, rindexer\n\n if len(self) == 0 and how in (\"right\", \"outer\"):\n join_index = other._view()\n lindexer = np.repeat(np.intp(-1), len(join_index))\n return join_index, lindexer, None\n\n if self._join_precedence < other._join_precedence:\n how = {\"right\": \"left\", \"left\": \"right\"}.get(how, how)\n join_index, lidx, ridx = other.join(\n self, how=how, level=level, return_indexers=True\n )\n lidx, ridx = ridx, lidx\n return join_index, lidx, ridx\n\n if not is_dtype_equal(self.dtype, other.dtype):\n dtype = self._find_common_type_compat(other)\n this = self.astype(dtype, copy=False)\n other = other.astype(dtype, copy=False)\n return this.join(other, how=how, return_indexers=True)\n\n _validate_join_method(how)\n\n if not self.is_unique and not other.is_unique:\n return self._join_non_unique(other, how=how)\n elif not self.is_unique or not other.is_unique:\n if self.is_monotonic and other.is_monotonic:\n return self._join_monotonic(other, how=how)\n else:\n return self._join_non_unique(other, how=how)\n elif (\n self.is_monotonic\n and other.is_monotonic\n and (\n not isinstance(self, ABCMultiIndex)\n or not any(is_categorical_dtype(dtype) for dtype in self.dtypes)\n )\n ):\n # 
Categorical is monotonic if data are ordered as categories, but join can\n # not handle this in case of not lexicographically monotonic GH#38502\n try:\n return self._join_monotonic(other, how=how)\n except TypeError:\n pass\n\n if how == \"left\":\n join_index = self\n elif how == \"right\":\n join_index = other\n elif how == \"inner\":\n # TODO: sort=False here for backwards compat. It may\n # be better to use the sort parameter passed into join\n join_index = self.intersection(other, sort=False)\n elif how == \"outer\":\n # TODO: sort=True here for backwards compat. It may\n # be better to use the sort parameter passed into join\n join_index = self.union(other)\n\n if sort:\n join_index = join_index.sort_values()\n\n if join_index is self:\n lindexer = None\n else:\n lindexer = self.get_indexer(join_index)\n if join_index is other:\n rindexer = None\n else:\n rindexer = other.get_indexer(join_index)\n return join_index, lindexer, rindexer\n\n @final\n def _join_multi(self, other: Index, how: str_t):\n from pandas.core.indexes.multi import MultiIndex\n from pandas.core.reshape.merge import restore_dropped_levels_multijoin\n\n # figure out join names\n self_names_list = list(com.not_none(*self.names))\n other_names_list = list(com.not_none(*other.names))\n self_names_order = self_names_list.index\n other_names_order = other_names_list.index\n self_names = set(self_names_list)\n other_names = set(other_names_list)\n overlap = self_names & other_names\n\n # need at least 1 in common\n if not overlap:\n raise ValueError(\"cannot join with no overlapping index names\")\n\n if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):\n\n # Drop the non-matching levels from left and right respectively\n ldrop_names = sorted(self_names - overlap, key=self_names_order)\n rdrop_names = sorted(other_names - overlap, key=other_names_order)\n\n # if only the order differs\n if not len(ldrop_names + rdrop_names):\n self_jnlevels = self\n other_jnlevels = other.reorder_levels(self.names)\n else:\n self_jnlevels = self.droplevel(ldrop_names)\n other_jnlevels = other.droplevel(rdrop_names)\n\n # Join left and right\n # Join on same leveled multi-index frames is supported\n join_idx, lidx, ridx = self_jnlevels.join(\n other_jnlevels, how, return_indexers=True\n )\n\n # Restore the dropped levels\n # Returned index level order is\n # common levels, ldrop_names, rdrop_names\n dropped_names = ldrop_names + rdrop_names\n\n levels, codes, names = restore_dropped_levels_multijoin(\n self, other, dropped_names, join_idx, lidx, ridx\n )\n\n # Re-create the multi-index\n multi_join_idx = MultiIndex(\n levels=levels, codes=codes, names=names, verify_integrity=False\n )\n\n multi_join_idx = multi_join_idx.remove_unused_levels()\n\n return multi_join_idx, lidx, ridx\n\n jl = list(overlap)[0]\n\n # Case where only one index is multi\n # make the indices into mi's that match\n flip_order = False\n if isinstance(self, MultiIndex):\n self, other = other, self\n flip_order = True\n # flip if join method is right or left\n how = {\"right\": \"left\", \"left\": \"right\"}.get(how, how)\n\n level = other.names.index(jl)\n result = self._join_level(other, level, how=how)\n\n if flip_order:\n return result[0], result[2], result[1]\n return result\n\n @final\n def _join_non_unique(\n self, other: Index, how: str_t = \"left\"\n ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n from pandas.core.reshape.merge import get_join_indexers\n\n # We only get here if dtypes match\n assert self.dtype == other.dtype\n\n 
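# Reviewer annotation (illustrative, not pandas source): the block below performs a\n        # database-style join on the raw engine values. get_join_indexers returns\n        # positional indexers into self and other; the result array is seeded with\n        # lvalues.take(left_idx), and every -1 slot (a joined row with no match in\n        # self) is patched in from rvalues.take(right_idx) via np.putmask, before\n        # being re-wrapped as an Index by _wrap_joined_index.\n        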
lvalues = self._get_join_target()\n rvalues = other._get_join_target()\n\n left_idx, right_idx = get_join_indexers(\n [lvalues], [rvalues], how=how, sort=True\n )\n\n left_idx = ensure_platform_int(left_idx)\n right_idx = ensure_platform_int(right_idx)\n\n join_array = np.asarray(lvalues.take(left_idx))\n mask = left_idx == -1\n np.putmask(join_array, mask, rvalues.take(right_idx))\n\n join_arraylike = self._from_join_target(join_array)\n join_index = self._wrap_joined_index(join_arraylike, other)\n\n return join_index, left_idx, right_idx\n\n @final\n def _join_level(\n self, other: Index, level, how: str_t = \"left\", keep_order: bool = True\n ) -> tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:\n \"\"\"\n The join method *only* affects the level of the resulting\n MultiIndex. Otherwise it just exactly aligns the Index data to the\n labels of the level in the MultiIndex.\n\n If ```keep_order == True```, the order of the data indexed by the\n MultiIndex will not be changed; otherwise, it will tie out\n with `other`.\n \"\"\"\n from pandas.core.indexes.multi import MultiIndex\n\n def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]:\n \"\"\"\n Returns sorter for the inner most level while preserving the\n order of higher levels.\n\n Parameters\n ----------\n labels : list[np.ndarray]\n Each ndarray has signed integer dtype, not necessarily identical.\n\n Returns\n -------\n np.ndarray[np.intp]\n \"\"\"\n if labels[0].size == 0:\n return np.empty(0, dtype=np.intp)\n\n if len(labels) == 1:\n return get_group_index_sorter(ensure_platform_int(labels[0]))\n\n # find indexers of beginning of each set of\n # same-key labels w.r.t all but last level\n tic = labels[0][:-1] != labels[0][1:]\n for lab in labels[1:-1]:\n tic |= lab[:-1] != lab[1:]\n\n starts = np.hstack(([True], tic, [True])).nonzero()[0]\n lab = ensure_int64(labels[-1])\n return lib.get_level_sorter(lab, ensure_platform_int(starts))\n\n if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):\n raise TypeError(\"Join on level between two MultiIndex objects is ambiguous\")\n\n left, right = self, other\n\n flip_order = not isinstance(self, MultiIndex)\n if flip_order:\n left, right = right, left\n how = {\"right\": \"left\", \"left\": \"right\"}.get(how, how)\n\n assert isinstance(left, MultiIndex)\n\n level = left._get_level_number(level)\n old_level = left.levels[level]\n\n if not right.is_unique:\n raise NotImplementedError(\n \"Index._join_level on non-unique index is not implemented\"\n )\n\n new_level, left_lev_indexer, right_lev_indexer = old_level.join(\n right, how=how, return_indexers=True\n )\n\n if left_lev_indexer is None:\n if keep_order or len(left) == 0:\n left_indexer = None\n join_index = left\n else: # sort the leaves\n left_indexer = _get_leaf_sorter(left.codes[: level + 1])\n join_index = left[left_indexer]\n\n else:\n left_lev_indexer = ensure_platform_int(left_lev_indexer)\n rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level))\n old_codes = left.codes[level]\n\n taker = old_codes[old_codes != -1]\n new_lev_codes = rev_indexer.take(taker)\n\n new_codes = list(left.codes)\n new_codes[level] = new_lev_codes\n\n new_levels = list(left.levels)\n new_levels[level] = new_level\n\n if keep_order: # just drop missing values. o.w. 
keep order\n left_indexer = np.arange(len(left), dtype=np.intp)\n left_indexer = cast(np.ndarray, left_indexer)\n mask = new_lev_codes != -1\n if not mask.all():\n new_codes = [lab[mask] for lab in new_codes]\n left_indexer = left_indexer[mask]\n\n else: # tie out the order with other\n if level == 0: # outer most level, take the fast route\n max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max()\n ngroups = 1 + max_new_lev\n left_indexer, counts = libalgos.groupsort_indexer(\n new_lev_codes, ngroups\n )\n\n # missing values are placed first; drop them!\n left_indexer = left_indexer[counts[0] :]\n new_codes = [lab[left_indexer] for lab in new_codes]\n\n else: # sort the leaves\n mask = new_lev_codes != -1\n mask_all = mask.all()\n if not mask_all:\n new_codes = [lab[mask] for lab in new_codes]\n\n left_indexer = _get_leaf_sorter(new_codes[: level + 1])\n new_codes = [lab[left_indexer] for lab in new_codes]\n\n # left_indexers are w.r.t masked frame.\n # reverse to original frame!\n if not mask_all:\n left_indexer = mask.nonzero()[0][left_indexer]\n\n join_index = MultiIndex(\n levels=new_levels,\n codes=new_codes,\n names=left.names,\n verify_integrity=False,\n )\n\n if right_lev_indexer is not None:\n right_indexer = right_lev_indexer.take(join_index.codes[level])\n else:\n right_indexer = join_index.codes[level]\n\n if flip_order:\n left_indexer, right_indexer = right_indexer, left_indexer\n\n left_indexer = (\n None if left_indexer is None else ensure_platform_int(left_indexer)\n )\n right_indexer = (\n None if right_indexer is None else ensure_platform_int(right_indexer)\n )\n return join_index, left_indexer, right_indexer\n\n @final\n def _join_monotonic(self, other: Index, how: str_t = \"left\"):\n # We only get here with matching dtypes\n assert other.dtype == self.dtype\n\n if self.equals(other):\n ret_index = other if how == \"right\" else self\n return ret_index, None, None\n\n ridx: np.ndarray | None\n lidx: np.ndarray | None\n\n if self.is_unique and other.is_unique:\n # We can perform much better than the general case\n if how == \"left\":\n join_index = self\n lidx = None\n ridx = self._left_indexer_unique(other)\n elif how == \"right\":\n join_index = other\n lidx = other._left_indexer_unique(self)\n ridx = None\n elif how == \"inner\":\n join_array, lidx, ridx = self._inner_indexer(other)\n join_index = self._wrap_joined_index(join_array, other)\n elif how == \"outer\":\n join_array, lidx, ridx = self._outer_indexer(other)\n join_index = self._wrap_joined_index(join_array, other)\n else:\n if how == \"left\":\n join_array, lidx, ridx = self._left_indexer(other)\n elif how == \"right\":\n join_array, ridx, lidx = other._left_indexer(self)\n elif how == \"inner\":\n join_array, lidx, ridx = self._inner_indexer(other)\n elif how == \"outer\":\n join_array, lidx, ridx = self._outer_indexer(other)\n\n join_index = self._wrap_joined_index(join_array, other)\n\n lidx = None if lidx is None else ensure_platform_int(lidx)\n ridx = None if ridx is None else ensure_platform_int(ridx)\n return join_index, lidx, ridx\n\n def _wrap_joined_index(self: _IndexT, joined: ArrayLike, other: _IndexT) -> _IndexT:\n assert other.dtype == self.dtype\n\n if isinstance(self, ABCMultiIndex):\n name = self.names if self.names == other.names else None\n else:\n name = get_op_result_name(self, other)\n return self._constructor(joined, name=name)\n\n # --------------------------------------------------------------------\n # Uncategorized Methods\n\n @property\n def values(self) -> 
ArrayLike:\n \"\"\"\n Return an array representing the data in the Index.\n\n .. warning::\n\n We recommend using :attr:`Index.array` or\n :meth:`Index.to_numpy`, depending on whether you need\n a reference to the underlying data or a NumPy array.\n\n Returns\n -------\n array: numpy.ndarray or ExtensionArray\n\n See Also\n --------\n Index.array : Reference to the underlying data.\n Index.to_numpy : A NumPy array representing the underlying data.\n \"\"\"\n return self._data\n\n @cache_readonly\n @doc(IndexOpsMixin.array)\n def array(self) -> ExtensionArray:\n array = self._data\n if isinstance(array, np.ndarray):\n from pandas.core.arrays.numpy_ import PandasArray\n\n array = PandasArray(array)\n return array\n\n @property\n def _values(self) -> ExtensionArray | np.ndarray:\n \"\"\"\n The best array representation.\n\n This is an ndarray or ExtensionArray.\n\n ``_values`` are consistent between ``Series`` and ``Index``.\n\n It may differ from the public '.values' method.\n\n index | values | _values |\n ----------------- | --------------- | ------------- |\n Index | ndarray | ndarray |\n CategoricalIndex | Categorical | Categorical |\n DatetimeIndex | ndarray[M8ns] | DatetimeArray |\n DatetimeIndex[tz] | ndarray[M8ns] | DatetimeArray |\n PeriodIndex | ndarray[object] | PeriodArray |\n IntervalIndex | IntervalArray | IntervalArray |\n\n See Also\n --------\n values : Values\n \"\"\"\n return self._data\n\n def _get_engine_target(self) -> np.ndarray:\n \"\"\"\n Get the ndarray that we can pass to the IndexEngine constructor.\n \"\"\"\n # error: Incompatible return value type (got \"Union[ExtensionArray,\n # ndarray]\", expected \"ndarray\")\n return self._values # type: ignore[return-value]\n\n def _get_join_target(self) -> np.ndarray:\n \"\"\"\n Get the ndarray that we will pass to libjoin functions.\n \"\"\"\n return self._get_engine_target()\n\n def _from_join_target(self, result: np.ndarray) -> ArrayLike:\n \"\"\"\n Cast the ndarray returned from one of the libjoin.foo_indexer functions\n back to type(self)._data.\n \"\"\"\n return result\n\n @doc(IndexOpsMixin._memory_usage)\n def memory_usage(self, deep: bool = False) -> int:\n result = self._memory_usage(deep=deep)\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result\n\n @final\n def where(self, cond, other=None) -> Index:\n \"\"\"\n Replace values where the condition is False.\n\n The replacement is taken from other.\n\n Parameters\n ----------\n cond : bool array-like with the same length as self\n Condition to select the values on.\n other : scalar, or array-like, default None\n Replacement if the condition is False.\n\n Returns\n -------\n pandas.Index\n A copy of self with values replaced from other\n where the condition is False.\n\n See Also\n --------\n Series.where : Same method for Series.\n DataFrame.where : Same method for DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index(['car', 'bike', 'train', 'tractor'])\n >>> idx\n Index(['car', 'bike', 'train', 'tractor'], dtype='object')\n >>> idx.where(idx.isin(['car', 'train']), 'other')\n Index(['car', 'other', 'train', 'other'], dtype='object')\n \"\"\"\n if isinstance(self, ABCMultiIndex):\n raise NotImplementedError(\n \".where is not supported for MultiIndex operations\"\n )\n cond = np.asarray(cond, dtype=bool)\n return self.putmask(~cond, other)\n\n # construction helpers\n @final\n @classmethod\n def _scalar_data_error(cls, data):\n # We return the TypeError so that we can raise it from the constructor\n # in order to keep 
mypy happy\n return TypeError(\n f\"{cls.__name__}(...) must be called with a collection of some \"\n f\"kind, {repr(data)} was passed\"\n )\n\n @final\n @classmethod\n def _string_data_error(cls, data):\n raise TypeError(\n \"String dtype not supported, you may need \"\n \"to explicitly cast to a numeric type\"\n )\n\n def _validate_fill_value(self, value):\n \"\"\"\n Check if the value can be inserted into our array without casting,\n and convert it to an appropriate native type if necessary.\n\n Raises\n ------\n TypeError\n If the value cannot be inserted into an array of this dtype.\n \"\"\"\n if not can_hold_element(self._values, value):\n raise TypeError\n return value\n\n @final\n def _require_scalar(self, value):\n \"\"\"\n Check that this is a scalar value that we can use for setitem-like\n operations without changing dtype.\n \"\"\"\n if not is_scalar(value):\n raise TypeError(f\"'value' must be a scalar, passed: {type(value).__name__}\")\n return value\n\n @property\n def _has_complex_internals(self) -> bool:\n \"\"\"\n Indicates if an index is not directly backed by a numpy array\n \"\"\"\n # used to avoid libreduction code paths, which raise or require conversion\n return False\n\n def _is_memory_usage_qualified(self) -> bool:\n \"\"\"\n Return a boolean if we need a qualified .info display.\n \"\"\"\n return self.is_object()\n\n def is_type_compatible(self, kind: str_t) -> bool:\n \"\"\"\n Whether the index type is compatible with the provided type.\n \"\"\"\n warnings.warn(\n \"Index.is_type_compatible is deprecated and will be removed in a \"\n \"future version\",\n FutureWarning,\n stacklevel=2,\n )\n return kind == self.inferred_type\n\n def __contains__(self, key: Any) -> bool:\n \"\"\"\n Return a boolean indicating whether the provided key is in the index.\n\n Parameters\n ----------\n key : label\n The key to check if it is present in the index.\n\n Returns\n -------\n bool\n Whether the key search is in the index.\n\n Raises\n ------\n TypeError\n If the key is not hashable.\n\n See Also\n --------\n Index.isin : Returns an ndarray of boolean dtype indicating whether the\n list-like key is in the index.\n\n Examples\n --------\n >>> idx = pd.Index([1, 2, 3, 4])\n >>> idx\n Int64Index([1, 2, 3, 4], dtype='int64')\n\n >>> 2 in idx\n True\n >>> 6 in idx\n False\n \"\"\"\n hash(key)\n try:\n return key in self._engine\n except (OverflowError, TypeError, ValueError):\n return False\n\n # https://github.com/python/typeshed/issues/2148#issuecomment-520783318\n # Incompatible types in assignment (expression has type \"None\", base class\n # \"object\" defined the type as \"Callable[[object], int]\")\n __hash__: None # type: ignore[assignment]\n\n @final\n def __setitem__(self, key, value):\n raise TypeError(\"Index does not support mutable operations\")\n\n def __getitem__(self, key):\n \"\"\"\n Override numpy.ndarray's __getitem__ method to work as desired.\n\n This function adds lists and Series as valid boolean indexers\n (ndarrays only supports ndarray with dtype=bool).\n\n If resulting ndim != 1, plain ndarray is returned instead of\n corresponding `Index` subclass.\n\n \"\"\"\n # There's no custom logic to be implemented in __getslice__, so it's\n # not overloaded intentionally.\n getitem = self._data.__getitem__\n\n if is_scalar(key):\n key = com.cast_scalar_indexer(key, warn_float=True)\n return getitem(key)\n\n if isinstance(key, slice):\n # This case is separated from the conditional above to avoid\n # pessimization of basic indexing.\n result = getitem(key)\n # 
Going through simple_new for performance.\n return type(self)._simple_new(result, name=self._name)\n\n if com.is_bool_indexer(key):\n key = np.asarray(key, dtype=bool)\n\n result = getitem(key)\n if not is_scalar(result):\n # error: Argument 1 to \"ndim\" has incompatible type \"Union[ExtensionArray,\n # Any]\"; expected \"Union[Union[int, float, complex, str, bytes, generic],\n # Sequence[Union[int, float, complex, str, bytes, generic]],\n # Sequence[Sequence[Any]], _SupportsArray]\"\n if np.ndim(result) > 1: # type: ignore[arg-type]\n deprecate_ndim_indexing(result)\n return result\n # NB: Using _constructor._simple_new would break if MultiIndex\n # didn't override __getitem__\n return self._constructor._simple_new(result, name=self._name)\n else:\n return result\n\n def _getitem_slice(self: _IndexT, slobj: slice) -> _IndexT:\n \"\"\"\n Fastpath for __getitem__ when we know we have a slice.\n \"\"\"\n res = self._data[slobj]\n return type(self)._simple_new(res, name=self._name)\n\n @final\n def _can_hold_identifiers_and_holds_name(self, name) -> bool:\n \"\"\"\n Faster check for ``name in self`` when we know `name` is a Python\n identifier (e.g. in NDFrame.__getattr__, which hits this to support\n . key lookup). For indexes that can't hold identifiers (everything\n but object & categorical) we just return False.\n\n https://github.com/pandas-dev/pandas/issues/19764\n \"\"\"\n if self.is_object() or self.is_categorical():\n return name in self\n return False\n\n def append(self, other: Index | Sequence[Index]) -> Index:\n \"\"\"\n Append a collection of Index options together.\n\n Parameters\n ----------\n other : Index or list/tuple of indices\n\n Returns\n -------\n Index\n \"\"\"\n to_concat = [self]\n\n if isinstance(other, (list, tuple)):\n to_concat += list(other)\n else:\n # error: Argument 1 to \"append\" of \"list\" has incompatible type\n # \"Union[Index, Sequence[Index]]\"; expected \"Index\"\n to_concat.append(other) # type: ignore[arg-type]\n\n for obj in to_concat:\n if not isinstance(obj, Index):\n raise TypeError(\"all inputs must be Index\")\n\n names = {obj.name for obj in to_concat}\n name = None if len(names) > 1 else self.name\n\n return self._concat(to_concat, name)\n\n def _concat(self, to_concat: list[Index], name: Hashable) -> Index:\n \"\"\"\n Concatenate multiple Index objects.\n \"\"\"\n to_concat_vals = [x._values for x in to_concat]\n\n result = concat_compat(to_concat_vals)\n return Index(result, name=name)\n\n def putmask(self, mask, value) -> Index:\n \"\"\"\n Return a new Index of the values set with the mask.\n\n Returns\n -------\n Index\n\n See Also\n --------\n numpy.ndarray.putmask : Changes elements of an array\n based on conditional and input values.\n \"\"\"\n mask, noop = validate_putmask(self._values, mask)\n if noop:\n return self.copy()\n\n if value is None and (self._is_numeric_dtype or self.dtype == object):\n value = self._na_value\n try:\n converted = self._validate_fill_value(value)\n except (ValueError, TypeError) as err:\n if is_object_dtype(self):\n raise err\n\n dtype = self._find_common_type_compat(value)\n return self.astype(dtype).putmask(mask, value)\n\n values = self._values.copy()\n # error: Argument 1 to \"setitem_datetimelike_compat\" has incompatible type\n # \"Union[ExtensionArray, ndarray]\"; expected \"ndarray\"\n converted = setitem_datetimelike_compat(\n values, mask.sum(), converted # type: ignore[arg-type]\n )\n np.putmask(values, mask, converted)\n\n return type(self)._simple_new(values, name=self.name)\n\n def 
equals(self, other: Any) -> bool:\n \"\"\"\n Determine if two Index object are equal.\n\n The things that are being compared are:\n\n * The elements inside the Index object.\n * The order of the elements inside the Index object.\n\n Parameters\n ----------\n other : Any\n The other object to compare against.\n\n Returns\n -------\n bool\n True if \"other\" is an Index and it has the same elements and order\n as the calling index; False otherwise.\n\n Examples\n --------\n >>> idx1 = pd.Index([1, 2, 3])\n >>> idx1\n Int64Index([1, 2, 3], dtype='int64')\n >>> idx1.equals(pd.Index([1, 2, 3]))\n True\n\n The elements inside are compared\n\n >>> idx2 = pd.Index([\"1\", \"2\", \"3\"])\n >>> idx2\n Index(['1', '2', '3'], dtype='object')\n\n >>> idx1.equals(idx2)\n False\n\n The order is compared\n\n >>> ascending_idx = pd.Index([1, 2, 3])\n >>> ascending_idx\n Int64Index([1, 2, 3], dtype='int64')\n >>> descending_idx = pd.Index([3, 2, 1])\n >>> descending_idx\n Int64Index([3, 2, 1], dtype='int64')\n >>> ascending_idx.equals(descending_idx)\n False\n\n The dtype is *not* compared\n\n >>> int64_idx = pd.Int64Index([1, 2, 3])\n >>> int64_idx\n Int64Index([1, 2, 3], dtype='int64')\n >>> uint64_idx = pd.UInt64Index([1, 2, 3])\n >>> uint64_idx\n UInt64Index([1, 2, 3], dtype='uint64')\n >>> int64_idx.equals(uint64_idx)\n True\n \"\"\"\n if self.is_(other):\n return True\n\n if not isinstance(other, Index):\n return False\n\n if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype):\n # if other is not object, use other's logic for coercion\n return other.equals(self)\n\n if isinstance(other, ABCMultiIndex):\n # d-level MultiIndex can equal d-tuple Index\n return other.equals(self)\n\n if is_extension_array_dtype(other.dtype):\n # All EA-backed Index subclasses override equals\n return other.equals(self)\n\n return array_equivalent(self._values, other._values)\n\n @final\n def identical(self, other) -> bool:\n \"\"\"\n Similar to equals, but checks that object attributes and types are also equal.\n\n Returns\n -------\n bool\n If two Index objects have equal elements and same type True,\n otherwise False.\n \"\"\"\n return (\n self.equals(other)\n and all(\n getattr(self, c, None) == getattr(other, c, None)\n for c in self._comparables\n )\n and type(self) == type(other)\n )\n\n @final\n def asof(self, label):\n \"\"\"\n Return the label from the index, or, if not present, the previous one.\n\n Assuming that the index is sorted, return the passed index label if it\n is in the index, or return the previous index label if the passed one\n is not in the index.\n\n Parameters\n ----------\n label : object\n The label up to which the method returns the latest index label.\n\n Returns\n -------\n object\n The passed label if it is in the index. 
The previous label if the\n passed label is not in the sorted index or `NaN` if there is no\n such label.\n\n See Also\n --------\n Series.asof : Return the latest value in a Series up to the\n passed index.\n merge_asof : Perform an asof merge (similar to left join but it\n matches on nearest key rather than equal key).\n Index.get_loc : An `asof` is a thin wrapper around `get_loc`\n with method='pad'.\n\n Examples\n --------\n `Index.asof` returns the latest index label up to the passed label.\n\n >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03'])\n >>> idx.asof('2014-01-01')\n '2013-12-31'\n\n If the label is in the index, the method returns the passed label.\n\n >>> idx.asof('2014-01-02')\n '2014-01-02'\n\n If all of the labels in the index are later than the passed label,\n NaN is returned.\n\n >>> idx.asof('1999-01-02')\n nan\n\n If the index is not sorted, an error is raised.\n\n >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02',\n ... '2014-01-03'])\n >>> idx_not_sorted.asof('2013-12-31')\n Traceback (most recent call last):\n ValueError: index must be monotonic increasing or decreasing\n \"\"\"\n self._searchsorted_monotonic(label) # validate sortedness\n try:\n loc = self.get_loc(label)\n except (KeyError, TypeError):\n # KeyError -> No exact match, try for padded\n # TypeError -> passed e.g. non-hashable, fall through to get\n # the tested exception message\n indexer = self.get_indexer([label], method=\"pad\")\n if indexer.ndim > 1 or indexer.size > 1:\n raise TypeError(\"asof requires scalar valued input\")\n loc = indexer.item()\n if loc == -1:\n return self._na_value\n else:\n if isinstance(loc, slice):\n loc = loc.indices(len(self))[-1]\n\n return self[loc]\n\n def asof_locs(self, where: Index, mask: np.ndarray) -> npt.NDArray[np.intp]:\n \"\"\"\n Return the locations (indices) of labels in the index.\n\n As in the `asof` function, if the label (a particular entry in\n `where`) is not in the index, the latest index label up to the\n passed label is chosen and its index returned.\n\n If all of the labels in the index are later than a label in `where`,\n -1 is returned.\n\n `mask` is used to ignore NA values in the index during calculation.\n\n Parameters\n ----------\n where : Index\n An Index consisting of an array of timestamps.\n mask : np.ndarray[bool]\n Array of booleans denoting where values in the original\n data are not NA.\n\n Returns\n -------\n np.ndarray[np.intp]\n An array of locations (indices) of the labels from the Index\n which correspond to the return values of the `asof` function\n for every element in `where`.\n \"\"\"\n locs = self._values[mask].searchsorted(where._values, side=\"right\")\n locs = np.where(locs > 0, locs - 1, 0)\n\n result = np.arange(len(self), dtype=np.intp)[mask].take(locs)\n\n # TODO: overload return type of ExtensionArray.__getitem__\n first_value = cast(Any, self._values[mask.argmax()])\n result[(locs == 0) & (where._values < first_value)] = -1\n\n return result\n\n @final\n def sort_values(\n self,\n return_indexer: bool = False,\n ascending: bool = True,\n na_position: str_t = \"last\",\n key: Callable | None = None,\n ):\n \"\"\"\n Return a sorted copy of the index.\n\n Return a sorted copy of the index, and optionally return the indices\n that sorted the index itself.\n\n Parameters\n ----------\n return_indexer : bool, default False\n Should the indices that would sort the index be returned.\n ascending : bool, default True\n Should the index values be sorted in an ascending order.\n na_position : {'first' 
or 'last'}, default 'last'\n Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at\n the end.\n\n .. versionadded:: 1.2.0\n\n key : callable, optional\n If not None, apply the key function to the index values\n before sorting. This is similar to the `key` argument in the\n builtin :meth:`sorted` function, with the notable difference that\n this `key` function should be *vectorized*. It should expect an\n ``Index`` and return an ``Index`` of the same shape.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n sorted_index : pandas.Index\n Sorted copy of the index.\n indexer : numpy.ndarray, optional\n The indices that the index itself was sorted by.\n\n See Also\n --------\n Series.sort_values : Sort values of a Series.\n DataFrame.sort_values : Sort values in a DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index([10, 100, 1, 1000])\n >>> idx\n Int64Index([10, 100, 1, 1000], dtype='int64')\n\n Sort values in ascending order (default behavior).\n\n >>> idx.sort_values()\n Int64Index([1, 10, 100, 1000], dtype='int64')\n\n Sort values in descending order, and also get the indices `idx` was\n sorted by.\n\n >>> idx.sort_values(ascending=False, return_indexer=True)\n (Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))\n \"\"\"\n idx = ensure_key_mapped(self, key)\n\n # GH 35584. Sort missing values according to na_position kwarg\n # ignore na_position for MultiIndex\n if not isinstance(self, ABCMultiIndex):\n _as = nargsort(\n items=idx, ascending=ascending, na_position=na_position, key=key\n )\n else:\n _as = idx.argsort()\n if not ascending:\n _as = _as[::-1]\n\n sorted_index = self.take(_as)\n\n if return_indexer:\n return sorted_index, _as\n else:\n return sorted_index\n\n @final\n def sort(self, *args, **kwargs):\n \"\"\"\n Use sort_values instead.\n \"\"\"\n raise TypeError(\"cannot sort an Index object in-place, use sort_values instead\")\n\n def shift(self, periods=1, freq=None):\n \"\"\"\n Shift index by desired number of time frequency increments.\n\n This method is for shifting the values of datetime-like indexes\n by a specified time increment a given number of times.\n\n Parameters\n ----------\n periods : int, default 1\n Number of periods (or increments) to shift by,\n can be positive or negative.\n freq : pandas.DateOffset, pandas.Timedelta or str, optional\n Frequency increment to shift by.\n If None, the index is shifted by its own `freq` attribute.\n Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.\n\n Returns\n -------\n pandas.Index\n Shifted index.\n\n See Also\n --------\n Series.shift : Shift values of Series.\n\n Notes\n -----\n This method is only implemented for datetime-like index classes,\n i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex.\n\n Examples\n --------\n Put the first 5 month starts of 2011 into an index.\n\n >>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS')\n >>> month_starts\n DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01',\n '2011-05-01'],\n dtype='datetime64[ns]', freq='MS')\n\n Shift the index by 10 days.\n\n >>> month_starts.shift(10, freq='D')\n DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',\n '2011-05-11'],\n dtype='datetime64[ns]', freq=None)\n\n The default value of `freq` is the `freq` attribute of the index,\n which is 'MS' (month start) in this example.\n\n >>> month_starts.shift(10)\n DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01',\n '2012-03-01'],\n dtype='datetime64[ns]', freq='MS')\n \"\"\"\n raise 
NotImplementedError(\n f\"This method is only implemented for DatetimeIndex, PeriodIndex and \"\n f\"TimedeltaIndex; Got type {type(self).__name__}\"\n )\n\n def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:\n \"\"\"\n Return the integer indices that would sort the index.\n\n Parameters\n ----------\n *args\n Passed to `numpy.ndarray.argsort`.\n **kwargs\n Passed to `numpy.ndarray.argsort`.\n\n Returns\n -------\n np.ndarray[np.intp]\n Integer indices that would sort the index if used as\n an indexer.\n\n See Also\n --------\n numpy.argsort : Similar method for NumPy arrays.\n Index.sort_values : Return sorted copy of Index.\n\n Examples\n --------\n >>> idx = pd.Index(['b', 'a', 'd', 'c'])\n >>> idx\n Index(['b', 'a', 'd', 'c'], dtype='object')\n\n >>> order = idx.argsort()\n >>> order\n array([1, 0, 3, 2])\n\n >>> idx[order]\n Index(['a', 'b', 'c', 'd'], dtype='object')\n \"\"\"\n # This works for either ndarray or EA, is overridden\n # by RangeIndex, MultIIndex\n return self._data.argsort(*args, **kwargs)\n\n @final\n def get_value(self, series: Series, key):\n \"\"\"\n Fast lookup of value from 1-dimensional ndarray.\n\n Only use this if you know what you're doing.\n\n Returns\n -------\n scalar or Series\n \"\"\"\n warnings.warn(\n \"get_value is deprecated and will be removed in a future version. \"\n \"Use Series[key] instead\",\n FutureWarning,\n stacklevel=2,\n )\n\n self._check_indexing_error(key)\n\n try:\n # GH 20882, 21257\n # First try to convert the key to a location\n # If that fails, raise a KeyError if an integer\n # index, otherwise, see if key is an integer, and\n # try that\n loc = self.get_loc(key)\n except KeyError:\n if not self._should_fallback_to_positional:\n raise\n elif is_integer(key):\n # If the Index cannot hold integer, then this is unambiguously\n # a locational lookup.\n loc = key\n else:\n raise\n\n return self._get_values_for_loc(series, loc, key)\n\n def _check_indexing_error(self, key):\n if not is_scalar(key):\n # if key is not a scalar, directly raise an error (the code below\n # would convert to numpy arrays and raise later any way) - GH29926\n raise InvalidIndexError(key)\n\n @cache_readonly\n def _should_fallback_to_positional(self) -> bool:\n \"\"\"\n Should an integer key be treated as positional?\n \"\"\"\n return not self.holds_integer() and not self.is_boolean()\n\n def _get_values_for_loc(self, series: Series, loc, key):\n \"\"\"\n Do a positional lookup on the given Series, returning either a scalar\n or a Series.\n\n Assumes that `series.index is self`\n\n key is included for MultiIndex compat.\n \"\"\"\n if is_integer(loc):\n return series._values[loc]\n\n return series.iloc[loc]\n\n @final\n def set_value(self, arr, key, value):\n \"\"\"\n Fast lookup of value from 1-dimensional ndarray.\n\n .. deprecated:: 1.0\n\n Notes\n -----\n Only use this if you know what you're doing.\n \"\"\"\n warnings.warn(\n (\n \"The 'set_value' method is deprecated, and \"\n \"will be removed in a future version.\"\n ),\n FutureWarning,\n stacklevel=2,\n )\n loc = self._engine.get_loc(key)\n validate_numeric_casting(arr.dtype, value)\n arr[loc] = value\n\n _index_shared_docs[\n \"get_indexer_non_unique\"\n ] = \"\"\"\n Compute indexer and mask for new index given the current index. 
The\n indexer should be then used as an input to ndarray.take to align the\n current data to the new index.\n\n Parameters\n ----------\n target : %(target_klass)s\n\n Returns\n -------\n indexer : np.ndarray[np.intp]\n Integers from 0 to n - 1 indicating that the index at these\n positions matches the corresponding target values. Missing values\n in the target are marked by -1.\n missing : np.ndarray[np.intp]\n An indexer into the target of the values not found.\n These correspond to the -1 in the indexer array.\n \"\"\"\n\n @Appender(_index_shared_docs[\"get_indexer_non_unique\"] % _index_doc_kwargs)\n def get_indexer_non_unique(\n self, target\n ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n target = ensure_index(target)\n target = self._maybe_cast_listlike_indexer(target)\n\n if not self._should_compare(target) and not is_interval_dtype(self.dtype):\n # IntervalIndex get special treatment bc numeric scalars can be\n # matched to Interval scalars\n return self._get_indexer_non_comparable(target, method=None, unique=False)\n\n pself, ptarget = self._maybe_promote(target)\n if pself is not self or ptarget is not target:\n return pself.get_indexer_non_unique(ptarget)\n\n if not is_dtype_equal(self.dtype, target.dtype):\n # TODO: if object, could use infer_dtype to preempt costly\n # conversion if still non-comparable?\n dtype = self._find_common_type_compat(target)\n\n this = self.astype(dtype, copy=False)\n that = target.astype(dtype, copy=False)\n return this.get_indexer_non_unique(that)\n\n # Note: _maybe_promote ensures we never get here with MultiIndex\n # self and non-Multi target\n tgt_values = target._get_engine_target()\n\n indexer, missing = self._engine.get_indexer_non_unique(tgt_values)\n return ensure_platform_int(indexer), ensure_platform_int(missing)\n\n @final\n def get_indexer_for(self, target) -> npt.NDArray[np.intp]:\n \"\"\"\n Guaranteed return of an indexer even when non-unique.\n\n This dispatches to get_indexer or get_indexer_non_unique\n as appropriate.\n\n Returns\n -------\n np.ndarray[np.intp]\n List of indices.\n \"\"\"\n if self._index_as_unique:\n return self.get_indexer(target)\n indexer, _ = self.get_indexer_non_unique(target)\n return indexer\n\n def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]:\n \"\"\"\n Analogue to get_indexer that raises if any elements are missing.\n \"\"\"\n keyarr = key\n if not isinstance(keyarr, Index):\n keyarr = com.asarray_tuplesafe(keyarr)\n\n if self._index_as_unique:\n indexer = self.get_indexer_for(keyarr)\n keyarr = self.reindex(keyarr)[0]\n else:\n keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr)\n\n self._raise_if_missing(keyarr, indexer, axis_name)\n\n keyarr = self.take(indexer)\n if isinstance(key, Index):\n # GH 42790 - Preserve name from an Index\n keyarr.name = key.name\n if keyarr.dtype.kind in [\"m\", \"M\"]:\n # DTI/TDI.take can infer a freq in some cases when we dont want one\n if isinstance(key, list) or (\n isinstance(key, type(self))\n # \"Index\" has no attribute \"freq\"\n and key.freq is None # type: ignore[attr-defined]\n ):\n keyarr = keyarr._with_freq(None)\n\n return keyarr, indexer\n\n def _raise_if_missing(self, key, indexer, axis_name: str_t):\n \"\"\"\n Check that indexer can be used to return a result.\n\n e.g. 
at least one element was found,\n unless the list of keys was actually empty.\n\n Parameters\n ----------\n key : list-like\n Targeted labels (only used to show correct error message).\n indexer: array-like of booleans\n Indices corresponding to the key,\n (with -1 indicating not found).\n axis_name : str\n\n Raises\n ------\n KeyError\n If at least one key was requested but none was found.\n \"\"\"\n if len(key) == 0:\n return\n\n # Count missing values\n missing_mask = indexer < 0\n nmissing = missing_mask.sum()\n\n if nmissing:\n\n # TODO: remove special-case; this is just to keep exception\n # message tests from raising while debugging\n use_interval_msg = is_interval_dtype(self.dtype) or (\n is_categorical_dtype(self.dtype)\n # \"Index\" has no attribute \"categories\" [attr-defined]\n and is_interval_dtype(\n self.categories.dtype # type: ignore[attr-defined]\n )\n )\n\n if nmissing == len(indexer):\n if use_interval_msg:\n key = list(key)\n raise KeyError(f\"None of [{key}] are in the [{axis_name}]\")\n\n not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique())\n raise KeyError(f\"{not_found} not in index\")\n\n @overload\n def _get_indexer_non_comparable(\n self, target: Index, method, unique: Literal[True] = ...\n ) -> npt.NDArray[np.intp]:\n ...\n\n @overload\n def _get_indexer_non_comparable(\n self, target: Index, method, unique: Literal[False]\n ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n ...\n\n @overload\n def _get_indexer_non_comparable(\n self, target: Index, method, unique: bool = True\n ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n ...\n\n @final\n def _get_indexer_non_comparable(\n self, target: Index, method, unique: bool = True\n ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n \"\"\"\n Called from get_indexer or get_indexer_non_unique when the target\n is of a non-comparable dtype.\n\n For get_indexer lookups with method=None, get_indexer is an _equality_\n check, so non-comparable dtypes mean we will always have no matches.\n\n For get_indexer lookups with a method, get_indexer is an _inequality_\n check, so non-comparable dtypes mean we will always raise TypeError.\n\n Parameters\n ----------\n target : Index\n method : str or None\n unique : bool, default True\n * True if called from get_indexer.\n * False if called from get_indexer_non_unique.\n\n Raises\n ------\n TypeError\n If doing an inequality check, i.e. 
method is not None.\n \"\"\"\n if method is not None:\n other = unpack_nested_dtype(target)\n raise TypeError(f\"Cannot compare dtypes {self.dtype} and {other.dtype}\")\n\n no_matches = -1 * np.ones(target.shape, dtype=np.intp)\n if unique:\n # This is for get_indexer\n return no_matches\n else:\n # This is for get_indexer_non_unique\n missing = np.arange(len(target), dtype=np.intp)\n return no_matches, missing\n\n @property\n def _index_as_unique(self) -> bool:\n \"\"\"\n Whether we should treat this as unique for the sake of\n get_indexer vs get_indexer_non_unique.\n\n For IntervalIndex compat.\n \"\"\"\n return self.is_unique\n\n _requires_unique_msg = \"Reindexing only valid with uniquely valued Index objects\"\n\n @final\n def _maybe_promote(self, other: Index) -> tuple[Index, Index]:\n \"\"\"\n When dealing with an object-dtype Index and a non-object Index, see\n if we can upcast the object-dtype one to improve performance.\n \"\"\"\n\n if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex):\n if (\n self.tz is not None\n and other.tz is not None\n and not tz_compare(self.tz, other.tz)\n ):\n # standardize on UTC\n return self.tz_convert(\"UTC\"), other.tz_convert(\"UTC\")\n\n elif self.inferred_type == \"date\" and isinstance(other, ABCDatetimeIndex):\n try:\n return type(other)(self), other\n except OutOfBoundsDatetime:\n return self, other\n elif self.inferred_type == \"timedelta\" and isinstance(other, ABCTimedeltaIndex):\n # TODO: we dont have tests that get here\n return type(other)(self), other\n elif self.inferred_type == \"boolean\":\n if not is_object_dtype(self.dtype):\n return self.astype(\"object\"), other.astype(\"object\")\n\n elif self.dtype.kind == \"u\" and other.dtype.kind == \"i\":\n # GH#41873\n if other.min() >= 0:\n # lookup min as it may be cached\n # TODO: may need itemsize check if we have non-64-bit Indexes\n return self, other.astype(self.dtype)\n\n elif self._is_multi and not other._is_multi:\n try:\n # \"Type[Index]\" has no attribute \"from_tuples\"\n other = type(self).from_tuples(other) # type: ignore[attr-defined]\n except (TypeError, ValueError):\n # let's instead try with a straight Index\n self = Index(self._values)\n\n if not is_object_dtype(self.dtype) and is_object_dtype(other.dtype):\n # Reverse op so we dont need to re-implement on the subclasses\n other, self = other._maybe_promote(self)\n\n return self, other\n\n @final\n def _find_common_type_compat(self, target) -> DtypeObj:\n \"\"\"\n Implementation of find_common_type that adjusts for Index-specific\n special cases.\n \"\"\"\n if is_interval_dtype(self.dtype) and is_valid_na_for_dtype(target, self.dtype):\n # e.g. 
setting NA value into IntervalArray[int64]\n self = cast(\"IntervalIndex\", self)\n return IntervalDtype(np.float64, closed=self.closed)\n\n target_dtype, _ = infer_dtype_from(target, pandas_dtype=True)\n\n # special case: if one dtype is uint64 and the other a signed int, return object\n # See https://github.com/pandas-dev/pandas/issues/26778 for discussion\n # Now it's:\n # * float | [u]int -> float\n # * uint64 | signed int -> object\n # We may change union(float | [u]int) to go to object.\n if self.dtype == \"uint64\" or target_dtype == \"uint64\":\n if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype(\n target_dtype\n ):\n return np.dtype(\"object\")\n\n dtype = find_common_type([self.dtype, target_dtype])\n\n if dtype.kind in [\"i\", \"u\"]:\n # TODO: what about reversed with self being categorical?\n if (\n isinstance(target, Index)\n and is_categorical_dtype(target.dtype)\n and target.hasnans\n ):\n # FIXME: find_common_type incorrect with Categorical GH#38240\n # FIXME: some cases where float64 cast can be lossy?\n dtype = np.dtype(np.float64)\n if dtype.kind == \"c\":\n dtype = np.dtype(object)\n return dtype\n\n @final\n def _should_compare(self, other: Index) -> bool:\n \"\"\"\n Check if `self == other` can ever have non-False entries.\n \"\"\"\n\n if (other.is_boolean() and self.is_numeric()) or (\n self.is_boolean() and other.is_numeric()\n ):\n # GH#16877 Treat boolean labels passed to a numeric index as not\n # found. Without this fix False and True would be treated as 0 and 1\n # respectively.\n return False\n\n other = unpack_nested_dtype(other)\n dtype = other.dtype\n return self._is_comparable_dtype(dtype) or is_object_dtype(dtype)\n\n def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n \"\"\"\n Can we compare values of the given dtype to our own?\n \"\"\"\n return True\n\n @final\n def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]:\n \"\"\"\n Group the index labels by a given array of values.\n\n Parameters\n ----------\n values : array\n Values used to determine the groups.\n\n Returns\n -------\n dict\n {group name -> group labels}\n \"\"\"\n # TODO: if we are a MultiIndex, we can do better\n # that converting to tuples\n if isinstance(values, ABCMultiIndex):\n values = values._values\n values = Categorical(values)\n result = values._reverse_indexer()\n\n # map to the label\n result = {k: self.take(v) for k, v in result.items()}\n\n return PrettyDict(result)\n\n def map(self, mapper, na_action=None):\n \"\"\"\n Map values using input correspondence (a dict, Series, or function).\n\n Parameters\n ----------\n mapper : function, dict, or Series\n Mapping correspondence.\n na_action : {None, 'ignore'}\n If 'ignore', propagate NA values, without passing them to the\n mapping correspondence.\n\n Returns\n -------\n applied : Union[Index, MultiIndex], inferred\n The output of the mapping function applied to the index.\n If the function returns a tuple with more than one element\n a MultiIndex will be returned.\n \"\"\"\n from pandas.core.indexes.multi import MultiIndex\n\n new_values = self._map_values(mapper, na_action=na_action)\n\n attributes = self._get_attributes_dict()\n\n # we can return a MultiIndex\n if new_values.size and isinstance(new_values[0], tuple):\n if isinstance(self, MultiIndex):\n names = self.names\n elif attributes.get(\"name\"):\n names = [attributes.get(\"name\")] * len(new_values[0])\n else:\n names = None\n return MultiIndex.from_tuples(new_values, names=names)\n\n attributes[\"copy\"] = False\n if not 
new_values.size:\n # empty\n attributes[\"dtype\"] = self.dtype\n\n return Index(new_values, **attributes)\n\n # TODO: De-duplicate with map, xref GH#32349\n @final\n def _transform_index(self, func, *, level=None) -> Index:\n \"\"\"\n Apply function to all values found in index.\n\n This includes transforming multiindex entries separately.\n Only apply function to one level of the MultiIndex if level is specified.\n \"\"\"\n if isinstance(self, ABCMultiIndex):\n if level is not None:\n # Caller is responsible for ensuring level is positional.\n items = [\n tuple(func(y) if i == level else y for i, y in enumerate(x))\n for x in self\n ]\n else:\n items = [tuple(func(y) for y in x) for x in self]\n return type(self).from_tuples(items, names=self.names)\n else:\n items = [func(x) for x in self]\n return Index(items, name=self.name, tupleize_cols=False)\n\n def isin(self, values, level=None) -> np.ndarray:\n \"\"\"\n Return a boolean array where the index values are in `values`.\n\n Compute boolean array of whether each index value is found in the\n passed set of values. The length of the returned boolean array matches\n the length of the index.\n\n Parameters\n ----------\n values : set or list-like\n Sought values.\n level : str or int, optional\n Name or position of the index level to use (if the index is a\n `MultiIndex`).\n\n Returns\n -------\n np.ndarray[bool]\n NumPy array of boolean values.\n\n See Also\n --------\n Series.isin : Same for Series.\n DataFrame.isin : Same method for DataFrames.\n\n Notes\n -----\n In the case of `MultiIndex` you must either specify `values` as a\n list-like object containing tuples that are the same length as the\n number of levels, or specify `level`. Otherwise it will raise a\n ``ValueError``.\n\n If `level` is specified:\n\n - if it is the name of one *and only one* index level, use that level;\n - otherwise it should be a number indicating level position.\n\n Examples\n --------\n >>> idx = pd.Index([1,2,3])\n >>> idx\n Int64Index([1, 2, 3], dtype='int64')\n\n Check whether each index value in a list of values.\n\n >>> idx.isin([1, 4])\n array([ True, False, False])\n\n >>> midx = pd.MultiIndex.from_arrays([[1,2,3],\n ... ['red', 'blue', 'green']],\n ... 
names=('number', 'color'))\n >>> midx\n MultiIndex([(1, 'red'),\n (2, 'blue'),\n (3, 'green')],\n names=['number', 'color'])\n\n Check whether the strings in the 'color' level of the MultiIndex\n are in a list of colors.\n\n >>> midx.isin(['red', 'orange', 'yellow'], level='color')\n array([ True, False, False])\n\n To check across the levels of a MultiIndex, pass a list of tuples:\n\n >>> midx.isin([(1, 'red'), (3, 'red')])\n array([ True, False, False])\n\n For a DatetimeIndex, string values in `values` are converted to\n Timestamps.\n\n >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']\n >>> dti = pd.to_datetime(dates)\n >>> dti\n DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],\n dtype='datetime64[ns]', freq=None)\n\n >>> dti.isin(['2000-03-11'])\n array([ True, False, False])\n \"\"\"\n if level is not None:\n self._validate_index_level(level)\n return algos.isin(self._values, values)\n\n def _get_string_slice(self, key: str_t):\n # this is for partial string indexing,\n # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex\n raise NotImplementedError\n\n def slice_indexer(\n self,\n start: Hashable | None = None,\n end: Hashable | None = None,\n step: int | None = None,\n kind=no_default,\n ) -> slice:\n \"\"\"\n Compute the slice indexer for input labels and step.\n\n Index needs to be ordered and unique.\n\n Parameters\n ----------\n start : label, default None\n If None, defaults to the beginning.\n end : label, default None\n If None, defaults to the end.\n step : int, default None\n kind : str, default None\n\n .. deprecated:: 1.4.0\n\n Returns\n -------\n indexer : slice\n\n Raises\n ------\n KeyError : If key does not exist, or key is not unique and index is\n not ordered.\n\n Notes\n -----\n This function assumes that the data is sorted, so use at your own peril\n\n Examples\n --------\n This is a method on all index types. For example you can do:\n\n >>> idx = pd.Index(list('abcd'))\n >>> idx.slice_indexer(start='b', end='c')\n slice(1, 3, None)\n\n >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])\n >>> idx.slice_indexer(start='b', end=('c', 'g'))\n slice(1, 3, None)\n \"\"\"\n self._deprecated_arg(kind, \"kind\", \"slice_indexer\")\n\n start_slice, end_slice = self.slice_locs(start, end, step=step)\n\n # return a slice\n if not is_scalar(start_slice):\n raise AssertionError(\"Start slice bound is non-scalar\")\n if not is_scalar(end_slice):\n raise AssertionError(\"End slice bound is non-scalar\")\n\n return slice(start_slice, end_slice, step)\n\n def _maybe_cast_indexer(self, key):\n \"\"\"\n If we have a float key and are not a floating index, then try to cast\n to an int if equivalent.\n \"\"\"\n if not self.is_floating():\n return com.cast_scalar_indexer(key)\n return key\n\n def _maybe_cast_listlike_indexer(self, target) -> Index:\n \"\"\"\n Analogue to maybe_cast_indexer for get_indexer instead of get_loc.\n \"\"\"\n return ensure_index(target)\n\n @final\n def _validate_indexer(self, form: str_t, key, kind: str_t):\n \"\"\"\n If we are positional indexer, validate that we have appropriate\n typed bounds must be an integer.\n \"\"\"\n assert kind in [\"getitem\", \"iloc\"]\n\n if key is not None and not is_integer(key):\n raise self._invalid_indexer(form, key)\n\n def _maybe_cast_slice_bound(self, label, side: str_t, kind=no_default):\n \"\"\"\n This function should be overloaded in subclasses that allow non-trivial\n casting on label-slice bounds, e.g. 
datetime-like indices allowing\n strings containing formatted datetimes.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'loc', 'getitem'} or None\n\n .. deprecated:: 1.3.0\n\n Returns\n -------\n label : object\n\n Notes\n -----\n Value of `side` parameter should be validated in caller.\n \"\"\"\n assert kind in [\"loc\", \"getitem\", None, no_default]\n self._deprecated_arg(kind, \"kind\", \"_maybe_cast_slice_bound\")\n\n # We are a plain index here (sub-class override this method if they\n # wish to have special treatment for floats/ints, e.g. Float64Index and\n # datetimelike Indexes\n # reject them, if index does not contain label\n if (is_float(label) or is_integer(label)) and label not in self._values:\n raise self._invalid_indexer(\"slice\", label)\n\n return label\n\n def _searchsorted_monotonic(self, label, side: str_t = \"left\"):\n if self.is_monotonic_increasing:\n return self.searchsorted(label, side=side)\n elif self.is_monotonic_decreasing:\n # np.searchsorted expects ascending sort order, have to reverse\n # everything for it to work (element ordering, search side and\n # resulting value).\n pos = self[::-1].searchsorted(\n label, side=\"right\" if side == \"left\" else \"left\"\n )\n return len(self) - pos\n\n raise ValueError(\"index must be monotonic increasing or decreasing\")\n\n def get_slice_bound(self, label, side: str_t, kind=no_default) -> int:\n \"\"\"\n Calculate slice bound that corresponds to given label.\n\n Returns leftmost (one-past-the-rightmost if ``side=='right'``) position\n of given label.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'loc', 'getitem'} or None\n\n .. deprecated:: 1.4.0\n\n Returns\n -------\n int\n Index of label.\n \"\"\"\n assert kind in [\"loc\", \"getitem\", None, no_default]\n self._deprecated_arg(kind, \"kind\", \"get_slice_bound\")\n\n if side not in (\"left\", \"right\"):\n raise ValueError(\n \"Invalid value for side kwarg, must be either \"\n f\"'left' or 'right': {side}\"\n )\n\n original_label = label\n\n # For datetime indices label may be a string that has to be converted\n # to datetime boundary according to its resolution.\n label = self._maybe_cast_slice_bound(label, side)\n\n # we need to look up the label\n try:\n slc = self.get_loc(label)\n except KeyError as err:\n try:\n return self._searchsorted_monotonic(label, side)\n except ValueError:\n # raise the original KeyError\n raise err\n\n if isinstance(slc, np.ndarray):\n # get_loc may return a boolean array or an array of indices, which\n # is OK as long as they are representable by a slice.\n if is_bool_dtype(slc):\n slc = lib.maybe_booleans_to_slice(slc.view(\"u1\"))\n else:\n slc = lib.maybe_indices_to_slice(\n slc.astype(np.intp, copy=False), len(self)\n )\n if isinstance(slc, np.ndarray):\n raise KeyError(\n f\"Cannot get {side} slice bound for non-unique \"\n f\"label: {repr(original_label)}\"\n )\n\n if isinstance(slc, slice):\n if side == \"left\":\n return slc.start\n else:\n return slc.stop\n else:\n if side == \"right\":\n return slc + 1\n else:\n return slc\n\n def slice_locs(self, start=None, end=None, step=None, kind=no_default):\n \"\"\"\n Compute slice locations for input labels.\n\n Parameters\n ----------\n start : label, default None\n If None, defaults to the beginning.\n end : label, default None\n If None, defaults to the end.\n step : int, defaults None\n If None, defaults to 1.\n kind : {'loc', 'getitem'} or None\n\n .. 
deprecated:: 1.4.0\n\n Returns\n -------\n start, end : int\n\n See Also\n --------\n Index.get_loc : Get location for a single label.\n\n Notes\n -----\n This method only works if the index is monotonic or unique.\n\n Examples\n --------\n >>> idx = pd.Index(list('abcd'))\n >>> idx.slice_locs(start='b', end='c')\n (1, 3)\n \"\"\"\n self._deprecated_arg(kind, \"kind\", \"slice_locs\")\n inc = step is None or step >= 0\n\n if not inc:\n # If it's a reverse slice, temporarily swap bounds.\n start, end = end, start\n\n # GH 16785: If start and end happen to be date strings with UTC offsets\n # attempt to parse and check that the offsets are the same\n if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)):\n try:\n ts_start = Timestamp(start)\n ts_end = Timestamp(end)\n except (ValueError, TypeError):\n pass\n else:\n if not tz_compare(ts_start.tzinfo, ts_end.tzinfo):\n raise ValueError(\"Both dates must have the same UTC offset\")\n\n start_slice = None\n if start is not None:\n start_slice = self.get_slice_bound(start, \"left\")\n if start_slice is None:\n start_slice = 0\n\n end_slice = None\n if end is not None:\n end_slice = self.get_slice_bound(end, \"right\")\n if end_slice is None:\n end_slice = len(self)\n\n if not inc:\n # Bounds at this moment are swapped, swap them back and shift by 1.\n #\n # slice_locs('B', 'A', step=-1): s='B', e='A'\n #\n # s='A' e='B'\n # AFTER SWAP: | |\n # v ------------------> V\n # -----------------------------------\n # | | |A|A|A|A| | | | | |B|B| | | | |\n # -----------------------------------\n # ^ <------------------ ^\n # SHOULD BE: | |\n # end=s-1 start=e-1\n #\n end_slice, start_slice = start_slice - 1, end_slice - 1\n\n # i == -1 triggers ``len(self) + i`` selection that points to the\n # last element, not before-the-first one, subtracting len(self)\n # compensates that.\n if end_slice == -1:\n end_slice -= len(self)\n if start_slice == -1:\n start_slice -= len(self)\n\n return start_slice, end_slice\n\n def delete(self: _IndexT, loc) -> _IndexT:\n \"\"\"\n Make new Index with passed location(-s) deleted.\n\n Parameters\n ----------\n loc : int or list of int\n Location of item(-s) which will be deleted.\n Use a list of locations to delete more than one value at the same time.\n\n Returns\n -------\n Index\n Will be same type as self, except for RangeIndex.\n\n See Also\n --------\n numpy.delete : Delete any rows and column from NumPy array (ndarray).\n\n Examples\n --------\n >>> idx = pd.Index(['a', 'b', 'c'])\n >>> idx.delete(1)\n Index(['a', 'c'], dtype='object')\n\n >>> idx = pd.Index(['a', 'b', 'c'])\n >>> idx.delete([0, 2])\n Index(['b'], dtype='object')\n \"\"\"\n res_values = np.delete(self._data, loc)\n return type(self)._simple_new(res_values, name=self.name)\n\n def insert(self, loc: int, item) -> Index:\n \"\"\"\n Make new Index inserting new item at location.\n\n Follows Python list.append semantics for negative values.\n\n Parameters\n ----------\n loc : int\n item : object\n\n Returns\n -------\n new_index : Index\n \"\"\"\n # Note: this method is overridden by all ExtensionIndex subclasses,\n # so self is never backed by an EA.\n item = lib.item_from_zerodim(item)\n if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object:\n item = self._na_value\n\n try:\n item = self._validate_fill_value(item)\n except TypeError:\n dtype = self._find_common_type_compat(item)\n return self.astype(dtype).insert(loc, item)\n\n arr = np.asarray(self)\n\n # Use Index constructor to ensure we get tuples cast correctly.\n 
item = Index([item], dtype=self.dtype)._values\n idx = np.concatenate((arr[:loc], item, arr[loc:]))\n return Index(idx, name=self.name)\n\n def drop(self, labels, errors: str_t = \"raise\") -> Index:\n \"\"\"\n Make new Index with passed list of labels deleted.\n\n Parameters\n ----------\n labels : array-like or scalar\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and existing labels are dropped.\n\n Returns\n -------\n dropped : Index\n Will be same type as self, except for RangeIndex.\n\n Raises\n ------\n KeyError\n If not all of the labels are found in the selected axis\n \"\"\"\n arr_dtype = \"object\" if self.dtype == \"object\" else None\n labels = com.index_labels_to_array(labels, dtype=arr_dtype)\n indexer = self.get_indexer_for(labels)\n mask = indexer == -1\n if mask.any():\n if errors != \"ignore\":\n raise KeyError(f\"{labels[mask]} not found in axis\")\n indexer = indexer[~mask]\n return self.delete(indexer)\n\n # --------------------------------------------------------------------\n # Generated Arithmetic, Comparison, and Unary Methods\n\n def _cmp_method(self, other, op):\n \"\"\"\n Wrapper used to dispatch comparison operations.\n \"\"\"\n if self.is_(other):\n # fastpath\n if op in {operator.eq, operator.le, operator.ge}:\n arr = np.ones(len(self), dtype=bool)\n if self._can_hold_na and not isinstance(self, ABCMultiIndex):\n # TODO: should set MultiIndex._can_hold_na = False?\n arr[self.isna()] = False\n return arr\n elif op in {operator.ne, operator.lt, operator.gt}:\n return np.zeros(len(self), dtype=bool)\n\n if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len(\n self\n ) != len(other):\n raise ValueError(\"Lengths must match to compare\")\n\n if not isinstance(other, ABCMultiIndex):\n other = extract_array(other, extract_numpy=True)\n else:\n other = np.asarray(other)\n\n if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray):\n # e.g. 
PeriodArray, Categorical\n with np.errstate(all=\"ignore\"):\n result = op(self._values, other)\n\n elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex):\n # don't pass MultiIndex\n with np.errstate(all=\"ignore\"):\n result = ops.comp_method_OBJECT_ARRAY(op, self._values, other)\n\n else:\n with np.errstate(all=\"ignore\"):\n result = ops.comparison_op(self._values, other, op)\n\n return result\n\n def _arith_method(self, other, op):\n \"\"\"\n Wrapper used to dispatch arithmetic operations.\n \"\"\"\n\n from pandas import Series\n\n result = op(Series(self), other)\n if isinstance(result, tuple):\n return (Index(result[0]), Index(result[1]))\n return Index(result)\n\n @final\n def _unary_method(self, op):\n result = op(self._values)\n return Index(result, name=self.name)\n\n def __abs__(self):\n return self._unary_method(operator.abs)\n\n def __neg__(self):\n return self._unary_method(operator.neg)\n\n def __pos__(self):\n return self._unary_method(operator.pos)\n\n def __inv__(self):\n # TODO: why not operator.inv?\n # TODO: __inv__ vs __invert__?\n return self._unary_method(lambda x: -x)\n\n # --------------------------------------------------------------------\n # Reductions\n\n def any(self, *args, **kwargs):\n \"\"\"\n Return whether any element is Truthy.\n\n Parameters\n ----------\n *args\n Required for compatibility with numpy.\n **kwargs\n Required for compatibility with numpy.\n\n Returns\n -------\n any : bool or array-like (if axis is specified)\n A single element array-like may be converted to bool.\n\n See Also\n --------\n Index.all : Return whether all elements are True.\n Series.all : Return whether all elements are True.\n\n Notes\n -----\n Not a Number (NaN), positive infinity and negative infinity\n evaluate to True because these are not equal to zero.\n\n Examples\n --------\n >>> index = pd.Index([0, 1, 2])\n >>> index.any()\n True\n\n >>> index = pd.Index([0, 0, 0])\n >>> index.any()\n False\n \"\"\"\n nv.validate_any(args, kwargs)\n self._maybe_disable_logical_methods(\"any\")\n # error: Argument 1 to \"any\" has incompatible type \"ArrayLike\"; expected\n # \"Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int,\n # float, complex, str, bytes, generic]], Sequence[Sequence[Any]],\n # _SupportsArray]\"\n return np.any(self.values) # type: ignore[arg-type]\n\n def all(self, *args, **kwargs):\n \"\"\"\n Return whether all elements are Truthy.\n\n Parameters\n ----------\n *args\n Required for compatibility with numpy.\n **kwargs\n Required for compatibility with numpy.\n\n Returns\n -------\n all : bool or array-like (if axis is specified)\n A single element array-like may be converted to bool.\n\n See Also\n --------\n Index.any : Return whether any element in an Index is True.\n Series.any : Return whether any element in a Series is True.\n Series.all : Return whether all elements in a Series are True.\n\n Notes\n -----\n Not a Number (NaN), positive infinity and negative infinity\n evaluate to True because these are not equal to zero.\n\n Examples\n --------\n **all**\n\n True, because nonzero integers are considered True.\n\n >>> pd.Index([1, 2, 3]).all()\n True\n\n False, because ``0`` is considered False.\n\n >>> pd.Index([0, 1, 2]).all()\n False\n\n **any**\n\n True, because ``1`` is considered True.\n\n >>> pd.Index([0, 0, 1]).any()\n True\n\n False, because ``0`` is considered False.\n\n >>> pd.Index([0, 0, 0]).any()\n False\n \"\"\"\n nv.validate_all(args, kwargs)\n self._maybe_disable_logical_methods(\"all\")\n # 
error: Argument 1 to \"all\" has incompatible type \"ArrayLike\"; expected\n # \"Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int,\n # float, complex, str, bytes, generic]], Sequence[Sequence[Any]],\n # _SupportsArray]\"\n return np.all(self.values) # type: ignore[arg-type]\n\n @final\n def _maybe_disable_logical_methods(self, opname: str_t):\n \"\"\"\n raise if this Index subclass does not support any or all.\n \"\"\"\n if (\n isinstance(self, ABCMultiIndex)\n or needs_i8_conversion(self.dtype)\n or is_interval_dtype(self.dtype)\n or is_categorical_dtype(self.dtype)\n or is_float_dtype(self.dtype)\n ):\n # This call will raise\n make_invalid_op(opname)(self)\n\n @Appender(IndexOpsMixin.argmin.__doc__)\n def argmin(self, axis=None, skipna=True, *args, **kwargs):\n nv.validate_argmin(args, kwargs)\n nv.validate_minmax_axis(axis)\n\n if not self._is_multi and self.hasnans:\n # Take advantage of cache\n mask = self._isnan\n if not skipna or mask.all():\n return -1\n return super().argmin(skipna=skipna)\n\n @Appender(IndexOpsMixin.argmax.__doc__)\n def argmax(self, axis=None, skipna=True, *args, **kwargs):\n nv.validate_argmax(args, kwargs)\n nv.validate_minmax_axis(axis)\n\n if not self._is_multi and self.hasnans:\n # Take advantage of cache\n mask = self._isnan\n if not skipna or mask.all():\n return -1\n return super().argmax(skipna=skipna)\n\n @doc(IndexOpsMixin.min)\n def min(self, axis=None, skipna=True, *args, **kwargs):\n nv.validate_min(args, kwargs)\n nv.validate_minmax_axis(axis)\n\n if not len(self):\n return self._na_value\n\n if len(self) and self.is_monotonic_increasing:\n # quick check\n first = self[0]\n if not isna(first):\n return first\n\n if not self._is_multi and self.hasnans:\n # Take advantage of cache\n mask = self._isnan\n if not skipna or mask.all():\n return self._na_value\n\n if not self._is_multi and not isinstance(self._values, np.ndarray):\n # \"ExtensionArray\" has no attribute \"min\"\n return self._values.min(skipna=skipna) # type: ignore[attr-defined]\n\n return super().min(skipna=skipna)\n\n @doc(IndexOpsMixin.max)\n def max(self, axis=None, skipna=True, *args, **kwargs):\n nv.validate_max(args, kwargs)\n nv.validate_minmax_axis(axis)\n\n if not len(self):\n return self._na_value\n\n if len(self) and self.is_monotonic_increasing:\n # quick check\n last = self[-1]\n if not isna(last):\n return last\n\n if not self._is_multi and self.hasnans:\n # Take advantage of cache\n mask = self._isnan\n if not skipna or mask.all():\n return self._na_value\n\n if not self._is_multi and not isinstance(self._values, np.ndarray):\n # \"ExtensionArray\" has no attribute \"max\"\n return self._values.max(skipna=skipna) # type: ignore[attr-defined]\n\n return super().max(skipna=skipna)\n\n # --------------------------------------------------------------------\n\n @final\n @property\n def shape(self) -> Shape:\n \"\"\"\n Return a tuple of the shape of the underlying data.\n \"\"\"\n # See GH#27775, GH#27384 for history/reasoning in how this is defined.\n return (len(self),)\n\n @final\n def _deprecated_arg(self, value, name: str_t, methodname: str_t) -> None:\n \"\"\"\n Issue a FutureWarning if the arg/kwarg is not no_default.\n \"\"\"\n if value is not no_default:\n warnings.warn(\n f\"'{name}' argument in {methodname} is deprecated \"\n \"and will be removed in a future version. 
Do not pass it.\",\n FutureWarning,\n stacklevel=3,\n )\n\n\ndef ensure_index_from_sequences(sequences, names=None):\n \"\"\"\n Construct an index from sequences of data.\n\n A single sequence returns an Index. Many sequences returns a\n MultiIndex.\n\n Parameters\n ----------\n sequences : sequence of sequences\n names : sequence of str\n\n Returns\n -------\n index : Index or MultiIndex\n\n Examples\n --------\n >>> ensure_index_from_sequences([[1, 2, 3]], names=[\"name\"])\n Int64Index([1, 2, 3], dtype='int64', name='name')\n\n >>> ensure_index_from_sequences([[\"a\", \"a\"], [\"a\", \"b\"]], names=[\"L1\", \"L2\"])\n MultiIndex([('a', 'a'),\n ('a', 'b')],\n names=['L1', 'L2'])\n\n See Also\n --------\n ensure_index\n \"\"\"\n from pandas.core.indexes.multi import MultiIndex\n\n if len(sequences) == 1:\n if names is not None:\n names = names[0]\n return Index(sequences[0], name=names)\n else:\n return MultiIndex.from_arrays(sequences, names=names)\n\n\ndef ensure_index(index_like: AnyArrayLike | Sequence, copy: bool = False) -> Index:\n \"\"\"\n Ensure that we have an index from some index-like object.\n\n Parameters\n ----------\n index_like : sequence\n An Index or other sequence\n copy : bool, default False\n\n Returns\n -------\n index : Index or MultiIndex\n\n See Also\n --------\n ensure_index_from_sequences\n\n Examples\n --------\n >>> ensure_index(['a', 'b'])\n Index(['a', 'b'], dtype='object')\n\n >>> ensure_index([('a', 'a'), ('b', 'c')])\n Index([('a', 'a'), ('b', 'c')], dtype='object')\n\n >>> ensure_index([['a', 'a'], ['b', 'c']])\n MultiIndex([('a', 'b'),\n ('a', 'c')],\n )\n \"\"\"\n if isinstance(index_like, Index):\n if copy:\n index_like = index_like.copy()\n return index_like\n\n if isinstance(index_like, ABCSeries):\n name = index_like.name\n return Index(index_like, name=name, copy=copy)\n\n if is_iterator(index_like):\n index_like = list(index_like)\n\n if isinstance(index_like, list):\n if type(index_like) is not list:\n # must check for exactly list here because of strict type\n # check in clean_index_list\n index_like = list(index_like)\n\n if len(index_like) and lib.is_all_arraylike(index_like):\n from pandas.core.indexes.multi import MultiIndex\n\n return MultiIndex.from_arrays(index_like)\n else:\n return Index(index_like, copy=copy, tupleize_cols=False)\n else:\n\n return Index(index_like, copy=copy)\n\n\ndef ensure_has_len(seq):\n \"\"\"\n If seq is an iterator, put its values into a list.\n \"\"\"\n try:\n len(seq)\n except TypeError:\n return list(seq)\n else:\n return seq\n\n\ndef trim_front(strings: list[str]) -> list[str]:\n \"\"\"\n Trims zeros and decimal points.\n\n Examples\n --------\n >>> trim_front([\" a\", \" b\"])\n ['a', 'b']\n\n >>> trim_front([\" a\", \" \"])\n ['a', '']\n \"\"\"\n if not strings:\n return strings\n while all(strings) and all(x[0] == \" \" for x in strings):\n strings = [x[1:] for x in strings]\n return strings\n\n\ndef _validate_join_method(method: str) -> None:\n if method not in [\"left\", \"right\", \"inner\", \"outer\"]:\n raise ValueError(f\"do not recognize join method {method}\")\n\n\ndef default_index(n: int) -> RangeIndex:\n from pandas.core.indexes.range import RangeIndex\n\n return RangeIndex(0, n, name=None)\n\n\ndef maybe_extract_name(name, obj, cls) -> Hashable:\n \"\"\"\n If no name is passed, then extract it from data, validating hashability.\n \"\"\"\n if name is None and isinstance(obj, (Index, ABCSeries)):\n # Note we don't just check for \"name\" attribute since that would\n # pick up e.g. 
dtype.name\n name = obj.name\n\n # GH#29069\n if not is_hashable(name):\n raise TypeError(f\"{cls.__name__}.name must be a hashable type\")\n\n return name\n\n\ndef _maybe_cast_data_without_dtype(subarr: np.ndarray) -> ArrayLike:\n \"\"\"\n If we have an arraylike input but no passed dtype, try to infer\n a supported dtype.\n\n Parameters\n ----------\n subarr : np.ndarray[object]\n\n Returns\n -------\n np.ndarray or ExtensionArray\n \"\"\"\n\n result = lib.maybe_convert_objects(\n subarr,\n convert_datetime=True,\n convert_timedelta=True,\n convert_period=True,\n convert_interval=True,\n dtype_if_all_nat=np.dtype(\"datetime64[ns]\"),\n )\n if result.dtype.kind in [\"b\", \"c\"]:\n return subarr\n result = ensure_wrapped_if_datetimelike(result)\n return result\n\n\ndef get_unanimous_names(*indexes: Index) -> tuple[Hashable, ...]:\n \"\"\"\n Return common name if all indices agree, otherwise None (level-by-level).\n\n Parameters\n ----------\n indexes : list of Index objects\n\n Returns\n -------\n list\n A list representing the unanimous 'names' found.\n \"\"\"\n name_tups = [tuple(i.names) for i in indexes]\n name_sets = [{*ns} for ns in zip_longest(*name_tups)]\n names = tuple(ns.pop() if len(ns) == 1 else None for ns in name_sets)\n return names\n\n\ndef unpack_nested_dtype(other: _IndexT) -> _IndexT:\n \"\"\"\n When checking if our dtype is comparable with another, we need\n to unpack CategoricalDtype to look at its categories.dtype.\n\n Parameters\n ----------\n other : Index\n\n Returns\n -------\n Index\n \"\"\"\n dtype = other.dtype\n if is_categorical_dtype(dtype):\n # If there is ever a SparseIndex, this could get dispatched\n # here too.\n return dtype.categories\n return other\n\n\ndef _maybe_try_sort(result, sort):\n if sort is None:\n try:\n result = algos.safe_sort(result)\n except TypeError as err:\n warnings.warn(\n f\"{err}, sort order is undefined for incomparable objects\",\n RuntimeWarning,\n stacklevel=4,\n )\n return result\n"
] | [
[
"numpy.all",
"pandas.core.indexes.multi.MultiIndex",
"pandas.core.indexes.range.RangeIndex",
"pandas.util._decorators.deprecate_nonkeyword_arguments",
"pandas.core.dtypes.common.ensure_object",
"numpy.where",
"pandas.core.dtypes.common.is_interval_dtype",
"pandas.core.reshape.merge.restore_dropped_levels_multijoin",
"pandas.core.common.cast_scalar_indexer",
"pandas.core.dtypes.common.is_iterator",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas._libs.join.outer_join_indexer",
"pandas.core.dtypes.common.is_list_like",
"numpy.delete",
"numpy.array",
"pandas.core.algorithms.take",
"pandas.core.ops.comparison_op",
"pandas.compat.numpy.function.validate_minmax_axis",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.common.is_null_slice",
"pandas.core.dtypes.cast.find_common_type",
"pandas.core.dtypes.missing.isna",
"pandas.core.algorithms.union_with_duplicates",
"pandas.compat.numpy.function.validate_repeat",
"pandas.io.formats.printing.pprint_thing",
"pandas.core.sorting.ensure_key_mapped",
"pandas.Series",
"pandas._libs.tslibs.Timestamp",
"numpy.asarray",
"pandas.core.arrays.datetimes.tz_to_dtype",
"pandas._libs.join.inner_join_indexer",
"pandas.core.dtypes.dtypes.IntervalDtype",
"numpy.concatenate",
"pandas.core.dtypes.common.is_unsigned_integer_dtype",
"pandas.compat.numpy.function.validate_take",
"pandas.core.common.asarray_tuplesafe",
"pandas.io.formats.printing.PrettyDict",
"pandas.io.formats.format.format_array",
"pandas.core.indexes.multi.MultiIndex.from_tuples",
"pandas.core.algorithms.take_nd",
"numpy.putmask",
"pandas.core.algorithms.safe_sort",
"pandas.core.dtypes.common.is_ea_or_datetimelike_dtype",
"pandas.core.dtypes.inference.is_dict_like",
"numpy.ndim",
"pandas.core.construction.ensure_wrapped_if_datetimelike",
"numpy.errstate",
"pandas.core.indexers.deprecate_ndim_indexing",
"pandas.core.dtypes.cast.infer_dtype_from",
"pandas._libs.algos.groupsort_indexer",
"pandas.core.dtypes.common.is_integer",
"pandas.core.algorithms.unique1d",
"numpy.ones",
"pandas.compat.numpy.function.validate_min",
"pandas._libs.lib.infer_dtype",
"pandas.util._decorators.doc",
"pandas._libs.lib.item_from_zerodim",
"numpy.empty",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas._libs.lib.is_scalar",
"pandas.core.indexes.frozen.FrozenList",
"pandas.compat.numpy.function.validate_any",
"pandas.core.array_algos.putmask.validate_putmask",
"pandas._libs.lib.is_all_arraylike",
"pandas.compat.numpy.function.validate_all",
"numpy.hstack",
"pandas.errors.DuplicateLabelError",
"pandas.core.common.not_none",
"pandas.core.dtypes.common.ensure_int64",
"pandas.core.dtypes.concat.concat_compat",
"pandas.core.arrays.numpy_.PandasArray",
"pandas.core.ops.invalid.make_invalid_op",
"pandas._libs.join.left_join_indexer_unique",
"pandas.errors.InvalidIndexError",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.cast.validate_numeric_casting",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.dtypes.common.is_hashable",
"pandas.core.indexes.period._new_PeriodIndex",
"pandas.compat.numpy.function.validate_argmin",
"pandas.core.dtypes.common.needs_i8_conversion",
"pandas.core.ops.comp_method_OBJECT_ARRAY",
"pandas.core.dtypes.missing.is_valid_na_for_dtype",
"pandas.core.algorithms.isin",
"pandas.core.common.is_bool_indexer",
"pandas.core.sorting.nargsort",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.missing.array_equivalent",
"pandas.core.accessor.CachedAccessor",
"pandas.core.dtypes.cast.can_hold_element",
"pandas.io.formats.printing.format_object_summary",
"pandas.compat.numpy.function.validate_argmax",
"numpy.dtype",
"numpy.any",
"pandas.core.reshape.merge.get_join_indexers",
"pandas.core.dtypes.common.is_signed_integer_dtype",
"numpy.arange",
"pandas.core.ops.get_op_result_name",
"pandas.core.arrays.Categorical",
"pandas.core.dtypes.common.is_float",
"pandas._libs.tslibs.tz_compare",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.io.formats.printing.format_object_attrs",
"numpy.abs",
"numpy.intp",
"pandas.compat.numpy.function.validate_max",
"pandas._libs.join.left_join_indexer",
"numpy.sort",
"pandas.core.arrays.datetimes.validate_tz_from_dtype",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.indexes.multi.MultiIndex.from_arrays",
"pandas.core.construction.sanitize_array",
"pandas._libs.lib.maybe_convert_objects",
"pandas.core.common.index_labels_to_array",
"pandas.core.construction.extract_array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.0",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
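(Illustrative aside, not part of the dataset row above.) The pandas module captured in this row turns a single sequence into an `Index` and several sequences into a `MultiIndex` via `MultiIndex.from_arrays`, one of the APIs listed for the row. A minimal sketch of those public calls, assuming only that pandas is installed:

```python
# Minimal sketch, not taken from the repository: the public calls behind the
# ensure_index_from_sequences behaviour shown above — one sequence gives an
# Index, several sequences give a MultiIndex with one level per sequence.
import pandas as pd

single = pd.Index([1, 2, 3], name="name")
multi = pd.MultiIndex.from_arrays([["a", "a"], ["a", "b"]], names=["L1", "L2"])

print(single)  # Index([1, 2, 3], ..., name='name')
print(multi)   # MultiIndex([('a', 'a'), ('a', 'b')], names=['L1', 'L2'])
```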
sisilmehta2000/pytorch | [
"fdc74e2373b242e5e68ac334234ccb41008018ad"
] | [
"test/onnx/test_models.py"
] | [
"from torchvision.models.alexnet import alexnet\nfrom torchvision.models.inception import inception_v3\nfrom torchvision.models.densenet import densenet121\nfrom torchvision.models.resnet import resnet50\nfrom torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn\nfrom torchvision.models.googlenet import googlenet\nfrom torchvision.models.mnasnet import mnasnet1_0\nfrom torchvision.models.mobilenet import mobilenet_v2\nfrom torchvision.models import shufflenet_v2_x1_0\nfrom torchvision.models.segmentation import fcn_resnet101, deeplabv3_resnet101\nfrom torchvision.models.video import r3d_18, mc3_18, r2plus1d_18\n\nfrom model_defs.mnist import MNIST\nfrom model_defs.squeezenet import SqueezeNet\nfrom model_defs.super_resolution import SuperResolutionNet\nfrom model_defs.srresnet import SRResNet\nfrom model_defs.dcgan import _netD, _netG, weights_init, bsz, imgsz, nz\nfrom model_defs.op_test import DummyNet, ConcatNet, PermuteNet, PReluNet, FakeQuantNet\nfrom model_defs.emb_seq import EmbeddingNetwork1, EmbeddingNetwork2\n\nfrom test_pytorch_common import TestCase, run_tests, skipIfNoLapack, skipIfUnsupportedMinOpsetVersion, disableScriptTest\n\nimport torch\nimport torch.onnx\nimport torch.onnx.utils\nfrom torch.autograd import Variable\nfrom torch.onnx import OperatorExportTypes\nfrom torch import quantization\n\nimport unittest\n\nimport caffe2.python.onnx.backend as backend\n\nfrom verify import verify\n\nif torch.cuda.is_available():\n def toC(x):\n return x.cuda()\nelse:\n def toC(x):\n return x\n\nBATCH_SIZE = 2\n\n\nclass TestModels(TestCase):\n keep_initializers_as_inputs = False\n from torch.onnx.symbolic_helper import _export_onnx_opset_version\n opset_version = _export_onnx_opset_version\n\n def exportTest(self, model, inputs, rtol=1e-2, atol=1e-7):\n with torch.onnx.select_model_mode_for_export(model, None):\n graph = torch.onnx.utils._trace(model, inputs, OperatorExportTypes.ONNX)\n torch._C._jit_pass_lint(graph)\n verify(model, inputs, backend, rtol=rtol, atol=atol)\n\n def test_ops(self):\n x = Variable(\n torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0)\n )\n self.exportTest(toC(DummyNet()), toC(x))\n\n def test_prelu(self):\n x = Variable(\n torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0)\n )\n self.exportTest(PReluNet(), x)\n\n @disableScriptTest()\n def test_concat(self):\n input_a = Variable(torch.randn(BATCH_SIZE, 3))\n input_b = Variable(torch.randn(BATCH_SIZE, 3))\n inputs = ((toC(input_a), toC(input_b)), )\n self.exportTest(toC(ConcatNet()), inputs)\n\n def test_permute(self):\n x = Variable(torch.randn(BATCH_SIZE, 3, 10, 12))\n self.exportTest(PermuteNet(), x)\n\n @disableScriptTest()\n def test_embedding_sequential_1(self):\n x = Variable(torch.randint(0, 10, (BATCH_SIZE, 3)))\n self.exportTest(EmbeddingNetwork1(), x)\n\n @disableScriptTest()\n def test_embedding_sequential_2(self):\n x = Variable(torch.randint(0, 10, (BATCH_SIZE, 3)))\n self.exportTest(EmbeddingNetwork2(), x)\n\n @unittest.skip(\"This model takes too much memory\")\n def test_srresnet(self):\n x = Variable(torch.randn(1, 3, 224, 224).fill_(1.0))\n self.exportTest(toC(SRResNet(rescale_factor=4, n_filters=64, n_blocks=8)), toC(x))\n\n @skipIfNoLapack\n def test_super_resolution(self):\n x = Variable(\n torch.randn(BATCH_SIZE, 1, 224, 224).fill_(1.0)\n )\n self.exportTest(toC(SuperResolutionNet(upscale_factor=3)), toC(x), atol=1e-6)\n\n def test_alexnet(self):\n x = Variable(\n torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0)\n )\n self.exportTest(toC(alexnet()), toC(x))\n\n def 
test_mnist(self):\n x = Variable(torch.randn(BATCH_SIZE, 1, 28, 28).fill_(1.0))\n self.exportTest(toC(MNIST()), toC(x))\n\n @unittest.skip(\"This model takes too much memory\")\n def test_vgg16(self):\n # VGG 16-layer model (configuration \"D\")\n x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))\n self.exportTest(toC(vgg16()), toC(x))\n\n @unittest.skip(\"This model takes too much memory\")\n def test_vgg16_bn(self):\n # VGG 16-layer model (configuration \"D\") with batch normalization\n x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))\n self.exportTest(toC(vgg16_bn()), toC(x))\n\n @unittest.skip(\"This model takes too much memory\")\n def test_vgg19(self):\n # VGG 19-layer model (configuration \"E\")\n x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))\n self.exportTest(toC(vgg19()), toC(x))\n\n @unittest.skip(\"This model takes too much memory\")\n def test_vgg19_bn(self):\n # VGG 19-layer model (configuration \"E\") with batch normalization\n x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))\n self.exportTest(toC(vgg19_bn()), toC(x))\n\n def test_resnet(self):\n # ResNet50 model\n x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))\n self.exportTest(toC(resnet50()), toC(x), atol=1e-6)\n\n @disableScriptTest() # None type in outputs\n def test_inception(self):\n x = Variable(torch.randn(BATCH_SIZE, 3, 299, 299))\n self.exportTest(toC(inception_v3()), toC(x))\n\n def test_squeezenet(self):\n # SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and\n # <0.5MB model size\n x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))\n sqnet_v1_0 = SqueezeNet(version=1.1)\n self.exportTest(toC(sqnet_v1_0), toC(x))\n\n # SqueezeNet 1.1 has 2.4x less computation and slightly fewer params\n # than SqueezeNet 1.0, without sacrificing accuracy.\n x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))\n sqnet_v1_1 = SqueezeNet(version=1.1)\n self.exportTest(toC(sqnet_v1_1), toC(x))\n\n def test_densenet(self):\n # Densenet-121 model\n x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))\n self.exportTest(toC(densenet121()), toC(x), rtol=1e-2, atol=1e-5)\n\n @disableScriptTest()\n def test_dcgan_netD(self):\n netD = _netD(1)\n netD.apply(weights_init)\n input = Variable(torch.empty(bsz, 3, imgsz, imgsz).normal_(0, 1))\n self.exportTest(toC(netD), toC(input))\n\n @disableScriptTest()\n def test_dcgan_netG(self):\n netG = _netG(1)\n netG.apply(weights_init)\n input = Variable(torch.empty(bsz, nz, 1, 1).normal_(0, 1))\n self.exportTest(toC(netG), toC(input))\n\n @skipIfUnsupportedMinOpsetVersion(10)\n def test_fake_quant(self):\n x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))\n self.exportTest(toC(FakeQuantNet()), toC(x))\n\n @skipIfUnsupportedMinOpsetVersion(10)\n def test_qat_resnet_pertensor(self):\n # Quantize ResNet50 model\n x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))\n qat_resnet50 = resnet50()\n\n # Use per tensor for weight. 
Per channel support will come with opset 13\n qat_resnet50.qconfig = quantization.QConfig(\n activation=quantization.default_fake_quant, weight=quantization.default_fake_quant)\n quantization.prepare_qat(qat_resnet50, inplace=True)\n qat_resnet50.apply(torch.ao.quantization.enable_observer)\n qat_resnet50.apply(torch.ao.quantization.enable_fake_quant)\n\n _ = qat_resnet50(x)\n for module in qat_resnet50.modules():\n if isinstance(module, quantization.FakeQuantize):\n module.calculate_qparams()\n qat_resnet50.apply(torch.ao.quantization.disable_observer)\n\n self.exportTest(toC(qat_resnet50), toC(x))\n\n @skipIfUnsupportedMinOpsetVersion(13)\n def test_qat_resnet_per_channel(self):\n # Quantize ResNet50 model\n x = torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0)\n qat_resnet50 = resnet50()\n\n qat_resnet50.qconfig = quantization.QConfig(\n activation=quantization.default_fake_quant,\n weight=quantization.default_per_channel_weight_fake_quant)\n quantization.prepare_qat(qat_resnet50, inplace=True)\n qat_resnet50.apply(torch.ao.quantization.enable_observer)\n qat_resnet50.apply(torch.ao.quantization.enable_fake_quant)\n\n _ = qat_resnet50(x)\n for module in qat_resnet50.modules():\n if isinstance(module, quantization.FakeQuantize):\n module.calculate_qparams()\n qat_resnet50.apply(torch.ao.quantization.disable_observer)\n\n self.exportTest(toC(qat_resnet50), toC(x))\n\n @disableScriptTest() # None type in outputs\n def test_googlenet(self):\n x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))\n self.exportTest(toC(googlenet()), toC(x), rtol=1e-3, atol=1e-5)\n\n def test_mnasnet(self):\n x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))\n self.exportTest(toC(mnasnet1_0()), toC(x), rtol=1e-3, atol=1e-5)\n\n def test_mobilenet(self):\n x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))\n self.exportTest(toC(mobilenet_v2()), toC(x), rtol=1e-3, atol=1e-5)\n\n @disableScriptTest() # prim_data\n def test_shufflenet(self):\n x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))\n self.exportTest(toC(shufflenet_v2_x1_0()), toC(x), rtol=1e-3, atol=1e-5)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_fcn(self):\n x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))\n self.exportTest(toC(fcn_resnet101()), toC(x), rtol=1e-3, atol=1e-5)\n\n @skipIfUnsupportedMinOpsetVersion(11)\n def test_deeplab(self):\n x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))\n self.exportTest(toC(deeplabv3_resnet101()), toC(x), rtol=1e-3, atol=1e-5)\n\n def test_r3d_18_video(self):\n x = Variable(torch.randn(1, 3, 4, 112, 112).fill_(1.0))\n self.exportTest(toC(r3d_18()), toC(x), rtol=1e-3, atol=1e-5)\n\n def test_mc3_18_video(self):\n x = Variable(torch.randn(1, 3, 4, 112, 112).fill_(1.0))\n self.exportTest(toC(mc3_18()), toC(x), rtol=1e-3, atol=1e-5)\n\n def test_r2plus1d_18_video(self):\n x = Variable(torch.randn(1, 3, 4, 112, 112).fill_(1.0))\n self.exportTest(toC(r2plus1d_18()), toC(x), rtol=1e-3, atol=1e-5)\n\n\nif __name__ == \"__main__\":\n run_tests()\n"
] | [
[
"torch.randint",
"torch.empty",
"torch.quantization.prepare_qat",
"torch.randn",
"torch.onnx.utils._trace",
"torch._C._jit_pass_lint",
"torch.onnx.select_model_mode_for_export",
"torch.cuda.is_available",
"torch.quantization.QConfig"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
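(Illustrative aside, not part of the dataset row above.) The test file in this row drives torchvision models through the internal `torch.onnx.utils._trace` helper; a minimal sketch of the equivalent public export path is shown below. It assumes torch and torchvision are installed, and `"resnet50.onnx"` is a made-up output filename:

```python
# Minimal sketch, not from the test file: export one of the torchvision models
# exercised above through the public torch.onnx.export entry point.
import torch
from torchvision.models.resnet import resnet50

model = resnet50()
model.eval()
dummy = torch.randn(2, 3, 224, 224)  # BATCH_SIZE = 2, as in the tests

# Trace the model and write an ONNX graph; opset_version is just one plausible choice.
torch.onnx.export(model, dummy, "resnet50.onnx", opset_version=11)
```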
Soldelli/MAD | [
"3c4e85cc792236fec2c94e5095f3bc793bf07529"
] | [
"baselines/VLG-Net/lib/data/__init__.py"
] | [
"import logging\n\nimport torch\n\nfrom lib.utils.comm import get_world_size\nfrom lib.utils.imports import import_file\nfrom . import datasets as D\nfrom .samplers import DistributedSampler\nfrom .collate_batch import BatchCollator\n\ndef build_dataset(dataset_list, dataset_catalog, cfg, is_train=True, is_for_period=True):\n # build specific dataset\n if not isinstance(dataset_list, (list, tuple)):\n raise RuntimeError(\n \"dataset_list should be a list of strings, got {}\".format(\n dataset_list\n )\n )\n datasets = []\n for dataset_name in dataset_list:\n data = dataset_catalog.get(dataset_name)\n factory = getattr(D, data[\"factory\"])\n args = data[\"args\"]\n # clip info\n args[\"num_pre_clips\"] = cfg.INPUT.NUM_PRE_CLIPS\n args[\"num_clips\"] = cfg.MODEL.VLG.NUM_CLIPS\n args[\"pre_query_size\"] = cfg.INPUT.PRE_QUERY_SIZE\n args[\"test_stride\"] = cfg.TEST.STRIDE\n args[\"neg_prob\"] = cfg.MODEL.VLG.NEG_PROB\n args[\"input_stride\"] = cfg.INPUT.STRIDE\n args[\"lang_feat_type\"] = cfg.INPUT.LANG_FEAT\n dataset = factory(**args)\n datasets.append(dataset)\n\n # for testing, return a list of datasets\n if not is_train and not is_for_period:\n return datasets\n\n # for training, concatenate all datasets into a single one\n dataset = datasets[0]\n if len(datasets) > 1:\n dataset = D.ConcatDataset(datasets)\n return [dataset]\n\ndef make_data_sampler(dataset, shuffle, distributed):\n if distributed:\n return DistributedSampler(dataset, shuffle=shuffle)\n if shuffle:\n sampler = torch.utils.data.sampler.RandomSampler(dataset)\n else:\n sampler = torch.utils.data.sampler.SequentialSampler(dataset)\n return sampler\n\ndef make_batch_data_sampler(dataset, sampler, batch_size):\n batch_sampler = torch.utils.data.sampler.BatchSampler(\n sampler, batch_size, drop_last=False\n )\n return batch_sampler\n\ndef make_data_loader(cfg, is_train=True, is_distributed=False, is_for_period=False):\n num_gpus = get_world_size()\n if is_train:\n batch_size = cfg.SOLVER.BATCH_SIZE\n assert (\n batch_size % num_gpus == 0\n ), \"SOLVER.BATCH_SIZE ({}) must be divisible by the number of GPUs ({}) used.\".format(\n batch_size, num_gpus)\n batch_size_per_gpu = batch_size // num_gpus\n shuffle = True\n max_epoch = cfg.SOLVER.MAX_EPOCH\n else:\n batch_size = cfg.TEST.BATCH_SIZE\n assert (\n batch_size % num_gpus == 0\n ), \"TEST.BATCH_SIZE ({}) must be divisible by the number of GPUs ({}) used.\".format(\n batch_size, num_gpus)\n batch_size_per_gpu = batch_size // num_gpus\n shuffle = False if not is_distributed else True\n\n if batch_size_per_gpu > 1:\n logger = logging.getLogger(__name__)\n\n paths_catalog = import_file(\n \"vlg.cfg.paths_catalog\", cfg.PATHS_CATALOG, True\n )\n DatasetCatalog = paths_catalog.DatasetCatalog\n if is_train and not is_for_period:\n dataset_list = cfg.DATASETS.TRAIN \n elif not is_train and is_for_period:\n dataset_list = cfg.DATASETS.VAL\n else:\n dataset_list = cfg.DATASETS.TEST\n datasets = build_dataset(dataset_list, DatasetCatalog, cfg, is_train=is_train, is_for_period=is_for_period)\n\n data_loaders = []\n for dataset in datasets:\n sampler = make_data_sampler(dataset, shuffle, is_distributed)\n batch_sampler = make_batch_data_sampler(dataset, sampler, batch_size_per_gpu)\n data_loader = torch.utils.data.DataLoader(\n dataset,\n num_workers=cfg.DATALOADER.NUM_WORKERS,\n batch_sampler=batch_sampler,\n collate_fn=BatchCollator(),\n )\n data_loaders.append(data_loader)\n if is_train or is_for_period:\n # during training, a single (possibly concatenated) data_loader is returned\n 
assert len(data_loaders) == 1\n return data_loaders[0]\n return data_loaders\n"
] | [
[
"torch.utils.data.sampler.SequentialSampler",
"torch.utils.data.sampler.RandomSampler",
"torch.utils.data.sampler.BatchSampler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
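(Illustrative aside, not part of the dataset row above.) The data-loading module in this row wires a `RandomSampler` into a `BatchSampler` inside `make_data_sampler`/`make_batch_data_sampler`; a minimal sketch of those two calls on a toy map-style dataset (the real code passes the dataset object built by `build_dataset`):

```python
# Minimal sketch of the sampler wiring used above, on a toy dataset.
import torch

toy_dataset = list(range(10))  # anything with __len__ works as a map-style dataset

sampler = torch.utils.data.sampler.RandomSampler(toy_dataset)
batch_sampler = torch.utils.data.sampler.BatchSampler(
    sampler, batch_size=4, drop_last=False
)

for indices in batch_sampler:
    print(indices)  # e.g. [7, 0, 3, 9]; the last batch may be shorter
```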
RichardBruskiewich/RTX | [
"ce126fdc5df6b5b13cc3ac2857ffee23954a0a7f"
] | [
"code/ARAX/ARAXQuery/Overlay/fisher_exact_test.py"
] | [
"#!/bin/env python3\n# This class will perform fisher's exact test to evalutate the significance of connection between\n# a list of source nodes with certain qnode_id in KG and each of the target nodes with specified type.\n\n# relative imports\nimport scipy.stats as stats\nimport traceback\nimport sys\nimport os\nimport multiprocessing\nimport pandas as pd\nfrom datetime import datetime\nfrom neo4j import GraphDatabase, basic_auth\nsys.path.append(os.path.dirname(os.path.abspath(__file__))+\"/../../../\")\nfrom RTXConfiguration import RTXConfiguration\nsys.path.append(os.path.dirname(os.path.abspath(__file__))+\"/../\")\nfrom ARAX_query import ARAXQuery\nsys.path.append(os.path.dirname(os.path.abspath(__file__))+\"/../../UI/OpenAPI/python-flask-server/\")\nfrom swagger_server.models.edge_attribute import EdgeAttribute\nfrom swagger_server.models.edge import Edge\nfrom swagger_server.models.q_edge import QEdge\nsys.path.append(os.path.dirname(os.path.abspath(__file__))+\"/../../reasoningtool/kg-construction/\")\nfrom KGNodeIndex import KGNodeIndex\nimport collections\n\n\nclass ComputeFTEST:\n\n #### Constructor\n def __init__(self, response, message, parameters):\n self.response = response\n self.message = message\n self.parameters = parameters\n\n def fisher_exact_test(self):\n \"\"\"\n Peform the fisher's exact test to expand or decorate the knowledge graph\n :return: response\n \"\"\"\n\n self.response.info(f\"Performing Fisher's Exact Test to add p-value to edge attribute of virtual edge\")\n\n # check the input parameters\n if 'source_qnode_id' not in self.parameters:\n self.response.error(f\"The argument 'source_qnode_id' is required for fisher_exact_test function\")\n return self.response\n else:\n source_qnode_id = self.parameters['source_qnode_id']\n if 'virtual_relation_label' not in self.parameters:\n self.response.error(f\"The argument 'virtual_relation_label' is required for fisher_exact_test function\")\n return self.response\n else:\n virtual_relation_label = str(self.parameters['virtual_relation_label'])\n if 'target_qnode_id' not in self.parameters:\n self.response.error(f\"The argument 'target_qnode_id' is required for fisher_exact_test function\")\n return self.response\n else:\n target_qnode_id = self.parameters['target_qnode_id']\n rel_edge_id = self.parameters['rel_edge_id'] if 'rel_edge_id' in self.parameters else None\n top_n = int(self.parameters['top_n']) if 'top_n' in self.parameters else None\n cutoff = float(self.parameters['cutoff']) if 'cutoff' in self.parameters else None\n\n # initialize some variables\n nodes_info = {}\n edge_expand_kp = []\n source_node_list = []\n target_node_dict = {}\n size_of_target = {}\n source_node_exist = False\n target_node_exist = False\n query_edge_id = set()\n rel_edge_type = set()\n source_node_type = None\n target_node_type = None\n\n ## Check if source_qnode_id and target_qnode_id are in the Query Graph\n try:\n if len(self.message.query_graph.nodes) != 0:\n for node in self.message.query_graph.nodes:\n if node.id == source_qnode_id:\n source_node_exist = True\n source_node_type = node.type\n elif node.id == target_qnode_id:\n target_node_exist = True\n target_node_type = node.type\n else:\n pass\n else:\n self.response.error(f\"There is no query node in QG\")\n return self.response\n except:\n tb = traceback.format_exc()\n error_type, error, _ = sys.exc_info()\n self.response.error(tb, error_code=error_type.__name__)\n self.response.error(f\"Something went wrong with retrieving nodes in message QG\")\n return 
self.response\n\n if source_node_exist:\n if target_node_exist:\n pass\n else:\n self.response.error(f\"No query node with target qnode id {target_qnode_id} detected in QG for Fisher's Exact Test\")\n return self.response\n else:\n self.response.error(f\"No query node with source qnode id {source_qnode_id} detected in QG for Fisher's Exact Test\")\n return self.response\n\n ## Check if there is a query edge connected to both source_qnode_id and target_qnode_id in the Query Graph\n try:\n if len(self.message.query_graph.edges) != 0:\n for edge in self.message.query_graph.edges:\n if edge.source_id == source_qnode_id and edge.target_id == target_qnode_id and edge.relation == None:\n query_edge_id.update([edge.id]) # only actual query edge is added\n elif edge.source_id == target_qnode_id and edge.target_id == source_qnode_id and edge.relation == None:\n query_edge_id.update([edge.id]) # only actual query edge is added\n else:\n continue\n else:\n self.response.error(f\"There is no query edge in Query Graph\")\n return self.response\n except:\n tb = traceback.format_exc()\n error_type, error, _ = sys.exc_info()\n self.response.error(tb, error_code=error_type.__name__)\n self.response.error(f\"Something went wrong with retrieving edges in message QG\")\n return self.response\n\n if len(query_edge_id)!=0:\n if rel_edge_id:\n if rel_edge_id in query_edge_id:\n pass\n else:\n self.response.error(f\"No query edge with qedge id {rel_edge_id} connected to both source node with qnode id {source_qnode_id} and target node with qnode id {target_qnode_id} detected in QG for Fisher's Exact Test\")\n return self.response\n else:\n pass\n else:\n self.response.error(\n f\"No query edge connected to both source node with qnode id {source_qnode_id} and target node with qnode id {target_qnode_id} detected in QG for Fisher's Exact Test\")\n return self.response\n\n ## loop over all nodes in KG and collect their node information\n try:\n count = 0\n for node in self.message.knowledge_graph.nodes:\n nodes_info[node.id] = {'count': count, 'qnode_ids': node.qnode_ids, 'type': node.type[0], 'edge_index': []}\n count = count + 1\n except:\n tb = traceback.format_exc()\n error_type, error, _ = sys.exc_info()\n self.response.error(tb, error_code=error_type.__name__)\n self.response.error(f\"Something went wrong with retrieving nodes in message KG\")\n return self.response\n\n ## loop over all edges in KG and create source node list and target node dict based on source_qnode_id, target_qnode_id as well as rel_edge_id (optional, otherwise all edges are considered)\n try:\n count = 0\n for edge in self.message.knowledge_graph.edges:\n if edge.provided_by != \"ARAX\":\n\n nodes_info[edge.source_id]['edge_index'].append(count)\n nodes_info[edge.target_id]['edge_index'].append(count)\n\n if rel_edge_id:\n if rel_edge_id in edge.qedge_ids:\n if source_qnode_id in nodes_info[edge.source_id]['qnode_ids']:\n edge_expand_kp.append(edge.is_defined_by)\n rel_edge_type.update([edge.type])\n source_node_list.append(edge.source_id)\n if edge.target_id not in target_node_dict.keys():\n target_node_dict[edge.target_id] = {edge.source_id}\n else:\n target_node_dict[edge.target_id].update([edge.source_id])\n else:\n edge_expand_kp.append(edge.is_defined_by)\n rel_edge_type.update([edge.type])\n source_node_list.append(edge.target_id)\n if edge.source_id not in target_node_dict.keys():\n target_node_dict[edge.source_id] = {edge.target_id}\n else:\n target_node_dict[edge.source_id].update([edge.target_id])\n else:\n pass\n else:\n if 
source_qnode_id in nodes_info[edge.source_id]['qnode_ids']:\n if target_qnode_id in nodes_info[edge.target_id]['qnode_ids']:\n edge_expand_kp.append(edge.is_defined_by)\n source_node_list.append(edge.source_id)\n if edge.target_id not in target_node_dict.keys():\n target_node_dict[edge.target_id] = {edge.source_id}\n else:\n target_node_dict[edge.target_id].update([edge.source_id])\n\n else:\n pass\n elif target_qnode_id in nodes_info[edge.source_id]['qnode_ids']:\n if source_qnode_id in nodes_info[edge.target_id]['qnode_ids']:\n edge_expand_kp.append(edge.is_defined_by)\n source_node_list.append(edge.target_id)\n if edge.source_id not in target_node_dict.keys():\n target_node_dict[edge.source_id] = {edge.target_id}\n else:\n target_node_dict[edge.source_id].update([edge.target_id])\n\n else:\n pass\n else:\n pass\n\n else:\n pass\n\n count = count + 1 ## record edge position in message.knowledge_graph\n\n except:\n tb = traceback.format_exc()\n error_type, error, _ = sys.exc_info()\n self.response.error(tb, error_code=error_type.__name__)\n self.response.error(f\"Something went wrong with retrieving edges in message KG\")\n return self.response\n\n source_node_list = list(set(source_node_list)) ## remove the duplicate source node id\n\n ## check if there is no source node in message KG\n if len(source_node_list) == 0:\n self.response.error(f\"No source node found in message KG for Fisher's Exact Test\")\n return self.response\n\n ## check if there is no target node in message KG\n if len(target_node_dict) == 0:\n self.response.error(f\"No target node found in message KG for Fisher's Exact Test\")\n return self.response\n\n ## check if source node has more than one type. If so, throw an error\n if source_node_type is None:\n self.response.error(f\"Source node with qnode id {source_qnode_id} was set to None in Query Graph. Please specify the node type\")\n return self.response\n else:\n pass\n\n ## check if target node has more than one type. If so, throw an error\n if target_node_type is None:\n self.response.error(f\"Target node with qnode id {target_qnode_id} was set to None in Query Graph. Please specify the node type\")\n return self.response\n else:\n pass\n\n ##check how many kps were used in message KG. 
If more than one, the one with the max number of edges connnected to both source nodes and target nodes was used\n if len(collections.Counter(edge_expand_kp))==1:\n kp = edge_expand_kp[0]\n else:\n occurrences = collections.Counter(edge_expand_kp)\n max_index = max([(value, index) for index, value in enumerate(occurrences.values())])[1] # if there are more than one kp having the maximum number of edges, then the last one based on alphabetical order will be chosen.\n kp = list(occurrences.keys())[max_index]\n self.response.debug(f\"{occurrences}\")\n self.response.warning(f\"More than one knowledge provider was detected to be used for expanding the edges connected to both source node with qnode id {source_qnode_id} and target node with qnode id {target_qnode_id}\")\n self.response.warning(f\"The knowledge provider {kp} was used to calculate Fisher's exact test because it has the maximum number of edges both source node with qnode id {source_qnode_id} and target node with qnode id {target_qnode_id}\")\n\n ## Print out some information used to calculate FET\n if len(source_node_list) == 1:\n self.response.debug(f\"{len(source_node_list)} source node with qnode id {source_qnode_id} and node type {source_node_type} was found in message KG and used to calculate Fisher's Exact Test\")\n else:\n self.response.debug(f\"{len(source_node_list)} source nodes with qnode id {source_qnode_id} and node type {source_node_type} was found in message KG and used to calculate Fisher's Exact Test\")\n if len(target_node_dict) == 1:\n self.response.debug(f\"{len(target_node_dict)} target node with qnode id {target_qnode_id} and node type {target_node_type} was found in message KG and used to calculate Fisher's Exact Test\")\n else:\n self.response.debug(f\"{len(target_node_dict)} target nodes with qnode id {target_qnode_id} and node type {target_node_type} was found in message KG and used to calculate Fisher's Exact Test\")\n\n\n # find all nodes with the same type of 'source_qnode_id' nodes in specified KP ('ARAX/KG1','ARAX/KG2','BTE') that are adjacent to target nodes\n use_parallel = False\n\n if not use_parallel:\n # query adjacent node in one DSL command by providing a list of query nodes to add_qnode()\n if rel_edge_id:\n if len(rel_edge_type) == 1: # if the edge with rel_edge_id has only type, we use this rel_edge_type to find all source nodes in KP\n self.response.debug(f\"{kp} and edge relation type {list(rel_edge_type)[0]} were used to calculate total adjacent nodes in Fisher's Exact Test\")\n result = self.query_size_of_adjacent_nodes(node_curie=list(target_node_dict.keys()), source_type=target_node_type, adjacent_type=source_node_type, kp = kp, rel_type=list(rel_edge_type)[0], use_cypher_command=False)\n else: # if the edge with rel_edge_id has more than one type, we ignore the edge type and use all types to find all source nodes in KP\n self.response.warning(f\"The edges with specified qedge id {rel_edge_id} have more than one type, we ignore the edge type and use all types to calculate Fisher's Exact Test\")\n self.response.debug(f\"{kp} was used to calculate total adjacent nodes in Fisher's Exact Test\")\n result = self.query_size_of_adjacent_nodes(node_curie=list(target_node_dict.keys()), source_type=target_node_type, adjacent_type=source_node_type, kp=kp, rel_type=None, use_cypher_command=False)\n else: # if no rel_edge_id is specified, we ignore the edge type and use all types to find all source nodes in KP\n self.response.debug(f\"{kp} was used to calculate total adjacent nodes in Fisher's 
Exact Test\")\n result = self.query_size_of_adjacent_nodes(node_curie=list(target_node_dict.keys()), source_type=target_node_type, adjacent_type=source_node_type, kp=kp, rel_type=None, use_cypher_command=False)\n\n if result is None:\n return self.response ## Something wrong happened for querying the adjacent nodes\n else:\n res, removed_nodes = result\n if len(removed_nodes)==0:\n size_of_target = res\n else:\n if len(removed_nodes) == 1:\n self.response.warning(f\"One adjacent node which is {removed_nodes[0]} can't find its neighbors. This node will be ignored for FET calculation.\")\n else:\n self.response.warning(f\"{len(removed_nodes)} adjacent nodes which are {removed_nodes} can't find its neighbors. These nodes will be ignored for FET calculation.\")\n for node in removed_nodes:\n del target_node_dict[node]\n size_of_target = res\n else:\n # query adjacent node for query nodes one by one in parallel\n if rel_edge_id:\n if len(rel_edge_type) == 1: # if the edge with rel_edge_id has only type, we use this rel_edge_type to find all source nodes in KP\n self.response.debug(f\"{kp} and edge relation type {list(rel_edge_type)[0]} were used to calculate total adjacent nodes in Fisher's Exact Test\")\n parameter_list = [(node, target_node_type, source_node_type, kp, list(rel_edge_type)[0]) for node in list(target_node_dict.keys())]\n else: # if the edge with rel_edge_id has more than one type, we ignore the edge type and use all types to find all source nodes in KP\n self.response.warning(f\"The edges with specified qedge id {rel_edge_id} have more than one type, we ignore the edge type and use all types to calculate Fisher's Exact Test\")\n self.response.debug(f\"{kp} was used to calculate total adjacent nodes in Fisher's Exact Test\")\n parameter_list = [(node, target_node_type, source_node_type, kp, None) for node in list(target_node_dict.keys())]\n else: # if no rel_edge_id is specified, we ignore the edge type and use all types to find all source nodes in KP\n self.response.debug(f\"{kp} was used to calculate total adjacent nodes in Fisher's Exact Test\")\n parameter_list = [(node, target_node_type, source_node_type, kp, None) for node in list(target_node_dict.keys())]\n\n ## get the count of all nodes with the type of 'source_qnode_id' nodes in KP for each target node in parallel\n try:\n with multiprocessing.Pool() as executor:\n target_count_res = [elem for elem in executor.map(self._query_size_of_adjacent_nodes_parallel, parameter_list)]\n except:\n tb = traceback.format_exc()\n error_type, error, _ = sys.exc_info()\n self.response.error(tb, error_code=error_type.__name__)\n self.response.error(f\"Something went wrong with querying adjacent nodes in parallel\")\n return self.response\n\n if any([type(elem) is list for elem in target_count_res]):\n for msg in [elem2 for elem1 in target_count_res if type(elem1) is list for elem2 in elem1]:\n if type(msg) is tuple:\n self.response.error(msg[0], error_code=msg[1])\n else:\n self.response.error(msg)\n return self.response ## Something wrong happened for querying the adjacent nodes\n else:\n for index in range(len(target_node_dict)):\n node = list(target_node_dict.keys())[index]\n size_of_target[node] = target_count_res[index]\n\n if len(target_node_dict) != 0:\n ## Based on KP detected in message KG, find the total number of node with the same type of source node\n if kp=='ARAX/KG1':\n size_of_total = self.size_of_given_type_in_KP(node_type=source_node_type,use_cypher_command=True, kg='KG1') ## Try cypher query first\n if size_of_total 
is not None:\n if size_of_total != 0:\n self.response.debug(f\"ARAX/KG1 and cypher query were used to calculate total number of node with the same type of source node in Fisher's Exact Test\")\n self.response.debug(f\"Total {size_of_total} nodes with node type {source_node_type} was found in ARAX/KG1\")\n pass\n else:\n size_of_total = self.size_of_given_type_in_KP(node_type=source_node_type, use_cypher_command=False, kg='KG1') ## If cypher query fails, then try kgNodeIndex\n if size_of_total==0:\n self.response.error(f\"KG1 has 0 node with the same type of source node with qnode id {source_qnode_id}\")\n return self.response\n else:\n self.response.debug(f\"ARAX/KG1 and kgNodeIndex were used to calculate total number of node with the same type of source node in Fisher's Exact Test\")\n self.response.debug(f\"Total {size_of_total} nodes with node type {source_node_type} was found in ARAX/KG1\")\n pass\n else:\n return self.response ## Something wrong happened for querying total number of node with the same type of source node\n\n elif kp=='ARAX/KG2':\n ## check KG1 first as KG2 might have many duplicates. If KG1 is 0, then check KG2\n size_of_total = self.size_of_given_type_in_KP(node_type=source_node_type, use_cypher_command=True, kg='KG1') ## Try cypher query first\n if size_of_total is not None:\n if size_of_total!=0:\n self.response.warning(f\"Although ARAX/KG2 was found to have the maximum number of edges connected to both {source_qnode_id} and {target_qnode_id}, ARAX/KG1 and cypher query were used to find the total number of nodes with the same type of source node with qnode id {source_qnode_id} as KG2 might have many duplicates\")\n self.response.debug(f\"Total {size_of_total} nodes with node type {source_node_type} was found in ARAX/KG1\")\n pass\n else:\n size_of_total = self.size_of_given_type_in_KP(node_type=source_node_type, use_cypher_command=False, kg='KG1') ## If cypher query fails, then try kgNodeIndex\n if size_of_total is not None:\n if size_of_total != 0:\n self.response.warning(f\"Although ARAX/KG2 was found to have the maximum number of edges connected to both {source_qnode_id} and {target_qnode_id}, ARAX/KG1 and kgNodeIndex were used to find the total number of nodes with the same type of source node with qnode id {source_qnode_id} as KG2 might have many duplicates\")\n self.response.debug(f\"Total {size_of_total} nodes with node type {source_node_type} was found in ARAX/KG1\")\n pass\n else:\n size_of_total = self.size_of_given_type_in_KP(node_type=source_node_type, use_cypher_command=False, kg='KG2')\n if size_of_total is None:\n return self.response ## Something wrong happened for querying total number of node with the same type of source node\n elif size_of_total==0:\n self.response.error(f\"KG2 has 0 node with the same type of source node with qnode id {source_qnode_id}\")\n return self.response\n else:\n self.response.debug(f\"ARAX/KG2 and kgNodeIndex were used to calculate total number of node with the same type of source node in Fisher's Exact Test\")\n self.response.debug(f\"Total {size_of_total} nodes with node type {source_node_type} was found in ARAX/KG2\")\n pass\n else:\n return self.response ## Something wrong happened for querying total number of node with the same type of source node\n else:\n return self.response ## Something wrong happened for querying total number of node with the same type of source node\n else:\n self.response.error(f\"Only KG1 or KG2 is allowable to calculate the Fisher's exact test temporally\")\n return self.response\n\n 
size_of_query_sample = len(source_node_list)\n\n\n self.response.debug(f\"Computing Fisher's Exact Test P-value\")\n # calculate FET p-value for each target node in parallel\n parameter_list = [(node, len(target_node_dict[node]), size_of_target[node]-len(target_node_dict[node]), size_of_query_sample - len(target_node_dict[node]), (size_of_total - size_of_target[node]) - (size_of_query_sample - len(target_node_dict[node]))) for node in target_node_dict]\n\n try:\n with multiprocessing.Pool() as executor:\n FETpvalue_list = [elem for elem in executor.map(self._calculate_FET_pvalue_parallel, parameter_list)]\n except:\n tb = traceback.format_exc()\n error_type, error, _ = sys.exc_info()\n self.response.error(tb, error_code=error_type.__name__)\n self.response.error(f\"Something went wrong with computing Fisher's Exact Test P-value\")\n return self.response\n\n if any([type(elem) is list for elem in FETpvalue_list]):\n for msg in [elem2 for elem1 in FETpvalue_list if type(elem1) is list for elem2 in elem1]:\n if type(msg) is tuple:\n self.response.error(msg[0], error_code=msg[1])\n else:\n self.response.error(msg)\n return self.response\n else:\n output = dict(FETpvalue_list)\n\n # check if the results need to be filtered\n output = dict(sorted(output.items(), key=lambda x: x[1]))\n if cutoff:\n output = dict(filter(lambda x: x[1] < cutoff, output.items()))\n else:\n pass\n if top_n:\n output = dict(list(output.items())[:top_n])\n else:\n pass\n\n # add the virtual edge with FET result to message KG\n self.response.debug(f\"Adding virtual edge with FET result to message KG\")\n\n virtual_edge_list = [Edge(id=f\"{value[0]}_{index}\",\n type='has_fisher_exact_test_p-value_with',\n relation=value[0],\n source_id=value[2],\n target_id=value[3],\n is_defined_by=\"ARAX\",\n defined_datetime=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n provided_by=\"ARAX\",\n confidence=None,\n weight=None,\n edge_attributes=[EdgeAttribute(type=\"EDAM:data_1669\", name=\"fisher_exact_test_p-value\", value=str(value[1]), url=None)],\n qedge_ids=[value[0]]) for index, value in enumerate([(virtual_relation_label, output[adj], node, adj) for adj in target_node_dict if adj in output.keys() for node in target_node_dict[adj]], 1)]\n\n self.message.knowledge_graph.edges.extend(virtual_edge_list)\n\n count = len(virtual_edge_list)\n\n self.response.debug(f\"{count} new virtual edges were added to message KG\")\n\n # add the virtual edge to message QG\n if count > 0:\n self.response.debug(f\"Adding virtual edge to message QG\")\n edge_type = \"has_fisher_exact_test_p-value_with\"\n q_edge = QEdge(id=virtual_relation_label, type=edge_type, relation=virtual_relation_label,\n source_id=source_qnode_id, target_id=target_qnode_id)\n self.message.query_graph.edges.append(q_edge)\n self.response.debug(f\"One virtual edge was added to message QG\")\n\n return self.response\n\n\n def query_size_of_adjacent_nodes(self, node_curie, source_type, adjacent_type, kp=\"ARAX/KG1\", rel_type=None, use_cypher_command=True):\n \"\"\"\n Query adjacent nodes of a given source node based on adjacent node type.\n :param node_curie: (required) the curie id of query node. It accepts both single curie id or curie id list eg. \"UniProtKB:P14136\" or ['UniProtKB:P02675', 'UniProtKB:P01903', 'UniProtKB:P09601', 'UniProtKB:Q02878']\n :param source_type: (required) the type of source node, eg. \"gene\"\n :param adjacent_type: (required) the type of adjacent node, eg. \"biological_process\"\n :param kp: (optional) the knowledge provider to use, eg. 
\"ARAX/KG1\"(default)\n :param rel_type: (optional) edge type to consider, eg. \"involved_in\"\n :param use_cypher_command: Boolean (True or False). If True, it used cypher command to the size of query adjacent nodes(default:True)\n :return a tuple with a dict containing the number of adjacent nodes for the query node and a list of removed nodes\n \"\"\"\n\n res = None\n\n if use_cypher_command is True:\n\n #create the RTXConfiguration object\n rtxConfig = RTXConfiguration()\n # Connection information for the neo4j server, populated with orangeboard\n if kp==\"ARAX/KG1\":\n driver = GraphDatabase.driver(rtxConfig.neo4j_bolt, auth=basic_auth(rtxConfig.neo4j_username, rtxConfig.neo4j_password))\n elif kp==\"ARAX/KG2\":\n rtxConfig.live = \"KG2\"\n driver = GraphDatabase.driver(rtxConfig.neo4j_bolt, auth=basic_auth(rtxConfig.neo4j_username, rtxConfig.neo4j_password))\n else:\n self.response.error(f\"The 'kp' argument of 'query_size_of_adjacent_nodes' method within FET only accepts 'ARAX/KG1' or 'ARAX/KG2' for cypher query right now\")\n return res\n\n session = driver.session()\n\n # check if node_curie is a str or a list\n if type(node_curie) is str:\n if not rel_type:\n query = f\"match (n00:{adjacent_type})-[]-(n01) where n01.id='{node_curie}' with collect(distinct n00.id) as nodes_n00, n01 as node_n01 return node_n01.id as curie, size(nodes_n00) as count\"\n else:\n query = f\"match (n00:{adjacent_type})-[:{rel_type}]-(n01) where n01.id='{node_curie}' with collect(distinct n00.id) as nodes_n00, n01 as node_n01 return node_n01.id as curie, size(nodes_n00) as count\"\n elif type(node_curie) is list:\n if not rel_type:\n query = f\"match (n00:{adjacent_type})-[]-(n01) where n01.id in {node_curie} with collect(distinct n00.id) as nodes_n00, n01 as node_n01 return node_n01.id as curie, size(nodes_n00) as count\"\n else:\n query = f\"match (n00:{adjacent_type})-[:{rel_type}]-(n01) where n01.id in {node_curie} with collect(distinct n00.id) as nodes_n00, n01 as node_n01 return node_n01.id as curie, size(nodes_n00) as count\"\n else:\n self.response.error(\"The 'node_curie' argument of 'query_size_of_adjacent_nodes' method within FET only accepts str or list\")\n return res\n\n try:\n cypher_res = session.run(query)\n result = pd.DataFrame(cypher_res.data())\n if result.shape[0] == 0:\n self.response.error(f\"Fail to query adjacent nodes from {kp} for {node_curie}\")\n return res\n else:\n res_dict = dict()\n has_error = False\n if type(node_curie) is str:\n res_dict[node_curie] = result['count'][0]\n return res_dict\n else:\n for node in node_curie:\n if node in list(result['curie']):\n row_ind = list(result['curie']).index(node)\n res_dict[node] = result.iloc[row_ind, 1]\n else:\n self.response.error(f\"Fail to query adjacent nodes from {kp} for {node}\")\n has_error = True\n\n if len(res_dict)==0:\n return res\n elif has_error is True:\n return res\n else:\n return res_dict\n except:\n tb = traceback.format_exc()\n error_type, error, _ = sys.exc_info()\n self.response.error(tb, error_code=error_type.__name__)\n self.response.error(f\"Something went wrong with querying adjacent nodes from {kp} for {node_curie}\")\n return res\n\n else:\n\n # construct the instance of ARAXQuery class\n araxq = ARAXQuery()\n\n # check if node_curie is a str or a list\n if type(node_curie) is str:\n query_node_curie = node_curie\n elif type(node_curie) is list:\n node_id_list_str = \"[\"\n for index in range(len(node_curie)):\n node = node_curie[index]\n if index + 1 == len(node_curie):\n node_id_list_str = 
node_id_list_str + str(node) + \"]\"\n else:\n node_id_list_str = node_id_list_str + str(node) + \",\"\n\n query_node_curie = node_id_list_str\n else:\n self.response.error(\n \"The 'node_curie' argument of 'query_size_of_adjacent_nodes' method within FET only accepts str or list\")\n return res\n\n # call the method of ARAXQuery class to query adjacent node\n if rel_type:\n query = {\"previous_message_processing_plan\": {\"processing_actions\": [\n \"create_message\",\n f\"add_qnode(curie={query_node_curie}, type={source_type}, id=FET_n00)\",\n f\"add_qnode(type={adjacent_type}, id=FET_n01)\",\n f\"add_qedge(source_id=FET_n00, target_id=FET_n01, id=FET_e00, type={rel_type})\",\n f\"expand(edge_id=FET_e00,kp={kp})\",\n #\"resultify()\",\n \"return(message=true, store=false)\"\n ]}}\n else:\n query = {\"previous_message_processing_plan\": {\"processing_actions\": [\n \"create_message\",\n f\"add_qnode(curie={query_node_curie}, type={source_type}, id=FET_n00)\",\n f\"add_qnode(type={adjacent_type}, id=FET_n01)\",\n f\"add_qedge(source_id=FET_n00, target_id=FET_n01, id=FET_e00)\",\n f\"expand(edge_id=FET_e00,kp={kp})\",\n #\"resultify()\",\n \"return(message=true, store=false)\"\n ]}}\n\n try:\n result = araxq.query(query)\n if result.status != 'OK':\n self.response.error(f\"Fail to query adjacent nodes from {kp} for {node_curie}\")\n return res\n else:\n res_dict = dict()\n message = araxq.message\n if type(node_curie) is str:\n tmplist = set([edge.id for edge in message.knowledge_graph.edges if edge.source_id == node_curie or edge.target_id == node_curie]) ## edge has no direction\n if len(tmplist) == 0:\n self.response.warning(f\"Fail to query adjacent nodes from {kp} for {node_curie} in FET probably because expander ignores node type. For more details, please see issue897.\")\n return (res_dict,[node_curie])\n res_dict[node_curie] = len(tmplist)\n return (res_dict,[])\n else:\n check_empty = False\n failure_node = list()\n for node in node_curie:\n tmplist = set([edge.id for edge in message.knowledge_graph.edges if edge.source_id == node or edge.target_id == node]) ## edge has no direction\n if len(tmplist) == 0:\n self.response.warning(f\"Fail to query adjacent nodes from {kp} for {node} in FET probably because expander ignores node type. For more details, please see issue897.\")\n failure_node.append(node)\n check_empty = True\n continue\n res_dict[node] = len(tmplist)\n\n if check_empty is True:\n return (res_dict,failure_node)\n else:\n return (res_dict,failure_node)\n except:\n tb = traceback.format_exc()\n error_type, error, _ = sys.exc_info()\n self.response.error(tb, error_code=error_type.__name__)\n self.response.error(f\"Something went wrong with querying adjacent nodes from {kp} for {node_curie}\")\n return res\n\n\n def _query_size_of_adjacent_nodes_parallel(self, this):\n # This method is expected to be run within this class\n \"\"\"\n Query the size of adjacent nodes of a given source node based on adjacent node type in parallel.\n :param this is a list containing five sub-arguments below since this function is exectued in parallel.\n :return the number of adjacent nodes for the query node\n \"\"\"\n #:sub-argument node_curie: (required) the curie id of query node, eg. \"UniProtKB:P14136\"\n #:sub-argument source_type: (required) the type of source node, eg. \"gene\"\n #:sub-argument adjacent_type: (required) the type of adjacent node, eg. \"biological_process\"\n #:sub-argument kp: (optional) the knowledge provider to use, eg. 
\"ARAX/KG1\"(default)\n #:sub-argument rel_type: (optional) edge type to consider, eg. \"involved_in\"\n\n error_message = []\n if len(this) == 5:\n # this contains four arguments and assign them to different variables\n node_curie, source_type, adjacent_type, kp, rel_type = this\n elif len(this) == 4:\n node_curie, source_type, adjacent_type, kp = this\n rel_type = None\n elif len(this) == 3:\n node_curie, source_type, adjacent_type = this\n kp = \"ARAX/KG1\"\n rel_type = None\n else:\n error_message.append(\"The '_query_size_of_adjacent_nodes_parallel' method within FET only accepts four arguments: node_curie, adjacent_type, kp, rel_type\")\n return error_message\n\n # construct the instance of ARAXQuery class\n araxq = ARAXQuery()\n\n # check if node_curie is a str\n if type(node_curie) is str:\n pass\n else:\n error_message.append(\"The 'node_curie' argument of '_query_size_of_adjacent_nodes_parallel' method within FET only accepts str\")\n return error_message\n\n # call the method of ARAXQuery class to query adjacent node\n\n if rel_type:\n query = {\"previous_message_processing_plan\": {\"processing_actions\": [\n \"create_message\",\n f\"add_qnode(curie={node_curie}, tyep={source_type}, id=FET_n00)\",\n f\"add_qnode(type={adjacent_type}, id=FET_n01)\",\n f\"add_qedge(source_id=FET_n00, target_id=FET_n01, id=FET_e00, type={rel_type})\",\n f\"expand(edge_id=FET_e00,kp={kp})\",\n #\"resultify()\",\n \"return(message=true, store=false)\"\n ]}}\n else:\n query = {\"previous_message_processing_plan\": {\"processing_actions\": [\n \"create_message\",\n f\"add_qnode(curie={node_curie}, tyep={source_type}, id=FET_n00)\",\n f\"add_qnode(type={adjacent_type}, id=FET_n01)\",\n f\"add_qedge(source_id=FET_n00, target_id=FET_n01, id=FET_e00)\",\n f\"expand(edge_id=FET_e00,kp={kp})\",\n #\"resultify()\",\n \"return(message=true, store=false)\"\n ]}}\n\n try:\n result = araxq.query(query)\n if result.status != 'OK':\n error_message.append(f\"Fail to query adjacent nodes from {kp} for {node_curie}\")\n return error_message\n else:\n message = araxq.message\n tmplist = set([edge.id for edge in message.knowledge_graph.edges if edge.source_id == node_curie or edge.target_id == node_curie]) ## edge has no direction\n if len(tmplist) == 0:\n error_message.append(f\"Fail to query adjacent nodes from {kp} for {node_curie}\")\n return error_message\n res = len(tmplist)\n return res\n except:\n tb = traceback.format_exc()\n error_type, error, _ = sys.exc_info()\n error_message.append((tb, error_type.__name__))\n error_message.append(f\"Something went wrong with querying adjacent nodes from {kp} for {node_curie}\")\n return error_message\n\n def size_of_given_type_in_KP(self, node_type, use_cypher_command=True, kg='KG1'):\n \"\"\"\n find all nodes of a certain type in KP\n :param node_type: the query node type\n :param use_cypher_command: Boolean (True or False). If True, it used cypher command to query all nodes otherwise used kgNodeIndex\n :param kg: only allowed for choosing 'KG1' or 'KG2' now. 
Will extend to BTE later\n \"\"\"\n # TODO: extend this to KG2, BTE, and other KP's we know of\n\n size_of_total = None\n\n if kg == 'KG1' or kg == 'KG2':\n pass\n else:\n self.response.error(f\"Only KG1 or KG2 is allowable to calculate the Fisher's exact test temporally\")\n return size_of_total\n\n if kg == 'KG1':\n if use_cypher_command:\n rtxConfig = RTXConfiguration()\n # Connection information for the neo4j server, populated with orangeboard\n driver = GraphDatabase.driver(rtxConfig.neo4j_bolt,\n auth=basic_auth(rtxConfig.neo4j_username, rtxConfig.neo4j_password))\n session = driver.session()\n\n query = \"MATCH (n:%s) return count(distinct n)\" % (node_type)\n res = session.run(query)\n size_of_total = res.single()[\"count(distinct n)\"]\n return size_of_total\n else:\n kgNodeIndex = KGNodeIndex()\n size_of_total = kgNodeIndex.get_total_entity_count(node_type, kg_name=kg)\n return size_of_total\n else:\n if use_cypher_command:\n self.response.warning(f\"KG2 is only allowable to use kgNodeIndex to query the total number of node with query type. It was set to use kgNodeIndex\")\n kgNodeIndex = KGNodeIndex()\n size_of_total = kgNodeIndex.get_total_entity_count(node_type, kg_name=kg)\n return size_of_total\n\n else:\n kgNodeIndex = KGNodeIndex()\n size_of_total = kgNodeIndex.get_total_entity_count(node_type, kg_name=kg)\n return size_of_total\n\n def _calculate_FET_pvalue_parallel(self, this):\n # *Note*: The arugment 'this' is a list containing five sub-arguments below since this function is exectued in parallel.\n # This method is expected to be run within this class\n \"\"\"\n Calculate Fisher Exact Test' p-value.\n *param this is a list containing five sub-arguments below since this function is exectued in parallel.\n :return a list of FET p-values\n \"\"\"\n #:sub-argument node: (required) the curie name of node, eg. \"UniProtKB:Q13330\"\n #:sub-argument a: (required) count of in_sample and in_pathway\n #:sub-argument b: (required) count of not_in_sample but in_pathway\n #:sub-argument c: (required) count of in_sample but not in_pathway\n #:sub-argument d: (required) count of not in_sample and not in_pathway\n\n # this should contain five variables and assign them to different variables\n node, a, b, c, d = this\n error_message = []\n\n try:\n contingency_table = [[a, b],[c,d]]\n pvalue = stats.fisher_exact(contingency_table)[1]\n return (node, pvalue)\n except:\n tb = traceback.format_exc()\n error_type, error, _ = sys.exc_info()\n error_message.append((tb, error_type.__name__))\n error_message.append(f\"Something went wrong for target node {node} to calculate FET p-value\")\n return error_message\n"
] | [
[
"scipy.stats.fisher_exact"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
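(Illustrative aside, not part of the dataset row above.) The ARAX module in this row reduces each target node to a 2x2 contingency table — counts a, b, c, d for in-sample/connected combinations — and hands it to `scipy.stats.fisher_exact` in `_calculate_FET_pvalue_parallel`. A worked sketch with made-up counts:

```python
# Worked sketch with made-up counts: a = in sample and connected to the target,
# b = connected but outside the sample, c = in sample but not connected,
# d = neither. Mirrors the [1]-indexing used in _calculate_FET_pvalue_parallel
# to pull out the p-value.
import scipy.stats as stats

a, b, c, d = 8, 2, 12, 978
contingency_table = [[a, b], [c, d]]
pvalue = stats.fisher_exact(contingency_table)[1]
print(pvalue)
```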
xinetzone/self-driving-dash | [
"66860074099c90e620e21bc30b26efd366cc8f1a"
] | [
"callbacks/record.py"
] | [
"import datetime\nimport pandas as pd\nfrom dash.dependencies import Input, Output\nfrom dash.exceptions import PreventUpdate\n\nfrom utils.client import plot_frame, Canvas\nfrom tools.frame import Shape\n\nfrom utils.client import frame2pandas, simulate_shape\nfrom app import app\nfrom layouts.record import layout_options\n\nframes = []\n\[email protected](Output('live-update-text', 'value'),\n Input('interval-frame', 'n_intervals'))\ndef update_metrics(frame_id):\n '''⏲计时,更新时间和帧数'''\n now = datetime.datetime.now()\n now = now.strftime('%H:%M:%S')\n return f'{now} @ {frame_id}'\n\n\[email protected](Output('memory-frame', 'data'),\n Output('memory-frames', 'data'),\n Input('interval-frame', 'n_intervals'))\ndef update_frame(frame_id):\n choice_class = layout_options.class_names + ['other']\n # shapes = [simulate_shape(frame_id, k, class_name)\n # for k, class_name in enumerate(choice_class)]\n shapes = []\n k = 0\n for _ in range(10):\n for class_name in choice_class:\n shape = simulate_shape(frame_id, k, class_name)\n shapes.append(shape)\n # print(shape)\n k += 1\n \n frame = [shape.asdict() for shape in shapes]\n frame = frame2pandas(frame)\n frame.to_hdf(layout_options.save_path, key=f'frame_{frame_id}', mode='a')\n frame_dict = frame.to_dict('records')\n global frames\n frames.extend(frame_dict)\n if len(frames) > 500:\n frames = frames[-500:]\n return frame_dict, frames[1:]\n\n\[email protected](Output('view-graph', 'figure'),\n Input('memory-frame', 'data'))\ndef update_view_graph_frame(frame):\n '''更新鸟瞰图'''\n canvas = Canvas()\n shapes = [Shape(**shape) for shape in frame]\n shapes = [canvas.to_shape(*shape.view) for shape in shapes]\n canvas.view.update_layout(shapes=shapes)\n canvas.update_base()\n return canvas.view\n\n\[email protected](Output('memory-output', 'data'),\n Input('memory-frames', 'data'),\n Input('memory-class', 'value'))\ndef store_frame(frames, class_selected):\n df = pd.DataFrame.from_records(frames)\n if frames == None:\n raise PreventUpdate\n filtered = df[df['class_name'] == class_selected]\n return filtered.to_dict('records')\n\n\[email protected](Output('memory-table', 'data'),\n Input('memory-output', 'data'))\ndef on_data_set_table(data):\n if data is None:\n raise PreventUpdate\n return data\n\n\[email protected](Output('feature-graph', 'figure'),\n Input('memory-output', 'data'))\ndef on_data_set_graph(data):\n if data is None:\n raise PreventUpdate\n filtered = pd.DataFrame.from_records(data)\n fig = plot_frame(filtered)\n return fig\n"
] | [
[
"pandas.DataFrame.from_records"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
hbin0701/c3vqg-official | [
"2600940d9a3255f9f4b616313aed968b17d71432"
] | [
"utils/vocab.py"
] | [
"\"\"\"Creates a vocabulary using iq_dataset for the vqa dataset.\n\"\"\"\n\nfrom collections import Counter\nfrom train_utils import Vocabulary\n\nimport argparse\nimport json\nimport logging\nimport nltk\nimport numpy as np\nimport re\n\n\ndef process_text(text, vocab, max_length=20):\n \"\"\"Converts text into a list of tokens surrounded by <start> and <end>.\n\n Args:\n text: String text.\n vocab: The vocabulary instance.\n max_length: The max allowed length.\n\n Returns:\n output: An numpy array with tokenized text.\n length: The length of the text.\n \"\"\"\n tokens = tokenize(text.lower().strip())\n output = []\n output.append(vocab(vocab.SYM_SOQ)) # <start>\n output.extend([vocab(token) for token in tokens])\n output.append(vocab(vocab.SYM_EOS)) # <end>\n length = min(max_length, len(output))\n return np.array(output[:length]), length\n\n\ndef load_vocab(vocab_path):\n \"\"\"Load Vocabulary object from a pickle file.\n\n Args:\n vocab_path: The location of the vocab pickle file.\n\n Returns:\n A Vocabulary object.\n \"\"\"\n vocab = Vocabulary()\n vocab.load(vocab_path)\n return vocab\n\n\ndef tokenize(sentence):\n \"\"\"Tokenizes a sentence into words.\n\n Args:\n sentence: A string of words.\n\n Returns:\n A list of words.\n \"\"\"\n if type(sentence) == bytes:\n sentence = sentence.decode('utf-8')\n if len(sentence) == 0:\n return []\n sentence = re.sub('\\.+', r'.', sentence)\n sentence = re.sub('([a-z])([.,!?()])', r'\\1 \\2 ', sentence)\n sentence = re.sub('\\s+', ' ', sentence)\n\n tokens = nltk.tokenize.word_tokenize(\n sentence.strip().lower())\n return tokens\n\n\ndef build_vocab(questions, cat2ans, threshold):\n \"\"\"Build a vocabulary from the annotations.\n\n Args:\n annotations: A json file containing the questions and answers.\n cat2ans: A json file containing answer types.\n threshold: The minimum number of times a work must occur. 
Otherwise it\n is treated as an `Vocabulary.SYM_UNK`.\n\n Returns:\n A Vocabulary object.\n \"\"\"\n with open(questions) as f:\n questions = json.load(f)\n with open(cat2ans) as f:\n cat2ans = json.load(f)\n\n words = []\n for category in cat2ans:\n for answer in cat2ans[category]:\n answer = tokenize(answer.encode('utf8'))\n words.extend(answer)\n\n counter = Counter()\n for i, entry in enumerate(questions['questions']):\n question = entry[\"question\"].encode('utf8')\n q_tokens = tokenize(question)\n counter.update(q_tokens)\n\n if i % 1000 == 0:\n logging.info(\"Tokenized %d questions.\" % (i))\n\n # If a word frequency is less than 'threshold', then the word is discarded.\n words.extend([word for word, cnt in counter.items() if cnt >= threshold])\n words = list(set(words))\n vocab = create_vocab(words)\n return vocab\n\n\ndef create_vocab(words):\n # Adds the words to the vocabulary.\n vocab = Vocabulary()\n for i, word in enumerate(words):\n vocab.add_word(word)\n return vocab\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n # Inputs.\n parser.add_argument('--questions', type=str,\n default='data/vqa/v2_OpenEnded_mscoco_'\n 'train2014_questions.json',\n help='Path for train questions file.')\n parser.add_argument('--cat2ans', type=str,\n default='data/vqa/iq_dataset.json',\n help='Path for the answer types.')\n\n # Hyperparameters.\n parser.add_argument('--threshold', type=int, default=4,\n help='Minimum word count threshold.')\n\n # Outputs.\n parser.add_argument('--vocab-path', type=str,\n default='data/processed/vocab_iq.json',\n help='Path for saving vocabulary wrapper.')\n args = parser.parse_args()\n\n # Configure logging\n logging.basicConfig(level=logging.INFO)\n vocab = build_vocab(args.questions, args.cat2ans, args.threshold)\n logging.info(\"Total vocabulary size: %d\" % len(vocab))\n vocab.save(args.vocab_path)\n logging.info(\"Saved the vocabulary wrapper to '%s'\" % args.vocab_path)\n\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JannisBush/xs-leaks-browser-web | [
"15ed76a87c1c30e15cd7d0b070e2853d17d322bc"
] | [
"analysis/helper_dyn.py"
] | [
"from database_connector import connect, postgresql_to_dataframe\nimport pandas as pd\nfrom pandas.api.types import CategoricalDtype\nimport datetime\nimport textdistance\nimport difflib\nimport hashlib\nimport pickle\n\nfrom dil_preprocess import get_url_data, basic_pruning\nfrom dil_predict import init, predict_trees, reduce_leaky_endpoints\nfrom dil_postprocess import get_working_incs, get_dyn_urls, get_working_urls_channels, get_dyn_results\n\ndef get_crawl_data():\n \"\"\"Return the data from node_crawler site table.\"\"\"\n conn = connect()\n column_names = [\"job_id\", \"site_id\", \"site\", \"cookies\", \"counter\", \"crawl_status\", \"crawler\"]\n df = postgresql_to_dataframe(conn, \"select * from sites\", column_names)\n conn.close()\n return df\n\ndef get_pipeline_overview():\n \"\"\"Return the data from the complete pipeline.\"\"\"\n # Connect to the database\n conn = connect()\n column_names = [\"id\", \"site\", \"login\", \"cookies\", \"cookie_end\", \"num_urls\", \n \"num_basic_pruning\", \"num_input_rows\", \"crawl_end\", \"dyn_conf_urls\", \n \"dyn_conf_firefox\", \"dyn_conf_chrome\", \"dyn_end\",\n \"dyn_conf_retest_urls\", \"dyn_conf_retest_firefox\", \"dyn_conf_retest_chrome\", \"dyn_retest_end\",\n \"confirmed_urls\", \"confirmed_urls_firefox\", \"confirmed_urls_chrome\",\n \"count\", \"tranco_rank\", \"confirmed_leak_urls\", \"confirmed_df_dict\",\n ]\n non_cat = [\"login\", \"dyn_conf_urls\", \"dyn_conf_retest_urls\", \"confirmed_urls\", \"cookies\", \"confirmed_leak_urls\", \"confirmed_df_dict\"]\n # Execute the \"SELECT *\" query\n site_results = postgresql_to_dataframe(conn, \"select * from db_site_results\", column_names, non_cat=non_cat)\n conn.close()\n return site_results\n\n\ndef get_leak_data():\n \"\"\"Return the data from dbcon_leakresult.\"\"\"\n conn = connect()\n column_names = [\"id\", \"loading_time\", \"timed_out\", \"apg_url\", \"complete_time\",\n \"retest_num\", \"cookies\", \"site\", \"browser_id\", \"events_id\", \"global_properties_id\",\n \"object_properties_id\", \"test_id\", \"window_properties_id\",\n ]\n non_cat = [\"cookies\"]\n # Execute the \"SELECT *\" query\n leak_results = postgresql_to_dataframe(conn, \"select * from dbcon_leakresult\", column_names, non_cat=non_cat)\n conn.close()\n return leak_results\n\n\ndef get_isotime(iso):\n \"\"\"Converts a isostr to datetime or returns None.\"\"\"\n try:\n return datetime.datetime.fromisoformat(iso)\n except ValueError:\n None\n # return datetime.datetime.fromordinal(datetime.date(year=1980, month=1, day=1).toordinal()\n \n \ndef calc_diff(time1, time2):\n \"\"\"Returns the difference between two time objects or returns None.\"\"\"\n try:\n return time1 - time2\n except TypeError:\n return None\n \ndef get_time(row):\n \"\"\"Calculate the timing of a row.\"\"\"\n start = get_isotime(row[\"cookie_end\"])\n end_crawl = get_isotime(row[\"crawl_end\"])\n end_dyn = get_isotime(row[\"dyn_end\"])\n end_final = get_isotime(row[\"dyn_retest_end\"])\n return (row[\"site\"], row[\"tranco_rank\"], calc_diff(end_crawl, start), calc_diff(end_dyn, end_crawl), calc_diff(end_final, end_dyn))\n\n\ndef display_timing(df):\n \"\"\"Calculate and display information on timimg.\"\"\"\n time_crawl = df.loc[df[\"crawl_end\"] != \"\"].apply(get_time, axis=1, result_type=\"expand\")\n time_crawl = time_crawl.rename(columns={0: \"site\", 1: \"tranco_rank\", 2: \"crawling time\", 3: \"dynamic confirmation time\", 4: \"dynamic reconfirmation time\"})\n display(time_crawl) # if time is over 9 hours, this could be because 
of a bug in our pipeline: e.g., ning, chess and vimeo\n display(time_crawl.agg([\"min\", \"max\", \"mean\", \"std\"]))\n\n \ndef get_cookie_stats(row):\n \"\"\"Row has a column cookies with a list of cookie dicts.\n Every entry in the list will get transformed to one row in a df that is returned.\n \"\"\"\n try:\n cookies = row.iloc[0][\"cookies\"]\n except IndexError:\n return None\n if type(cookies) == list:\n cookie_count = len(cookies)\n row[\"name\"] = \"Not set\"\n row[\"value\"] = \"Not set\"\n row[\"secure\"] = \"Not set\"\n row[\"httpOnly\"] = \"Not set\"\n row[\"sameSite\"] = \"Not set\"\n row = row.loc[row.index.repeat(cookie_count)]\n for count, cookie in enumerate(cookies):\n row[\"name\"].iloc[count] = cookie[\"name\"]\n row[\"value\"].iloc[count] = cookie[\"value\"]\n row[\"secure\"].iloc[count] = cookie.get(\"secure\", \"Not set\")\n row[\"httpOnly\"].iloc[count] = cookie.get(\"httpOnly\", \"Not set\")\n row[\"sameSite\"].iloc[count] = cookie.get(\"sameSite\", \"Not set\")\n # Collect stats for each cookie, guess if session cookie (regex on Name + nature of value?), record security attributes (how many use sameSite, etc)\n # Later see if there is a relation between vulnerable sites and the cookie settings of these sites?!\n # print(cookie[\"name\"], cookie[\"value\"], cookie.get(\"secure\", \"Not set\"), cookie.get(\"httpOnly\", \"Not set\"), cookie.get(\"sameSite\", \"Not set\"))\n return row\n\n\ndef show_only_first(df1, df2, info, head=3):\n \"\"\"Show all rows only existing in the first df, both frames have a column: id.\"\"\"\n c = df1.merge(df2, on=\"id\")\n res = df1.loc[~df1.id.isin(c.id)]\n if len(res) > 0:\n print(f\"{info} for {len(res)} sites\")\n with pd.option_context(\"max_columns\", None):\n display(res.head(head))\n return res\n\n\ndef get_pipeline_stats(df, log=True):\n \"\"\"Df is a (sub)frame of db_site_results.\n Get info of how many sites went missing in the various steps.\n \"\"\"\n cookies_found = df.loc[df[\"cookies\"] != {}]\n pipeline_started = df.loc[df[\"login\"].str.contains(r\"pipepline|actual site\")]\n started_cookie_hunter = df.loc[df[\"login\"].str.contains(\"pipepline\")]\n started_manual = df.loc[df[\"login\"].str.contains(\"actual site\")]\n \n # Add the ones that failed in the unpruned run (\"Bug\": we update the wrong cookiehunter entries for the unpruned runs, so we need to do this)\n pipeline_started = pipeline_started.append(df.loc[df[\"site\"].isin([\"bravenet.com\", \"amazon.in\", \"faucetcrypto.com\", \"bshare.cn\"])])\n cookies_found = cookies_found.append(df.loc[df[\"site\"].isin([\"bravenet.com\", \"amazon.in\", \"faucetcrypto.com\", \"bshare.cn\"])])\n started_cookie_hunter = started_cookie_hunter.append(df.loc[df[\"site\"].isin([\"bravenet.com\", \"amazon.in\", \"faucetcrypto.com\", \"bshare.cn\"])])\n \n crawled = df.loc[df[\"crawl_end\"] != \"\"]\n crawled_min = df.loc[df[\"num_urls\"] >= 1]\n crawled_success = df.loc[df[\"num_urls\"] >= 3]\n pruned = df.loc[df[\"num_basic_pruning\"] > 0]\n num_input_rows = df.loc[df[\"num_input_rows\"] > 0]\n\n pot_ft = df.loc[df[\"dyn_conf_firefox\"] > 0]\n pot_ct = df.loc[df[\"dyn_conf_chrome\"] > 0]\n pot = df.loc[df[\"id\"].isin(list(set(pot_ft[\"id\"].values.tolist()) | set(pot_ct[\"id\"].values.tolist())))]\n pot_both = df.loc[df[\"id\"].isin(list(set(pot_ft[\"id\"].values.tolist()) & set(pot_ct[\"id\"].values.tolist())))]\n\n pot_fr = df.loc[df[\"dyn_conf_retest_firefox\"] > 0]\n pot_cr = df.loc[df[\"dyn_conf_retest_chrome\"] > 0]\n pot_r = 
df.loc[df[\"id\"].isin(list(set(pot_fr[\"id\"].values.tolist()) | set(pot_cr[\"id\"].values.tolist())))]\n pot_r_both = df.loc[df[\"id\"].isin(list(set(pot_fr[\"id\"].values.tolist()) & set(pot_cr[\"id\"].values.tolist())))]\n\n conf_f = df.loc[df[\"confirmed_urls_firefox\"] > 0]\n conf_c = df.loc[df[\"confirmed_urls_chrome\"] > 0]\n conf = df.loc[df[\"id\"].isin(list(set(conf_f[\"id\"].values.tolist()) | set(conf_c[\"id\"].values.tolist())))]\n conf_both = df.loc[df[\"id\"].isin(list(set(conf_f[\"id\"].values.tolist()) & set(conf_c[\"id\"].values.tolist())))]\n\n info_text = (\n f\"Cookiehunter:\\n\"\n f\"Total sites attempted: {len(df)}, some success (cookies collected): {len(cookies_found)}, full success (pipeline started): {len(pipeline_started)}\\n\"\n f\"Pipeline started cookiehunter: {len(started_cookie_hunter)}, started selenium login replay: {len(started_manual)}\\n\"\n f\"\\nCrawling:\\n\"\n f\"Crawl started: {len(crawled)}, at least one URL crawled: {len(crawled_min)}, at least three URLs crawled: {len(crawled_success)}\\n\"\n f\"\\nPruning:\\n\"\n f\"At least one URL remains after basic pruninng: {len(pruned)}, at least one input row for trees: {len(num_input_rows)}\\n\"\n f\"Trees:\\n\"\n f\"At least one potential vulnerable firefox: {len(pot_ft)}, at least one potential vulnerable chrome: {len(pot_ct)}\\n\"\n f\"At least one potential vulnerable either: {len(pot)}, at least one potential vulnerable both: {len(pot_both)}\\n\"\n f\"\\nSingle confirmation:\\n\"\n f\"At least one different observation firefox: {len(pot_fr)}, at least one different observation chrome: {len(pot_cr)}\\n\"\n f\"At least one different observation either: {len(pot_r)}, at least one different observation both: {len(pot_r_both)}\\n\"\n f\"\\nDouble confirmation:\\n\"\n f\"At least one vulnerable firefox: {len(conf_f)}, at least one vulnerable chrome: {len(conf_c)}\\n\"\n f\"At least one vulnerable either: {len(conf)}, at least one vulnerable both: {len(conf_both)}\\n\"\n )\n if log:\n print(info_text)\n \n \n # Sanity checks, should not occur\n show_only_first(pipeline_started, cookies_found, \"Started without cookies\")\n show_only_first(pipeline_started, crawled, \"Started but not crawled\")\n show_only_first(crawled_min, crawled, \"Crawl check\")\n show_only_first(crawled_success, crawled_min, \"Crawl check\")\n show_only_first(pruned, num_input_rows, \"No input rows after pruning\")\n\n if log:\n print(\"For some sites our testing infrastructure was partially down during testing (67 sites), after the infrastructure was ready again. We retested but for 21 the login failed (e.g., google SSO changed behavior in between and does not allow selenium anymore). 
We remove these from the following test\")\n cookie_hunter_second_failed = show_only_first(crawled, pipeline_started, \"Crawled without started\", 21)\n \n # Remove the sites that failed a second login, and did never got tested properly\n df = df.loc[~df.index.isin(cookie_hunter_second_failed.index)]\n \n # Interesting cases\n if log:\n show_only_first(crawled, crawled_min, \"Not crawled properly (e.g., cert error)\")\n show_only_first(pot, crawled_success, \"Potential vulnerable with less than 3 URLs crawled\")\n show_only_first(crawled_min, pruned, \"Crawled but excluded after basic pruning\")\n show_only_first(num_input_rows, pot, \"No potential leaks after tree pruning\")\n show_only_first(pot, pot_r, \"No observed difference in potential URLs\")\n show_only_first(pot_r, conf, \"No confirmed URLs after retesting\")\n show_only_first(conf_f, conf_c, \"Only in firefox confirmed\")\n show_only_first(conf_c, conf_f, \"Only in chrome confirmed\")\n \n \n return df, conf_both, conf\n\n\nsec_rel_headers = [\n \"content-type\",\n \"x-frame-options\",\n \"content-disposition\",\n \"cross-origin-opener-policy\",\n \"x-content-type-options\",\n \"cross-origin-resource-policy\",\n \"content-security-policy\", \n \"location\",\n]\n\nto_test = sec_rel_headers + [\"code\"]\n\nacc = {}\ndef process_responses(row):\n \"\"\"Get only the relevant data from the crawl.\"\"\"\n global acc\n headers = row[\"resp_headers\"] # All headers in the db are saved as lowercase\n sec_df = {}\n sec_df[\"url\"] = row[\"req_url\"]\n sec_df[\"site\"] = row[\"site\"]\n sec_df[\"real_site\"] = row[\"real_site\"]\n sec_df[\"cookies\"] = row[\"cookies\"]\n sec_df[\"code\"] = row[\"resp_code\"]\n sec_df[\"body\"] = row[\"resp_body_hash\"]\n headers_basic_pruned = {}\n for header in sec_rel_headers:\n header_val = headers.get(header, \"Empty\") \n # Remove some info from headers here to deduplicate (e.g., filename in content-disposition?)\n if header == \"content-disposition\":\n header_val = header_val.split(\";\")[0]\n # Add post-processing for CSP\n sec_df[header] = header_val\n if not header == \"content-security-policy\":\n headers_basic_pruned[header] = header_val\n for header in headers:\n count = acc.get(header, 0)\n acc[header] = count + 1\n \n # Calculate hashes of the responses, either hash everything, remove some headers including randomness or only keep the tree headers (basic pruning)\n hash_all = [sec_df[\"url\"], sec_df[\"site\"], sec_df[\"code\"], headers, sec_df[\"body\"]]\n headers_min_pruned = headers.copy()\n for header in [\"date\", \"server\", \"cache-control\", \"last-modified\", \"etag\", \"vary\", \"expires\", \"age\"]:\n headers_min_pruned.pop(header, None)\n hash_min_pruned = [sec_df[\"url\"], sec_df[\"site\"], sec_df[\"code\"], headers_min_pruned, sec_df[\"body\"]]\n hash_basic_pruned = [sec_df[\"url\"], sec_df[\"site\"], sec_df[\"code\"], headers_basic_pruned, sec_df[\"body\"]]\n\n sec_df[\"hash_all\"] = hashlib.sha1(pickle.dumps(hash_all)).hexdigest()\n sec_df[\"hash_min_pruned\"] = hashlib.sha1(pickle.dumps(hash_min_pruned)).hexdigest()\n sec_df[\"hash_basic_pruned\"] = hashlib.sha1(pickle.dumps(hash_basic_pruned)).hexdigest()\n \n return sec_df\n\ndef get_acc():\n global acc\n return acc\n \n \ndef display_response_summary(df, index=\"cookies\", check=None):\n \"\"\"Display response groups.\"\"\"\n if check is None:\n global to_test\n to_check = to_test.copy()\n to_check.remove(\"content-security-policy\")\n else:\n to_check = check\n table_dict = {}\n with pd.option_context(\"max_columns\", 
200):\n display(df.groupby(index).nunique())\n for prop in to_check:\n pivot = df.pivot_table(index=index, columns=prop, aggfunc=\"size\", fill_value=0)\n pivot.loc[\"Total\"] = pivot.sum()\n res = pivot.loc[:, pivot.max().sort_values(ascending=False).index]\n display(res)\n table_dict[prop] = res\n # display(df[prop].value_counts().to_frame())\n pivot = df.pivot_table(index=index, columns=to_check, aggfunc=\"size\", fill_value=0)\n pivot.loc[\"Total\"] = pivot.sum()\n res = pivot.loc[:, pivot.max().sort_values(ascending=False).index]\n res\n display(res)\n table_dict[\"total\"] = res\n return table_dict\n \n\n\ndef display_changed(df):\n \"\"\"Display rows where different headers/status-code are observed for cookies/no-cookies\"\"\"\n # Drop the ones with only one or more than two observations\n count_urls = df.groupby([\"url\", \"site\", \"real_site\"])[\"cookies\"].count()\n display(count_urls.value_counts())\n count_index = count_urls[count_urls == 2].index\n df = df.set_index([\"url\", \"site\", \"real_site\"])\n df = df.loc[count_index]\n df = df.reset_index()\n print(df.info())\n \n # Drop the ones that are the same for cookies/no-cookies\n df = df.drop_duplicates(subset=to_test + [\"url\", \"site\", \"real_site\"], keep=False)\n \n # Display remaining ones\n display(df.sort_values([\"site\", \"real_site\", \"url\", \"cookies\"]))\n \n \ndef parse_apg_url(apg_url):\n \"\"\"Return the method, url and browser from an apg_url.\"\"\"\n method = apg_url.split(\"/apg/\")[1].split(\"/?url=\")[0]\n url = apg_url.split(\"/?url=\")[1].split(\"&browser\")[0]\n try:\n browser = apg_url.split(\"&browser=\")[1].split(\"&\")[0]\n except IndexError:\n browser = None\n return method, url, browser\n\n \ndef parse_method_url(row, col, acc):\n \"\"\"Get URL, method and browser from the apg url.\"\"\"\n row_dict = row[col]\n site = row[\"site\"]\n if type(row_dict) == dict:\n browser_l = []\n method_l = []\n url_l = []\n l = []\n for browser in row_dict:\n for apg_url in row_dict[browser]:\n method = apg_url.split(\"/apg/\")[1].split(\"/?url=\")[0]\n url = apg_url.split(\"/?url=\")[1]\n browser_l.append(browser)\n method_l.append(method)\n url_l.append(url)\n l.append([browser, method, url])\n acc.append({\"site\": site, \"browser\": browser, \"method\": method, \"url\": url})\n return [browser_l, method_l, url_l]\n \n \ndef get_query(string, pos=1):\n \"\"\"Get query parameter of a URL.\"\"\"\n try:\n return string.split(\"?\")[pos]\n except IndexError:\n if pos == 1:\n return \"\"\n else:\n return string\n\n \ndef info_grouping(grouping, info, info_frame, info_frame_new, log=False):\n for key, group in grouping:\n f = group.loc[group[\"browser\"] == \"firefox\"]\n c = group.loc[group[\"browser\"] == \"chrome\"]\n if log:\n print(f\"Grouping: {key}, number vuln: {len(group)}, chrome: {len(c)}, firefox: {len(f)}\")\n display(group.head())\n \n leak_url_dict = {}\n leak_url_set = leak_url_dict, set(f[[\"site\", \"inc_method\", \"url\"]].drop_duplicates().itertuples(index=False, name=None)), set(c[[\"site\", \"inc_method\", \"url\"]].drop_duplicates().itertuples(index=False, name=None))\n \n leak_channel_dict = {}\n leak_channel_set = leak_channel_dict, set(f[[\"site\", \"inc_method\", \"method\", \"url\"]].drop_duplicates().itertuples(index=False, name=None)), set(c[[\"site\", \"inc_method\", \"method\", \"url\"]].drop_duplicates().itertuples(index=False, name=None))\n \n site_dict = {}\n site_set = site_dict, set(f[\"site\"].unique()), set(c[\"site\"].unique())\n \n url_dict = {}\n url_set = url_dict , 
set(f[\"url\"].unique()), set(c[\"url\"].unique())\n \n urlb_dict = {}\n urlb_set = urlb_dict, set(f[\"url_base\"].unique()), set(c[\"url_base\"].unique())\n \n for (dic, fs, cs) in [site_set, url_set, urlb_set, leak_url_set, leak_channel_set]:\n dic[\"both\"] = list(fs & cs)\n dic[\"combined\"] = list(fs | cs)\n dic[\"only_one\"] = list(fs ^ cs)\n \n leak_urls_browser = group.groupby([\"url\", \"inc_method\", \"browser\"]).ngroups\n #print(len(leak_url_dict[\"combined\"]), len(leak_url_dict[\"both\"]), len(leak_url_dict[\"only_one\"]), len(leak_url_set[1]), len(leak_url_set[2]))\n #print(len(leak_channel_dict[\"combined\"]), len(leak_channel_dict[\"both\"]), len(leak_channel_dict[\"only_one\"]), len(leak_channel_set[1]), len(leak_channel_set[2]))\n\n info_frame.loc[len(info_frame)] = [info, key, len(leak_url_dict[\"combined\"]), len(c), len(f), c[\"site\"].nunique(), f[\"site\"].nunique(), group[\"site\"].nunique()]\n info_frame_new.loc[len(info_frame_new)] = [info, key, len(leak_url_dict[\"combined\"]), len(leak_url_dict[\"both\"]), len(leak_url_dict[\"only_one\"]), len(leak_url_set[1]), len(leak_url_set[2]), len(url_dict[\"combined\"]), len(url_dict[\"both\"]), len(url_dict[\"only_one\"]), len(url_set[1]), len(url_set[2]), len(urlb_dict[\"combined\"]), len(urlb_dict[\"both\"]), len(urlb_dict[\"only_one\"]), len(urlb_set[1]), len(urlb_set[2]), len(site_dict[\"combined\"]), len(site_dict[\"both\"]), len(site_dict[\"only_one\"]), len(site_set[1]), len(site_set[2]), len(leak_channel_dict[\"combined\"]), len(leak_channel_dict[\"both\"]), len(leak_channel_dict[\"only_one\"]), len(leak_channel_set[1]), len(leak_channel_set[2])] \n \n # Look at manual anlaysis.ipynb: \n # e.g., check if the inc method was even tested/retested for the specific URL\n # was the inc_method excluded from tree pruning or from the retest (or from the second retest)\n # if inc_method was not tested in browser -> edge cases that does only work in one browser (according to our trees/R1)\n # if inc_method was tested in chrome, but not retested -> might be due to SameSite\n # if inc_method was tested and retested in other browser, but did not work -> might be either that leak is unstable in general (pM?) 
or in one browser (load on embed and co in chrome?)\n \n return info_frame, info_frame_new\n\n \ndef row_sym(row):\n \"\"\"Calculates the simmilarity between the value_cookies and value_no_cookies.\"\"\"\n return textdistance.jaro.normalized_similarity(row[\"value_cookies\"], row[\"value_no_cookies\"])\n\n\ndef get_distances(df):\n \"\"\"Shows the edits between two postMessages.\"\"\"\n for _, row in df.loc[df[\"method\"] == \"gp_window_postMessage\"].iterrows():\n cases = [(row[\"value_cookies\"], row[\"value_no_cookies\"])]\n for a, b in cases: \n print('{} => {}'.format(a,b)) \n for i,s in enumerate(difflib.ndiff(a, b)):\n if s[0]==' ': continue\n elif s[0]=='-':\n print(u'Delete \"{}\" from position {}'.format(s[-1],i))\n elif s[0]=='+':\n print(u'Add \"{}\" to position {}'.format(s[-1],i)) \n print()\n \ndef get_conf_dfs(df, log=False):\n \"\"\"Df is info df, return the collection of dfs in the confirmed_df_dict column with some extra information.\"\"\"\n df_all = pd.DataFrame()\n for _, row in df.iterrows():\n site = row[\"site\"]\n try:\n df_frame = pd.DataFrame(row[\"confirmed_df_dict\"])\n # Fix old data, that has no confirmed_df_dict\n if len(df_frame) == 0:\n print(site) # technologyreview is not vulnerable according to our new definition of \"same\"\n df_frame, _, _ = get_working_urls_channels(get_dyn_results(site))\n df_frame[\"site\"] = site\n df_frame[\"url_len\"] = df_frame[\"url\"].str.len()\n df_frame[\"url_query\"] = df_frame[\"url\"].apply(get_query)\n df_frame[\"url_base\"] = df_frame[\"url\"].apply(get_query, pos=0) # Only the base of the URL without query parameters (maybe the same URL was found vulnerable several times with different query parameters)\n df_frame[\"url_query_len\"] = df_frame[\"url_query\"].str.len()\n df_frame[\"jaro\"] = df_frame.apply(row_sym, axis=1)\n # display(df_frame.sort_values([\"url_len\", \"url\", \"inc_method\", \"method\", \"browser\"]).head())\n df_chrome = df_frame.loc[df_frame[\"browser\"] == \"chrome\"]\n df_firefox = df_frame.loc[df_frame[\"browser\"] == \"firefox\"]\n df_all = df_all.append(df_frame)\n if log:\n print(f\"{df_frame['url'].nunique()} unique URLs, total vuln: {len(df_frame)}, chrome vuln: {len(df_chrome)}, firefox vuln: {len(df_firefox)}\")\n except KeyError as e:\n print(f\"Error: {e}\") \n display(site)\n return df_all\n\ndef remove_leak_urls(row, dyn_conf_data):\n url = row[\"url\"]\n method = row[\"inc_method\"]\n site = row[\"site\"]\n nogroup = \"nogroup\"\n \n in_chrome = True if (method, url, \"chrome\", site, nogroup) in dyn_conf_data else False\n in_firefox = True if (method, url, \"firefox\", site, nogroup) in dyn_conf_data else False\n\n if in_chrome and in_firefox:\n return 2\n elif in_chrome or in_firefox:\n return 1\n else:\n assert False, row\n\n\ndef get_info_frames(df_all, leak_set=None, leave=[1, 2], conv_method=False):\n \"\"\"Get the most important results in two info frames\"\"\"\n # Remove rows?!\n df_all = df_all.copy()\n if leak_set is not None:\n df_all[\"in\"] = df_all.apply(remove_leak_urls, dyn_conf_data=leak_set, axis=1)\n df_all = df_all.loc[df_all[\"in\"].isin(leave)] # Only leave leak channels that were tested in both browsers ([2]), in only one browser ([1]) or do nothing ([1, 2])\n \n # Convert leak method to category\n if conv_method:\n # Remove the ones that are pruned in the attack page already?\n method_cats = CategoricalDtype(categories=[\"event_set\", \"event_list\", \"load_count\", \"gp_download_bar_height\", \"gp_securitypolicyviolation\", \"gp_window_getComputedStyle\", 
\"gp_window_hasOwnProperty\", \"gp_window_onblur\", \"gp_window_onerror\", \"op_el_buffered\", \"op_el_contentDocument\", \"op_el_duration\", \"op_el_height\", \"op_el_media_error\", \"op_el_naturalHeight\", \"op_el_naturalWidth\", \"op_el_networkState\", \"op_el_paused\", \"op_el_readyState\", \"op_el_seekable\", \"op_el_sheet\", \"op_el_videoHeight\", \"op_el_videoWidth\", \"op_el_width\", \"op_frame_count\", \"op_win_CSS2Properties\", \"op_win_history_length\", \"op_win_opener\", \"op_win_origin\", \"op_win_window\"], ordered=True)\n df_all[\"method\"] = df_all[\"method\"].astype(method_cats)\n \n \n inc_methods = df_all.groupby(\"inc_method\")\n leak_methods = df_all.groupby(\"method\")\n df_all[\"group_key_fake\"] = \"browsers\"\n browsers = df_all.groupby(\"group_key_fake\")\n leak_channels = df_all.groupby([\"inc_method\", \"method\"])\n sites = df_all.groupby(\"site\")\n inc_sites = df_all.groupby([\"site\", \"inc_method\"])\n\n info_frame = pd.DataFrame(columns=[\"type\", \"subtype\", \"leak urls\", \"chrome_channels\", \"firefox_channels\", \"chrome_sites\", \"firefox_sites\", \"sites\"])\n info_frame_new = pd.DataFrame(columns=[\"type\", \"subtype\", \"confirmed leak URLs any browser\", \"confirmed leak URLs both browsers\", \"confirmed leak URLs only one browser\", \"confirmed leak URLs firefox\", \"confirmed leak URLs chrome\", \"confirmed URLs any browser\", \"confirmed URLs both browsers\",\n \"confirmed URLs only one browser\", \"confirmed URLs firefox\", \"confirmed URLs chrome\",\n \"confirmed base URLs browser\", \"confirmed base URLs both browsers\",\n \"confirmed base URLs only one browser\", \"confirmed base URLs firefox\", \"confirmed base URLs chrome\",\n \"confirmed sites any browser\", \"confirmed sites both browsers\", \"confirmed sites only one browser\",\n \"confirmed sites firefox\", \"confirmed sites chrome\",\n \"confirmed channels any browser\", \"confirmed channels both browser\", \"confirmed channels only one browser\", \"confirmed channels firefox\", \"confirmed channels chrome\"])\n\n info_frame, info_frame_new = info_grouping(browsers, \"browsers\", info_frame, info_frame_new)\n info_frame, info_frame_new = info_grouping(inc_methods, \"inc_methods\", info_frame, info_frame_new)\n info_fame, info_frame_new = info_grouping(leak_methods, \"leak_methods\", info_frame, info_frame_new)\n info_fame, info_frame_new = info_grouping(leak_channels, \"leak_channels\", info_frame, info_frame_new)\n info_fame, info_frame_new = info_grouping(sites, \"sites\", info_frame, info_frame_new)\n info_fame, info_frame_new = info_grouping(inc_sites, \"inc_sites\", info_frame, info_frame_new)\n\n \n return info_frame, info_frame_new\n\n\ndef get_only_both(df_dict, keys=(\"chrome\", \"firefox\"), log=False):\n \"\"\"Get info on entries only in one, in both and combined.\n df_dict: dict with keys chrome and firefox, with list as values.\"\"\"\n try:\n c_set = set(df_dict[keys[0]].itertuples(index=False, name=None))\n except KeyError:\n c_set = set()\n try:\n f_set = set(df_dict[keys[1]].itertuples(index=False, name=None))\n except KeyError:\n f_set = set()\n \n both = list(c_set & f_set)\n combined = list(c_set | f_set)\n only_one = list(c_set ^ f_set)\n only = {keys[0]: [], keys[1]: []}\n for entry in only_one:\n try:\n key = keys[0] if entry in c_set else keys[1]\n except KeyError:\n key = keys[1]\n only[key].append(entry)\n \n first = len(c_set)\n second = len(f_set)\n combined = len(combined)\n both = len(both)\n only_first = len(only[keys[0]])\n only_second = 
len(only[keys[1]])\n \n if log:\n print()\n print(f\"{keys[0]}: {first}, {keys[1]}: {second}\")\n print(f\"Combined: {combined}\")\n print(f\"Both: {both}\")\n #display(both)\n print(f\"Only in one: {len(only_one)}, {keys[0]}: {only_first}, {keys[1]}: {only_second}\")\n # display(only)\n df0 = pd.DataFrame(only[keys[0]])\n df0[\"key\"] = keys[0]\n df1 = pd.DataFrame(only[keys[1]])\n df1[\"key\"] = keys[1]\n return df0.append(df1)\n \n return first, second, combined, both, only_first, only_second\n\n\n\ndef url_list_to_tuples(l, sites, site_cat=False):\n \"\"\"Convert a list of leak url dicts to list of tuples.\"\"\"\n df_list = []\n for apg_dict, site in zip(l, sites):\n if apg_dict is None:\n continue\n for browser in apg_dict:\n for url in apg_dict[browser]:\n method, url, _ = parse_apg_url(url)\n # df_list.append({\"method\": method, \"url\": url, \"browser\": browser})\n df_list.append((method, url, browser, site, \"nogroup\"))\n # df = pd.DataFrame(df_list)\n # print(df_list[:5])\n df = pd.DataFrame(df_list, columns=[\"method\", \"url\", \"browser\", \"site\", \"nogroup\"]).sort_values([\"browser\", \"method\", \"site\", \"url\"])\n method_cats = CategoricalDtype(categories=['audio', 'embed', 'embed-img', 'iframe', 'iframe-csp', 'img', 'link-prefetch', 'link-stylesheet', 'object', 'script', 'video', 'window.open'], ordered=True)\n if site_cat:\n site_cats = CategoricalDtype(categories=['pier1.com-unpruned', 'chartink.com-unpruned', 'pdffiller.com-unpruned', 'staples.ca-unpruned', 'freelogodesign.org-unpruned', 'duplichecker.com-unpruned', 'miro.com-unpruned', 'mnml.la-unpruned', 'redtube.com-unpruned', 'whatfontis.com-unpruned', 'glosbe.com-unpruned', 'wideads.com-unpruned', 'standardmedia.co.ke-unpruned', 'gyazo.com-unpruned', 'megogo.net-unpruned', 'zennioptical.com-unpruned', 'powtoon.com-unpruned', 'italki.com-unpruned', 'themehorse.com-unpruned', 'versobooks.com-unpruned', 'yourstory.com-unpruned', 'korrespondent.net-unpruned', 'transifex.com-unpruned', 'ankiweb.net-unpruned', 'iplocation.net-unpruned', 'youporn.com-unpruned', 'tmj4.com-unpruned', 'nimbusweb.me-unpruned', 'classifiedads.com-unpruned', 'myvidster.com-unpruned', 'cafepress.com-unpruned', 'pakwheels.com-unpruned', 'idntimes.com-unpruned', 'mhthemes.com-unpruned', 'universe.com-unpruned', 'aboutus.com-unpruned'], ordered=True)\n df[\"site\"] = df[\"site\"].astype(site_cats)\n \n browser_cats = CategoricalDtype(categories=[\"firefox\", \"chrome\"], ordered=True)\n df[\"method\"] = df[\"method\"].astype(method_cats)\n df[\"browser\"] = df[\"browser\"].astype(browser_cats)\n return df\n\n\ndef get_predictions_retroactive(df, methods=\"limited\"):\n \"\"\"Returns the tree predictions for a every site in a df.\"\"\"\n init(methods)\n predicted_leak_urls = []\n for site in df[\"site\"].tolist():\n dat = get_url_data(site)\n af, d, poss, results = basic_pruning(dat)\n if af is None:\n urls = {}\n else:\n leaky_endpoints = predict_trees(af)\n if leaky_endpoints == {}:\n urls = {}\n else:\n leaks = reduce_leaky_endpoints(leaky_endpoints)\n incs = get_working_incs(leaks)\n urls = get_dyn_urls(leaks, incs, d, poss)\n predicted_leak_urls.append(urls)\n return predicted_leak_urls\n\n\ndef get_combs_after_basic_pruning(df):\n leak_urls = []\n for site in df[\"site\"].tolist():\n dat = get_url_data(site)\n af, d, poss, results = basic_pruning(dat)\n if af is None:\n urls = {}\n else:\n urls = get_dyn_urls(None, None, poss, None, unpruned=True)\n leak_urls.append(urls)\n return leak_urls \n\n\ndef get_basic_pruning_reduction(row):\n 
\"\"\"Return the size reduction from basic pruning\"\"\"\n return save_div(row[\"num_urls\"] - row[\"num_basic_pruning\"], row[\"num_urls\"], ret=None)\n\n\ndef save_div(a, b, ret=0):\n \"\"\"Division without 0 error, ret is returned instead.\"\"\"\n if b == 0:\n return ret\n return a/b\n\ndef get_stats(ground_truth, predicted_trees, all_combinations, info):\n \"\"\"Calculate and display the pruning false negative data.\"\"\"\n res = {}\n for group_key in [[\"nogroup\"], [\"method\"], [\"browser\"], [\"site\"]]: #, [\"browser\", \"method\"]]: # Not working as not every group exist\n try:\n gts = ground_truth.groupby(group_key)\n preds = predicted_trees.groupby(group_key)\n all_combs = all_combinations.groupby(group_key) \n df = pd.DataFrame()\n for (name, gt), (_, pred), (_, all_comb) in zip(gts, preds, all_combs):\n gt_len, pred_len, _, tp_len, fn_len, fp_len = get_only_both({\"ground_truth\": gt, \"predicted_trees\": pred}, (\"ground_truth\", \"predicted_trees\"))\n\n all_comb_len = all_comb.drop_duplicates().shape[0]\n gn_len = all_comb_len - gt_len\n\n size_red = save_div(all_comb_len, pred_len)\n fnr = save_div(fn_len, gt_len)\n fpr = save_div(fp_len, gn_len)\n tn_len = all_comb_len - pred_len - fn_len\n\n res_line = [(name, gt_len, all_comb_len, pred_len, size_red, fnr, fpr, tp_len, fn_len, fp_len, tn_len)]\n columns = [\"grouping\", \"gt\", \"all_comb\", \"pred\", \"size_red\", \"fnr\", \"fpr\", \"tp\", \"fn\", \"fp\", \"tn\"]\n df = df.append(pd.DataFrame(res_line, columns=columns))\n if len(df) > 1:\n pass\n # df.loc[\"Mean\"] = df.mean()\n\n res[str(group_key)] = df\n except KeyError as e:\n print(e)\n\n\n # Get size difference in all_combinations/predicted_trees/predicted_trees_all\n for entry in res:\n print(info)\n with pd.option_context(\"max_columns\", None):\n print(entry)\n display(res[entry])\n # display(res[entry].describe())\n return res\n\n \ndef calc_info_frames(site_results_filtered, remove_multiple=None):\n \"\"\"Return the info frames for the input.\"\"\"\n dat, conf_both, conf_any = get_pipeline_stats(site_results_filtered, log=False)\n df_all = get_conf_dfs(conf_any)\n if remove_multiple:\n url_by_leak = df_all.groupby([\"browser\", \"url\"])[[\"method\", \"inc_method\"]].nunique()\n only_one_inc = set(url_by_leak.loc[url_by_leak[remove_multiple] == 1].reset_index()[[\"browser\", \"url\"]].itertuples(name=None, index=False))\n df_all = df_all.loc[df_all[[\"browser\", \"url\"]].apply(lambda x: (x[\"browser\"], x[\"url\"]) in only_one_inc, axis=1)]\n \n sites = dat[\"site\"].tolist()\n leak_urls = url_list_to_tuples(dat[\"dyn_conf_urls\"].tolist(), sites)\n leak_url_set = set(list(leak_urls.itertuples(name=None, index=None)))\n # Complete frame\n info_frame, info_frame_new = get_info_frames(df_all, None)\n # Prune all leak URLs only tested in one browser\n info_frame_both, info_frame_new_both = get_info_frames(df_all, leak_url_set, leave=[2])\n # Prune all leak URLs tested in both browsers\n info_frame_only, info_frame_new_only = get_info_frames(df_all, leak_url_set, leave=[1])\n return (info_frame, info_frame_new), (info_frame_both, info_frame_new_both), (info_frame_only, info_frame_new_only)"
] | [
[
"pandas.option_context",
"pandas.api.types.CategoricalDtype",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
123prashanth123/Fault-Detection-System | [
"fa59ca81ce4627a42648e654b55cdc505cde2103"
] | [
"References and Tests/Patch Pattern Recognition/Processor.py"
] | [
"import cv2\nimport platform\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader as DL\n\nimport utils as u\nfrom Models import build_model\n\n# ******************************************************************************************************************** #\n\n# Dataset Template \nclass DS(Dataset):\n def __init__(self, X=None, transform=None):\n self.transform = transform\n self.X = X\n\n def __len__(self):\n return len(self.X)\n \n def __getitem__(self, idx):\n return self.transform(self.X[idx])\n\n# ******************************************************************************************************************** #\n\ndef process_patches_in_video(patch, similarity):\n if patch is None:\n u.myprint(\"\\nError Reading Patch File\", \"red\")\n return\n \n # Get the model\n model = build_model()\n\n # Infer the patch height and width from the patch\n ph, pw, _ = patch.shape\n\n # Extract the features from the patch\n patch_features = model.get_features(patch)\n \n # Setting up capture object\n if platform.system() != \"Windows\":\n cap = cv2.VideoCapture(u.ID)\n else:\n cap = cv2.VideoCapture(u.ID, cv2.CAP_DSHOW)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, u.CAM_HEIGHT)\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, u.CAM_WIDTH)\n cap.set(cv2.CAP_PROP_FPS, u.FPS)\n\n print(\"\")\n # Read data from the capture object\n while cap.isOpened():\n _, frame = cap.read()\n h, w, _ = frame.shape\n\n # Calculate the number of rows and columns that will be present in the patched image\n if h % ph == 0 : num_cols = int(h/ph)\n else : num_cols = int(h/ph) + 1\n if w % pw == 0 : num_rows = int(w/pw)\n else : num_rows = int(w/pw) + 1\n\n # Resize frame to avoid any errors\n frame = cv2.resize(src=frame, dsize=(num_rows*pw, num_cols*ph), interpolation=cv2.INTER_AREA)\n disp_frame = frame.copy()\n\n # patches: Holds the image data of the patches\n # patches_idx: Holds the array indexes at which patch was made\n patches = []\n patches_ = []\n patches_idx = []\n for i in range(0, h, ph):\n for j in range(0, w, pw):\n patches_idx.append([i, ph+i, j, pw+j])\n patches_.append(frame[i:ph+i, j:pw+j, :])\n patches.append(u.preprocess(frame[i:ph+i, j:pw+j, :]))\n \n # Setup the Dataloader\n data_setup = DS(X=patches, transform=model.transform)\n data = DL(data_setup, batch_size=64, shuffle=False)\n \n # Get features of all the patches\n features = model.get_batch_features(data)\n \n # Obtain the Cosine Similarity between the reference patch Feature Vector and all the patches within the frame\n cos_sim = []\n for feat in features:\n cos_sim.append(model.get_cosine_similarity(patch_features, feat.reshape(1, -1)))\n \n # Adjust color of the patch in accordance with its Cosine Similarity metric\n for i in range(num_cols):\n for j in range(num_rows):\n disp_frame[i*ph:(i+1)*ph, j*pw:(j+1)*pw, :] = patches_[i * num_rows + j]\n if cos_sim[i * num_rows + j] > similarity:\n disp_frame[i*ph:(i+1)*ph, j*pw:(j+1)*pw, 1] = 200\n\n # Display the frame\n cv2.imshow(\"Pattern Processed Frame\", disp_frame)\n\n # Press 'q' to Quit\n if cv2.waitKey(u.WAIT_DELAY) == ord(\"q\"):\n break\n \n # Release the capture object and destroy all windows\n cap.release()\n cv2.destroyAllWindows()\n\n# ******************************************************************************************************************** #\n"
] | [
[
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CaileanCarter/ST-literature-search | [
"af73a8098f336fbb3bf16eef44eae0b254aa7a17"
] | [
"litsearch.py"
] | [
"\"\"\"\nThese are PubMed search results for 'escherichia coli sequence type' from 2000 to 2021, \nwith the first 10,000 of 14,383 results saved to file 'escherichi-set.csv'.\n\n\"\"\"\n\n#TODO: make webpage as UI using flask?\n\nimport re\nfrom collections import Counter\nfrom sys import argv\n\nimport numpy as np\nimport pandas as pd\nfrom Bio import Entrez\n\n\nclass PubMed:\n\n @staticmethod\n def long_summary(pmid: str):\n print(Entrez.efetch(db=\"pubmed\", id=pmid, retmode=\"text\", rettype=\"gb\").read())\n\n\n @staticmethod\n def times_cited(pmid: [str, list]) -> list:\n result = Entrez.read(Entrez.elink(dbfrom=\"pubmed\", id=pmid, linkname=\"pubmed_pubmed_citedin\"))\n \n if isinstance(pmid, str): \n papers = len(result[0]['LinkSetDb'][0]['Link'])\n # print(\"Cited by \", len(papers), \" papers.\")\n return papers\n\n elif isinstance(pmid, list):\n citation_count = []\n for index, _ in enumerate(pmid):\n try:\n citation_count.append(len(result[index]['LinkSetDb'][0]['Link']))\n except IndexError:\n citation_count.append(0)\n return citation_count\n \n else:\n raise TypeError(\"Expected string or list.\")\n\n\n @staticmethod\n def add_times_cited(df) -> pd.DataFrame:\n citation_count = PubMed.times_cited(list(df.index))\n timescited_df = pd.DataFrame(citation_count, index=df.index, columns=[\"Times cited\"])\n df = df.merge(timescited_df, left_index=True, right_index=True)\n df = df.sort_values(by=[\"Times cited\"], ascending=False)\n return df\n\n\ndef uniformST(st: str) -> str:\n \"\"\"Transform sequence type string into ST###\"\"\"\n return re.sub(\"ST |[Ss]equence [Tt]ype \", \"ST\", st)\n\n\ndef find_terms(text: str) -> list:\n result = re.findall(r\"ST\\d+|ST \\d+|[Ss]equence [Tt]ype \\d+\", text)\n return result\n\n\ndef countST(df: pd.DataFrame) -> Counter:\n \"\"\"Find occurences of sequence types as a Counter dict.\"\"\"\n STcount = Counter()\n for STs in df[\"ST\"]:\n for st in STs.split(\", \"):\n STcount[st] += 1\n return STcount\n\n\ndef frequent_journals(df, top=15):\n print(df[\"Journal/Book\"].value_counts().head(top))\n\n\ndef frequent_pub_years(df, top=15):\n print(df[\"Publication Year\"].value_counts().head(top))\n\n\ndef frequent_ST(df, top=15):\n result = countST(df)\n return result.most_common(top)\n \n\ndef plotSTasPie(df: pd.DataFrame):\n result = countST(df)\n STdf = pd.DataFrame.from_dict(result, orient=\"index\")\n STdf.loc[\"Other\"] = [len(STdf[STdf == 1].dropna())]\n STdf.loc[STdf[0] != 1].plot.pie(y=0)\n\n\ndef search(df, search: str, top=10) -> pd.DataFrame:\n \"\"\"Search for a given sequence type (as 'ST###')\"\"\"\n b = df[\"ST\"].str.findall(f\"{search},|{search}$\")\n b = b[b.astype(str) != '[]']\n result = df.loc[b.index]\n if isinstance(top, int):\n return result.head(top)\n else:\n return result\n\n\ndef ask_email():\n try:\n Entrez.email = argv[3]\n except IndexError:\n Entrez.email = input(\"Please provide email address for PubMed access: \")\n\n\ndef main(csv_file):\n lit = pd.read_csv(csv_file, index_col=0) # make DataFrame\n lit = lit.drop([\"PMCID\", \"NIHMS ID\", \"First Author\"], axis=1)\n lit[\"ST\"] = lit[\"Title\"].apply(find_terms) # Identify sequence type from title\n lit[\"ST\"] = lit[\"ST\"].apply(lambda x : uniformST(\", \".join(x))) # Shorten \"sequence type\" to ST\n lit[\"ST\"] = lit[\"ST\"].replace(\"\", np.NaN) # Identify empty values in ST col\n lit = lit[lit[\"ST\"].notna()] # Filter out rows without mention of ST\n lit = PubMed.add_times_cited(lit) # Fetch times each article has been cited\n print(\"Literature 
search has finished, you can now search for sequence types.\")\n return lit\n\n\nif __name__ == \"__main__\":\n\n if argv[2].endswith(\".csv\"):\n fp = argv[2]\n skip_arg_two = False\n else:\n fp = \"escherichi-set.csv\"\n skip_arg_two = True\n\n\n if argv[1] == \"new\":\n ask_email()\n df = main(fp)\n df.to_csv(fp)\n\n elif argv[1] == \"find\":\n df = pd.read_csv(fp, index_col=0)\n if not skip_arg_two:\n term = argv[3]\n else:\n term = argv[2]\n print(search(df, term))\n\n\n elif argv[1] == \"summary\":\n ask_email()\n if not skip_arg_two:\n pmid = argv[3]\n else:\n pmid = argv[2]\n PubMed.long_summary(pmid)\n\n \n\n \n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
weihezhai/HatefulMemesChallenge | [
"04f52643c0864d1efb6c0a9c674db42764f6834c",
"04f52643c0864d1efb6c0a9c674db42764f6834c",
"04f52643c0864d1efb6c0a9c674db42764f6834c"
] | [
"ERNIE-Vil/reader/vcr_finetuning.py",
"data_utils/ocr.py",
"VL-BERT/common/visual_linguistic_bert.py"
] | [
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\" VCR Data Reader implementation \"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport base64\nimport numpy as np\nimport re\nimport random\nimport json\nimport json_lines\nimport csv\nimport sys\nimport itertools\n\nfrom reader._image_features_reader import ImageFeaturesH5Reader\nfrom preprocess import preprocessor\nfrom batching.finetune_batching import prepare_batch_data\n\nimport paddle.fluid as fluid\n\ndef _converId(img_id):\n \"\"\" \n conversion for image ID \n \"\"\"\n img_id = img_id.split('-')\n if 'train' in img_id[0]:\n new_id = int(img_id[1])\n elif 'val' in img_id[0]:\n new_id = int(img_id[1]) + 1000000\n elif 'test' in img_id[0]:\n new_id = int(img_id[1]) + 2000000\n else:\n print(\"no split known\")\n return new_id\n\n\ndef _load_annotationsQ_A(annotations_jsonpath, split):\n \"\"\"\n Build an index out of FOIL annotations, mapping each image ID with its corresponding captions.\n \"\"\"\n entries = []\n with open(annotations_jsonpath) as f:\n for annotation in json_lines.reader(f):\n det_names = \"\"\n question = annotation[\"question\"]\n if split == 'test':\n ans_label = 0\n else:\n ans_label = annotation[\"answer_label\"]\n img_id = _converId(annotation[\"img_id\"])\n anno_id = int(annotation[\"annot_id\"].split('-')[1])\n entries.append(\n {\"question\": question,\n \"answers\": annotation[\"answer_choices\"],\n \"metadata_fn\": annotation[\"metadata_fn\"],\n \"target\": ans_label,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n \"det_names\": annotation['objects']\n })\n return entries\n\n\ndef _load_annotationsQA_R(annotations_jsonpath, split):\n \"\"\"\n Build an index out of FOIL annotations, mapping each image ID with its corresponding captions.\n \"\"\"\n entries = []\n with open(annotations_jsonpath, 'rb') as f: \n for annotation in json_lines.reader(f):\n if split == 'test':\n for answer in annotation[\"answer_choices\"]:\n question = annotation[\"question\"] + [\"[MARK]\"] + answer\n img_id = _converId(annotation[\"img_id\"])\n ans_label = 0\n anno_id = int(annotation[\"annot_id\"].split('-')[1])\n entries.append(\n {\"question\": question,\n \"answers\": annotation[\"rationale_choices\"],\n \"metadata_fn\": annotation[\"metadata_fn\"],\n \"target\": ans_label,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n \"det_names\": annotation['objects']\n })\n else:\n det_names = \"\"\n question = annotation[\"question\"] + [\"[MARK]\"] + \\\n annotation[\"answer_choices\"][annotation['answer_label']]\n ans_label = annotation[\"rationale_label\"]\n img_id = _converId(annotation[\"img_id\"])\n anno_id = int(annotation[\"annot_id\"].split('-')[1])\n entries.append(\n {\"question\": question,\n \"answers\": annotation[\"rationale_choices\"],\n \"metadata_fn\": annotation[\"metadata_fn\"],\n \"target\": ans_label,\n \"img_id\": img_id,\n \"anno_id\": anno_id, \n \"det_names\": annotation['objects']})\n return 
entries\n\n\nclass VCRDataReader(object):\n \"\"\" \n Data reader for sub VCR task\n \"\"\"\n def __init__(self,\n task_conf,\n split,\n vocab_path=None,\n batch_size=4096,\n shuffle=True,\n epoch=100,\n is_test=False,\n feature_reader_dict={},\n random_seed=None,\n task_index=0,\n task_num=1):\n\n self.task_conf = task_conf\n self.processor = getattr(preprocessor,\n task_conf[\"Proprocessor\"])(\n tokenizer_name=self.task_conf[\"tokenizer_name\"],\n vocab_path=vocab_path)\n self.vocab = self.processor.vocab\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.epoch = epoch\n self.current_epoch = 0\n self.current_file_index = 0\n self.total_file = 0\n self.current_file = None\n self.random_seed = random_seed\n self.max_seq_len = self.task_conf['max_seq_len']\n self.pad_id = self.vocab[\"[PAD]\"]\n self.cls_id = self.vocab[\"[CLS]\"]\n self.sep_id = self.vocab[\"[SEP]\"]\n self.mask_id = self.vocab[\"[MASK]\"]\n self.is_test = is_test\n self.task_index = task_index\n self.task_num = task_num\n\n if self.is_test:\n self.epoch = 1\n self.shuffle_files = False\n if self.shuffle:\n shufflekeep_across_task = self.task_conf.get('shufflekeep_across_task', True)\n if shufflekeep_across_task:\n self.global_rng = np.random.RandomState(random_seed)\n else:\n self.global_rng = np.random.RandomState()\n self.shuffle_every_epoch = self.task_conf.get('shuffle_every_epoch', False)\n task=self.task_conf['task']\n annotations_jsonpath=self.task_conf['annotations_jsonpath_' + split]\n self.num_choice = int(self.task_conf['num_choice'])\n if task == 'VCR_Q-A':\n self._entries = _load_annotationsQ_A(annotations_jsonpath, split)\n elif task == \"VCR_QA-R\":\n self._entries = _load_annotationsQA_R(annotations_jsonpath, split)\n else:\n assert False\n\n self._entries = self._entries[:1024]\n \n self._split = split\n self._names = []\n with open(self.task_conf['unisex_names_table']) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n if row[1] != 'name':\n self._names.append(row[1])\n self._feature_reader = feature_reader_dict[self.task_conf['feature_lmdb_path']]\n self.use_gt_fea = task_conf.get('use_gt_fea', False)\n if self.use_gt_fea:\n self._gt_feature_reader = feature_reader_dict[self.task_conf['gt_feature_lmdb_path']]\n self._max_region_num = self.task_conf.get('max_region_num', 100)\n print(\"use gt featurre\")\n else:\n self._max_region_num = self.task_conf.get('max_region_num', 37)\n print(\"only butd feature\")\n self.tokenize()\n\n def generate_random_name(self, det_names):\n \"\"\" \n Replace \"person\" with a random name\n \"\"\"\n random_name = []\n for name in det_names:\n if name == 'person':\n word = random.choice(self._names)\n else:\n word = name\n random_name.append(word)\n\n return random_name\n\n def replace_det_with_name(self, inputs, random_names):\n \"\"\"\n Replace det with name\n \"\"\"\n tokens = []\n mask = []\n for w in inputs:\n if isinstance(w, list):\n for idx in w:\n word = random_names[idx]\n tokens.append(word)\n else:\n word = w\n # word = w.encode('utf-8')\n tokens.append(word)\n\n return tokens, mask\n\n def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):\n \"\"\"\n Truncates a sequence pair in place to the maximum length.\n \"\"\"\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n def get_progress(self):\n \"\"\"\n Return current progress of traning data\n \"\"\"\n progress_dict = 
{\"current_epoch\": self.current_epoch,\n \"current_file_index\": self.current_file_index,\n \"total_file\": self.total_file,\n \"current_file\": self.current_file\n }\n return progress_dict\n\n def tokenize(self):\n \"\"\"\n Tokenizes the captions.\n \"\"\"\n # This will add caption_tokens in each entry of the dataset.\n # -1 represents nil, and should be treated as padding_idx in embedding.\n count = 0\n for entry in self._entries:\n det_names = entry[\"det_names\"]\n random_names = self.generate_random_name(det_names)\n # replace with name\n tokens_a, mask_a = self.replace_det_with_name(entry[\"question\"], random_names)\n q_str = \" \".join(tokens_a)\n ids_a = []\n for i, q in enumerate(q_str.split(\" [MARK] \")):\n if i == 1:\n ids_a.append(self.vocab[\"[SEP]\"])\n ids_a = ids_a + self.processor.convert_sentence_to_ids_without_cls(q)\n\n input_ids_all = []\n segment_ids_all = []\n input_poss_all = []\n input_len_all = []\n\n for answer in entry[\"answers\"]:\n tokens_b, mask_b = self.replace_det_with_name(answer, random_names)\n ids_b = self.processor.convert_sentence_to_ids_without_cls(\" \".join(tokens_b))\n\n self._truncate_seq_pair(ids_a, ids_b, self.max_seq_len - 3)\n\n input_ids = []\n segment_ids = []\n input_ids.append(self.vocab[\"[CLS]\"])\n segment_ids.append(0)\n\n for id in ids_a:\n input_ids.append(id)\n segment_ids.append(0)\n\n input_ids.append(self.vocab[\"[SEP]\"])\n segment_ids.append(0)\n\n assert len(ids_b) > 0\n for id in ids_b:\n input_ids.append(id)\n segment_ids.append(1)\n input_ids.append(self.vocab[\"[SEP]\"])\n segment_ids.append(1)\n\n input_ids_all.append(input_ids)\n segment_ids_all.append(segment_ids)\n input_poss = [str(pos) for pos in range(len(input_ids))]\n input_poss_all.append(input_poss)\n input_len_all.append(len(input_ids))\n\n entry[\"input_ids\"] = input_ids_all\n entry[\"input_poss\"] = input_poss_all\n entry[\"segment_ids\"] = segment_ids_all\n entry[\"input_lens\"] = input_len_all\n\n sys.stdout.write('%d/%d\\r' % (count, len(self._entries)))\n sys.stdout.flush()\n count += 1\n\n def parse_line(self, s_index):\n \"\"\"\n Form slot info with the line information\n \"\"\"\n entry = self._entries[s_index]\n image_id = entry[\"img_id\"]\n image_fea_json = self._feature_reader[image_id]\n features = image_fea_json[\"features\"]\n num_boxes = image_fea_json[\"num_boxes\"]\n boxes = image_fea_json[\"image_location\"]\n if not self.use_gt_fea:\n num_boxes = min(num_boxes, self._max_region_num)\n boxes = boxes[:num_boxes]\n features = features[:num_boxes]\n else:\n boxes = boxes[:num_boxes]\n features = features[:num_boxes]\n image_fea_json = self._gt_feature_reader[image_id]\n gt_features = image_fea_json[\"features\"]\n gt_num_boxes = image_fea_json[\"num_boxes\"]\n gt_boxes = image_fea_json[\"image_location\"]\n features[0] = (features[0] * num_boxes + gt_features[0] * gt_num_boxes) / (num_boxes + gt_num_boxes)\n\n gt_boxes = gt_boxes[1: gt_num_boxes]\n gt_features = gt_features[1: gt_num_boxes]\n gt_num_boxes = gt_num_boxes - 1\n\n gt_box_preserve = min(self._max_region_num - 1, gt_num_boxes)\n gt_boxes = gt_boxes[:gt_box_preserve]\n gt_features = gt_features[:gt_box_preserve]\n gt_num_boxes = gt_box_preserve\n\n num_box_preserve = min(self._max_region_num - int(gt_num_boxes), int(num_boxes))\n boxes = boxes[:num_box_preserve]\n features = features[:num_box_preserve]\n\n # concatenate the boxes\n mix_boxes = np.concatenate((boxes, gt_boxes), axis=0)\n mix_features = np.concatenate((features, gt_features), axis=0)\n mix_num_boxes = 
num_box_preserve + int(gt_num_boxes)\n\n num_boxes = min(mix_num_boxes, self._max_region_num)\n boxes = mix_boxes[:num_boxes]\n features = mix_features[:num_boxes]\n record = {\n \"input_ids\": entry[\"input_ids\"],\n \"input_pos\": entry[\"input_poss\"],\n \"segment_ids\": entry[\"segment_ids\"],\n \"input_lens\": entry[\"input_lens\"],\n \"target\": int(entry[\"target\"]),\n \"features\": features,\n \"boxes\": boxes,\n \"anno_id\": entry[\"anno_id\"]\n }\n return record\n\n def data_generator(self):\n \"\"\" \n Data_generator \n \"\"\"\n sample_indice = list(range(len(self._entries)))\n def wrapper():\n \"\"\"\n Wrapper\n \"\"\"\n for epoch_index in range(self.epoch):\n if self._split == \"train\":\n self.current_example = 0\n self.current_epoch = epoch_index\n \n if self.shuffle:\n if epoch_index == 0:\n self.global_rng.shuffle(sample_indice)\n print(\"shuffle epoch %d\" % epoch_index)\n elif self.shuffle_every_epoch:\n self.global_rng.shuffle(sample_indice)\n print(\"shuffle epoch %d\" % epoch_index)\n \n batch_records = []\n for index in sample_indice:\n batch_records.append(self.parse_line(index))\n if len(batch_records) == self.batch_size:\n yield prepare_batch_data(\n batch_records, self.num_choice, self.pad_id, \\\n self.task_index, self.task_num), self.task_conf['task']\n batch_records = []\n \n if len(batch_records) > 0:\n yield prepare_batch_data(\n batch_records, self.num_choice, self.pad_id, \\\n self.task_index, self.task_num), self.task_conf['task']\n return wrapper\n\n\nclass VCRDataJointReader(object):\n \"\"\" \n Joint data reader for Q2A task and QA2R task\n \"\"\"\n def __init__(self,\n task_conf_group,\n split,\n batch_size=4096,\n shuffle=True,\n epoch=100,\n vocab_path=None,\n is_test=False):\n\n self.task_readers = []\n feature_reader_dict = {}\n self.task_dup_cnt = []\n for task_conf in task_conf_group:\n \n if 'feature_lmdb_path' in task_conf:\n if task_conf['feature_lmdb_path'] not in feature_reader_dict:\n feature_reader_dict[task_conf['feature_lmdb_path']] = \\\n ImageFeaturesH5Reader(task_conf['feature_lmdb_path'])\n \n if 'gt_feature_lmdb_path' in task_conf and task_conf.get('use_gt_fea', False):\n if task_conf['gt_feature_lmdb_path'] not in feature_reader_dict:\n feature_reader_dict[task_conf['gt_feature_lmdb_path']] = \\\n ImageFeaturesH5Reader(task_conf['gt_feature_lmdb_path'])\n \n task_batch_size = task_conf.get('batch_size', 64)\n self.task_dup_cnt.append(max(int(task_batch_size / batch_size), 1))\n random_seed=np.random.randint(1000)\n \n for task_index, task_conf in enumerate(task_conf_group):\n self.task_readers.append(\n VCRDataReader(\n task_conf, split, vocab_path, batch_size, shuffle,\n epoch, is_test, feature_reader_dict, random_seed,\n task_index, len(task_conf_group)\n )\n )\n self.task_generators = [reader.data_generator() for reader in self.task_readers]\n\n def get_progress(self):\n \"\"\"\n Return current progress of traning data\n \"\"\"\n current_epoch = max([reader.current_epoch for reader in self.task_readers])\n current_file_index = max([reader.current_file_index for reader in self.task_readers])\n total_file = max([reader.total_file for reader in self.task_readers])\n current_file = \"\"\n self.progress_dict = {\n \"current_epoch\": current_epoch,\n \"current_file_index\": current_file_index,\n \"total_file\": total_file,\n \"current_file\": current_file\n }\n return self.progress_dict\n\n def data_generator(self):\n \"\"\" \n Data_generator \n \"\"\"\n def wrapper():\n \"\"\"\n warpper\n \"\"\"\n task_buffer = [[] for i in 
range(len(self.task_dup_cnt))]\n for data in zip(*[generator() for generator in self.task_generators]):\n for i, d in enumerate(data):\n task_buffer[i].append(d)\n if len(task_buffer[i]) >= self.task_dup_cnt[i]:\n for t in task_buffer[i]:\n yield t[0]\n task_buffer[i] = []\n\n return wrapper\n\n\nif __name__ == \"__main__\":\n pass\n",
"import os\nimport glob\nimport json\nimport shutil\nfrom multiprocessing import Pool\n\nimport fire\nimport easyocr\nimport numpy as np\nimport torch\n\nfrom PIL import Image\nfrom skimage import transform\nfrom skimage.feature import canny\nfrom skimage.color import rgb2gray, gray2rgb\n\n\ndef multi_boxes_mask(image, boxes, pad_crop=5):\n \"\"\"\n image: np.uint8 (h, w, c)\n boxes: np.int32 (n, 4) ymin, xmin, ymax, xmax\n \"\"\"\n image = image.copy()\n mask = np.zeros_like(image)\n ih, iw, ic = image.shape\n resize = lambda a, b: transform.resize(a, b, preserve_range=True).astype(np.uint8)\n import matplotlib.pyplot as plt\n \n for box in boxes:\n # image[box[0]: box[2], box[1]: box[3], :] = 0\n box[:2] = np.maximum(box[:2] - pad_crop, 0)\n box[2:] = np.minimum(box[2:] + pad_crop, image.shape[:2])\n \n patch = image[box[0]: box[2], box[1]: box[3], :]\n pure_white = (patch > 253).all(axis=-1).astype(np.uint8)\n mask[box[0]: box[2], box[1]: box[3], :] = pure_white[..., None]\n \n # plt.subplot(2, 1, 1)\n # plt.imshow(patch)\n # plt.subplot(2, 1, 2)\n # plt.imshow(pure_white)\n # plt.colorbar()\n # plt.show()\n \n print('pure_white ', pure_white.sum())\n \n shift = 3\n shifts = [\n (0, 0), (shift, 0), (-shift, 0), (0, shift), (0, -shift),\n (shift, shift), (-shift, shift), (shift, -shift), (-shift, -shift)\n ]\n # shifts = []\n for offset in shifts:\n ox, oy = offset\n _mask = mask.copy()\n\n slice_y = slice(max(0, 0 + oy), min(ih, ih + oy))\n slice_x = slice(max(0, 0 + ox), min(iw, iw + ox))\n print(slice_y, slice_x)\n _mask = _mask[\n max(0, 0 + oy): min(ih, ih + oy),\n max(0, 0 + ox): min(iw, iw + ox),\n :\n ]\n crop_pad = [\n (max(0, -oy), max(0, oy)),\n (max(0, -ox), max(0, ox)),\n (0, 0)\n ]\n _mask = np.pad(_mask, crop_pad)\n print(\n crop_pad,\n np.abs(_mask - mask).sum(),\n np.abs(mask - np.clip(_mask + mask, 0, 1)).sum()\n )\n mask = np.clip(_mask + mask, 0, 1)\n\n image = image * (1 - mask) + mask * 255 * 0\n mask *= 255\n return image, mask\n\ndef cast_pred_type(pred):\n result = []\n for tup in pred:\n coord, txt, score = tup\n coord = np.array(coord).tolist()\n score = float(score)\n result.append((coord, txt, score))\n return result\n\n\ndef detect(root_dir):\n reader = easyocr.Reader(['en'])\n image_dir = os.path.join(root_dir, 'img')\n images = glob.glob(os.path.join(image_dir, '*.png'))\n images += glob.glob(os.path.join(image_dir, '**', '*.png'))\n # images = images[:3]\n assert len(images) > 9000\n\n out_json = os.path.join(root_dir, 'ocr.json')\n out_anno = {}\n print(f\"Find {len(images)} images!\")\n\n for i, image_path in enumerate(images):\n print(F\"{i}/{len(images)}\")\n img_name = os.path.basename(image_path)\n pred = reader.readtext(image_path)\n out_anno[img_name] = cast_pred_type(pred)\n\n with open(out_json, 'w') as f:\n json.dump(out_anno, f)\n\n\ndef point_to_box(anno_json):\n with open(anno_json, 'r') as f:\n ocr_anno = json.load(f)\n \n boxed_anno = {}\n for k, v in ocr_anno.items():\n img_ocr_infos = []\n for txt_info in v:\n coord, txt, score = txt_info\n xmin = min([p[0] for p in coord])\n xmax = max([p[0] for p in coord])\n ymin = min([p[1] for p in coord])\n ymax = max([p[1] for p in coord])\n box = [xmin, ymin, xmax, ymax]\n img_ocr_infos.append([box, txt, score])\n boxed_anno[k] = img_ocr_infos\n \n out_path = anno_json.replace('.json', '.box.json')\n with open(out_path, 'w') as f:\n json.dump(boxed_anno, f)\n\n\ndef _mask_white_txt(args):\n img_name, img_boxes, img_dir, out_dir = args\n img_path = os.path.join(img_dir, img_name)\n out_path = 
os.path.join(out_dir, img_name)\n \n if os.path.exists(out_path):\n return\n # if img_name != '01487.png':\n # continue\n \n print(out_path)\n img_boxes = [box_info[0] for box_info in img_boxes]\n if len(img_boxes) > 0:\n boxes = np.asarray(img_boxes, dtype=np.int32)\n # print(boxes)\n boxes = np.concatenate([boxes[:, ::-1][:, 2:], boxes[:,::-1][:, :2]], axis=1)\n # print(boxes)\n # x,y,x,y -> y,x,y,x\n img = np.array(Image.open(img_path).convert('RGB'))\n # res = inpaint_model.inpaint_multi_boxes(img, boxes)\n masked_img, mask = multi_boxes_mask(img, boxes)\n\n Image.fromarray(masked_img).save(out_path)\n out_path = os.path.join(out_dir, img_name.replace('.png', '.mask.png'))\n Image.fromarray(mask).save(out_path)\n else:\n img = np.asarray(Image.open(img_path).convert('RGB'))\n shutil.copy(img_path, out_path)\n\n mask = np.zeros_like(img)\n out_path = os.path.join(out_dir, img_name.replace('.png', '.mask.png'))\n Image.fromarray(mask).save(out_path)\n\ndef generate_mask(ocr_box_anno, img_dir, out_dir):\n os.makedirs(out_dir, exist_ok=True)\n with open(ocr_box_anno, 'r') as f:\n boxes_anno = json.load(f)\n\n # for i, (img_name, img_boxes) in enumerate(boxes_anno.items()):\n # pass\n \n with Pool(16) as pool:\n args = [\n (img_name, img_boxes, img_dir, out_dir)\n for img_name, img_boxes in boxes_anno.items()\n ]\n pool.map(_mask_white_txt, args)\n\n\nif __name__ == \"__main__\":\n \"\"\"\n detect -[ocr.json]-> point_to_box -[ocr.box.json]-> generate_mask\n \"\"\"\n # detect()\n # point_to_box('/home/ron/Downloads/hateful_meme_data/ocr.json')\n # print('hi')\n # generate_mask(\n # '/home/ron/Downloads/hateful_meme_data/ocr.box.json',\n # '/home/ron/Downloads/hateful_meme_data_phase2/img',\n # '/home/ron/Downloads/hateful_meme_data_phase2/img_mask_3px'\n # )\n\n fire.Fire({\n \"detect\": detect,\n \"point_to_box\": point_to_box,\n \"generate_mask\": generate_mask,\n })",
"import torch\nimport torch.nn as nn\nfrom external.pytorch_pretrained_bert.modeling import BertLayerNorm, BertEncoder, BertPooler, ACT2FN, BertOnlyMLMHead\nfrom typing import List, Tuple\n\n# todo: add this to config\nNUM_SPECIAL_WORDS = 1000\n\n\nclass BaseModel(nn.Module):\n def __init__(self, config, **kwargs):\n self.config = config\n super(BaseModel, self).__init__()\n\n def init_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n def forward(self, *args, **kwargs):\n raise NotImplemented\n \n @property\n def device(self):\n try:\n return next(self.parameters()).device\n except StopIteration:\n\n def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, torch.Tensor]]:\n tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]\n return tuples\n\n gen = self._named_members(get_members_fn=find_tensor_attributes)\n first_tuple = next(gen)\n return first_tuple[1].device\n\n @property\n def dtype(self):\n try:\n return next(self.parameters()).dtype\n except StopIteration:\n\n def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, torch.Tensor]]:\n tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]\n return tuples\n\n gen = self._named_members(get_members_fn=find_tensor_attributes)\n first_tuple = next(gen)\n return first_tuple[1].dtype\n\n\nclass VisualLinguisticBert(BaseModel):\n def __init__(self, config, language_pretrained_model_path=None):\n super(VisualLinguisticBert, self).__init__(config)\n\n self.config = config\n\n # embeddings\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)\n self.end_embedding = nn.Embedding(1, config.hidden_size)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n self.embedding_LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n self.embedding_dropout = nn.Dropout(config.hidden_dropout_prob)\n\n # for compatibility of roberta\n self.position_padding_idx = config.position_padding_idx\n\n # visual transform\n self.visual_1x1_text = None\n self.visual_1x1_object = None\n if config.visual_size != config.hidden_size:\n self.visual_1x1_text = nn.Linear(config.visual_size, config.hidden_size)\n self.visual_1x1_object = nn.Linear(config.visual_size, config.hidden_size)\n if config.visual_ln:\n self.visual_ln_text = BertLayerNorm(config.hidden_size, eps=1e-12)\n self.visual_ln_object = BertLayerNorm(config.hidden_size, eps=1e-12)\n else:\n visual_scale_text = nn.Parameter(torch.as_tensor(self.config.visual_scale_text_init, dtype=torch.float),\n requires_grad=True)\n self.register_parameter('visual_scale_text', visual_scale_text)\n visual_scale_object = nn.Parameter(torch.as_tensor(self.config.visual_scale_object_init, dtype=torch.float),\n requires_grad=True)\n self.register_parameter('visual_scale_object', visual_scale_object)\n\n self.encoder = BertEncoder(config)\n\n if self.config.with_pooler:\n self.pooler = BertPooler(config)\n\n # init weights\n 
self.apply(self.init_weights)\n if config.visual_ln:\n self.visual_ln_text.weight.data.fill_(self.config.visual_scale_text_init)\n self.visual_ln_object.weight.data.fill_(self.config.visual_scale_object_init)\n\n # load language pretrained model\n if language_pretrained_model_path is not None:\n self.load_language_pretrained_model(language_pretrained_model_path)\n\n if config.word_embedding_frozen:\n for p in self.word_embeddings.parameters():\n p.requires_grad = False\n self.special_word_embeddings = nn.Embedding(NUM_SPECIAL_WORDS, config.hidden_size)\n self.special_word_embeddings.weight.data.copy_(self.word_embeddings.weight.data[:NUM_SPECIAL_WORDS])\n\n def word_embeddings_wrapper(self, input_ids):\n if self.config.word_embedding_frozen:\n word_embeddings = self.word_embeddings(input_ids)\n word_embeddings[input_ids < NUM_SPECIAL_WORDS] \\\n = self.special_word_embeddings(input_ids[input_ids < NUM_SPECIAL_WORDS])\n return word_embeddings\n else:\n return self.word_embeddings(input_ids)\n\n def forward(self,\n text_input_ids,\n text_token_type_ids,\n text_visual_embeddings,\n text_mask,\n object_vl_embeddings,\n object_mask,\n output_all_encoded_layers=True,\n output_text_and_object_separately=False,\n output_attention_probs=False):\n\n # get seamless concatenate embeddings and mask\n embedding_output, attention_mask, text_mask_new, object_mask_new = self.embedding(text_input_ids,\n text_token_type_ids,\n text_visual_embeddings,\n text_mask,\n object_vl_embeddings,\n object_mask)\n\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n # extended_attention_mask = 1.0 - extended_attention_mask\n # extended_attention_mask[extended_attention_mask != 0] = float('-inf')\n\n if output_attention_probs:\n encoded_layers, attention_probs = self.encoder(embedding_output,\n extended_attention_mask,\n output_all_encoded_layers=output_all_encoded_layers,\n output_attention_probs=output_attention_probs)\n else:\n encoded_layers = self.encoder(embedding_output,\n extended_attention_mask,\n output_all_encoded_layers=output_all_encoded_layers,\n output_attention_probs=output_attention_probs)\n sequence_output = encoded_layers[-1]\n pooled_output = self.pooler(sequence_output) if self.config.with_pooler else None\n if not output_all_encoded_layers:\n encoded_layers = encoded_layers[-1]\n\n if output_text_and_object_separately:\n if not output_all_encoded_layers:\n encoded_layers = [encoded_layers]\n encoded_layers_text = []\n encoded_layers_object = []\n for encoded_layer in encoded_layers:\n max_text_len = text_input_ids.shape[1]\n max_object_len = object_vl_embeddings.shape[1]\n encoded_layer_text = encoded_layer[:, :max_text_len]\n 
encoded_layer_object = encoded_layer.new_zeros(\n (encoded_layer.shape[0], max_object_len, encoded_layer.shape[2]))\n encoded_layer_object[object_mask] = encoded_layer[object_mask_new]\n encoded_layers_text.append(encoded_layer_text)\n encoded_layers_object.append(encoded_layer_object)\n if not output_all_encoded_layers:\n encoded_layers_text = encoded_layers_text[0]\n encoded_layers_object = encoded_layers_object[0]\n if output_attention_probs:\n return encoded_layers_text, encoded_layers_object, pooled_output, attention_probs\n else:\n return encoded_layers_text, encoded_layers_object, pooled_output\n else:\n if output_attention_probs:\n return encoded_layers, pooled_output, attention_probs\n else:\n return encoded_layers, pooled_output\n\n def embedding(self,\n text_input_ids,\n text_token_type_ids,\n text_visual_embeddings,\n text_mask,\n object_vl_embeddings,\n object_mask):\n\n text_linguistic_embedding = self.word_embeddings_wrapper(text_input_ids)\n if self.visual_1x1_text is not None:\n text_visual_embeddings = self.visual_1x1_text(text_visual_embeddings)\n if self.config.visual_ln:\n text_visual_embeddings = self.visual_ln_text(text_visual_embeddings)\n else:\n text_visual_embeddings *= self.visual_scale_text\n text_vl_embeddings = text_linguistic_embedding + text_visual_embeddings\n\n object_visual_embeddings = object_vl_embeddings[:, :, :self.config.visual_size]\n if self.visual_1x1_object is not None:\n object_visual_embeddings = self.visual_1x1_object(object_visual_embeddings)\n if self.config.visual_ln:\n object_visual_embeddings = self.visual_ln_object(object_visual_embeddings)\n else:\n object_visual_embeddings *= self.visual_scale_object\n object_linguistic_embeddings = object_vl_embeddings[:, :, self.config.visual_size:]\n object_vl_embeddings = object_linguistic_embeddings + object_visual_embeddings\n\n bs = text_vl_embeddings.size(0)\n vl_embed_size = text_vl_embeddings.size(-1)\n max_length = (text_mask.sum(1) + object_mask.sum(1)).max() + 1\n grid_ind, grid_pos = torch.meshgrid(torch.arange(bs, dtype=torch.long, device=text_vl_embeddings.device),\n torch.arange(max_length, dtype=torch.long, device=text_vl_embeddings.device))\n text_end = text_mask.sum(1, keepdim=True)\n object_end = text_end + object_mask.sum(1, keepdim=True)\n\n # seamlessly concatenate visual linguistic embeddings of text and object\n _zero_id = torch.zeros((bs, ), dtype=torch.long, device=text_vl_embeddings.device)\n vl_embeddings = text_vl_embeddings.new_zeros((bs, max_length, vl_embed_size))\n vl_embeddings[grid_pos < text_end] = text_vl_embeddings[text_mask]\n vl_embeddings[(grid_pos >= text_end) & (grid_pos < object_end)] = object_vl_embeddings[object_mask]\n vl_embeddings[grid_pos == object_end] = self.end_embedding(_zero_id)\n\n # token type embeddings/ segment embeddings\n token_type_ids = text_token_type_ids.new_zeros((bs, max_length))\n token_type_ids[grid_pos < text_end] = text_token_type_ids[text_mask]\n token_type_ids[(grid_pos >= text_end) & (grid_pos <= object_end)] = 2\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n # position embeddings\n position_ids = grid_pos + self.position_padding_idx + 1\n if self.config.obj_pos_id_relative:\n position_ids[(grid_pos >= text_end) & (grid_pos < object_end)] \\\n = text_end.expand((bs, max_length))[(grid_pos >= text_end) & (grid_pos < object_end)] \\\n + self.position_padding_idx + 1\n position_ids[grid_pos == object_end] = (text_end + 1).squeeze(1) + self.position_padding_idx + 1\n else:\n assert False, \"Don't use 
position id 510/511 for objects and [END]!!!\"\n position_ids[(grid_pos >= text_end) & (grid_pos < object_end)] = self.config.max_position_embeddings - 2\n position_ids[grid_pos == object_end] = self.config.max_position_embeddings - 1\n\n position_embeddings = self.position_embeddings(position_ids)\n mask = text_mask.new_zeros((bs, max_length))\n mask[grid_pos <= object_end] = 1\n\n embeddings = vl_embeddings + position_embeddings + token_type_embeddings\n embeddings = self.embedding_LayerNorm(embeddings)\n embeddings = self.embedding_dropout(embeddings)\n\n return embeddings, mask, grid_pos < text_end, (grid_pos >= text_end) & (grid_pos < object_end)\n\n def load_language_pretrained_model(self, language_pretrained_model_path):\n pretrained_state_dict = torch.load(language_pretrained_model_path, map_location=lambda storage, loc: storage)\n encoder_pretrained_state_dict = {}\n pooler_pretrained_state_dict = {}\n embedding_ln_pretrained_state_dict = {}\n unexpected_keys = []\n for k, v in pretrained_state_dict.items():\n if k.startswith('bert.'):\n k = k[len('bert.'):]\n elif k.startswith('roberta.'):\n k = k[len('roberta.'):]\n else:\n unexpected_keys.append(k)\n continue\n if 'gamma' in k:\n k = k.replace('gamma', 'weight')\n if 'beta' in k:\n k = k.replace('beta', 'bias')\n if k.startswith('encoder.'):\n k_ = k[len('encoder.'):]\n if k_ in self.encoder.state_dict():\n encoder_pretrained_state_dict[k_] = v\n else:\n unexpected_keys.append(k)\n elif k.startswith('embeddings.'):\n k_ = k[len('embeddings.'):]\n if k_ == 'word_embeddings.weight':\n self.word_embeddings.weight.data = v.to(dtype=self.word_embeddings.weight.data.dtype,\n device=self.word_embeddings.weight.data.device)\n elif k_ == 'position_embeddings.weight':\n self.position_embeddings.weight.data = v.to(dtype=self.position_embeddings.weight.data.dtype,\n device=self.position_embeddings.weight.data.device)\n elif k_ == 'token_type_embeddings.weight':\n self.token_type_embeddings.weight.data[:v.size(0)] = v.to(\n dtype=self.token_type_embeddings.weight.data.dtype,\n device=self.token_type_embeddings.weight.data.device)\n if v.size(0) == 1:\n # Todo: roberta token type embedding\n self.token_type_embeddings.weight.data[1] = v[0].clone().to(\n dtype=self.token_type_embeddings.weight.data.dtype,\n device=self.token_type_embeddings.weight.data.device)\n self.token_type_embeddings.weight.data[2] = v[0].clone().to(\n dtype=self.token_type_embeddings.weight.data.dtype,\n device=self.token_type_embeddings.weight.data.device)\n\n elif k_.startswith('LayerNorm.'):\n k__ = k_[len('LayerNorm.'):]\n if k__ in self.embedding_LayerNorm.state_dict():\n embedding_ln_pretrained_state_dict[k__] = v\n else:\n unexpected_keys.append(k)\n else:\n unexpected_keys.append(k)\n elif self.config.with_pooler and k.startswith('pooler.'):\n k_ = k[len('pooler.'):]\n if k_ in self.pooler.state_dict():\n pooler_pretrained_state_dict[k_] = v\n else:\n unexpected_keys.append(k)\n else:\n unexpected_keys.append(k)\n if len(unexpected_keys) > 0:\n print(\"Warnings: Unexpected keys: {}.\".format(unexpected_keys))\n self.embedding_LayerNorm.load_state_dict(embedding_ln_pretrained_state_dict)\n self.encoder.load_state_dict(encoder_pretrained_state_dict)\n if self.config.with_pooler and len(pooler_pretrained_state_dict) > 0:\n self.pooler.load_state_dict(pooler_pretrained_state_dict)\n\n\nclass VisualLinguisticBertForPretraining(VisualLinguisticBert):\n def __init__(self, config, language_pretrained_model_path=None,\n with_rel_head=True, with_mlm_head=True, 
with_mvrc_head=True):\n\n super(VisualLinguisticBertForPretraining, self).__init__(config, language_pretrained_model_path=None)\n\n self.with_rel_head = with_rel_head\n self.with_mlm_head = with_mlm_head\n self.with_mvrc_head = with_mvrc_head\n if with_rel_head:\n self.relationsip_head = VisualLinguisticBertRelationshipPredictionHead(config)\n if with_mlm_head:\n self.mlm_head = BertOnlyMLMHead(config, self.word_embeddings.weight)\n if with_mvrc_head:\n self.mvrc_head = VisualLinguisticBertMVRCHead(config)\n\n # init weights\n self.apply(self.init_weights)\n if config.visual_ln:\n self.visual_ln_text.weight.data.fill_(self.config.visual_scale_text_init)\n self.visual_ln_object.weight.data.fill_(self.config.visual_scale_object_init)\n\n # load language pretrained model\n if language_pretrained_model_path is not None:\n self.load_language_pretrained_model(language_pretrained_model_path)\n\n if config.word_embedding_frozen:\n for p in self.word_embeddings.parameters():\n p.requires_grad = False\n\n if config.pos_embedding_frozen:\n for p in self.position_embeddings.parameters():\n p.requires_grad = False\n\n def forward(self,\n text_input_ids,\n text_token_type_ids,\n text_visual_embeddings,\n text_mask,\n object_vl_embeddings,\n object_mask,\n output_all_encoded_layers=True,\n output_text_and_object_separately=False):\n\n text_out, object_out, pooled_rep = super(VisualLinguisticBertForPretraining, self).forward(\n text_input_ids,\n text_token_type_ids,\n text_visual_embeddings,\n text_mask,\n object_vl_embeddings,\n object_mask,\n output_all_encoded_layers=False,\n output_text_and_object_separately=True\n )\n\n # if self.with_rel_head:\n # relationship_logits = self.relationsip_head(pooled_rep)\n # else:\n relationship_logits = None\n if self.with_mlm_head:\n mlm_logits = self.mlm_head(text_out)\n else:\n mlm_logits = None\n if self.with_mvrc_head:\n mvrc_logits = self.mvrc_head(object_out)\n else:\n mvrc_logits = None\n\n return relationship_logits, mlm_logits, mvrc_logits, text_out[:, 0, :]\n\n def load_language_pretrained_model(self, language_pretrained_model_path):\n pretrained_state_dict = torch.load(language_pretrained_model_path, map_location=lambda storage, loc: storage)\n encoder_pretrained_state_dict = {}\n pooler_pretrained_state_dict = {}\n embedding_ln_pretrained_state_dict = {}\n relationship_head_pretrained_state_dict = {}\n mlm_head_pretrained_state_dict = {}\n unexpected_keys = []\n for _k, v in pretrained_state_dict.items():\n if _k.startswith('bert.') or _k.startswith('roberta.'):\n k = _k[len('bert.'):] if _k.startswith('bert.') else _k[len('roberta.'):]\n if 'gamma' in k:\n k = k.replace('gamma', 'weight')\n if 'beta' in k:\n k = k.replace('beta', 'bias')\n if k.startswith('encoder.'):\n k_ = k[len('encoder.'):]\n if k_ in self.encoder.state_dict():\n encoder_pretrained_state_dict[k_] = v\n else:\n unexpected_keys.append(_k)\n elif k.startswith('embeddings.'):\n k_ = k[len('embeddings.'):]\n if k_ == 'word_embeddings.weight':\n self.word_embeddings.weight.data = v.to(dtype=self.word_embeddings.weight.data.dtype,\n device=self.word_embeddings.weight.data.device)\n elif k_ == 'position_embeddings.weight':\n self.position_embeddings.weight.data = v.to(dtype=self.position_embeddings.weight.data.dtype,\n device=self.position_embeddings.weight.data.device)\n elif k_ == 'token_type_embeddings.weight':\n self.token_type_embeddings.weight.data[:v.size(0)] = v.to(\n dtype=self.token_type_embeddings.weight.data.dtype,\n device=self.token_type_embeddings.weight.data.device)\n if 
v.size(0) == 1:\n # Todo: roberta token type embedding\n self.token_type_embeddings.weight.data[1] = v[0].to(\n dtype=self.token_type_embeddings.weight.data.dtype,\n device=self.token_type_embeddings.weight.data.device)\n elif k_.startswith('LayerNorm.'):\n k__ = k_[len('LayerNorm.'):]\n if k__ in self.embedding_LayerNorm.state_dict():\n embedding_ln_pretrained_state_dict[k__] = v\n else:\n unexpected_keys.append(_k)\n else:\n unexpected_keys.append(_k)\n elif self.config.with_pooler and k.startswith('pooler.'):\n k_ = k[len('pooler.'):]\n if k_ in self.pooler.state_dict():\n pooler_pretrained_state_dict[k_] = v\n else:\n unexpected_keys.append(_k)\n elif _k.startswith('cls.seq_relationship.') and self.with_rel_head:\n k_ = _k[len('cls.seq_relationship.'):]\n if 'gamma' in k_:\n k_ = k_.replace('gamma', 'weight')\n if 'beta' in k_:\n k_ = k_.replace('beta', 'bias')\n if k_ in self.relationsip_head.caption_image_relationship.state_dict():\n relationship_head_pretrained_state_dict[k_] = v\n else:\n unexpected_keys.append(_k)\n elif (_k.startswith('cls.predictions.') or _k.startswith('lm_head.')) and self.with_mlm_head:\n k_ = _k[len('cls.predictions.'):] if _k.startswith('cls.predictions.') else _k[len('lm_head.'):]\n if _k.startswith('lm_head.'):\n if 'dense' in k_ or 'layer_norm' in k_:\n k_ = 'transform.' + k_\n if 'layer_norm' in k_:\n k_ = k_.replace('layer_norm', 'LayerNorm')\n if 'gamma' in k_:\n k_ = k_.replace('gamma', 'weight')\n if 'beta' in k_:\n k_ = k_.replace('beta', 'bias')\n if k_ in self.mlm_head.predictions.state_dict():\n mlm_head_pretrained_state_dict[k_] = v\n else:\n unexpected_keys.append(_k)\n else:\n unexpected_keys.append(_k)\n if len(unexpected_keys) > 0:\n print(\"Warnings: Unexpected keys: {}.\".format(unexpected_keys))\n self.embedding_LayerNorm.load_state_dict(embedding_ln_pretrained_state_dict)\n self.encoder.load_state_dict(encoder_pretrained_state_dict)\n if self.config.with_pooler and len(pooler_pretrained_state_dict) > 0:\n self.pooler.load_state_dict(pooler_pretrained_state_dict)\n if self.with_rel_head and len(relationship_head_pretrained_state_dict) > 0:\n self.relationsip_head.caption_image_relationship.load_state_dict(relationship_head_pretrained_state_dict)\n if self.with_mlm_head:\n self.mlm_head.predictions.load_state_dict(mlm_head_pretrained_state_dict)\n\n\nclass VisualLinguisticBertMVRCHeadTransform(BaseModel):\n def __init__(self, config):\n super(VisualLinguisticBertMVRCHeadTransform, self).__init__(config)\n\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.act = ACT2FN[config.hidden_act]\n\n self.apply(self.init_weights)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.act(hidden_states)\n\n return hidden_states\n\n\nclass VisualLinguisticBertMVRCHead(BaseModel):\n def __init__(self, config):\n super(VisualLinguisticBertMVRCHead, self).__init__(config)\n\n self.transform = VisualLinguisticBertMVRCHeadTransform(config)\n self.region_cls_pred = nn.Linear(config.hidden_size, config.visual_region_classes)\n self.apply(self.init_weights)\n\n def forward(self, hidden_states):\n\n hidden_states = self.transform(hidden_states)\n logits = self.region_cls_pred(hidden_states)\n\n return logits\n\n\nclass VisualLinguisticBertRelationshipPredictionHead(BaseModel):\n def __init__(self, config):\n super(VisualLinguisticBertRelationshipPredictionHead, self).__init__(config)\n\n self.caption_image_relationship = nn.Linear(config.hidden_size, 2)\n self.apply(self.init_weights)\n\n 
def forward(self, pooled_rep):\n\n relationship_logits = self.caption_image_relationship(pooled_rep)\n\n return relationship_logits\n\n\n\n\n"
] | [
[
"numpy.concatenate",
"numpy.random.RandomState",
"numpy.random.randint"
],
[
"numpy.maximum",
"numpy.minimum",
"numpy.pad",
"numpy.clip",
"numpy.asarray",
"numpy.abs",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.array"
],
[
"torch.nn.Dropout",
"torch.load",
"torch.zeros",
"torch.is_tensor",
"torch.nn.Embedding",
"torch.nn.Linear",
"torch.arange",
"torch.as_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
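As an illustrative aside to the row above (not part of the dataset record): the VisualLinguisticBert forward pass stored in this entry converts a 2D padding mask into an additive attention mask before calling the BERT encoder, as its own comments describe. A minimal, self-contained sketch of that pattern, assuming only plain PyTorch and nothing VL-BERT-specific; the function name extended_attention_mask is illustrative:

import torch

def extended_attention_mask(attention_mask: torch.Tensor, dtype=torch.float32) -> torch.Tensor:
    # attention_mask: (batch, seq_len), 1 for real tokens, 0 for padding.
    # Broadcast to (batch, 1, 1, seq_len) so it applies to every head and query position,
    # then turn it into an additive mask: 0.0 where attending, -10000.0 where masked,
    # which is added to the raw attention scores before the softmax.
    mask = attention_mask.unsqueeze(1).unsqueeze(2).to(dtype=dtype)
    return (1.0 - mask) * -10000.0

mask = torch.tensor([[1, 1, 1, 0, 0]])
print(extended_attention_mask(mask).shape)  # torch.Size([1, 1, 1, 5])

This mirrors the comment block in the entry's forward(): masked positions receive a large negative bias so they are effectively removed from the softmax.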
girisagar46/main | [
"da358985b38dc4b776129b02f84694577e6f6190"
] | [
"mindsdb/libs/ml_models/pytorch/models/fully_connected_net/fully_connected_net.py"
] | [
"\n\nfrom mindsdb.config import *\nfrom mindsdb.libs.constants.mindsdb import *\n\nimport math\nimport torch\nimport torch.nn as nn\nfrom mindsdb.libs.ml_models.pytorch.libs.base_model import BaseModel\n\n\n\nclass FullyConnectedNet(BaseModel):\n\n\n def setup(self, sample_batch):\n \"\"\"\n Here we define the basic building blocks of our model, in forward we define how we put it all together along wiht an input\n\n :param sample_batch: this is used to understand the characteristics of the input and target, it is an object of type utils.libs.data_types.batch.Batch\n \"\"\"\n\n self.flatTarget = True # True means that we will expect target to be a flat vector per row, even if its multiple variables\n self.flatInput = True # True means that we will expect input to be a a flat vector per row, even if it smade of multiple variables\n\n sample_input = sample_batch.getInput(flatten=self.flatInput)\n sample_target = sample_batch.getTarget(flatten=self.flatTarget)\n input_size = sample_input.size()[1]\n output_size = sample_target.size()[1]\n\n self.net = nn.Sequential(\n nn.Linear(input_size, input_size),\n torch.nn.LeakyReLU(),\n nn.Linear(input_size, int(math.ceil(input_size/2))),\n torch.nn.LeakyReLU(),\n nn.Linear(int(math.ceil(input_size/2)), output_size)\n )\n\n if USE_CUDA:\n self.net.cuda()\n\n\n\n def forward(self, input):\n \"\"\"\n In this particular model, we just need to forward the network defined in setup, with our input\n\n :param input: a pytorch tensor with the input data of a batch\n :return:\n \"\"\"\n output = self.net(input)\n return output\n\n\n\n\n\n\n"
] | [
[
"torch.nn.Linear",
"torch.nn.LeakyReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
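A hypothetical usage sketch of the two torch.nn APIs listed for this entry (torch.nn.Linear, torch.nn.LeakyReLU), mirroring the layer topology that FullyConnectedNet.setup() builds; the helper name build_fully_connected and the concrete sizes below are illustrative, not from the repo:

import math
import torch
import torch.nn as nn

def build_fully_connected(input_size: int, output_size: int) -> nn.Sequential:
    # Same topology as FullyConnectedNet.setup():
    # input -> input -> ceil(input/2) -> output, with LeakyReLU between the linear layers.
    hidden = int(math.ceil(input_size / 2))
    return nn.Sequential(
        nn.Linear(input_size, input_size),
        nn.LeakyReLU(),
        nn.Linear(input_size, hidden),
        nn.LeakyReLU(),
        nn.Linear(hidden, output_size),
    )

net = build_fully_connected(10, 3)
print(net(torch.randn(4, 10)).shape)  # torch.Size([4, 3])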
dimasad/numpy | [
"85188530bffae563eb274b9c12b77981cfa4e1d2"
] | [
"numpy/lib/utils.py"
] | [
"from __future__ import division, absolute_import, print_function\n\nimport os\nimport sys\nimport types\nimport re\nimport warnings\n\nfrom numpy.core.numerictypes import issubclass_, issubsctype, issubdtype\nfrom numpy.core import ndarray, ufunc, asarray\n\n__all__ = [\n 'issubclass_', 'issubsctype', 'issubdtype', 'deprecate',\n 'deprecate_with_doc', 'get_include', 'info', 'source', 'who',\n 'lookfor', 'byte_bounds', 'safe_eval'\n ]\n\ndef get_include():\n \"\"\"\n Return the directory that contains the NumPy \\\\*.h header files.\n\n Extension modules that need to compile against NumPy should use this\n function to locate the appropriate include directory.\n\n Notes\n -----\n When using ``distutils``, for example in ``setup.py``.\n ::\n\n import numpy as np\n ...\n Extension('extension_name', ...\n include_dirs=[np.get_include()])\n ...\n\n \"\"\"\n import numpy\n if numpy.show_config is None:\n # running from numpy source directory\n d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include')\n else:\n # using installed numpy core headers\n import numpy.core as core\n d = os.path.join(os.path.dirname(core.__file__), 'include')\n return d\n\n\ndef _set_function_name(func, name):\n func.__name__ = name\n return func\n\n\nclass _Deprecate(object):\n \"\"\"\n Decorator class to deprecate old functions.\n\n Refer to `deprecate` for details.\n\n See Also\n --------\n deprecate\n\n \"\"\"\n\n def __init__(self, old_name=None, new_name=None, message=None):\n self.old_name = old_name\n self.new_name = new_name\n self.message = message\n\n def __call__(self, func, *args, **kwargs):\n \"\"\"\n Decorator call. Refer to ``decorate``.\n\n \"\"\"\n old_name = self.old_name\n new_name = self.new_name\n message = self.message\n\n import warnings\n if old_name is None:\n try:\n old_name = func.__name__\n except AttributeError:\n old_name = func.__name__\n if new_name is None:\n depdoc = \"`%s` is deprecated!\" % old_name\n else:\n depdoc = \"`%s` is deprecated, use `%s` instead!\" % \\\n (old_name, new_name)\n\n if message is not None:\n depdoc += \"\\n\" + message\n\n def newfunc(*args,**kwds):\n \"\"\"`arrayrange` is deprecated, use `arange` instead!\"\"\"\n warnings.warn(depdoc, DeprecationWarning)\n return func(*args, **kwds)\n\n newfunc = _set_function_name(newfunc, old_name)\n doc = func.__doc__\n if doc is None:\n doc = depdoc\n else:\n doc = '\\n\\n'.join([depdoc, doc])\n newfunc.__doc__ = doc\n try:\n d = func.__dict__\n except AttributeError:\n pass\n else:\n newfunc.__dict__.update(d)\n return newfunc\n\ndef deprecate(*args, **kwargs):\n \"\"\"\n Issues a DeprecationWarning, adds warning to `old_name`'s\n docstring, rebinds ``old_name.__name__`` and returns the new\n function object.\n\n This function may also be used as a decorator.\n\n Parameters\n ----------\n func : function\n The function to be deprecated.\n old_name : str, optional\n The name of the function to be deprecated. Default is None, in\n which case the name of `func` is used.\n new_name : str, optional\n The new name for the function. Default is None, in which case the\n deprecation message is that `old_name` is deprecated. If given, the\n deprecation message is that `old_name` is deprecated and `new_name`\n should be used instead.\n message : str, optional\n Additional explanation of the deprecation. 
Displayed in the\n docstring after the warning.\n\n Returns\n -------\n old_func : function\n The deprecated function.\n\n Examples\n --------\n Note that ``olduint`` returns a value after printing Deprecation\n Warning:\n\n >>> olduint = np.deprecate(np.uint)\n >>> olduint(6)\n /usr/lib/python2.5/site-packages/numpy/lib/utils.py:114:\n DeprecationWarning: uint32 is deprecated\n warnings.warn(str1, DeprecationWarning)\n 6\n\n \"\"\"\n # Deprecate may be run as a function or as a decorator\n # If run as a function, we initialise the decorator class\n # and execute its __call__ method.\n\n if args:\n fn = args[0]\n args = args[1:]\n\n # backward compatibility -- can be removed\n # after next release\n if 'newname' in kwargs:\n kwargs['new_name'] = kwargs.pop('newname')\n if 'oldname' in kwargs:\n kwargs['old_name'] = kwargs.pop('oldname')\n\n return _Deprecate(*args, **kwargs)(fn)\n else:\n return _Deprecate(*args, **kwargs)\n\ndeprecate_with_doc = lambda msg: _Deprecate(message=msg)\n\n\n#--------------------------------------------\n# Determine if two arrays can share memory\n#--------------------------------------------\n\ndef byte_bounds(a):\n \"\"\"\n Returns pointers to the end-points of an array.\n\n Parameters\n ----------\n a : ndarray\n Input array. It must conform to the Python-side of the array\n interface.\n\n Returns\n -------\n (low, high) : tuple of 2 integers\n The first integer is the first byte of the array, the second\n integer is just past the last byte of the array. If `a` is not\n contiguous it will not use every byte between the (`low`, `high`)\n values.\n\n Examples\n --------\n >>> I = np.eye(2, dtype='f'); I.dtype\n dtype('float32')\n >>> low, high = np.byte_bounds(I)\n >>> high - low == I.size*I.itemsize\n True\n >>> I = np.eye(2, dtype='G'); I.dtype\n dtype('complex192')\n >>> low, high = np.byte_bounds(I)\n >>> high - low == I.size*I.itemsize\n True\n\n \"\"\"\n ai = a.__array_interface__\n a_data = ai['data'][0]\n astrides = ai['strides']\n ashape = ai['shape']\n bytes_a = asarray(a).dtype.itemsize\n\n a_low = a_high = a_data\n if astrides is None:\n # contiguous case\n a_high += a.size * bytes_a\n else:\n for shape, stride in zip(ashape, astrides):\n if stride < 0:\n a_low += (shape-1)*stride\n else:\n a_high += (shape-1)*stride\n a_high += bytes_a\n return a_low, a_high\n\n\n#-----------------------------------------------------------------------------\n# Function for output and information on the variables used.\n#-----------------------------------------------------------------------------\n\n\ndef who(vardict=None):\n \"\"\"\n Print the Numpy arrays in the given dictionary.\n\n If there is no dictionary passed in or `vardict` is None then returns\n Numpy arrays in the globals() dictionary (all Numpy arrays in the\n namespace).\n\n Parameters\n ----------\n vardict : dict, optional\n A dictionary possibly containing ndarrays. Default is globals().\n\n Returns\n -------\n out : None\n Returns 'None'.\n\n Notes\n -----\n Prints out the name, shape, bytes and type of all of the ndarrays\n present in `vardict`.\n\n Examples\n --------\n >>> a = np.arange(10)\n >>> b = np.ones(20)\n >>> np.who()\n Name Shape Bytes Type\n ===========================================================\n a 10 40 int32\n b 20 160 float64\n Upper bound on total bytes = 200\n\n >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str',\n ... 
'idx':5}\n >>> np.who(d)\n Name Shape Bytes Type\n ===========================================================\n y 3 24 float64\n x 2 16 float64\n Upper bound on total bytes = 40\n\n \"\"\"\n if vardict is None:\n frame = sys._getframe().f_back\n vardict = frame.f_globals\n sta = []\n cache = {}\n for name in vardict.keys():\n if isinstance(vardict[name], ndarray):\n var = vardict[name]\n idv = id(var)\n if idv in cache.keys():\n namestr = name + \" (%s)\" % cache[idv]\n original = 0\n else:\n cache[idv] = name\n namestr = name\n original = 1\n shapestr = \" x \".join(map(str, var.shape))\n bytestr = str(var.nbytes)\n sta.append([namestr, shapestr, bytestr, var.dtype.name,\n original])\n\n maxname = 0\n maxshape = 0\n maxbyte = 0\n totalbytes = 0\n for k in range(len(sta)):\n val = sta[k]\n if maxname < len(val[0]):\n maxname = len(val[0])\n if maxshape < len(val[1]):\n maxshape = len(val[1])\n if maxbyte < len(val[2]):\n maxbyte = len(val[2])\n if val[4]:\n totalbytes += int(val[2])\n\n if len(sta) > 0:\n sp1 = max(10, maxname)\n sp2 = max(10, maxshape)\n sp3 = max(10, maxbyte)\n prval = \"Name %s Shape %s Bytes %s Type\" % (sp1*' ', sp2*' ', sp3*' ')\n print(prval + \"\\n\" + \"=\"*(len(prval)+5) + \"\\n\")\n\n for k in range(len(sta)):\n val = sta[k]\n print(\"%s %s %s %s %s %s %s\" % (val[0], ' '*(sp1-len(val[0])+4),\n val[1], ' '*(sp2-len(val[1])+5),\n val[2], ' '*(sp3-len(val[2])+5),\n val[3]))\n print(\"\\nUpper bound on total bytes = %d\" % totalbytes)\n return\n\n#-----------------------------------------------------------------------------\n\n\n# NOTE: pydoc defines a help function which works simliarly to this\n# except it uses a pager to take over the screen.\n\n# combine name and arguments and split to multiple lines of width\n# characters. 
End lines on a comma and begin argument list indented with\n# the rest of the arguments.\ndef _split_line(name, arguments, width):\n firstwidth = len(name)\n k = firstwidth\n newstr = name\n sepstr = \", \"\n arglist = arguments.split(sepstr)\n for argument in arglist:\n if k == firstwidth:\n addstr = \"\"\n else:\n addstr = sepstr\n k = k + len(argument) + len(addstr)\n if k > width:\n k = firstwidth + 1 + len(argument)\n newstr = newstr + \",\\n\" + \" \"*(firstwidth+2) + argument\n else:\n newstr = newstr + addstr + argument\n return newstr\n\n_namedict = None\n_dictlist = None\n\n# Traverse all module directories underneath globals\n# to see if something is defined\ndef _makenamedict(module='numpy'):\n module = __import__(module, globals(), locals(), [])\n thedict = {module.__name__:module.__dict__}\n dictlist = [module.__name__]\n totraverse = [module.__dict__]\n while True:\n if len(totraverse) == 0:\n break\n thisdict = totraverse.pop(0)\n for x in thisdict.keys():\n if isinstance(thisdict[x], types.ModuleType):\n modname = thisdict[x].__name__\n if modname not in dictlist:\n moddict = thisdict[x].__dict__\n dictlist.append(modname)\n totraverse.append(moddict)\n thedict[modname] = moddict\n return thedict, dictlist\n\n\ndef _info(obj, output=sys.stdout):\n \"\"\"Provide information about ndarray obj.\n\n Parameters\n ----------\n obj: ndarray\n Must be ndarray, not checked.\n output:\n Where printed output goes.\n\n Notes\n -----\n Copied over from the numarray module prior to its removal.\n Adapted somewhat as only numpy is an option now.\n\n Called by info.\n\n \"\"\"\n extra = \"\"\n tic = \"\"\n bp = lambda x: x\n cls = getattr(obj, '__class__', type(obj))\n nm = getattr(cls, '__name__', cls)\n strides = obj.strides\n endian = obj.dtype.byteorder\n\n print(\"class: \", nm, file=output)\n print(\"shape: \", obj.shape, file=output)\n print(\"strides: \", strides, file=output)\n print(\"itemsize: \", obj.itemsize, file=output)\n print(\"aligned: \", bp(obj.flags.aligned), file=output)\n print(\"contiguous: \", bp(obj.flags.contiguous), file=output)\n print(\"fortran: \", obj.flags.fortran, file=output)\n print(\n \"data pointer: %s%s\" % (hex(obj.ctypes._as_parameter_.value), extra),\n file=output\n )\n print(\"byteorder: \", end=' ', file=output)\n if endian in ['|', '=']:\n print(\"%s%s%s\" % (tic, sys.byteorder, tic), file=output)\n byteswap = False\n elif endian == '>':\n print(\"%sbig%s\" % (tic, tic), file=output)\n byteswap = sys.byteorder != \"big\"\n else:\n print(\"%slittle%s\" % (tic, tic), file=output)\n byteswap = sys.byteorder != \"little\"\n print(\"byteswap: \", bp(byteswap), file=output)\n print(\"type: %s\" % obj.dtype, file=output)\n\n\ndef info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):\n \"\"\"\n Get help information for a function, class, or module.\n\n Parameters\n ----------\n object : object or str, optional\n Input object or name to get information about. If `object` is a\n numpy object, its docstring is given. If it is a string, available\n modules are searched for matching objects. If None, information\n about `info` itself is returned.\n maxwidth : int, optional\n Printing width.\n output : file like object, optional\n File like object that the output is written to, default is\n ``stdout``. 
The object has to be opened in 'w' or 'a' mode.\n toplevel : str, optional\n Start search at this level.\n\n See Also\n --------\n source, lookfor\n\n Notes\n -----\n When used interactively with an object, ``np.info(obj)`` is equivalent\n to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython\n prompt.\n\n Examples\n --------\n >>> np.info(np.polyval) # doctest: +SKIP\n polyval(p, x)\n Evaluate the polynomial p at x.\n ...\n\n When using a string for `object` it is possible to get multiple results.\n\n >>> np.info('fft') # doctest: +SKIP\n *** Found in numpy ***\n Core FFT routines\n ...\n *** Found in numpy.fft ***\n fft(a, n=None, axis=-1)\n ...\n *** Repeat reference found in numpy.fft.fftpack ***\n *** Total of 3 references found. ***\n\n \"\"\"\n global _namedict, _dictlist\n # Local import to speed up numpy's import time.\n import pydoc\n import inspect\n\n if (hasattr(object, '_ppimport_importer') or\n hasattr(object, '_ppimport_module')):\n object = object._ppimport_module\n elif hasattr(object, '_ppimport_attr'):\n object = object._ppimport_attr\n\n if object is None:\n info(info)\n elif isinstance(object, ndarray):\n _info(object, output=output)\n elif isinstance(object, str):\n if _namedict is None:\n _namedict, _dictlist = _makenamedict(toplevel)\n numfound = 0\n objlist = []\n for namestr in _dictlist:\n try:\n obj = _namedict[namestr][object]\n if id(obj) in objlist:\n print(\"\\n \"\n \"*** Repeat reference found in %s *** \" % namestr,\n file=output\n )\n else:\n objlist.append(id(obj))\n print(\" *** Found in %s ***\" % namestr, file=output)\n info(obj)\n print(\"-\"*maxwidth, file=output)\n numfound += 1\n except KeyError:\n pass\n if numfound == 0:\n print(\"Help for %s not found.\" % object, file=output)\n else:\n print(\"\\n \"\n \"*** Total of %d references found. 
***\" % numfound,\n file=output\n )\n\n elif inspect.isfunction(object):\n name = object.__name__\n arguments = inspect.formatargspec(*inspect.getargspec(object))\n\n if len(name+arguments) > maxwidth:\n argstr = _split_line(name, arguments, maxwidth)\n else:\n argstr = name + arguments\n\n print(\" \" + argstr + \"\\n\", file=output)\n print(inspect.getdoc(object), file=output)\n\n elif inspect.isclass(object):\n name = object.__name__\n arguments = \"()\"\n try:\n if hasattr(object, '__init__'):\n arguments = inspect.formatargspec(\n *inspect.getargspec(object.__init__.__func__)\n )\n arglist = arguments.split(', ')\n if len(arglist) > 1:\n arglist[1] = \"(\"+arglist[1]\n arguments = \", \".join(arglist[1:])\n except:\n pass\n\n if len(name+arguments) > maxwidth:\n argstr = _split_line(name, arguments, maxwidth)\n else:\n argstr = name + arguments\n\n print(\" \" + argstr + \"\\n\", file=output)\n doc1 = inspect.getdoc(object)\n if doc1 is None:\n if hasattr(object, '__init__'):\n print(inspect.getdoc(object.__init__), file=output)\n else:\n print(inspect.getdoc(object), file=output)\n\n methods = pydoc.allmethods(object)\n if methods != []:\n print(\"\\n\\nMethods:\\n\", file=output)\n for meth in methods:\n if meth[0] == '_':\n continue\n thisobj = getattr(object, meth, None)\n if thisobj is not None:\n methstr, other = pydoc.splitdoc(\n inspect.getdoc(thisobj) or \"None\"\n )\n print(\" %s -- %s\" % (meth, methstr), file=output)\n\n elif (sys.version_info[0] < 3\n and isinstance(object, types.InstanceType)):\n # check for __call__ method\n # types.InstanceType is the type of the instances of oldstyle classes\n print(\"Instance of class: \", object.__class__.__name__, file=output)\n print(file=output)\n if hasattr(object, '__call__'):\n arguments = inspect.formatargspec(\n *inspect.getargspec(object.__call__.__func__)\n )\n arglist = arguments.split(', ')\n if len(arglist) > 1:\n arglist[1] = \"(\"+arglist[1]\n arguments = \", \".join(arglist[1:])\n else:\n arguments = \"()\"\n\n if hasattr(object, 'name'):\n name = \"%s\" % object.name\n else:\n name = \"<name>\"\n if len(name+arguments) > maxwidth:\n argstr = _split_line(name, arguments, maxwidth)\n else:\n argstr = name + arguments\n\n print(\" \" + argstr + \"\\n\", file=output)\n doc = inspect.getdoc(object.__call__)\n if doc is not None:\n print(inspect.getdoc(object.__call__), file=output)\n print(inspect.getdoc(object), file=output)\n\n else:\n print(inspect.getdoc(object), file=output)\n\n elif inspect.ismethod(object):\n name = object.__name__\n arguments = inspect.formatargspec(\n *inspect.getargspec(object.__func__)\n )\n arglist = arguments.split(', ')\n if len(arglist) > 1:\n arglist[1] = \"(\"+arglist[1]\n arguments = \", \".join(arglist[1:])\n else:\n arguments = \"()\"\n\n if len(name+arguments) > maxwidth:\n argstr = _split_line(name, arguments, maxwidth)\n else:\n argstr = name + arguments\n\n print(\" \" + argstr + \"\\n\", file=output)\n print(inspect.getdoc(object), file=output)\n\n elif hasattr(object, '__doc__'):\n print(inspect.getdoc(object), file=output)\n\n\ndef source(object, output=sys.stdout):\n \"\"\"\n Print or write to a file the source code for a Numpy object.\n\n The source code is only returned for objects written in Python. Many\n functions and classes are defined in C and will therefore not return\n useful information.\n\n Parameters\n ----------\n object : numpy object\n Input object. 
This can be any object (function, class, module,\n ...).\n output : file object, optional\n If `output` not supplied then source code is printed to screen\n (sys.stdout). File object must be created with either write 'w' or\n append 'a' modes.\n\n See Also\n --------\n lookfor, info\n\n Examples\n --------\n >>> np.source(np.interp) #doctest: +SKIP\n In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py\n def interp(x, xp, fp, left=None, right=None):\n \\\"\\\"\\\".... (full docstring printed)\\\"\\\"\\\"\n if isinstance(x, (float, int, number)):\n return compiled_interp([x], xp, fp, left, right).item()\n else:\n return compiled_interp(x, xp, fp, left, right)\n\n The source code is only returned for objects written in Python.\n\n >>> np.source(np.array) #doctest: +SKIP\n Not available for this object.\n\n \"\"\"\n # Local import to speed up numpy's import time.\n import inspect\n try:\n print(\"In file: %s\\n\" % inspect.getsourcefile(object), file=output)\n print(inspect.getsource(object), file=output)\n except:\n print(\"Not available for this object.\", file=output)\n\n\n# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}\n# where kind: \"func\", \"class\", \"module\", \"object\"\n# and index: index in breadth-first namespace traversal\n_lookfor_caches = {}\n\n# regexp whose match indicates that the string may contain a function\n# signature\n_function_signature_re = re.compile(r\"[a-z0-9_]+\\(.*[,=].*\\)\", re.I)\n\ndef lookfor(what, module=None, import_modules=True, regenerate=False,\n output=None):\n \"\"\"\n Do a keyword search on docstrings.\n\n A list of of objects that matched the search is displayed,\n sorted by relevance. All given keywords need to be found in the\n docstring for it to be returned as a result, but the order does\n not matter.\n\n Parameters\n ----------\n what : str\n String containing words to look for.\n module : str or list, optional\n Name of module(s) whose docstrings to go through.\n import_modules : bool, optional\n Whether to import sub-modules in packages. Default is True.\n regenerate : bool, optional\n Whether to re-generate the docstring cache. Default is False.\n output : file-like, optional\n File-like object to write the output to. 
If omitted, use a pager.\n\n See Also\n --------\n source, info\n\n Notes\n -----\n Relevance is determined only roughly, by checking if the keywords occur\n in the function name, at the start of a docstring, etc.\n\n Examples\n --------\n >>> np.lookfor('binary representation')\n Search results for 'binary representation'\n ------------------------------------------\n numpy.binary_repr\n Return the binary representation of the input number as a string.\n numpy.core.setup_common.long_double_representation\n Given a binary dump as given by GNU od -b, look for long double\n numpy.base_repr\n Return a string representation of a number in the given base system.\n ...\n\n \"\"\"\n import pydoc\n\n # Cache\n cache = _lookfor_generate_cache(module, import_modules, regenerate)\n\n # Search\n # XXX: maybe using a real stemming search engine would be better?\n found = []\n whats = str(what).lower().split()\n if not whats:\n return\n\n for name, (docstring, kind, index) in cache.items():\n if kind in ('module', 'object'):\n # don't show modules or objects\n continue\n ok = True\n doc = docstring.lower()\n for w in whats:\n if w not in doc:\n ok = False\n break\n if ok:\n found.append(name)\n\n # Relevance sort\n # XXX: this is full Harrison-Stetson heuristics now,\n # XXX: it probably could be improved\n\n kind_relevance = {'func': 1000, 'class': 1000,\n 'module': -1000, 'object': -1000}\n\n def relevance(name, docstr, kind, index):\n r = 0\n # do the keywords occur within the start of the docstring?\n first_doc = \"\\n\".join(docstr.lower().strip().split(\"\\n\")[:3])\n r += sum([200 for w in whats if w in first_doc])\n # do the keywords occur in the function name?\n r += sum([30 for w in whats if w in name])\n # is the full name long?\n r += -len(name) * 5\n # is the object of bad type?\n r += kind_relevance.get(kind, -1000)\n # is the object deep in namespace hierarchy?\n r += -name.count('.') * 10\n r += max(-index / 100, -100)\n return r\n\n def relevance_value(a):\n return relevance(a, *cache[a])\n found.sort(key=relevance_value)\n\n # Pretty-print\n s = \"Search results for '%s'\" % (' '.join(whats))\n help_text = [s, \"-\"*len(s)]\n for name in found[::-1]:\n doc, kind, ix = cache[name]\n\n doclines = [line.strip() for line in doc.strip().split(\"\\n\")\n if line.strip()]\n\n # find a suitable short description\n try:\n first_doc = doclines[0].strip()\n if _function_signature_re.search(first_doc):\n first_doc = doclines[1].strip()\n except IndexError:\n first_doc = \"\"\n help_text.append(\"%s\\n %s\" % (name, first_doc))\n\n if not found:\n help_text.append(\"Nothing found.\")\n\n # Output\n if output is not None:\n output.write(\"\\n\".join(help_text))\n elif len(help_text) > 10:\n pager = pydoc.getpager()\n pager(\"\\n\".join(help_text))\n else:\n print(\"\\n\".join(help_text))\n\ndef _lookfor_generate_cache(module, import_modules, regenerate):\n \"\"\"\n Generate docstring cache for given module.\n\n Parameters\n ----------\n module : str, None, module\n Module for which to generate docstring cache\n import_modules : bool\n Whether to import sub-modules in packages.\n regenerate : bool\n Re-generate the docstring cache\n\n Returns\n -------\n cache : dict {obj_full_name: (docstring, kind, index), ...}\n Docstring cache for the module, either cached one (regenerate=False)\n or newly generated.\n\n \"\"\"\n global _lookfor_caches\n # Local import to speed up numpy's import time.\n import inspect\n\n if sys.version_info[0] >= 3:\n # In Python3 stderr, stdout are text files.\n from io import 
StringIO\n else:\n from StringIO import StringIO\n\n if module is None:\n module = \"numpy\"\n\n if isinstance(module, str):\n try:\n __import__(module)\n except ImportError:\n return {}\n module = sys.modules[module]\n elif isinstance(module, list) or isinstance(module, tuple):\n cache = {}\n for mod in module:\n cache.update(_lookfor_generate_cache(mod, import_modules,\n regenerate))\n return cache\n\n if id(module) in _lookfor_caches and not regenerate:\n return _lookfor_caches[id(module)]\n\n # walk items and collect docstrings\n cache = {}\n _lookfor_caches[id(module)] = cache\n seen = {}\n index = 0\n stack = [(module.__name__, module)]\n while stack:\n name, item = stack.pop(0)\n if id(item) in seen:\n continue\n seen[id(item)] = True\n\n index += 1\n kind = \"object\"\n\n if inspect.ismodule(item):\n kind = \"module\"\n try:\n _all = item.__all__\n except AttributeError:\n _all = None\n\n # import sub-packages\n if import_modules and hasattr(item, '__path__'):\n for pth in item.__path__:\n for mod_path in os.listdir(pth):\n this_py = os.path.join(pth, mod_path)\n init_py = os.path.join(pth, mod_path, '__init__.py')\n if (os.path.isfile(this_py) and\n mod_path.endswith('.py')):\n to_import = mod_path[:-3]\n elif os.path.isfile(init_py):\n to_import = mod_path\n else:\n continue\n if to_import == '__init__':\n continue\n\n try:\n # Catch SystemExit, too\n base_exc = BaseException\n except NameError:\n # Python 2.4 doesn't have BaseException\n base_exc = Exception\n\n try:\n old_stdout = sys.stdout\n old_stderr = sys.stderr\n try:\n sys.stdout = StringIO()\n sys.stderr = StringIO()\n __import__(\"%s.%s\" % (name, to_import))\n finally:\n sys.stdout = old_stdout\n sys.stderr = old_stderr\n except base_exc:\n continue\n\n for n, v in _getmembers(item):\n try:\n item_name = getattr(v, '__name__', \"%s.%s\" % (name, n))\n mod_name = getattr(v, '__module__', None)\n except NameError:\n # ref. SWIG's global cvars\n # NameError: Unknown C global variable\n item_name = \"%s.%s\" % (name, n)\n mod_name = None\n if '.' not in item_name and mod_name:\n item_name = \"%s.%s\" % (mod_name, item_name)\n\n if not item_name.startswith(name + '.'):\n # don't crawl \"foreign\" objects\n if isinstance(v, ufunc):\n # ... 
unless they are ufuncs\n pass\n else:\n continue\n elif not (inspect.ismodule(v) or _all is None or n in _all):\n continue\n stack.append((\"%s.%s\" % (name, n), v))\n elif inspect.isclass(item):\n kind = \"class\"\n for n, v in _getmembers(item):\n stack.append((\"%s.%s\" % (name, n), v))\n elif hasattr(item, \"__call__\"):\n kind = \"func\"\n\n try:\n doc = inspect.getdoc(item)\n except NameError:\n # ref SWIG's NameError: Unknown C global variable\n doc = None\n if doc is not None:\n cache[name] = (doc, kind, index)\n\n return cache\n\ndef _getmembers(item):\n import inspect\n try:\n members = inspect.getmembers(item)\n except Exception:\n members = [(x, getattr(item, x)) for x in dir(item)\n if hasattr(item, x)]\n return members\n\n#-----------------------------------------------------------------------------\n\n# The following SafeEval class and company are adapted from Michael Spencer's\n# ASPN Python Cookbook recipe:\n# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/364469\n# Accordingly it is mostly Copyright 2006 by Michael Spencer.\n# The recipe, like most of the other ASPN Python Cookbook recipes was made\n# available under the Python license.\n# http://www.python.org/license\n\n# It has been modified to:\n# * handle unary -/+\n# * support True/False/None\n# * raise SyntaxError instead of a custom exception.\n\nclass SafeEval(object):\n \"\"\"\n Object to evaluate constant string expressions.\n\n This includes strings with lists, dicts and tuples using the abstract\n syntax tree created by ``compiler.parse``.\n\n .. deprecated:: 1.10.0\n\n See Also\n --------\n safe_eval\n\n \"\"\"\n def __init__(self):\n # 2014-10-15, 1.10\n warnings.warn(\"SafeEval is deprecated in 1.10 and will be removed.\",\n DeprecationWarning)\n\n def visit(self, node):\n cls = node.__class__\n meth = getattr(self, 'visit' + cls.__name__, self.default)\n return meth(node)\n\n def default(self, node):\n raise SyntaxError(\"Unsupported source construct: %s\"\n % node.__class__)\n\n def visitExpression(self, node):\n return self.visit(node.body)\n\n def visitNum(self, node):\n return node.n\n\n def visitStr(self, node):\n return node.s\n\n def visitBytes(self, node):\n return node.s\n\n def visitDict(self, node,**kw):\n return dict([(self.visit(k), self.visit(v))\n for k, v in zip(node.keys, node.values)])\n\n def visitTuple(self, node):\n return tuple([self.visit(i) for i in node.elts])\n\n def visitList(self, node):\n return [self.visit(i) for i in node.elts]\n\n def visitUnaryOp(self, node):\n import ast\n if isinstance(node.op, ast.UAdd):\n return +self.visit(node.operand)\n elif isinstance(node.op, ast.USub):\n return -self.visit(node.operand)\n else:\n raise SyntaxError(\"Unknown unary op: %r\" % node.op)\n\n def visitName(self, node):\n if node.id == 'False':\n return False\n elif node.id == 'True':\n return True\n elif node.id == 'None':\n return None\n else:\n raise SyntaxError(\"Unknown name: %s\" % node.id)\n\n def visitNameConstant(self, node):\n return node.value\n\n\ndef safe_eval(source):\n \"\"\"\n Protected string evaluation.\n\n Evaluate a string containing a Python literal expression without\n allowing the execution of arbitrary non-literal code.\n\n Parameters\n ----------\n source : str\n The string to evaluate.\n\n Returns\n -------\n obj : object\n The result of evaluating `source`.\n\n Raises\n ------\n SyntaxError\n If the code has invalid Python syntax, or if it contains\n non-literal code.\n\n Examples\n --------\n >>> np.safe_eval('1')\n 1\n >>> np.safe_eval('[1, 2, 
3]')\n [1, 2, 3]\n >>> np.safe_eval('{\"foo\": (\"bar\", 10.0)}')\n {'foo': ('bar', 10.0)}\n\n >>> np.safe_eval('import os')\n Traceback (most recent call last):\n ...\n SyntaxError: invalid syntax\n\n >>> np.safe_eval('open(\"/home/user/.ssh/id_dsa\").read()')\n Traceback (most recent call last):\n ...\n SyntaxError: Unsupported source construct: compiler.ast.CallFunc\n\n \"\"\"\n # Local import to speed up numpy's import time.\n import ast\n\n return ast.literal_eval(source)\n#-----------------------------------------------------------------------------\n"
] | [
[
"numpy.core.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.13",
"1.16",
"1.9",
"1.7",
"1.15",
"1.14",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mamdamin/MVCNN-PyTorch-Airways | [
"47b420ede0aed20e2abbd209aa262cf5ee35af25"
] | [
"custom_dataset.py"
] | [
"from torch.utils.data.dataset import Dataset\nimport os\nimport pandas as pd\nimport re\nfrom PIL import Image\n#import cv2\nimport sys\nimport tensorflow as tf\nimport numpy as np\n#from augment import augmentImages\n\nclass MultiViewDataSet(Dataset):\n \n \n def find_classes(self, dir):\n classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\n classes = ['Standard', 'AccB', 'AbsRB7']\n #classes.sort()\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n\n return classes, class_to_idx\n\n def __init__(self, root, data_type, step = 1,transform=None, target_transform=None):\n self.x = []\n self.y = []\n self.root = root\n\n self.classes, self.class_to_idx = self.find_classes(root)\n\n self.transform = transform\n self.target_transform = target_transform\n #self.sess = tf.Session(graph=g_1,config=tf.ConfigProto(log_device_placement=True,gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=.02)))\n\n # root / <label> / <train/test> / <item> / <view>.png\n # Change here to read txt files directly\n\n data_type += '.txt'\n root = os.path.join(root,'sets')\n myset = os.path.join(root,data_type)\n print(myset)\n subjects = pd.read_csv(myset,header=None,sep=' ',names=['MVtxt','label'])\n #subjects = subjects.iloc[:24,:]\n #print(subjects.head(100),'\\n')\n c = 0\n Max = len(subjects)\n for idx, subject,label in subjects.itertuples():\n viewfiles = pd.read_csv(subject,header=None, sep=' ',names=['MVtxt','angle'],skiprows=2)\n views = []\n for i in range(len(viewfiles)):\n AA = re.search(\"Angle_[0-9]*\",viewfiles.iloc[i,0]).group().upper()\n viewfiles.iloc[i,1] = int(AA[6:])\n viewfiles.sort_values(by='angle',inplace=True)\n views = viewfiles.MVtxt.tolist()\n views = views[::step]\n sys.stdout.flush()\n sys.stdout.write('Loading {} data: {:.0f}% \\r'.format(data_type.split('.')[0],c*100/Max))\n\n c += 1\n #print((views))\n #halt\n image_views = []\n\n for view in views:\n im = Image.open(view)\n im = im.convert('RGB')\n if self.transform is not None:\n im = self.transform(im)\n image_views.append(im)\n image_views = np.stack(image_views, axis=0)\n #return views, self.y[index]\n\n self.x.append(image_views)\n self.y.append(label)\n assert image_views.shape[0]==48/step,\"Object with number of views other than {} was found!\".format(int(48/step))\n #print(self.x[0].shape)\n self.nofviews, _ , self.width , self.height = self.x[0].shape\n #print('Data Loaded!')\n\n # Override to give PyTorch access to any image on the dataset\n def __getitem__(self, index):\n original_views = self.x[index]\n #views = []\n #print(\"shape of Orginal_Views: \", original_views.shape)\n #for im in original_views:\n #im = Image.open(view)\n #im = im.convert('RGB')\n #if self.transform is not None:\n # im = self.transform(im)\n #views.append(im)\n #augment_on_GPU(original_views)\n #aug_views = augment_on_GPU(original_views)\n return original_views, self.y[index]\n\n # Override to give PyTorch size of dataset\n def __len__(self):\n return len(self.x)\n"
] | [
[
"pandas.read_csv",
"numpy.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
nitamago/deepfix | [
"5e478d2998abf46afb78eeed8f1a14e18aaeb640"
] | [
"neural_net/model.py"
] | [
"\"\"\"\nCopyright 2017 Rahul Gupta, Soham Pal, Aditya Kanade, Shirish Shevade.\nIndian Institute of Science.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nimport custom_seq2seq\n\nclass model:\n def _new_RNN_cell(self, memory_dim):\n if self.rnn_cell == 'LSTM':\n constituent_cell = tf.nn.rnn_cell.BasicLSTMCell(memory_dim)\n elif self.rnn_cell == 'GRU':\n constituent_cell = tf.nn.rnn_cell.GRUCell(memory_dim)\n elif self.rnn_cell == 'RNN':\n constituent_cell = tf.nn.rnn_cell.BasicRNNCell(memory_dim)\n else:\n raise Exception('unsupported rnn cell type: %s' % self.rnn_cell)\n \n if self.dropout != 0:\n constituent_cell = tf.nn.rnn_cell.DropoutWrapper(constituent_cell, input_keep_prob=self.keep_prob, output_keep_prob=self.keep_prob)\n\n if self.num_layers > 1:\n return tf.nn.rnn_cell.MultiRNNCell([constituent_cell] * self.num_layers)\n \n return constituent_cell\n \n def __init__(self, in_seq_length, out_seq_length, vocabulary_size, rnn_cell=\"GRU\", memory_dim=300, num_layers=4, dropout=0.2, embedding_dim=50, bidirectional=False, trainable=True):\n self.in_seq_length = in_seq_length\n self.out_seq_length = out_seq_length\n self.vocabulary_size = vocabulary_size\n self.rnn_cell = rnn_cell\n self.memory_dim = memory_dim\n self.num_layers = num_layers\n self.dropout = dropout\n self.embedding_dim = embedding_dim\n self.bidirectional = bidirectional\n self.trainable = trainable\n\n self.encoder_input = [tf.placeholder(tf.int32, shape=(None,), name=\"inp%i\" % t) for t in range(in_seq_length)]\n self.labels = [tf.placeholder(tf.int32, shape=(None,), name=\"labels%i\" % t) for t in range(out_seq_length)]\n self.weights = [tf.ones_like(labels_t, dtype=tf.float32) for labels_t in self.labels]\n\n self.decoder_input = [tf.zeros_like(self.encoder_input[0], dtype=np.int32, name=\"GO\")] + \\\n [tf.placeholder(tf.int32, shape=(None,), name=\"dec_inp%i\" % t) for t in range(out_seq_length - 1)]\n\n if dropout != 0:\n self.keep_prob = tf.placeholder(tf.float32)\n\n if not self.bidirectional:\n self.cell = self._new_RNN_cell(self.memory_dim)\n #self.dec_outputs, self.dec_memory = tf.nn.seq2seq.embedding_attention_seq2seq(self.encoder_input, self.decoder_input,\n self.dec_outputs, self.dec_memory = tf.contrib.legacy_seq2seq.embedding_attention_seq2seq(self.encoder_input, self.decoder_input,\n self.cell, vocabulary_size, vocabulary_size,\n embedding_dim, feed_previous=True)\n else:\n self.input_cell_forward = self._new_RNN_cell(self.memory_dim/2)\n self.input_cell_backward = self._new_RNN_cell(self.memory_dim/2)\n self.output_cell = self._new_RNN_cell(self.memory_dim)\n \n self.dec_outputs, self.dec_memory, = custom_seq2seq.embedding_attention_bidirectional_seq2seq(self.encoder_input, self.decoder_input, self.input_cell_forward,\n self.input_cell_backward, self.output_cell, self.vocabulary_size,\n self.vocabulary_size, self.embedding_dim, feed_previous=True)\n \n \n if trainable:\n #self.loss = tf.nn.seq2seq.sequence_loss(self.dec_outputs, self.labels, self.weights, 
vocabulary_size)\n self.loss = tf.contrib.legacy_seq2seq.sequence_loss(self.dec_outputs, self.labels, self.weights, vocabulary_size)\n \n self.optimizer = tf.train.AdamOptimizer()\n gvs = self.optimizer.compute_gradients(self.loss)\n capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]\n \n self.train_op = self.optimizer.apply_gradients(capped_gvs)\n \n self.saver = tf.train.Saver(tf.all_variables(), max_to_keep=5)\n\n def load_parameters(self, sess, filename):\n self.saver.restore(sess, filename)\n\n def save_parameters(self, sess, filename, global_step=None):\n self.saver.save(sess, filename, global_step=global_step)\n\n def train_step(self, sess, X, Y):\n if not self.trainable:\n raise Exception\n \n X = np.array(X).T\n Y = np.array(Y).T\n \n feed_dict = {self.encoder_input[t]: X[t] for t in range(self.in_seq_length)}\n feed_dict.update({self.labels[t]: Y[t] for t in range(self.out_seq_length)})\n \n if self.dropout != 0:\n feed_dict.update({self.keep_prob: 1.0-self.dropout})\n\n _, loss_t = sess.run([self.train_op, self.loss], feed_dict)\n \n return loss_t\n\n def validate_step(self, sess, X, Y):\n X = np.array(X).T\n Y = np.array(Y).T\n\n feed_dict = {self.encoder_input[t]: X[t] for t in range(self.in_seq_length)}\n feed_dict.update({self.labels[t]: Y[t] for t in range(self.out_seq_length)})\n\n if self.dropout != 0:\n feed_dict.update({self.keep_prob: 1.0})\n\n loss_t = sess.run([self.loss], feed_dict)\n dec_outputs_batch = sess.run(self.dec_outputs, feed_dict)\n Y_hat = [logits_t.argmax(axis=1) for logits_t in dec_outputs_batch]\n \n return loss_t, np.array(Y_hat).T\n\n def sample(self, sess, X):\n X = np.array(X).T\n\n feed_dict = {self.encoder_input[t]: X[t] for t in range(self.in_seq_length)}\n\n if self.dropout != 0:\n feed_dict.update({self.keep_prob: 1.0})\n\n dec_outputs_batch = sess.run(self.dec_outputs, feed_dict)\n Y_hat = [logits_t.argmax(axis=1) for logits_t in dec_outputs_batch]\n\n return np.array(Y_hat).T\n\n def get_attention_vectors(self, sess, X):\n pass\n"
] | [
[
"tensorflow.clip_by_value",
"tensorflow.nn.rnn_cell.BasicLSTMCell",
"tensorflow.nn.rnn_cell.DropoutWrapper",
"tensorflow.all_variables",
"tensorflow.ones_like",
"tensorflow.placeholder",
"tensorflow.zeros_like",
"tensorflow.contrib.legacy_seq2seq.embedding_attention_seq2seq",
"tensorflow.nn.rnn_cell.MultiRNNCell",
"tensorflow.train.AdamOptimizer",
"tensorflow.contrib.legacy_seq2seq.sequence_loss",
"tensorflow.nn.rnn_cell.GRUCell",
"numpy.array",
"tensorflow.nn.rnn_cell.BasicRNNCell"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
batermj/DeepVideoAnalytics | [
"daad116b87370fce1799b7948af73b92f617cf41"
] | [
"repos/insightface/src/common/face_preprocess.py"
] | [
"\nimport cv2\nimport numpy as np\nfrom skimage import transform as trans\n\ndef parse_lst_line(line):\n vec = line.strip().split(\"\\t\")\n assert len(vec)>=3\n aligned = False\n if int(vec[0])==1:\n aligned = True\n image_path = vec[1]\n label = int(vec[2])\n bbox = None\n landmark = None\n #print(vec)\n if len(vec)>3:\n bbox = np.zeros( (4,), dtype=np.int32)\n for i in xrange(3,7):\n bbox[i-3] = int(vec[i])\n landmark = None\n if len(vec)>7:\n _l = []\n for i in xrange(7,17):\n _l.append(float(vec[i]))\n landmark = np.array(_l).reshape( (2,5) ).T\n #print(aligned)\n return image_path, label, bbox, landmark, aligned\n\n\n\n\ndef read_image(img_path, **kwargs):\n mode = kwargs.get('mode', 'rgb')\n layout = kwargs.get('layout', 'HWC')\n if mode=='gray':\n img = cv2.imread(img_path, cv2.CV_LOAD_IMAGE_GRAYSCALE)\n else:\n img = cv2.imread(img_path, cv2.CV_LOAD_IMAGE_COLOR)\n if mode=='rgb':\n #print('to rgb')\n img = img[...,::-1]\n if layout=='CHW':\n img = np.transpose(img, (2,0,1))\n return img\n\n\ndef preprocess(img, bbox=None, landmark=None, **kwargs):\n if isinstance(img, str):\n img = read_image(img, **kwargs)\n M = None\n image_size = []\n str_image_size = kwargs.get('image_size', '')\n if len(str_image_size)>0:\n image_size = [int(x) for x in str_image_size.split(',')]\n if len(image_size)==1:\n image_size = [image_size[0], image_size[0]]\n assert len(image_size)==2\n assert image_size[0]==112\n assert image_size[0]==112 or image_size[1]==96\n if landmark is not None:\n assert len(image_size)==2\n src = np.array([\n [30.2946, 51.6963],\n [65.5318, 51.5014],\n [48.0252, 71.7366],\n [33.5493, 92.3655],\n [62.7299, 92.2041] ], dtype=np.float32 )\n if image_size[1]==112:\n src[:,0] += 8.0\n dst = landmark.astype(np.float32)\n\n tform = trans.SimilarityTransform()\n tform.estimate(dst, src)\n M = tform.params[0:2,:]\n #M = cv2.estimateRigidTransform( dst.reshape(1,5,2), src.reshape(1,5,2), False)\n\n if M is None:\n if bbox is None: #use center crop\n det = np.zeros(4, dtype=np.int32)\n det[0] = int(img.shape[1]*0.0625)\n det[1] = int(img.shape[0]*0.0625)\n det[2] = img.shape[1] - det[0]\n det[3] = img.shape[0] - det[1]\n else:\n det = bbox\n margin = kwargs.get('margin', 44)\n bb = np.zeros(4, dtype=np.int32)\n bb[0] = np.maximum(det[0]-margin/2, 0)\n bb[1] = np.maximum(det[1]-margin/2, 0)\n bb[2] = np.minimum(det[2]+margin/2, img.shape[1])\n bb[3] = np.minimum(det[3]+margin/2, img.shape[0])\n ret = img[bb[1]:bb[3],bb[0]:bb[2],:]\n if len(image_size)>0:\n ret = cv2.resize(ret, (image_size[1], image_size[0]))\n return ret \n else: #do align using landmark\n assert len(image_size)==2\n\n #src = src[0:3,:]\n #dst = dst[0:3,:]\n\n\n #print(src.shape, dst.shape)\n #print(src)\n #print(dst)\n #print(M)\n warped = cv2.warpAffine(img,M,(image_size[1],image_size[0]), borderValue = 0.0)\n\n #tform3 = trans.ProjectiveTransform()\n #tform3.estimate(src, dst)\n #warped = trans.warp(img, tform3, output_shape=_shape)\n return warped\n\n\n"
] | [
[
"numpy.maximum",
"numpy.minimum",
"numpy.transpose",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
seyuboglu/weakly-supervised-petct | [
"fa96a07734afade475f6a1e1587ec14965fe2de3"
] | [
"pet_ct/data/manual.py"
] | [
"\"\"\"\nProcess subclass that reads reports and outputs a labels csv.\n\"\"\"\n\nimport os\nimport copy\nfrom collections import defaultdict, OrderedDict\n\nimport pandas as pd\nfrom tqdm import tqdm, tqdm_notebook\nimport json\nimport numpy as np\nimport networkx as nx\nimport torch\nimport logging\nfrom torch.utils.data import DataLoader\nfrom scipy.sparse import coo_matrix\nfrom ipywidgets import interact\nimport ipywidgets as widgets\nfrom IPython.display import display, Markdown\n\nfrom pet_ct.util.util import Process\nfrom pet_ct.util.graphs import TriangleGraph\nimport pet_ct.learn.dataloaders as dataloaders\nimport pet_ct.learn.datasets as datasets\nfrom pet_ct.data.report_transforms import extract_impression, split_impression_sections, word_tokenize, sent_tokenize\nfrom metal.multitask.mt_label_model import MTLabelModel\nfrom metal.analysis import lf_summary\nimport pet_ct.model.models as models\nimport pet_ct.data.labeler as labeler\nimport pet_ct.data.task_graphs as task_graphs\nfrom pet_ct.data.term_graphs import TermGraph\nfrom pet_ct.data.vocab import WordPieceVocab\n\n\nclass ExamLabelsPredictor(Process):\n \"\"\"\n \"\"\"\n def __init__(self, dir,\n model_dir,\n dataset_class=\"ReportDataset\",\n dataset_args={},\n term_graph_dir=\"data/pet_ct_terms/terms.json\",\n terms=\"all\",\n match_task=\"fdg_abnorm\",\n split_fn=\"split_impression_sections\",\n max_len=200,\n vocab_args={},\n seed=123,\n cuda=True,\n devices=[0]):\n \"\"\"\n \"\"\"\n super().__init__(dir)\n self.cuda = cuda\n self.devices = devices\n self.device = devices[0]\n\n self.split_fn = globals()[split_fn]\n\n dataset = getattr(datasets, dataset_class)(**dataset_args)\n self.dataloader = DataLoader(dataset, batch_size=1)\n\n logging.info(\"Loading TermGraph and Vocab...\")\n self.match_task = match_task\n self.terms = terms\n self.term_graph = TermGraph(term_graph_dir)\n if terms == \"all\":\n self.terms = self.term_graph.term_names\n else:\n self.terms = terms\n\n self.vocab = WordPieceVocab(**vocab_args)\n self.max_len = max_len\n\n logging.info(\"Loading Model...\")\n self._load_model(model_dir)\n\n\n def _load_model(self, model_dir):\n \"\"\"\n \"\"\"\n with open(os.path.join(model_dir, \"params.json\")) as f:\n args = json.load(f)[\"process_args\"]\n model_class = args[\"model_class\"]\n model_args = args[\"model_args\"]\n if \"task_configs\" in args:\n new_task_configs = []\n for task_config in args[\"task_configs\"]:\n new_task_config = args[\"default_task_config\"].copy()\n new_task_config.update(task_config)\n new_task_configs.append(new_task_config)\n task_configs = new_task_configs\n\n model_args[\"task_configs\"] = task_configs\n\n model_class = getattr(models, model_class)\n self.model = model_class(cuda=self.cuda, devices=self.devices, **model_args)\n\n model_dir = os.path.join(model_dir, \"best\")\n model_path = os.path.join(model_dir, \"weights.pth.tar\")\n if not os.path.isfile(model_path):\n model_path = os.path.join(model_dir, \"weights.link\")\n\n self.model.load_weights(model_path, device=self.device)\n\n def label_exam(self, label, report, info):\n \"\"\"\n \"\"\"\n report_sections = self.split_fn(report[0].lower())\n term_to_outputs = defaultdict(list)\n\n #logging.info(f\"exam_id: {info['exam_id']}\")\n for report_section in report_sections:\n curr_matches = self.term_graph.match_string(report_section)\n if not curr_matches:\n # skip report sections without matches\n continue\n\n tokens = self.vocab.tokenize(report_section)\n\n if len(tokens) > self.max_len:\n tokens = 
tokens[:self.max_len]\n\n tokens = self.vocab.wrap_sentence(tokens)\n inputs = {\"report\": [tokens]}\n output = self.model.predict(inputs)[self.match_task]\n output = output.cpu().detach().numpy().squeeze()\n\n #logging.info(f\"section:{report_section}\")\n for match in curr_matches:\n match_idxs = self.vocab.get_tokens_in_range(tokens,\n report_section,\n match[\"start\"],\n match[\"end\"])\n\n match[\"output\"] = output[match_idxs, 1]\n term = match[\"term_name\"]\n term_to_outputs[term].append(np.mean(match[\"output\"]))\n #logging.info(f\"term: {match['term_name']} - {match['output']}\")\n #logging.info(\"-\"*5)\n\n labels = {}\n for term in self.terms:\n all_outputs = term_to_outputs[term][:]\n for descendant in self.term_graph.get_descendants(term):\n all_outputs.extend(term_to_outputs[descendant])\n all_outputs = np.array(all_outputs)\n prob = 1 - np.prod(1 - all_outputs)\n\n labels[(term, 0)] = 1 - prob\n labels[(term, 1)] = prob\n\n #logging.info(f\"term: {term}\")\n #logging.info(f\"all_outputs: {all_outputs}\")\n #logging.info(f\"prob: {prob}\")\n\n #logging.info(\"=\"*30 + \"\\n\")\n return labels\n\n\n def _run(self, overwrite=False):\n \"\"\"\n \"\"\"\n exam_id_to_labels = {}\n for idx, (label, report, info) in enumerate(tqdm(self.dataloader)):\n labels = self.label_exam(label, report, info)\n exam_id_to_labels[info[\"exam_id\"][0]] = labels\n\n labels_df = pd.DataFrame.from_dict(exam_id_to_labels, orient=\"index\")\n labels_df.to_csv(os.path.join(self.dir, \"exam_labels.csv\"))\n\n\nclass ExamLabelsBuilder(Process):\n \"\"\"\n \"\"\"\n\n def __init__(self, dir, exams_path, match_labeler_dir, term_graph_dir, terms, task,\n propagate_labels=True):\n \"\"\"\n \"\"\"\n super().__init__(dir)\n self.task = task\n self.propagate_labels = propagate_labels\n self.terms = terms\n self.exams = pd.read_csv(exams_path, index_col=0).index\n self.match_labels_df = pd.read_csv(os.path.join(match_labeler_dir, \"match_labels.csv\"), engine='python')\n self.term_graph = TermGraph(term_graph_dir)\n\n def _run(self, overwrite=False):\n \"\"\"\n \"\"\"\n exam_id_to_labels = {}\n for exam_id in self.exams:\n exam_id_to_labels[exam_id] = self.label_exam(exam_id)\n\n labels_df = pd.DataFrame.from_dict(exam_id_to_labels, orient=\"index\")\n labels_df.to_csv(os.path.join(self.dir, \"exam_labels.csv\"))\n\n def label_exam(self, exam_id):\n \"\"\"\n \"\"\"\n term_to_label = {term: False for term in self.terms}\n exam_matches = self.match_labels_df[self.match_labels_df[\"exam_id\"] == exam_id]\n for idx, exam_match in exam_matches.iterrows():\n term = exam_match[\"term_name\"]\n label = exam_match[f\"{self.task}_label\"] == \"abnormal\"\n if term in term_to_label:\n term_to_label[term] |= label\n\n if self.propagate_labels:\n for ancestor_term in self.term_graph.get_ancestors(term):\n if ancestor_term in term_to_label:\n term_to_label[ancestor_term] |= label\n labels = {}\n for term, abnorm in term_to_label.items():\n labels[(term, 0)] = float(not abnorm)\n labels[(term, 1)] = float(abnorm)\n\n return labels\n\nclass LabelerDatasetBuilder(Process):\n \"\"\"\n \"\"\"\n def __init__(self, dir, manual_dirs=[]):\n \"\"\"\n \"\"\"\n super().__init__(dir)\n self.manual_dirs = manual_dirs\n\n def _run(self, overwrite=False):\n \"\"\"\n \"\"\"\n dfs = []\n for manual_dir in self.manual_dirs:\n curr_df = pd.read_csv(os.path.join(manual_dir,\n \"match_labels.csv\"))\n dfs.append(curr_df)\n match_labels_df = pd.concat(dfs)\n match_labels_df.to_csv(os.path.join(self.dir, \"match_labels.csv\"))\n\n\nclass 
MatchLabeler(Process):\n \"\"\"\n A manual labeler for labeling term matches in PET/CT impressions./\n \"\"\"\n def __init__(self, dir,\n dataset_class=\"ReportDataset\",\n dataset_args={},\n term_graph_dir=\"data/pet_ct_terms/terms.json\",\n task_configs=[],\n split_fn=\"split_impression_sections\",\n num_exams=None):\n \"\"\"\n \"\"\"\n super().__init__(dir)\n self.split_fn = globals()[split_fn]\n dataset = getattr(datasets, dataset_class)(**dataset_args)\n dataloader = DataLoader(dataset, batch_size=1)\n\n logging.info(\"Loading tasks...\")\n self.task_configs = task_configs\n self.term_graph = TermGraph(term_graph_dir)\n\n labels_path = os.path.join(self.dir, \"match_labels.csv\")\n if os.path.exists(labels_path):\n logging.info(\"Loading labels...\")\n self.labels_df = pd.read_csv(labels_path)\n self.labeled_exams = set(self.labels_df[\"exam_id\"].unique())\n else:\n self.labels_df = None\n self.labeled_exams = set()\n self.skipped_exams = set()\n\n logging.info(\"Loading reports...\")\n self.exams = {}\n for idx, (label, report, info) in tqdm(enumerate(dataloader), total=len(dataloader)):\n if num_exams is not None and idx == num_exams:\n break\n\n self.exams[info[\"exam_id\"][0]] = {\n \"patient_id\": info[\"patient_id\"][0],\n \"report\": report[0].lower(),\n \"curr_label\": label[0]\n }\n\n def build_match_labeler(self, match, text):\n \"\"\"\n Builds the match labeler GUI. This GUI includes:\n 1) The matched term (e.g. lymph_node)\n 2) The report text with the matched words in bold\n 3) A ToggleButtons interface for each task. (task_buttons)\n 4) A not applicable ToggleButton, to be selected when the matched words\n don't actually correspond with the matched term. (na_button)\n \"\"\"\n match_labeler = {}\n match_labeler[\"match\"] = match\n out = widgets.Output()\n term_name = match['term_name']\n\n bold_text = text[:]\n bold_text = (bold_text[:match[\"start\"]] + \"** `\" +\n text[match[\"start\"] : match[\"end\"]] + \"` **\" +\n bold_text[match[\"end\"]:])\n display(Markdown(f\"## {term_name}\"))\n display(Markdown(f\"> {bold_text}\"))\n\n # task buttons\n task_buttons = {}\n for task_config in self.task_configs:\n # abnormality buttons\n task_button = widgets.ToggleButtons(\n value=task_config[\"default\"],\n options=task_config[\"options\"],\n description=task_config[\"description\"])\n\n def task_change(change):\n \"\"\"\n Callback function triggered by the task buttons toggles.\n \"\"\"\n with out:\n pass\n task_button.observe(task_change)\n task_buttons[task_config[\"task\"]] = task_button\n\n match_labeler['task_buttons'] = task_buttons\n\n # not applicable\n na_button = widgets.ToggleButton(value=False, description='Not applicable', icon='')\n\n def na_change(change):\n \"\"\"\n Function triggered by not applicable button toggles.\n \"\"\"\n with out:\n for task_button in task_buttons.values():\n task_button.value = None\n na_button.icon = 'check' if na_button.icon else ''\n na_button.observe(na_change)\n match_labeler[\"na_button\"] = na_button\n\n display(widgets.VBox(list(task_buttons.values())))\n display(na_button)\n return match_labeler\n\n def label_next(self):\n \"\"\"\n Label next example\n \"\"\"\n # get next unlabeled exam\n for exam_id in self.exams:\n if (exam_id not in self.labeled_exams and\n exam_id not in self.skipped_exams):\n break\n exam = self.exams[exam_id]\n report = exam['report']\n\n report_sections = self.split_fn(exam[\"report\"])\n\n # display report and exam\n display(Markdown(f\"### Progress\"))\n display(Markdown(f\"Exams labeled: 
{len(self.labeled_exams)}\"))\n display(Markdown(f\"Exams skipped: {len(self.skipped_exams)}\"))\n display(Markdown(f\"Exams remaining: {len(self.exams) - len(self.skipped_exams) - len(self.labeled_exams)}\"))\n display(Markdown(f\"# Exam: {exam_id}\"))\n display(Markdown(f\"## Full Impression\"))\n display(Markdown(f\"> {report}\"))\n\n # get all matches\n match_labelers = []\n for idx, report_section in enumerate(report_sections):\n matches = self.term_graph.match_string(report_section)\n if len(matches) == 0:\n continue\n display(Markdown(f\"---\\n ## Section {idx + 1}\"))\n for match in matches:\n labeler = self.build_match_labeler(match, report_section)\n labeler[\"match\"][\"section_idx\"] = idx\n match_labelers.append(labeler)\n\n out = widgets.Output()\n save_button = widgets.Button(\n description='Save',\n disabled=False,\n button_style='info', # 'success', 'info', 'warning', 'danger' or ''\n tooltip='Save',\n icon=''\n )\n\n def record_labels(b):\n \"\"\"\n Record the labels currently entered in the match_labelers.\n \"\"\"\n with out:\n labeled_matches = []\n for match_labeler in match_labelers:\n match = match_labeler[\"match\"]\n labeled_match = copy.deepcopy(match)\n labeled_match[\"exam_id\"] = exam_id\n for task, task_button in match_labeler[\"task_buttons\"].items():\n labeled_match[f\"{task}_label\"] = task_button.value\n labeled_match[\"not_applicable\"] = match_labeler[\"na_button\"].value\n labeled_matches.append(labeled_match)\n\n if self.labels_df is not None:\n # if exam already labeled, filter out old labels, concat new labels\n self.labels_df = self.labels_df[self.labels_df[\"exam_id\"] != exam_id]\n self.labels_df = pd.concat([self.labels_df,\n pd.DataFrame(labeled_matches)])\n else:\n self.labels_df = pd.DataFrame(labeled_matches)\n self.labels_df.to_csv(os.path.join(self.dir, \"match_labels.csv\"))\n self.labeled_exams.add(exam_id)\n\n # update save button\n save_button.button_style = 'success'\n save_button.icon = 'check'\n save_button.description = 'Saved.'\n skip_button.disabled = True\n save_button.on_click(record_labels)\n\n skip_button = widgets.Button(\n description='Skip',\n disabled=False,\n button_style='warning', # 'success', 'info', 'warning', 'danger' or ''\n tooltip='Skip',\n icon=''\n )\n def skip(b):\n \"\"\"\n Record the labels currently entered in the match_labelers.\n \"\"\"\n with out:\n self.skipped_exams.add(exam_id)\n\n # update save button\n skip_button.button_style = 'danger'\n skip_button.description = 'Skipped.'\n save_button.disabled = True\n skip_button.on_click(skip)\n\n display(Markdown(\"---\"))\n display(widgets.HBox([save_button, skip_button]))\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"torch.utils.data.DataLoader",
"pandas.DataFrame",
"numpy.mean",
"numpy.prod",
"pandas.DataFrame.from_dict",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
rylezhou/sunet-pytorch | [
"46473f4ba6ce442335f318b45aee50a357af92bf"
] | [
"inference/ensemble_predictions.py"
] | [
"# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport shutil\nfrom copy import deepcopy\n\nfrom inference.segmentation_export import save_segmentation_nifti_from_softmax\nfrom batchgenerators.utilities.file_and_folder_operations import *\nimport numpy as np\nfrom multiprocessing import Pool\nfrom postprocessing.connected_components import apply_postprocessing_to_folder, load_postprocessing\n\n\ndef merge_files(files, properties_files, out_file, override, store_npz):\n if override or not isfile(out_file):\n softmax = [np.load(f)['softmax'][None] for f in files]\n softmax = np.vstack(softmax)\n softmax = np.mean(softmax, 0)\n props = [load_pickle(f) for f in properties_files]\n\n reg_class_orders = [p['regions_class_order'] if 'regions_class_order' in p.keys() else None\n for p in props]\n\n if not all([i is None for i in reg_class_orders]):\n # if reg_class_orders are not None then they must be the same in all pkls\n tmp = reg_class_orders[0]\n for r in reg_class_orders[1:]:\n assert tmp == r, 'If merging files with regions_class_order, the regions_class_orders of all ' \\\n 'files must be the same. regions_class_order: %s, \\n files: %s' % \\\n (str(reg_class_orders), str(files))\n regions_class_order = tmp\n else:\n regions_class_order = None\n\n # Softmax probabilities are already at target spacing so this will not do any resampling (resampling parameters\n # don't matter here)\n save_segmentation_nifti_from_softmax(softmax, out_file, props[0], 3, regions_class_order, None, None,\n force_separate_z=None)\n if store_npz:\n np.savez_compressed(out_file[:-7] + \".npz\", softmax=softmax)\n save_pickle(props, out_file[:-7] + \".pkl\")\n\n\ndef merge(folders, output_folder, threads, override=True, postprocessing_file=None, store_npz=False):\n maybe_mkdir_p(output_folder)\n\n if postprocessing_file is not None:\n output_folder_orig = deepcopy(output_folder)\n output_folder = join(output_folder, 'not_postprocessed')\n maybe_mkdir_p(output_folder)\n else:\n output_folder_orig = None\n\n patient_ids = [subfiles(i, suffix=\".npz\", join=False) for i in folders]\n patient_ids = [i for j in patient_ids for i in j]\n patient_ids = [i[:-4] for i in patient_ids]\n patient_ids = np.unique(patient_ids)\n\n for f in folders:\n assert all([isfile(join(f, i + \".npz\")) for i in patient_ids]), \"Not all patient npz are available in \" \\\n \"all folders\"\n assert all([isfile(join(f, i + \".pkl\")) for i in patient_ids]), \"Not all patient pkl are available in \" \\\n \"all folders\"\n\n files = []\n property_files = []\n out_files = []\n for p in patient_ids:\n files.append([join(f, p + \".npz\") for f in folders])\n property_files.append([join(f, p + \".pkl\") for f in folders])\n out_files.append(join(output_folder, p + \".nii.gz\"))\n\n p = Pool(threads)\n p.starmap(merge_files, zip(files, property_files, out_files, [override] * len(out_files), [store_npz] * len(out_files)))\n 
p.close()\n p.join()\n\n if postprocessing_file is not None:\n for_which_classes, min_valid_obj_size = load_postprocessing(postprocessing_file)\n print('Postprocessing...')\n apply_postprocessing_to_folder(output_folder, output_folder_orig,\n for_which_classes, min_valid_obj_size, threads)\n shutil.copy(postprocessing_file, output_folder_orig)\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser(description=\"This script will merge predictions (that were prdicted with the \"\n \"-npz option!). You need to specify a postprocessing file so that \"\n \"we know here what postprocessing must be applied. Failing to do so \"\n \"will disable postprocessing\")\n parser.add_argument('-f', '--folders', nargs='+', help=\"list of folders to merge. All folders must contain npz \"\n \"files\", required=True)\n parser.add_argument('-o', '--output_folder', help=\"where to save the results\", required=True, type=str)\n parser.add_argument('-t', '--threads', help=\"number of threads used to saving niftis\", required=False, default=2,\n type=int)\n parser.add_argument('-pp', '--postprocessing_file', help=\"path to the file where the postprocessing configuration \"\n \"is stored. If this is not provided then no postprocessing \"\n \"will be made. It is strongly recommended to provide the \"\n \"postprocessing file!\",\n required=False, type=str, default=None)\n parser.add_argument('--npz', action=\"store_true\", required=False, help=\"stores npz and pkl\")\n\n args = parser.parse_args()\n\n folders = args.folders\n threads = args.threads\n output_folder = args.output_folder\n pp_file = args.postprocessing_file\n npz = args.npz\n\n merge(folders, output_folder, threads, override=True, postprocessing_file=pp_file, store_npz=npz)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.unique",
"numpy.savez_compressed",
"numpy.mean",
"numpy.load",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
l-bat/tensorflow | [
"f7bf8594bb102cf6ad7e8ab4193ea5a25e2b8ce2"
] | [
"tensorflow/python/ops/structured/structured_array_ops.py"
] | [
"# Lint as python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"StructuredTensor array ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom typing import Sequence\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.ops.ragged.row_partition import RowPartition\nfrom tensorflow.python.ops.structured.structured_tensor import StructuredTensor\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import dispatch\n\n\[email protected]_for_types(array_ops.expand_dims, StructuredTensor)\[email protected]_args(None, 'Use the `axis` argument instead', 'dim')\ndef expand_dims(input, axis=None, name=None, dim=None): # pylint: disable=redefined-builtin\n \"\"\"Creates a StructuredTensor with a length 1 axis inserted at index `axis`.\n\n This is an implementation of tf.expand_dims for StructuredTensor. Note\n that the `axis` must be less than or equal to rank.\n\n >>> st = StructuredTensor.from_pyval([[{\"x\": 1}, {\"x\": 2}], [{\"x\": 3}]])\n >>> tf.expand_dims(st, 0).to_pyval()\n [[[{'x': 1}, {'x': 2}], [{'x': 3}]]]\n >>> tf.expand_dims(st, 1).to_pyval()\n [[[{'x': 1}, {'x': 2}]], [[{'x': 3}]]]\n >>> tf.expand_dims(st, 2).to_pyval()\n [[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]\n >>> tf.expand_dims(st, -1).to_pyval() # -1 is the same as 2\n [[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]\n\n Args:\n input: the original StructuredTensor.\n axis: the axis to insert the dimension: `-(rank + 1) <= axis <= rank`\n name: the name of the op.\n dim: deprecated: use axis.\n\n Returns:\n a new structured tensor with larger rank.\n\n Raises:\n an error if `axis < -(rank + 1)` or `rank < axis`.\n \"\"\"\n axis = deprecation.deprecated_argument_lookup('axis', axis, 'dim', dim)\n return _expand_dims_impl(input, axis, name=name)\n\n\[email protected]_for_types(array_ops.expand_dims_v2, StructuredTensor)\ndef expand_dims_v2(input, axis, name=None): # pylint: disable=redefined-builtin\n \"\"\"Creates a StructuredTensor with a length 1 axis inserted at index `axis`.\n\n This is an implementation of tf.expand_dims for StructuredTensor. 
Note\n that the `axis` must be less than or equal to rank.\n\n >>> st = StructuredTensor.from_pyval([[{\"x\": 1}, {\"x\": 2}], [{\"x\": 3}]])\n >>> tf.expand_dims(st, 0).to_pyval()\n [[[{'x': 1}, {'x': 2}], [{'x': 3}]]]\n >>> tf.expand_dims(st, 1).to_pyval()\n [[[{'x': 1}, {'x': 2}]], [[{'x': 3}]]]\n >>> tf.expand_dims(st, 2).to_pyval()\n [[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]\n >>> tf.expand_dims(st, -1).to_pyval() # -1 is the same as 2\n [[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]\n\n Args:\n input: the original StructuredTensor.\n axis: the axis to insert the dimension: `-(rank + 1) <= axis <= rank`\n name: the name of the op.\n\n Returns:\n a new structured tensor with larger rank.\n\n Raises:\n an error if `axis < -(rank + 1)` or `rank < axis`.\n \"\"\"\n return _expand_dims_impl(input, axis, name=name)\n\n\[email protected]_for_types(array_ops.concat, StructuredTensor)\ndef concat(values, axis, name: str = 'concat'):\n \"\"\"tf.concat for structured tensors.\n\n Does not support (yet) checks on illegal axis values, et cetera.\n\n Args:\n values: a sequence of StructuredTensors.\n axis: an axis to concatenate upon.\n name: the name of the op(s).\n\n Returns:\n the params reorganized according to indices.\n \"\"\"\n if name is None:\n name = 'concat'\n _assert_concat_compatible_structured_tensors(values)\n def leaf_op(values):\n return array_ops.concat(values, axis)\n # TODO(martinz): handle axis when it is a tensor.\n axis = array_ops.get_positive_axis(axis, values[0].rank)\n with ops.name_scope(name, 'StructuredConcat', values):\n return _extend_op(values, leaf_op)\n\n\n# pylint: disable=protected-access\ndef zeros_like_object(st, dtype=None):\n \"\"\"Replace every object with a zero.\n\n Example:\n >>> st = StructuredTensor.from_pyval([{\"x\":[3]}, {\"x\":[4,5]}])\n >>> zeros_like_object(st)\n <tf.Tensor: shape=(2,), dtype=int32, numpy=array([0.0, 0.0], dtype=float32)>\n >>> st = StructuredTensor.from_pyval([[{\"x\":[3]}], [{\"x\":[4,5]}, {\"x\":[]}]])\n >>> zeros_like_object(st, dtype=tf.int32)\n <tf.RaggedTensor [[0], [0, 0]]>\n\n Args:\n st: a structured tensor.\n dtype: the dtype of the resulting zeros. (default is tf.float32)\n\n Returns:\n a tensor of zeros of the same shape.\n \"\"\"\n if dtype is None:\n dtype = dtypes.float32\n if not st._row_partitions:\n if st._nrows is not None:\n return array_ops.zeros([st._nrows], dtype) # vector.\n else:\n return array_ops.zeros([], dtype) # scalar.\n # 2D and up.\n last_row_partition = st._row_partitions[-1]\n\n result = ragged_tensor.RaggedTensor._from_nested_row_partitions(\n array_ops.zeros(last_row_partition.nvals(), dtype=dtype),\n st._row_partitions)\n return result\n\n\ndef _expand_dims_impl(st, axis, name=None): # pylint: disable=redefined-builtin\n \"\"\"Creates a StructuredTensor with a length 1 axis inserted at index `axis`.\n\n This is an implementation of tf.expand_dims for StructuredTensor. 
Note\n that the `axis` must be less than or equal to rank.\n\n >>> st = StructuredTensor.from_pyval([[{\"x\": 1}, {\"x\": 2}], [{\"x\": 3}]])\n >>> tf.expand_dims(st, 0).to_pyval()\n [[[{'x': 1}, {'x': 2}], [{'x': 3}]]]\n >>> tf.expand_dims(st, 1).to_pyval()\n [[[{'x': 1}, {'x': 2}]], [[{'x': 3}]]]\n >>> tf.expand_dims(st, 2).to_pyval()\n [[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]\n >>> tf.expand_dims(st, -1).to_pyval() # -1 is the same as 2\n [[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]\n\n Args:\n st: the original StructuredTensor.\n axis: the axis to insert the dimension: `-(rank + 1) <= axis <= rank`\n name: the name of the op.\n\n Returns:\n a new structured tensor with larger rank.\n\n Raises:\n an error if `axis < -(rank + 1)` or `rank < axis`.\n \"\"\"\n axis = array_ops.get_positive_axis(\n axis, st.rank + 1, axis_name='axis', ndims_name='rank(st)')\n with ops.name_scope(name, 'ExpandDims', [st, axis]):\n new_fields = {\n k: array_ops.expand_dims(v, axis) for (k, v) in st._fields.items()\n }\n new_shape = st.shape[:axis] + (1,) + st.shape[axis:]\n new_row_partitions = _expand_st_row_partitions(st, axis)\n new_nrows = st.nrows() if (axis > 0) else 1\n return StructuredTensor.from_fields(\n new_fields,\n shape=new_shape,\n row_partitions=new_row_partitions,\n nrows=new_nrows)\n\n\ndef _expand_st_row_partitions(st, axis):\n \"\"\"Create the row_partitions for expand_dims.\"\"\"\n if axis == 0:\n if st.shape.rank == 0:\n return ()\n nvals = st.nrows()\n new_partition = RowPartition.from_uniform_row_length(\n nvals, nvals, nrows=1, validate=False)\n return (new_partition,) + st.row_partitions\n elif axis == st.rank:\n nvals = (\n st.row_partitions[axis - 2].nvals() if (axis - 2 >= 0) else st.nrows())\n return st.row_partitions + (RowPartition.from_uniform_row_length(\n 1, nvals, nrows=nvals, validate=False),)\n else:\n nvals = (\n st.row_partitions[axis - 1].nrows() if (axis - 1 >= 0) else st.nrows())\n return st.row_partitions[:axis - 1] + (RowPartition.from_uniform_row_length(\n 1, nvals, nrows=nvals, validate=False),) + st.row_partitions[axis - 1:]\n\n\ndef _extend_op(values, leaf_op, empty_st_op=None):\n \"\"\"Extend an op from RaggedTensor and Tensor to StructuredTensor.\n\n Visits all children of the structured tensor, and children of children,\n applying leaf_op whenever it reaches a leaf, and empty_st_op whenever\n it reaches an internal node without children.\n\n Args:\n values: a list of structured tensors, ragged tensors, or tensors. All must\n have the same type. If they are structured tensors, they must have the\n same paths.\n leaf_op: an op for handling non-structured tensor.\n empty_st_op: op to create a structured tensor without fields.\n\n Returns:\n the result of the extended op (a StructuredTensor, RaggedTensor, or Tensor)\n\n Raises:\n ValueError:\n If values is not a Sequence or is empty.\n \"\"\"\n if not isinstance(values, Sequence):\n raise ValueError('Expected a list')\n\n if not values:\n raise ValueError('List cannot be empty')\n\n if empty_st_op is None:\n empty_st_op = empty_st_op_like_zeros(leaf_op)\n # Use the structure of the first StructuredTensor. They are all assumed to\n # be the same.\n value = values[0]\n\n if isinstance(value, StructuredTensor):\n # TODO(martinz): Calling empty_st_op may add unnecessary ops. 
Revisit later.\n empty_result = empty_st_op(values)\n if not value.field_names():\n return empty_result\n new_fields = {}\n for k in value.field_names():\n new_fields[k] = _extend_op([v.field_value(k) for v in values], leaf_op,\n empty_st_op)\n return StructuredTensor.from_fields(new_fields, shape=empty_result.shape)\n else:\n return leaf_op(values)\n\n\ndef empty_st_op_like_zeros(leaf_op):\n\n def empty_st_op(values):\n as_zeros = [\n zeros_like_object(value, dtype=dtypes.int32) for value in values\n ]\n result = leaf_op(as_zeros)\n return _structured_tensor_like(result)\n\n return empty_st_op\n\n\ndef _structured_tensor_from_dense_tensor(t):\n \"\"\"Create a structured tensor with the shape of a dense tensor.\"\"\"\n # Note: If a tensor will have rank 0,\n # it either has a fully defined shape or has unknown rank.\n if t.shape.is_fully_defined():\n return StructuredTensor.from_fields({}, shape=t.shape)\n elif t.shape.rank is None:\n raise ValueError(\"Can't build StructuredTensor w/ unknown rank\")\n elif t.shape.rank == 1:\n return StructuredTensor.from_fields({}, shape=t.shape,\n nrows=array_ops.shape(t)[0])\n else:\n rt = ragged_tensor.RaggedTensor.from_tensor(t)\n return _structured_tensor_from_row_partitions(t.shape,\n rt._nested_row_partitions)\n\n\ndef _structured_tensor_from_row_partitions(shape, row_partitions):\n return StructuredTensor.from_fields({},\n shape=shape,\n row_partitions=row_partitions)\n\n\n# pylint: disable=protected_access\ndef _all_nested_row_partitions(rt):\n \"\"\"Returns all nested row partitions in rt, including for dense dimensions.\"\"\"\n if isinstance(rt, ops.Tensor):\n if rt.shape.rank <= 1:\n return ()\n else:\n rt2 = ragged_tensor.RaggedTensor.from_tensor(rt)\n return rt2._nested_row_partitions\n else:\n tail_partitions = _all_nested_row_partitions(rt.flat_values)\n head_partitions = rt._nested_row_partitions # pylint: disable=protected_access\n return head_partitions + tail_partitions\n\n\ndef _structured_tensor_like(t):\n \"\"\"Create a StructuredTensor with the shape of a (composite) tensor.\"\"\"\n if isinstance(t, ops.Tensor):\n return _structured_tensor_from_dense_tensor(t)\n if ragged_tensor.is_ragged(t):\n return StructuredTensor.from_fields(\n {}, shape=t.get_shape(), row_partitions=_all_nested_row_partitions(t))\n # here, it is a StructuredTensor\n return StructuredTensor.from_fields({},\n shape=t.shape,\n row_partitions=t.row_partitions,\n nrows=t.nrows())\n\n\ndef _get_all_paths(st):\n \"\"\"Get all the paths from a StructuredTensor.\"\"\"\n fields = st.field_names()\n all_paths = {()}\n for k in fields:\n v = st.field_value(k)\n if isinstance(v, StructuredTensor):\n all_paths = all_paths.union([(k,) + p for p in _get_all_paths(v)])\n else:\n all_paths.add((k,))\n return all_paths\n\n\ndef _get_all_ranks(st):\n \"\"\"Get ranks of all submessages of a StructuredTensor.\"\"\"\n fields = st.field_names()\n all_ranks = {(): st.rank}\n for k in fields:\n v = st.field_value(k)\n if isinstance(v, StructuredTensor):\n for (k2, v2) in _get_all_ranks(v).items():\n all_ranks[(k,) + k2] = v2\n return all_ranks\n\n\ndef _assert_all_paths_match(values):\n \"\"\"Raises an error if the paths are not identical.\"\"\"\n paths = [_get_all_paths(st) for st in values]\n path_diff = set()\n for other_paths in paths[1:]:\n path_diff = path_diff.union(paths[0].symmetric_difference(other_paths))\n if path_diff:\n raise ValueError(\n 'Some paths are present in some, but not all, structured tensors: %r' %\n (path_diff,))\n\n\ndef _assert_all_ranks_match(values):\n 
\"\"\"Raises an error if the ranks of submessages are not identical.\"\"\"\n ranks = [_get_all_ranks(st) for st in values]\n for other_ranks in ranks[1:]:\n if other_ranks != ranks[0]:\n # TODO(martinz): If this becomes common, we can provide more detail.\n # e.g.: which path is inconsistent.\n raise ValueError('Ranks of sub-message do not match')\n\n\ndef _assert_concat_compatible_structured_tensors(values):\n \"\"\"Sometimes raises an error if concat doesn't make sense statically on values.\n\n values must be a sequence, and each element in values must be a structured\n tensor, and must have the same paths. Additionally, each path that is a\n submessage must have the same rank.\n\n These constraints are sufficient for concat on the fields to be the same\n as concat on structured tensors. This is meant to capture scenarios like\n paths that are not in the first structured tensor, but are in later\n structured tensors, which will just be ignored by the recursive algorithm.\n\n If the rank of a submessage was different for two structured tensors,\n then that is also a non-sensical merge.\n\n Note that all of these checks are static, as paths and submessage ranks\n are known.\n\n Args:\n values: a Sequence of StructuredTensors.\n\n Raises:\n ValueError: if there is any inconsistency as described above.\n \"\"\"\n if not isinstance(values, Sequence):\n raise ValueError('values must be a list of StructuredTensors (not a list)')\n if not values:\n raise ValueError('values must not be an empty list')\n for st in values:\n if not isinstance(st, StructuredTensor):\n raise ValueError('values must be a list of StructuredTensors')\n _assert_all_paths_match(values)\n _assert_all_ranks_match(values)\n"
] | [
[
"tensorflow.python.ops.structured.structured_tensor.StructuredTensor.from_fields",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.ragged.row_partition.RowPartition.from_uniform_row_length",
"tensorflow.python.ops.array_ops.get_positive_axis",
"tensorflow.python.util.deprecation.deprecated_args",
"tensorflow.python.ops.ragged.ragged_tensor.is_ragged",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.util.dispatch.dispatch_for_types",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_tensor",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.util.deprecation.deprecated_argument_lookup"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
shippingwang/PaddleVideo | [
"48e6bb5f67ad44f7ef3c5cd683e8e7b8c50f0918"
] | [
"paddlevideo/loader/dataset/frame.py"
] | [
"# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os.path as osp\nimport copy\nimport numpy as np\n\nfrom ..registry import DATASETS\nfrom .base import BaseDataset \n\[email protected]()\nclass FrameDataset(BaseDataset):\n \"\"\"Rawframe dataset for action recognition.\n The dataset loads raw frames from frame files, and apply specified transform operatation them.\n The indecx file is a text file with multiple lines, and each line indicates the directory of frames of a video, toatl frames of the video, and its label, which split with a whitespace.\n Example of an index file:\n\n .. code-block:: txt\n\n file_path-1 150 1\n file_path-2 160 1\n file_path-3 170 2\n file_path-4 180 2\n\n Args:\n file_path (str): Path to the index file.\n pipeline(XXX):\n data_prefix (str): directory path of the data. Default: None.\n valid_mode (bool): Whether to bulid the valid dataset. Default: False.\n suffix (str): suffix of file. Default: 'img_{:05}.jpg'.\n\n \"\"\"\n def __init__(self,\n file_path,\n pipeline,\n data_prefix=None,\n valid_mode=False,\n suffix='img_{:05}.jpg'):\n\n #unique attribute in frames dataset.\n self.suffix = suffix\n\n super().__init__(\n file_path,\n pipeline,\n data_prefix,\n valid_mode)\n\n def load_file(self):\n \"\"\"Load index file to get video information.\"\"\"\n info = []\n with open(self.file_path, 'r') as fin:\n for line in fin:\n line_split = line.strip().split()\n frame_dir, frames_len, labels = line_split\n if self.data_prefix is not None:\n frame_dir = osp.join(self.data_prefix, frame_dir)\n info.append(dict(frame_dir=frame_dir, frames_len=frames_len, labels=int(labels)))\n return info\n\n def prepare_train(self, idx):\n \"\"\"Prepare the frames for training given index. \"\"\"\n results = copy.deepcopy(self.info[idx])\n results['suffix'] = self.suffix\n #Note: For now, paddle.io.DataLoader cannot support dict type retval, so convert to list here\n to_list = self.pipeline(results)\n #XXX have to unsqueeze label here or before calc metric!\n return [to_list['imgs'], np.array([to_list['labels']])]\n\n\n def prepare_valid(self, idx):\n \"\"\"Prepare the frames for training given index. \"\"\"\n results = copy.deepcopy(self.info[idx])\n results['suffix'] = self.suffix\n #Note: For now, paddle.io.DataLoader cannot support dict type retval, so convert to list here\n to_list = self.pipeline(results)\n #XXX have to unsqueeze label here or before calc metric!\n return [to_list['imgs'], np.array([to_list['labels']])]\n\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TalSchuster/FewRel | [
"af68f52b13977ca29808c38a54995363f76cdcad"
] | [
"models/d.py"
] | [
"import sys\nimport FewRel.fewshot_re_kit as fewshot_re_kit\nimport torch\nfrom torch import autograd, optim, nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\n\nclass Discriminator(nn.Module):\n \n def __init__(self, hidden_size=230, num_labels=2):\n nn.Module.__init__(self)\n self.hidden_size = hidden_size\n self.num_labels = num_labels\n self.fc1 = nn.Linear(hidden_size, hidden_size)\n self.relu1 = nn.ReLU()\n self.drop = nn.Dropout()\n self.fc2 = nn.Linear(hidden_size, 2)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.relu1(x)\n x = self.drop(x)\n logits = self.fc2(x)\n return logits\n"
] | [
[
"torch.nn.Dropout",
"torch.nn.Linear",
"torch.nn.ReLU",
"torch.nn.Module.__init__"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
henniekim/filelist | [
"a0f791000d76c23e65564ff426aae13c8875a1b0"
] | [
"makeFileList.py"
] | [
"import numpy as np\n\nfilelist = '/datahdd/workdir/donghyun/filelist/result/filelist.csv'\nframelist = '/datahdd/workdir/donghyun/filelist/result/framelist.csv'\nnewFilePath = '/datahdd/workdir/donghyun/filelist/result/'\ndelimiter = ','\n\nfileName = np.genfromtxt(filelist, delimiter=delimiter, usecols=0, dtype=None, encoding='utf-8')\nframeList = np.genfromtxt(framelist, delimiter=delimiter, usecols=0, dtype=None, encoding='utf-8')\n\nprint(str(len(fileName)))\nfileName= np.reshape(fileName, (len(fileName),1))\nframeList= np.reshape(frameList, (len(frameList),1))\n\nfileList = np.insert(fileName, 1, values=0, axis=1)\nfileList = np.append(fileList, frameList, axis=1)\n\nnp.savetxt(newFilePath+'FileList.csv', fileList, '%s', delimiter=delimiter)"
] | [
[
"numpy.savetxt",
"numpy.append",
"numpy.insert",
"numpy.genfromtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hungnt55/RaRecognize | [
"be3ae11de56da0aa6a33fdf50bfc5bf601ff8a01"
] | [
"code/predict_1k.py"
] | [
"from tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nimport sys\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import StratifiedKFold\nimport random as rnd\nfrom imblearn.over_sampling import SMOTE\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import confusion_matrix, precision_recall_curve, auc, roc_auc_score, roc_curve, recall_score, classification_report\nimport sor_wc_wk_joint as sor\nimport cv_split\nimport time\nimport pickle\n\npfile = 'test_data/test_' + sys.argv[1] + '.npz'\n\nparams = np.load(pfile)\n\ntrain_classes = params['train_classes']\ntest_classes = params['test_classes']\ntrain_index = params['train_index']\ntest_index = params['test_index']\n\nrisk_class_files = ['Drought.csv', 'Earthquakes.csv', 'Explosions.csv', 'Floods.csv', 'Forest_and_Brush_Fire.csv', 'Hazardous_and_Toxic_Substance.csv', 'Landslides.csv', 'Lighting.csv', 'Snowstorms.csv', 'Tornado.csv', 'Tropical Storms.csv', 'Volcanoes.csv', 'Water_Pollution.csv']\n\nrisk_class_dict = {}\nfor i in range(len(risk_class_files)):\n risk_class_dict[i+1] = risk_class_files[i]\n\ndef remove_label(docs):\n for i in range(len(docs)):\n docs[i] = docs[i].replace('\"1, ','').replace('\"0, ','').replace(\"'0, \",'').replace(\"'0, \",'')\n return docs\n\nrisk_classes = {}\nfor risk_file in risk_class_files:\n risk_classes[risk_file] = pd.read_csv('../data/NYTimes_data/'+risk_file, header = None)[0].tolist()\n\nnon_risk_file = 'non_risk_docs.csv'\n\nnon_risk_class = pd.read_csv('../data/NYTimes_data/'+non_risk_file, header = None)[0].tolist()\n\nX = []\nY = []\n\nclass_id = 1\n\nfor risk_file in risk_class_files:\n X += risk_classes[risk_file]\n Y += [class_id] * len(risk_classes[risk_file])\n class_id += 1\n\nX += non_risk_class\nY += [0] * len(non_risk_class) \n\nX = remove_label(X)\n\ntfidf = TfidfVectorizer(max_features=1000, ngram_range=(1,1), stop_words='english', token_pattern=u'(?ui)\\\\b\\\\w*[a-z]+\\\\w*\\\\b')\n\nfeatures = tfidf.fit_transform(X).toarray()\nlabels = Y\n\ndef run_test(features, labels, train_classes, test_classes, train_index, test_index):\n cpu_time = 0\n features = np.array(features)\n labels = np.array(labels)\n xtrain = features[np.isin(labels,train_classes),:]\n ytrain = labels[np.isin(labels,train_classes)]\n\n xtest = features[np.isin(labels,test_classes),:]\n ytest = labels[np.isin(labels,test_classes)]\n\n X_train, X_test = xtrain[train_index], xtrain[test_index]\n y_train, y_test = ytrain[train_index], ytrain[test_index]\n\n y_test_l = y_test.tolist()\n\n model = sor.HierarchicalClassifierModel(input_size = X_train[0].size, num_classes = len(risk_class_files), learning_rate = 1e-3, num_epochs = 1000, batch_size = 100, l1 = 0, l2 = 0, train_classes = train_classes)\n\n model_s = pickle.load(open('test_results/trained_model_'+sys.argv[1] + '_joint.m', 'rb'))\n\n model.evt_fit_threshold(X_train, y_train)\n\n y_pred = model.predict(X_test, 0)\n\t\n np.savetxt('test_results/' +sys.argv[1] + '_joint_seen.pred', y_pred)\n\n for classk in range(len(test_classes)):\n print('test class', 
test_classes[classk])\n xtest_ri = xtest[ytest == test_classes[classk]]\n y_pred_ri = model.predict(xtest_ri, 0)\n np.savetxt('test_results/' + sys.argv[1] + '_joint_unseen_' + str(classk) + '.pred', y_pred_ri)\n\nrun_test(features, labels, train_classes, test_classes, train_index, test_index)"
] | [
[
"pandas.read_csv",
"numpy.savetxt",
"numpy.load",
"numpy.array",
"sklearn.feature_extraction.text.TfidfVectorizer",
"numpy.isin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
yuzeh/metagrok | [
"27f71441653611de939f1fe43e7aee6a7cdf1981"
] | [
"metagrok/config.py"
] | [
"import numpy as np\nimport torch\n\nfrom metagrok.constants import DTYPE\nfrom metagrok import np_json as json\n\n# Load the project config. This contains directories for everything\ntry:\n _config = json.load('config.json')\nexcept IOError:\n raise Exception('`config.json` file not found')\nexcept ValueError:\n raise Exception('`config.json` file is malformed')\n\n_use_cuda = []\n\ndef use_cuda():\n assert len(_use_cuda) == 1, 'please call set_cuda first'\n return _use_cuda[0]\n\ndef set_cuda(cuda):\n assert len(_use_cuda) == 0, 'set_cuda already called'\n\n if cuda and not torch.cuda.is_available():\n raise ValueError('Cannot use cuda, not available')\n _use_cuda.append(cuda)\n\ndef nt():\n return DTYPE\n\n_nt_to_tt = {\n 'float16': 'HalfTensor',\n 'float32': 'FloatTensor',\n 'float64': 'DoubleTensor',\n}\n\ndef tt():\n tt = _nt_to_tt[nt()]\n if use_cuda():\n return '.'.join(['torch', 'cuda', tt])\n return '.'.join(['torch', tt])\n\ndef nptype(v):\n return np.dtype(nt()).type(v)\n\ndef get(key):\n return _config[key]\n "
] | [
[
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Vignesh-95/astronomical-observation-classification-neural-network | [
"11615afbffe22a54e8fb940f8dc49b56b4cf47bc"
] | [
"neuralNetwork.py"
] | [
"import tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style('whitegrid')\n\nSMALL_SIZE = 10\nMEDIUM_SIZE = 12\n\nplt.rc('font', size=SMALL_SIZE)\nplt.rc('axes', titlesize=MEDIUM_SIZE)\nplt.rc('axes', labelsize=MEDIUM_SIZE)\nplt.rcParams['figure.dpi']=150\n\n\ndef next_batch(num, train_data, labels):\n batch = idx[:num]\n data_shuffle = [train_data[j] for j in batch]\n labels_shuffle = [labels[j] for j in batch]\n\n return np.asarray(data_shuffle), np.asarray(labels_shuffle)\n\n\nif __name__ == \"__main__\":\n # TODO: Ensure all steps performed\n averageTrainingError = []\n averageGeneralisationError = []\n averageClassificationError = []\n num_runs = 3\n\n # Importing Data\n data = pd.read_csv(\"/home/vignesh/PycharmProjects/SloanDigitalSkySurvey/\"\n \"astronomical-observation-classification-neural-network/\"\n \"Skyserver_SQL2_27_2018 6_51_39 PM.csv\", skiprows=1)\n\n # TODO: Data Analysis and Exploration - Statistics and Visual Graphs\n\n # Take Action on Data - Data Filtering\n data.drop(['objid', 'run', 'rerun', 'camcol', 'field', 'specobjid'], axis=1, inplace=True)\n\n # TODO: Feature Engineering\n\n # Try Different Feature Scaling and Normalisation Techniques\n #\n # Standardisation/Normalisation/Z-score\n data_num = data.select_dtypes(include=[np.number])\n data_num = (data_num - data_num.mean()) / data_num.std()\n data[data_num.columns] = data_num\n one_hot = pd.get_dummies(data['class'])\n # Linear Min-Max Scaling\n # data_num = data.select_dtypes(include=[np.number])\n # minimum = data_num.min()\n # data_num = (data_num - minimum)/(data_num.max() - minimum)\n # data[data_num.columns] = data_num\n # one_hot = pd.get_dummies(data['class'], dtype=np.float32)\n\n x_values = data_num.values\n y_values = one_hot.values\n\n # Python optimisation variables\n learning_rate = 0.5\n epochs = 100\n batch_size = 128\n lamb = 0.000001\n total_hidden_neurons_1 = 5\n weight_stdevs = 0.03\n keep_prob = 0.9\n\n train_test_split_ratio = 0.8\n validation_train_split_size = 0.2\n total_patterns = y_values.shape[0]\n train_set_size = int(total_patterns * train_test_split_ratio)\n test_set_size = int(total_patterns - train_set_size)\n validation_set_size = int(train_set_size * validation_train_split_size)\n train_set_size = train_set_size - validation_set_size\n total_input_dimensions = len(data_num.columns)\n total_output_dimensions = len(one_hot.columns)\n\n indices_array = np.arange(0, total_patterns)\n np.random.seed(0)\n np.random.shuffle(indices_array)\n train_indices = indices_array[:train_set_size]\n validation_indices = indices_array[train_set_size:train_set_size + validation_set_size]\n test_indices = indices_array[train_set_size + validation_set_size:]\n x_train = [x_values[index] for index in train_indices]\n y_train = [y_values[index] for index in train_indices]\n x_validate = [x_values[index] for index in validation_indices]\n y_validate = [y_values[index] for index in validation_indices]\n x_test = [x_values[index] for index in test_indices]\n y_test = [y_values[index] for index in test_indices]\n\n for run in range(num_runs):\n keep_prob = 0.9\n learning_rate = 0.5\n np.random.seed(run)\n tf.set_random_seed(run)\n\n # declare the training data placeholders\n x = tf.placeholder(tf.float32, [None, total_input_dimensions])\n y = tf.placeholder(tf.float32, [None, total_output_dimensions])\n\n W1 = tf.Variable(tf.random_normal([total_input_dimensions, total_hidden_neurons_1],\n stddev=weight_stdevs), name='W1')\n 
b1 = tf.Variable(tf.random_normal([total_hidden_neurons_1]), name='b1')\n W2 = tf.Variable(tf.random_normal([total_hidden_neurons_1, total_output_dimensions],\n stddev=weight_stdevs), name='W2')\n b2 = tf.Variable(tf.random_normal([total_output_dimensions]), name='b2')\n\n # calculate the output of the hidden layer\n hidden_out1 = tf.add(tf.matmul(x, W1), b1)\n hidden_out1 = tf.nn.sigmoid(hidden_out1)\n\n # Regularization using dropout\n # Mannual\n # dropout_output = np.random.rand(total_input_dimensions, total_hidden_neurons_1)\n # for i in range(dropout_output.shape[0]):\n # for j in range(dropout_output.shape[1]):\n # if dropout_output[i][j] > keep_prob:\n # dropout_output[i][j] = 0\n # hidden_out1 = tf.multiply(hidden_out1, dropout_output)\n # hidden_out1 /= keep_prob\n # Library Function\n # hidden_out1 = tf.nn.dropout(hidden_out1, keep_prob)\n\n # now calculate the hidden layer output - in this case, let's use a softmax activated\n # output layer\n y_ = tf.nn.softmax(tf.add(tf.matmul(hidden_out1, W2), b2))\n\n # now let's define the cost function which we are going to train the model on\n y_clipped = tf.clip_by_value(y_, 1e-10, 0.9999999)\n cross_entropy = -tf.reduce_mean(tf.reduce_sum(y * tf.log(y_clipped)\n + (1 - y) * tf.log(1 - y_clipped), axis=1))\n\n # TODO: Try different regularization schemes\n # # Weight Decay Regularization\n # regularization = (lamb/2) * (tf.reduce_sum(tf.square(W1)) + tf.reduce_sum(tf.square(W2)))\n # cross_entropy = cross_entropy + regularization\n\n # add an optimiser\n optimiser = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cross_entropy)\n\n # finally setup the initialisation operator\n init_op = tf.global_variables_initializer()\n\n # define an accuracy assessment operation\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n # start the session\n with tf.Session() as sess:\n # initialise the variables\n sess.run(init_op)\n total_batch = int(train_set_size / batch_size)\n average_epochs = 0\n average_epochs_generalisation = 0\n generalisation_errors = []\n for epoch in range(epochs):\n avg_cost = 0\n idx = np.arange(0, len(x_train))\n np.random.shuffle(idx)\n for i in range(total_batch):\n batch_x, batch_y = next_batch(batch_size, x_train, y_train)\n idx = idx[batch_size:]\n # learning_rate = learning_rate * 0.99999\n _, c = sess.run([optimiser, cross_entropy], feed_dict={x: batch_x, y: batch_y})\n avg_cost += c / total_batch\n print(\"Epoch:\", (epoch + 1), \"cost =\", \"{:.3f}\".format(avg_cost))\n average_epochs += avg_cost\n gen = sess.run(cross_entropy, feed_dict={x: np.asarray(x_validate),\n y: np.asarray(y_validate)})\n generalisation_errors.append(gen)\n average_epochs_generalisation += gen\n\n average_epochs = average_epochs/epochs\n average_epochs_generalisation = average_epochs_generalisation/epochs\n averageTrainingError.append(average_epochs)\n averageGeneralisationError.append(average_epochs_generalisation)\n\n # print(\"\\n\\nTraining complete!\\n\\n\")\n # keep_prob = 1\n averageClassificationError.append(sess.run(accuracy, feed_dict={x: np.asarray(x_test),\n y: np.asarray(y_test)}))\n\n print(\"\\n\\nAverage Training Error: \", np.mean(averageTrainingError), \"\\t\\tStandard Deviation: \",\n np.std(averageTrainingError))\n print(\"\\n\\nAverage Generalisation Error: \", np.mean(averageGeneralisationError), \"\\t\\tStandard Deviation: \",\n np.std(averageGeneralisationError))\n print(\"\\n\\nAverage Classification Error: 
\", np.mean(averageClassificationError), \"\\t\\tStandard Deviation: \",\n np.std(averageClassificationError))\n"
] | [
[
"numpy.asarray",
"tensorflow.cast",
"matplotlib.pyplot.rc",
"numpy.mean",
"pandas.read_csv",
"numpy.arange",
"numpy.std",
"tensorflow.Session",
"tensorflow.argmax",
"pandas.get_dummies",
"tensorflow.matmul",
"tensorflow.nn.sigmoid",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.set_random_seed",
"tensorflow.clip_by_value",
"numpy.random.seed",
"numpy.random.shuffle",
"tensorflow.log",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
Somraz/Spam-Emails-Classifier | [
"79553e46640229ca2e3d65163da1d2c0a0bfa776"
] | [
"Classifier.py"
] | [
"import re\r\nimport csv\r\nimport os\r\nimport pandas as pd\r\nfrom string import punctuation\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom nltk import stem\r\nfrom nltk.corpus import stopwords\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom nltk.stem import WordNetLemmatizer\r\n\r\nrow=['tag','data']\r\nwith open('emails.csv', 'a',newline='') as csvFile:\r\n writer = csv.writer(csvFile)\r\n writer.writerow(row)\r\ncsvFile.close()\r\nfor i in range(1,11):\r\n for filename in os.listdir(\"D:\\\\Semester 5th\\\\ML\\\\bare\\\\part\"+str(i)+\"\"):\r\n if filename.endswith(\".txt\"):\r\n loc=\"D:\\\\Semester 5th\\\\ML\\\\bare\\\\part\"+str(i)+\"\\\\\"+filename\r\n f = open(loc, \"r\")\r\n text=f.read()\r\n name=filename[0]\r\n if(name=='s'):\r\n name=\"spam\"\r\n else:\r\n name=\"ham\"\r\n row=[name,text]\r\n f.close()\r\n with open('emails.csv', 'a',newline='') as csvFile:\r\n writer = csv.writer(csvFile)\r\n writer.writerow(row)\r\n csvFile.close()\r\n\r\nstopwords = set(stopwords.words('english'))\r\nstemmer = stem.SnowballStemmer('english')\r\nlemmatizer = WordNetLemmatizer()\r\n\r\ndef lemmetization(email):\r\n \r\n email = \" \".join([stemmer.stem(word) for word in email.split()])\r\n email = \" \".join([lemmatizer.lemmatize(word, pos='v') for word in email.split()])\r\n return email\r\n\r\ndef stop_words(email):\r\n\r\n email = \" \".join([word for word in email.split() if word not in stopwords])\r\n return email\r\n\r\ndef clean_email(email):\r\n \r\n email = re.sub(r'http\\S+', ' ', email)\r\n email = re.sub(\"\\d+\", \" \", email)\r\n email = email.replace('\\n', ' ')\r\n email = email.translate(str.maketrans(\"\", \"\", punctuation))\r\n email = email.lower()\r\n email = re.sub(' +', ' ',email)\r\n return email\r\n\r\nwith open('emails.csv', 'r') as readFile:\r\n reader = csv.reader(readFile)\r\n lines = list(reader)\r\nfor data in lines:\r\n data[1]=clean_email(data[1])\r\n \r\nwith open('emails1.csv', 'w', newline='') as writeFile:\r\n writer = csv.writer(writeFile)\r\n writer.writerows(lines)\r\nwriteFile.close()\r\n\r\nfor data in lines:\r\n data[1]=stop_words(data[1])\r\n \r\nwith open('emails2.csv', 'w', newline='') as writeFile:\r\n writer = csv.writer(writeFile)\r\n writer.writerows(lines)\r\nwriteFile.close()\r\n\r\n\r\nfor data in lines:\r\n data[1]=lemmetization(data[1])\r\n \r\nwith open('emails3.csv', 'w', newline='') as writeFile:\r\n writer = csv.writer(writeFile)\r\n writer.writerows(lines)\r\nreadFile.close()\r\nwriteFile.close()\r\n\r\nresult1=['TP','TN','FP','FN','recall','precision','f1 score','accuracy']\r\nwith open('results.csv', 'a',newline='') as csvFile:\r\n writer = csv.writer(csvFile)\r\n writer.writerow(result1)\r\ncsvFile.close()\r\nfor i in range(0,4):\r\n if (i==3):\r\n my_file = pd.read_csv(\"emails\"+str(i)+\".csv\")\r\n else:\r\n my_file = pd.read_csv(\"emails\"+str(i+1)+\".csv\")\r\n my_file = my_file[['tag', 'data']]\r\n final_score=0\r\n total_TN,total_TP,total_FN,total_FP,total_recall,total_precision,total_f1_score=0,0,0,0,0,0,0\r\n start_row=0\r\n end_row=289\r\n\r\n for j in range(0,10):\r\n msg_train=pd.concat([my_file.iloc[1:start_row,1],my_file.iloc[end_row:,1]])\r\n msg_test=my_file.iloc[start_row:end_row,1]\r\n class_train=pd.concat([my_file.iloc[1:start_row,0],my_file.iloc[end_row:,0]])\r\n class_test=my_file.iloc[start_row:end_row,0]\r\n\r\n if i==3:\r\n vectorizer = 
CountVectorizer(max_df=0.99,min_df=0.008)\r\n else:\r\n vectorizer = CountVectorizer()\r\n counts = vectorizer.fit_transform(msg_train.values)\r\n\r\n classifier= MultinomialNB()\r\n targets=class_train.values\r\n classifier.fit(counts, targets)\r\n\r\n test_count=vectorizer.transform(msg_test)\r\n predictions=classifier.predict(test_count)\r\n #final_score=final_score+accuracy_score(class_test,predictions)\r\n #print(classification_report(class_test,predictions))\r\n CM = confusion_matrix(class_test,predictions)\r\n #print(CM)\r\n\r\n total_TN,TN=total_TN+CM[0][0],CM[0][0]\r\n total_FN,FN=total_FN+CM[1][0],CM[1][0]\r\n total_TP,TP=total_TP+CM[1][1],CM[1][1]\r\n total_FP,FP=total_FP+CM[0][1],CM[0][1]\r\n print(\"True Negative=\",TN)\r\n print(\"False Negative=\",FN)\r\n print(\"True Positive=\",TP)\r\n print(\"False Positive=\",FP)\r\n \r\n recall=CM[1][1]/(CM[1][1]+CM[1][0])\r\n total_recall=recall+total_recall\r\n print(\"recall=\",recall)\r\n\r\n precision=CM[1][1]/(CM[1][1]+CM[0][1])\r\n precision=total_precision+precision\r\n print(\"precision=\",precision)\r\n\r\n f1_score=2*((precision*recall)/(precision+recall))\r\n total_f1_score=total_f1_score+f1_score\r\n print(\"f1 score=\",f1_score)\r\n\r\n accuracy=(CM[0][0]+CM[1][1])/(CM[0][0]+CM[1][1]+CM[0][1]+CM[1][0])\r\n print(\"accuracy=\",accuracy)\r\n\r\n final_score=final_score+accuracy\r\n result1=[TP,TN,FP,FN,recall,precision, f1_score,accuracy]\r\n with open('results.csv', 'a',newline='') as csvFile:\r\n writer = csv.writer(csvFile)\r\n writer.writerow(result1)\r\n csvFile.close()\r\n \r\n start_row=start_row+289\r\n end_row=end_row+289\r\n\r\n result1=[total_TP/10,total_TN/10,total_FP/10,total_FN/10,total_recall/10,total_precision/10,total_f1_score/10,final_score/10,\"Average scores\"]\r\n with open('results.csv','a') as csvFile:\r\n writer=csv.writer(csvFile)\r\n writer.writerow(result1)\r\n csvFile.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] | [
[
"sklearn.naive_bayes.MultinomialNB",
"pandas.concat",
"sklearn.metrics.confusion_matrix",
"sklearn.feature_extraction.text.CountVectorizer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
grotheer/pyAFQ | [
"3a531b5bdc3d53f4a76d5d604a26fde488e1aaf6"
] | [
"AFQ/viz/fury_backend.py"
] | [
"import tempfile\nimport os\nimport os.path as op\nimport logging\n\nimport numpy as np\nimport IPython.display as display\n\nimport AFQ.viz.utils as vut\n\ntry:\n from dipy.viz import window, actor, ui\n from fury.colormap import line_colors\nexcept ImportError:\n raise ImportError(vut.viz_import_msg_error(\"fury\"))\n\nviz_logger = logging.getLogger(\"AFQ.viz\")\n\n\ndef _inline_interact(scene, inline, interact):\n \"\"\"\n Helper function to reuse across viz functions\n \"\"\"\n if interact:\n viz_logger.info(\"Showing interactive scene...\")\n window.show(scene)\n\n if inline:\n viz_logger.info(\"Showing inline scene...\")\n tdir = tempfile.gettempdir()\n fname = op.join(tdir, \"fig.png\")\n window.snapshot(scene, fname=fname, size=(1200, 1200))\n display.display_png(display.Image(fname))\n\n return scene\n\n\ndef visualize_bundles(sft, affine=None, n_points=None, bundle_dict=None,\n bundle=None, colors=None, color_by_volume=None,\n cbv_lims=[None, None], figure=None, background=(1, 1, 1),\n interact=False, inline=False, flip_axes=None):\n \"\"\"\n Visualize bundles in 3D using VTK\n\n Parameters\n ----------\n sft : Stateful Tractogram, str\n A Stateful Tractogram containing streamline information\n or a path to a trk file\n In order to visualize individual bundles, the Stateful Tractogram\n must contain a bundle key in it's data_per_streamline which is a list\n of bundle `'uid'`.\n\n affine : ndarray, optional\n An affine transformation to apply to the streamlines before\n visualization. Default: no transform.\n\n n_points : int or None\n n_points to resample streamlines to before plotting. If None, no\n resampling is done.\n\n bundle_dict : dict, optional\n Keys are names of bundles and values are dicts that should include\n a key `'uid'` with values as integers for selection from the sft\n metadata. Default: bundles are either not identified, or identified\n only as unique integers in the metadata.\n\n bundle : str or int, optional\n The name of a bundle to select from among the keys in `bundle_dict`\n or an integer for selection from the sft metadata.\n\n colors : dict or list\n If this is a dict, keys are bundle names and values are RGB tuples.\n If this is a list, each item is an RGB tuple. Defaults to a list\n with Tableau 20 RGB values if bundle_dict is None, or dict from\n bundles to Tableau 20 RGB values if bundle_dict is not None.\n\n color_by_volume : ndarray or str, optional\n 3d volume use to shade the bundles. If None, no shading\n is performed. Only works when using the plotly backend.\n Default: None\n\n cbv_lims : ndarray\n Of the form (lower bound, upper bound). Shading based on\n color_by_volume will only differentiate values within these bounds.\n If lower bound is None, will default to 0.\n If upper bound is None, will default to the maximum value in\n color_by_volume.\n Default: [None, None]\n\n background : tuple, optional\n RGB values for the background. Default: (1, 1, 1), which is white\n background.\n\n figure : fury Scene object, optional\n If provided, the visualization will be added to this Scene. Default:\n Initialize a new Scene.\n\n interact : bool\n Whether to provide an interactive VTK window for interaction.\n Default: False\n\n inline : bool\n Whether to embed the visualization inline in a notebook. Only works\n in the notebook context. 
Default: False.\n\n flip_axes : None\n This parameter is to conform fury and plotly APIs.\n\n Returns\n -------\n Fury Scene object\n \"\"\"\n\n if figure is None:\n figure = window.Scene()\n\n figure.SetBackground(background[0], background[1], background[2])\n\n for (sls, color, name, _) in vut.tract_generator(\n sft, affine, bundle, bundle_dict, colors, n_points):\n sls = list(sls)\n if name == \"all_bundles\":\n color = line_colors(sls)\n\n sl_actor = actor.line(sls, color)\n figure.add(sl_actor)\n sl_actor.GetProperty().SetRenderLinesAsTubes(1)\n sl_actor.GetProperty().SetLineWidth(6)\n\n return _inline_interact(figure, inline, interact)\n\n\ndef scene_rotate_forward(scene):\n scene.elevation(90)\n scene.set_camera(view_up=(0.0, 0.0, 1.0))\n scene.reset_camera()\n return scene\n\n\ndef create_gif(figure,\n file_name,\n n_frames=60,\n zoom=1,\n z_offset=0.5,\n size=(600, 600),\n rotate_forward=True):\n \"\"\"\n Convert a Fury Scene object into a gif\n\n Parameters\n ----------\n figure: Fury Scene object\n Scene to be converted to a gif\n\n file_name: str\n File to save gif to.\n\n n_frames: int, optional\n Number of frames in gif.\n Will be evenly distributed throughout the rotation.\n Default: 60\n\n zoom: int, optional\n How much to magnify the figure in the fig.\n Default: 1\n\n size: tuple, optional\n Size of the gif.\n Default: (600, 600)\n\n rotate_forward: bool, optional\n Whether to rotate the figure forward before converting to a gif.\n Generally necessary for fury scenes.\n Default: True\n \"\"\"\n if rotate_forward:\n figure = scene_rotate_forward(figure)\n\n tdir = tempfile.gettempdir()\n window.record(figure, az_ang=360.0 / n_frames, n_frames=n_frames,\n path_numbering=True, out_path=tdir + '/tgif',\n magnification=zoom,\n size=size)\n\n vut.gif_from_pngs(tdir, file_name, n_frames,\n png_fname=\"tgif\", add_zeros=True)\n\n\ndef visualize_roi(roi, affine_or_mapping=None, static_img=None,\n roi_affine=None, static_affine=None, reg_template=None,\n name='ROI', figure=None, color=np.array([1, 0, 0]),\n flip_axes=None,\n opacity=1.0, inline=False, interact=False):\n \"\"\"\n Render a region of interest into a VTK viz as a volume\n\n Parameters\n ----------\n roi : str or Nifti1Image\n The ROI information\n\n affine_or_mapping : ndarray, Nifti1Image, or str, optional\n An affine transformation or mapping to apply to the ROIs before\n visualization. Default: no transform.\n\n static_img: str or Nifti1Image, optional\n Template to resample roi to.\n Default: None\n\n roi_affine: ndarray, optional\n Default: None\n\n static_affine: ndarray, optional\n Default: None\n\n reg_template: str or Nifti1Image, optional\n Template to use for registration.\n Default: None\n\n name: str, optional\n Name of ROI for the legend.\n Default: 'ROI'\n\n color : ndarray, optional\n RGB color for ROI.\n Default: np.array([1, 0, 0])\n\n flip_axes : None\n This parameter is to conform fury and plotly APIs.\n\n opacity : float, optional\n Opacity of ROI.\n Default: 1.0\n\n figure : fury Scene object, optional\n If provided, the visualization will be added to this Scene. Default:\n Initialize a new Scene.\n\n interact : bool\n Whether to provide an interactive VTK window for interaction.\n Default: False\n\n inline : bool\n Whether to embed the visualization inline in a notebook. Only works\n in the notebook context. 
Default: False.\n\n Returns\n -------\n Fury Scene object\n \"\"\"\n roi = vut.prepare_roi(roi, affine_or_mapping, static_img,\n roi_affine, static_affine, reg_template)\n\n if figure is None:\n figure = window.Scene()\n\n roi_actor = actor.contour_from_roi(roi, color=color, opacity=opacity)\n figure.add(roi_actor)\n\n return _inline_interact(figure, inline, interact)\n\n\ndef visualize_volume(volume, x=None, y=None, z=None, figure=None,\n flip_axes=None,\n opacity=0.6, inline=True, interact=False):\n \"\"\"\n Visualize a volume\n\n Parameters\n ----------\n volume : ndarray or str\n 3d volume to visualize.\n\n figure : fury Scene object, optional\n If provided, the visualization will be added to this Scene. Default:\n Initialize a new Scene.\n\n flip_axes : None\n This parameter is to conform fury and plotly APIs.\n\n opacity : float, optional\n Initial opacity of slices.\n Default: 0.6\n\n interact : bool\n Whether to provide an interactive VTK window for interaction.\n Default: False\n\n inline : bool\n Whether to embed the visualization inline in a notebook. Only works\n in the notebook context. Default: False.\n\n Returns\n -------\n Fury Scene object\n \"\"\"\n volume = vut.load_volume(volume)\n\n if figure is None:\n figure = window.Scene()\n\n shape = volume.shape\n image_actor_z = actor.slicer(volume)\n slicer_opacity = opacity\n image_actor_z.opacity(slicer_opacity)\n\n image_actor_x = image_actor_z.copy()\n if x is None:\n x = int(np.round(shape[0] / 2))\n image_actor_x.display_extent(x,\n x,\n 0,\n shape[1] - 1,\n 0,\n shape[2] - 1)\n\n image_actor_y = image_actor_z.copy()\n\n if y is None:\n y = int(np.round(shape[1] / 2))\n image_actor_y.display_extent(0,\n shape[0] - 1,\n y,\n y,\n 0,\n shape[2] - 1)\n\n figure.add(image_actor_z)\n figure.add(image_actor_x)\n figure.add(image_actor_y)\n\n show_m = window.ShowManager(figure, size=(1200, 900))\n show_m.initialize()\n\n if interact:\n line_slider_z = ui.LineSlider2D(min_value=0,\n max_value=shape[2] - 1,\n initial_value=shape[2] / 2,\n text_template=\"{value:.0f}\",\n length=140)\n\n line_slider_x = ui.LineSlider2D(min_value=0,\n max_value=shape[0] - 1,\n initial_value=shape[0] / 2,\n text_template=\"{value:.0f}\",\n length=140)\n\n line_slider_y = ui.LineSlider2D(min_value=0,\n max_value=shape[1] - 1,\n initial_value=shape[1] / 2,\n text_template=\"{value:.0f}\",\n length=140)\n\n opacity_slider = ui.LineSlider2D(min_value=0.0,\n max_value=1.0,\n initial_value=slicer_opacity,\n length=140)\n\n def change_slice_z(slider):\n z = int(np.round(slider.value))\n image_actor_z.display_extent(\n 0, shape[0] - 1, 0, shape[1] - 1, z, z)\n\n def change_slice_x(slider):\n x = int(np.round(slider.value))\n image_actor_x.display_extent(\n x, x, 0, shape[1] - 1, 0, shape[2] - 1)\n\n def change_slice_y(slider):\n y = int(np.round(slider.value))\n image_actor_y.display_extent(\n 0, shape[0] - 1, y, y, 0, shape[2] - 1)\n\n def change_opacity(slider):\n slicer_opacity = slider.value\n image_actor_z.opacity(slicer_opacity)\n image_actor_x.opacity(slicer_opacity)\n image_actor_y.opacity(slicer_opacity)\n\n line_slider_z.on_change = change_slice_z\n line_slider_x.on_change = change_slice_x\n line_slider_y.on_change = change_slice_y\n opacity_slider.on_change = change_opacity\n\n def build_label(text):\n label = ui.TextBlock2D()\n label.message = text\n label.font_size = 18\n label.font_family = 'Arial'\n label.justification = 'left'\n label.bold = False\n label.italic = False\n label.shadow = False\n label.background = (0, 0, 0)\n label.color = (1, 
1, 1)\n\n return label\n\n line_slider_label_z = build_label(text=\"Z Slice\")\n line_slider_label_x = build_label(text=\"X Slice\")\n line_slider_label_y = build_label(text=\"Y Slice\")\n opacity_slider_label = build_label(text=\"Opacity\")\n\n panel = ui.Panel2D(size=(300, 200),\n color=(1, 1, 1),\n opacity=0.1,\n align=\"right\")\n panel.center = (1030, 120)\n\n panel.add_element(line_slider_label_x, (0.1, 0.75))\n panel.add_element(line_slider_x, (0.38, 0.75))\n panel.add_element(line_slider_label_y, (0.1, 0.55))\n panel.add_element(line_slider_y, (0.38, 0.55))\n panel.add_element(line_slider_label_z, (0.1, 0.35))\n panel.add_element(line_slider_z, (0.38, 0.35))\n panel.add_element(opacity_slider_label, (0.1, 0.15))\n panel.add_element(opacity_slider, (0.38, 0.15))\n\n show_m.scene.add(panel)\n\n global size\n size = figure.GetSize()\n\n def win_callback(obj, event):\n global size\n if size != obj.GetSize():\n size_old = size\n size = obj.GetSize()\n size_change = [size[0] - size_old[0], 0]\n panel.re_align(size_change)\n\n show_m.initialize()\n\n figure.zoom(1.5)\n figure.reset_clipping_range()\n\n if interact:\n show_m.add_window_callback(win_callback)\n show_m.render()\n show_m.start()\n\n return _inline_interact(figure, inline, interact)\n"
] | [
[
"numpy.round",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
eddy-geek/interpret | [
"d75e1b1f53b6fbd78611a08b856bfe59dc1d8e2f"
] | [
"python/interpret-core/interpret/glassbox/ebm/bin.py"
] | [
"# Copyright (c) 2019 Microsoft Corporation\n# Distributed under the MIT software license\n\nimport math\nfrom collections import Counter\nfrom itertools import count, repeat, groupby\nfrom warnings import warn\nfrom multiprocessing.sharedctypes import RawArray\nimport numpy as np\nimport numpy.ma as ma\nfrom sklearn.base import (\n BaseEstimator,\n TransformerMixin,\n)\nfrom sklearn.utils.validation import check_is_fitted\n\nimport logging\n_log = logging.getLogger(__name__)\n\ntry:\n import pandas as pd\n _pandas_installed = True\nexcept ImportError:\n _pandas_installed = False\n\ntry:\n import scipy as sp\n _scipy_installed = True\nexcept ImportError:\n _scipy_installed = False\n\nfrom .internal import Native\nfrom .utils import DPUtils\n\n# BIG TODO LIST:\n#- review this entire bin.py file\n#- write a cython single instance prediction pathway\n#- consider re-writing most of this bin.py functionality in cython for anything that gets used during prediction for speed\n#- test: clean_vector with ma.masked_array... and other stuff in there\n#- test: clean_X with pd.Series with missing values and maybe a categorical -> gets converted as N features and 1 sample\n#- test: clean_X with list that CONTAINS a ma.masked_array sample entry with missing data and without missing data\n#- add better processing for ignored columsn where we return the existing data if we can, and we return all None\n# values if not which our caller can detect. Then unify_data2 can convert that to int(0) values which should work for\n# all feature types\n#- disable 'ignore' columns temporarily. We need to update C++ to make a distinction because you can have 3 real columns and 5 referencable columsn and our datastructures need to be updated to handle this in C++ first\n#- handle the thorny questions of converting float to int for categorical strings\n# - in the object converter, convert all int64/uint64 and all floats objects to float64, then use the floor check\n# and compare with +-9007199254740991 to decide if they should be expressed as integers or floats\n# - after np.unique for categoricals, convert int64 and uint64 types to float64 and then re-run np.unique on those\n# values to figure out if there are collisions in the float64 space for integers. We actually have more\n# work to do in this case since we'll also get bad reverse indexes with more categories than we have unique values\n# Perhaps we can just detect this scenario in the integer space by checking for 9007199254740991 < abs(x) with\n# integers and if it's true then convert to float64 before calling np.unique again? It'll be infrequent to have\n# such large integers, and we only need to check with int64 and np.uint64 since they are the only ones that can make non-unique floats\n# - leave bools as \"False\"/\"True\", BUT we have a corner case in _densify_object_ndarray if we have mixed types\n# we convert to unicode, and bools become \"False\"/\"True\" and then subequently fail the test of being able to \n# be converted to floats, so we need to record the bool types and convert them to 0/1 for the conversion to float\n# test. 
First, we can detect if there are any bools via \"types = set(map(type, X_col))\", then we can \n# find all the bools with np.logical_or(X_col == np.array(False), X_col == np.array(True)) or something like that\n# - strip leading and trailing spaces when attempting to convert to float BUT NOT FOR STRING CATEGORICALS!\n# - def convert_float_category_str(vals):\n# vals = vals.astype(np.float64, copy=False)\n# integerizable = np.logical_and(vals == np.floor(vals), vals.abs() <= THE_MAX_FLOAT)\n# integers = vals[integerizable]\n# floats = vals[~integerizable]\n# integers = integers.astype(np.int64).astype(np.unicode_)\n# floats = integers.astype(np.unicode_) # or perhaps shuttle it to C++\n# objs = np.empty(len(vals), dtype=np.object)\n# np.place(objs, integerizable, integers)\n# np.place(objs, ~integerizable, floats)\n# vals = objs.astype(np.unicode_)\n# return vals\n# - add support for a \"ordinal_fast\" and \"nominal_fast\". We would accept these in feature_types as\n# a dict of (int/float -> string) for 'ordinal_fast', and (string -> int/float) for 'nominal_fast'\n# the we'd write our feature_types_in values as \"ordinal_fast\" and \"nominal_fast\" and we'd exepct\n# integers in whatever evaluation format we got. This would allow us to accept a float64 numpy array\n# and have inside that nominal/ordinal/continuous/missing values that would be highly compressed. Both of these\n# would have restriction in that the numbers would have to be contiguous (maybe allowing for compression??) and\n# would start from 1, with 0 as reserved for missing values. A big issues is that with this encoding, the \n# system on which we do predict needs to also encode them as integers and they have no flexibility to change\n# that, except perhaps they could edit the model to change from 'nominal_fast' to 'nominal'\n# { \"Canada\" : 1, \"Japan\" : 2, \"Seychelles\" : 3} => string to int mapping -> nominals\n# { 1: \"low\", 2: \"medium\", 3: \"high\" } => int to object(string) mapping -> ordinals\n# We still record these as [\"low\", \"medium\", \"high\"] and [\"Canada\", \"Japan\", \"Seychelles\"] and we use the\n# feature type value to know that these are \"ordinal_fast\" and \"nominal_fast\"\n\n\n# FUTURE TODOS in our callers and in JSON:\n# - look into ISO 6093:1985 -> https://www.titanwolf.org/Network/q/4d680399-6711-4742-9900-74a42ad9f5d7/y\n# - support \"category compression\" where we take a number like 10 and compress any categories together that\n# have less than that number of samples. Internally, this works well for the prior_categories parameter since\n# we can have multiple strings map to identical numbers, so \"low\" and \"medium\" can be groups and separate from high\n# with {\"low\": 1, \"medium\": 1, \"high\":2} and in JSON we can record these as [[\"low\", \"medium\"], \"high\"]\n# We support different category compressions for pairs or even individual features since we allow\n# separate category definitios per pair axis. Our unify_columns generator can support these by extracting the\n# raw data once and then applying different category dictionaries to the raw data and then yielding those\n# the caller to the generator can quickly determine which categories we're responding to using the pointer id(..)\n# comparisons without examining all the internal dictionary definitions, and we can minimize\n# work done by having a single object with a single id(..) 
pointer that is shared between prior_categories objects\n# if they are identical at model load time.\n# - if we recieve an unknown float64 value in a 'nominal' or 'ordinal', then check if all the categorical\n# value strings are convertible to float64. If that's the case then find the mid-point between the categories\n# after they are converted to strings and create a pseudo-continuous value of the feature and figure out where\n# the previously unseen float64 should go. WE do need to sort the category strings by float64, but we don't\n# to to compute the split points because we can just do a binary search against the categories after they are\n# converted to floats and then look at the distance between the upper and lower category and choose the one\n# that is closest, and choose the upper one if the distance is equal since then the cut would be on the value\n# and we use lower bound semantics (where the value gets into the upper bin if it's exactly the cut value)\n# - eventually, we'll want to have an EBMData data frame that'll store just\n# floats and integers and convert strings to integers on the fly as data is added\n# AND more importantly, you could create this EBMData with a reference to a model\n# and then you could populate it with the correct integer mapping, so \"low\", \"medium\", \"high\"\n# get populated internally as 1, 2, 3 IDENTICALLY to the model from which the\n# EBMData frame was created from. If we get a dataframe from anywhere else then\n# we can't be confident the mapping is identical, and we need to use a dictionary\n# of some kind, either from string to integer or integer to integer to do the mapping\n# so having our own dataframe makes it possible to have faster prediction scenarios\n# Unfortunately, taking a Pandas dataframe as input doesn't allow us to escape the hashtable\n# step, so whehter we get strings or integers is kind of similar in terms of processing speed\n# although hashing strings is slower.\n# - the EBMData frame should be constructable by itself without a model reference if it's going to \n# be used to train a model, so we sort of have 2 states:\n# - 1: no model reference, convert strings to integers using hashes on the fly\n# - 2: model reference. Use the model's dictionary mapping initially, but allow new strings or integers\n# to be added as necessary, but anything below what the model knows about we map diretly to the right integers\n# - we should create post-model modification routines so someone could construct an integer based\n# ordinal/categorical and build their model and evaluate it efficiently, BUT when they want\n# to view the model they can replace the \"1\", \"2\", \"3\" values with \"low\", \"medium\", \"high\" for graphing\n\n\n\n\n# NOTES:\n# - IMPORTANT INFO FOR BELOW: All newer hardware (including all Intel processors) use the IEEE-754 floating point\n# standard when encoding floating point numbers. In IEEE-754, smaller whole integers have perfect representations \n# in float64 representation. Float64 looses the ability to distinquish between integers though above the number \n# 9007199254740991. 9007199254740992 and 9007199254740993 both become 9007199254740992 when converted to float64 \n# and back to ints. 
All int32 and uint32 values have perfect float64 representation, but there are collisions\n# for int64 and uint64 values above these high numbers.\n# - a desirable property for EBM models is that we can serialize them and evaluate them in different \n# programming languages like C++, R, JavaScript, etc\n# - ideally, we'd have just 1 serialization format, and JSON is a good choice as that format since we can then\n# load models into JavaScript easily, and it's also well supported accross other languages as well.\n# - JSON also has the benefit that it's human readable, which is important for an intelligible model.\n# - JSON and JavaScript have fairly limited support for data types. Only strings and float64 numbers are recognized.\n# There are no integer datatypes in JavaScript or JSON. This works for us though since we can use strings to \n# encode nominals/ordinals, and float64 values to define 'continuous' cut points.\n# - 'continuous' features should always be converted to float64 before discretization because:\n# - float64 is more universal accross programming languages. Python's float type is a float64. R only supports\n# float64. JavaScript is only float64, etc. GPUs are the excpetion where only float32 are sometimes supported\n# but we only do discretization at the injestion point before any GPUs get used, so that isn't a concern.\n# - our model definition in JSON is exclusively float64, and we don't to add complexity to indicate if a number\n# is a float64 or float32, and even then what would we do with a float32 in JavaScript?\n# - float64 continuous values gives us perfect separation and conversion of float32 values, which isn't true \n# for the inverse\n# - The long double (float80) equivalent is pretty much dead and new hardware doesn't support it. In the off\n# chance someone has data with this type then we loose some precision and some values which might have been\n# separable will be lumped together, but for continuous values the cut points are somewhat arbitary anyways, so\n# this is acceptable.\n# - Some big int64 or uint64 values collide when converting to float64 for numbers above 9007199254740991, \n# so we loose the ability to distinquish them, but like for float80 values \n# this loss in precision is acceptable since continuous features by nature group similar values together. \n# The problem is worse for float32, so float64 is better in this regard.\n# - 'nominal' and 'ordinal' features are pretty compatible between languages when presented to us as strings\n# but the caller can specify that integer/boolean/float values should be treated as 'nominal'/'ordinal' and \n# then things become tricky for a number of reasons:\n# - it's pretty easy in python and in other languages to silently convert integers to floats. Let's say \n# we have a categorical where the possible values are 1, 2, 3, and 4.1, but 4.1 is very unlikely and might\n# occur zero times in any particular dataset. If during training our unique values are np.array([1, 2, 3]), \n# but during predict time let's say we observe np.array([1, 2, 3, 4.1]). Python will silently convert these to \n# floats resulting in np.array([1.0, 2.0, 3.0, 4.1]), and then when we convert to strings we get \n# [\"1.0\", \"2.0\", \"3.0\", \"4.1\"] instead of our original categories of [\"1\", \"2\", \"3\"], so now none of our \n# categories match. 
This would be a very easy mistake to make and would result in a hard to diagnose bug.\n# A solution to this problem of silently converting integers to floats would be to change our text conversion\n# such that floats which are whole numbers are converted to integers in text. So then we'd get\n# [\"1\", \"2\", \"3\", \"4.1\"] as our categories. We can do this efficiently in python and in many other languages\n# by checking if floor(x) == x for float64 values. I think it's also nicer visually in graphs of categoricals\n# that any numbers are shown as integers when possible\n# - another benefit of making whole number floats as integers is that integer to string conversions are relatively\n# easy to do cross-language, but floats are almost never converted to identical strings the same way across \n# languages since there are many legal conversions. \n# \"33.3\", \"33.299999999999997\", \"3.3e1\", \"3.3e+01\" are all legal text representations for the float value of 33.3\n# - we have an issue in that all numbers above 9007199254740991 (and in fact some numbers below that) will \n# be equal to their floor, so will appear to be whole numbers. We don't want 1.0e300 to be converted \n# to an integer, so we need some kind of maximum value above which we change to floating point representation\n# Since integers don't exist in JavaScript, we can't really represent all numbers above 9007199254740991\n# with unique categoricals, so we can't have truely cross-platform integers above that value, so it makes\n# sense for us to make all whole numbers equal to or less than 9007199254740991 integers, and any number\n# above that point as a floating point. This has the disadvantage that some integers above 9007199254740991\n# will have the same categorical strings and be non-separable, but having some collisions in extreme values\n# is probably better than the alternative of getting different categorical strings in different programming\n# languages where integers do not exist. By making all numbers larger than 9007199254740991 as floating\n# point values, the caller will at least see that we're using exponential float representations instead of\n# integers, so although they may not understand why we switch to float representation above 9007199254740991\n# it will at least be apparent what is happening so they can correct the issues by converting to strings themselves.\n# - The only way we could guarantee that identical float64 values in different programming languages generate\n# the same text would be if we implemented a float to text converter in C++ (the standard library provides no\n# cross platform guarantees), and if we sent our floating point values into C++ for conversion. This is possible\n# to do because we only care about performance during predict time for this converstion to strings, and at predict\n# time we already know if a feature is nominal/ordinal/continuous, and presumably there aren't too many\n# categories because otherwise the feature wouldn't be very useful, so we can pass the relatively few floating\n# point values into C++ and get back a single string separated by spaces of the text conversions.\n# - if we're presented with an array of np.object_, we can't give a guarantee that unique inputs will generate unique\n# categories since the caller could present us with int(0) and \"0\", or some object type who's __str__ function\n# generates a \"0\". 
We can't obviously support generalized object types when we serialize to JSON, or any\n# other cross-language model serialization format.\n# - here's an interesting conundrum. np.float64(np.float32(\"1.1\")) != np.float64(\"1.1\"). Also,\n# np.float64(np.float32(1.1)) gives \"1.100000023841858\". The problem here is that the float32 converter finds \n# the float32 value that is closest to 1.1. That value is a float though so if you convert that to a float64\n# value all the lower mantissa bits are zeros in the float64 value. If you take the string \"1.1\" and convert\n# it to float64 though the converter will find the closest float64 value where the text after the 1.1... isn't\n# required for roundtripping. That float64 will have non-zero bits in the lower mantissa where the float32\n# value for \"1.1\" does not, so they are not equal. This is a problem because if we build an EBM model in one\n# language with a float32 and in annother language with a float64 that is the same value we expect them to have\n# the same nominal or ordinal string, but they don't. In the language with the float32 value we get \"1.1\"\n# and in the language with the float64 we get \"1.100000023841858\" and they don't match. The solution is to \n# convert all float32 values to float64 in all languages so that we get \"1.100000023841858\" in both. This feels\n# odd since str(my_float32) might give \"1.1\" so it'll be confusing to the caller, but at least we'll get\n# consistent results. I think we need to make the assumption that the caller has the same binary float\n# represetation in both langauges. If that's true then any errors are caused by the caller really since\n# they are presenting slightly different data in both languages. They should be able to resolve it by using\n# float64 everywhere which should be available in all mainstream languages, unlike float32.\n# - other double to text and text to double:\n# https://github.com/google/double-conversion/blob/master/LICENSE -> BSD-3\n# https://stackoverflow.com/questions/28494758/how-does-javascript-print-0-1-with-such-accuracy -> https://www.cs.tufts.edu/~nr/cs257/archive/florian-loitsch/printf.pdf\n# https://github.com/juj/MathGeoLib/blob/master/src/Math/grisu3.c -> ?\n# https://github.com/dvidelabs/flatcc/blob/master/external/grisu3/grisu3_print.h -> Apache 2.0\n# https://github.com/dvidelabs/flatcc/tree/master/external/grisu3\n# https://www.ryanjuckett.com/printing-floating-point-numbers/\n# https://github.com/catboost/catboost/blob/ff34a3aadeb2e31e573519b4371a252ff5e5f209/contrib/python/numpy/py3/numpy/core/src/multiarray/dragon4.h\n# Apparently Numpy has a copy of Ryan Juckett's code liceded in MIT instead of Zlib license\n# YES! -> float to string in MIT license: \n# https://github.com/numpy/numpy/blob/3de252be1215c0f9bc0a2f5c3aebdd7ffc86e410/numpy/core/src/multiarray/dragon4.h\n# https://github.com/numpy/numpy/blob/3de252be1215c0f9bc0a2f5c3aebdd7ffc86e410/numpy/core/src/multiarray/dragon4.c\n# - Python uses the gold standard for float/string conversion: http://www.netlib.org/fp/dtoa.c\n# https://github.com/python/cpython/blob/main/Python/dtoa.c\n# This code outputs the shortest possible string that uses IEEE 754 \"exact rounding\" using bankers' rounding \n# which also guarantees rountrips precicely. This is great for interpretability. 
Unfortunatetly this means\n# that we'll need code in the other languages that generates the same strings and for converting back to floats.\n# Fortunately the python C++ code is available and we can use that to get the exact same conversions and make\n# that available in other languages to call into the C++ to harmonize floating point formats.\n# Python is our premier language and has poor performance if you try to do operations in loops, so we'll\n# force all the other platforms to conform to python specifications.\n# - when we recieve bool values in python we can probably keep the python string representations of \"False\" and \"True\".\n# Unlike float64 values, there are just 2 possible bool values and we express them as JavaScript bool items,\n# and with just 2 possible values there are no issues with different\n# hard to standardize string formats. I like giving the user a little more context of the underlying value in\n# the graphs, and \"True\", \"False\" are a bit nicer than \"false\" and \"true\" or \"FALSE\" and \"TRUE\"\n# - If our caller gives us strings [\" a \", \"a\"] we will consider those to be two separate categories since the caller \n# could have some requirement to keep these as separate categories. Eliminating the whitespace makes it impossible\n# for our caller to differentiate these. If the caller wants these to be the same string then they can preprocess this\n# aspect themselves.\n# - np.unique has some issues. It doesn't like None values. It considers int(4) and float(4.0) to be identical\n# it sucks in performance with np.object_ arrays since it uses python comparers. It doesn't call\n# __str__ on objects, so we get collisions if the object later converts to a string that is already a category.\n# If there are many np.nan values, then the uniques array has many np.nan entries! We've fixed all of these \n# by filtering out None and np.nan values, and we've converted objects to a strong types\n# - If we aren't given a feature type and we get data that is just [0, 1], should we treat this as\n# 'nominal' or a 'continuous' value with a split at 0.5? We'd rather our graphs be bar graphs showing \n# a bar for 0 and annother bar for 1, which implies nominal, but this has a problem if the\n# feature can rarely be something like 1.1. Maybe we just never saw a 1.1 in our data even though\n# it can occur. If this happens then a string label of 1.1 doesn't match '1' and we fail. If\n# we treated data this way then it wouldn't really be legal for production systems to not\n# specify one of the feature types since an unlikely occurence could produce a nominal type\n# from a continuous type and then fail at predict time. Our solution is if we see new categories at predict time\n# to check if the new categories are convertible to float64 and if that's true and if all the other prior categories\n# that we saw during fit time are also convertible to float64, then we are allowed to switch to treating them as continuous\n# during predict time. This way we get to have nice bar graphs of '0' and '1', but we won't generate an error\n# if we see 1.1 at predict time since it gets put into the [0.5 +inf) bin. We treat\n# [0, 1, 2] and [0, 1, 9] and [1.1, 2.2] the same way and have a threshold of categories below which we treat these\n# as cateogoricals during training.\n# - If we recieve pure floats from the caller we'll either generate a continuous feature_type and any differences\n# in the floating point cut points should be fairly minor. 
Alternatively, we'll get a 'nominal' which is \n# also ok since our floating point strings won't match the ones at fit time and then they'll be converted to \n# continuous values and very likely end up in the same bin as the original floats as they'll be very close in value\n# since we soft-convert nominals with all float64 values into continuous values when necessary/possible\n# - Let's say we get the strings ['0', '00', '0.0', '0.0e10']. If the caller forced this as a nominal we'd have\n# 4 values, but if we decided that this should be a 'continuous_auto' value then we'd be converting this to only \n# one floating point value, which makes it useless. What this is highlighting is that our unique cutoff point\n# where we choose whether a feature should be 'nominal_auto' or 'continuous_auto' should be decided by the number\n# of unique float64 values that the strings convert into. Hopefully different platforms get the same floating point\n# values based on string inputs, which is annother reason why we should have a consistent C++ implementation.\n# - we use the terms ordinal and nominal to indicate different types of categoricals \n# (https://en.wikipedia.org/wiki/Ordinal_data). A lot of ML pacakges use categorical instead of the more \n# specific term nominal since they don't support ordinals (requiring ordinal data to be handled as \n# continuous/numerical). We however, being an interpretable package, want to have a built in oridinal \n# feature type so that we can display \"low\", \"medium\", \"high\" instead of 1, 2, 3 on graphs, so\n# it makes sense for us to make the distinction of having nominal and ordinal features which are both categoricals\n# This also aligns nicely with the pandas.CategoricalDtype which is used to specify both ordinals and nominals.\n\n\n\n\n\n\n\n_disallowed_types = frozenset([complex, list, tuple, range, bytes, bytearray, memoryview, set, frozenset, dict, Ellipsis, np.csingle, np.complex_, np.clongfloat, np.void])\n_none_list = [None]\n_none_ndarray = np.array(None)\n\ndef _densify_object_ndarray(X_col):\n # called under: fit or predict\n\n # numpy hierarchy of types\n # https://numpy.org/doc/stable/reference/arrays.scalars.html\n\n # TODO: add special case handling if there is only 1 sample to make that faster\n\n types = set(map(type, X_col))\n if len(types) == 1:\n if str in types:\n return X_col.astype(np.unicode_)\n elif bool in types:\n return X_col.astype(np.bool_)\n\n if all(one_type is int or issubclass(one_type, np.integer) for one_type in types):\n if all(issubclass(one_type, np.unsignedinteger) for one_type in types):\n if all(one_type is np.uint8 for one_type in types):\n return X_col.astype(np.uint8)\n types.discard(np.uint8)\n\n if all(one_type is np.uint16 for one_type in types):\n return X_col.astype(np.uint16)\n types.discard(np.uint16)\n\n if all(one_type is np.uint32 for one_type in types):\n return X_col.astype(np.uint32)\n\n return X_col.astype(np.uint64)\n\n if all(one_type is np.int8 for one_type in types):\n return X_col.astype(np.int8)\n types.discard(np.int8)\n\n if all(one_type is np.uint8 or one_type is np.int16 for one_type in types):\n return X_col.astype(np.int16)\n types.discard(np.uint8)\n types.discard(np.int16)\n\n if all(one_type is np.uint16 or one_type is np.int32 for one_type in types):\n return X_col.astype(np.int32)\n\n try:\n return X_col.astype(np.int64)\n except OverflowError:\n # we must have a big number that can only be represented by np.uint64 AND also signed integers mixed together\n # if we do 
X_col.astype(np.uint64), it will silently convert negative integers to unsigned!\n\n # TODO : should this be np.float64 with a check for big integers\n return X_col.astype(np.unicode_)\n\n if all(one_type is float or issubclass(one_type, np.floating) for one_type in types):\n if all(one_type is np.float16 for one_type in types):\n return X_col.astype(np.float16)\n types.discard(np.float16)\n\n if all(one_type is np.float32 for one_type in types):\n return X_col.astype(np.float32)\n\n return X_col.astype(np.float64)\n\n # TODO: also check for bool conversion since \"False\"/\"True\" strings don't later convert to 'continuous'\n is_float_conversion = False\n for one_type in types:\n if one_type is str:\n pass # str objects have __iter__, so special case this to allow\n elif one_type is int:\n pass # int objects use the default __str__ function, so special case this to allow\n elif one_type is float:\n is_float_conversion = True # force to np.float64 to guarantee consistent string formatting\n elif issubclass(one_type, np.generic):\n # numpy objects have __getitem__, so special case this to allow\n if one_type is np.float64:\n pass # np.float64 is what we convert to for floats, so no need to convert this\n elif issubclass(one_type, np.floating):\n is_float_conversion = True # force to np.float64 to ensure consistent string formatting of floats\n elif one_type in _disallowed_types:\n # list of python types primarily from: https://docs.python.org/3/library/stdtypes.html\n msg = f\"X contains the disallowed type {one_type}\"\n _log.error(msg)\n raise TypeError(msg)\n elif hasattr(one_type, '__iter__') or hasattr(one_type, '__getitem__'):\n # check for __iter__ and __getitem__ to filter out iterables\n # https://stackoverflow.com/questions/1952464/in-python-how-do-i-determine-if-an-object-is-iterable\n msg = f\"X contains the disallowed iterable type {one_type}\"\n _log.error(msg)\n raise TypeError(msg)\n elif hasattr(one_type, '__contains__'):\n msg = f\"X contains the disallowed set type {one_type}\"\n _log.error(msg)\n raise TypeError(msg)\n elif one_type.__str__ is object.__str__:\n # if any object in our list uses the default object __str__ function then it'll\n # include the id(val) pointer in the string text, which isn't going to be useful as a categorical\n\n # use type(val) instead of val.__str__ to detect inherited __str__ functions per:\n # https://stackoverflow.com/questions/19628421/how-to-check-if-str-is-implemented-by-an-object\n\n msg = f\"X contains the type {one_type} which does not define a __str__ function\"\n _log.error(msg)\n raise TypeError(msg)\n\n if is_float_conversion:\n # TODO: handle ints here too which need to be checked if they are larger than the safe int max value\n\n X_col = X_col.copy()\n places = np.fromiter((val_type is float or issubclass(val_type, np.floating) for val_type in map(type, X_col)), dtype=np.bool_, count=len(X_col))\n np.place(X_col, places, X_col[places].astype(np.float64))\n\n # TODO: converting object types first to pd.CatigoricalDType is somewhat faster than our code here which converts\n # to unicode. We should consider either using a CatigoricalDTypes conversion first if pandas is installed, or\n # writing our own cython code that can be more efficient at walking through items in an array. 
If we write\n # our own cython there is the added advantage that we can check types in the same loop and therefore eliminate\n # the costly \"set(map(type, X_col))\" calls above\n return X_col.astype(np.unicode_)\n\ndef _process_column_initial(X_col, nonmissings, processing, min_unique_continuous):\n # called under: fit\n\n if issubclass(X_col.dtype.type, np.floating):\n missings = np.isnan(X_col)\n if missings.any():\n nonmissings = ~missings\n X_col = X_col[nonmissings]\n elif X_col.dtype.type is np.object_:\n X_col = _densify_object_ndarray(X_col)\n\n uniques, indexes, counts = np.unique(X_col, return_inverse=True, return_counts=True)\n\n if issubclass(uniques.dtype.type, np.floating):\n floats = uniques.astype(np.float64, copy=False)\n uniques = floats.astype(np.unicode_)\n else:\n uniques = uniques.astype(np.unicode_, copy=False)\n try:\n # we rely here on there being a round trip format within this language from float64 to text to float64\n\n # TODO: does this work if there are spaces or bools?\n\n floats = uniques.astype(dtype=np.float64) \n except ValueError:\n floats = None\n\n if min_unique_continuous is not None and floats is not None:\n # floats can have more than one string representation, so run unique again to check if we have \n # min_unique_continuous unique float64s in binary representation\n if min_unique_continuous <= len(np.unique(floats)):\n floats = floats[indexes] # expand from the unique floats to expanded floats\n if nonmissings is not None:\n floats_tmp = np.full(len(nonmissings), np.nan, dtype=np.float64)\n np.place(floats_tmp, nonmissings, floats)\n floats = floats_tmp\n\n return floats, None\n\n # TODO: add a callback function option here that allows the caller to sort, remove, combine\n if processing == 'nominal_prevalence':\n if floats is None:\n categories = [(-item[0], item[1]) for item in zip(counts, uniques)]\n else:\n categories = [(-item[0], item[1], item[2]) for item in zip(counts, floats, uniques)]\n categories.sort()\n categories = [x[-1] for x in categories]\n elif processing != 'nominal_alphabetical' and floats is not None:\n categories = [(item[0], item[1]) for item in zip(floats, uniques)]\n categories.sort()\n categories = [x[1] for x in categories]\n else:\n categories = uniques.tolist()\n categories.sort()\n\n categories = dict(zip(categories, count(1)))\n mapping = np.fromiter((categories[val] for val in uniques), dtype=np.int64, count=len(uniques))\n encoded = mapping[indexes]\n\n if nonmissings is not None:\n encoded_tmp = np.zeros(len(nonmissings), dtype=np.int64)\n np.place(encoded_tmp, nonmissings, encoded)\n encoded = encoded_tmp\n\n return encoded, categories\n\ndef _encode_categorical_existing(X_col, nonmissings, categories):\n # called under: predict\n\n # TODO: add special case handling if there is only 1 sample to make that faster\n # if we have just 1 sample, we can avoid making the mapping below\n\n if issubclass(X_col.dtype.type, np.floating):\n missings = np.isnan(X_col)\n if missings.any():\n nonmissings = ~missings\n X_col = X_col[nonmissings]\n elif X_col.dtype.type is np.object_:\n X_col = _densify_object_ndarray(X_col)\n\n uniques, indexes = np.unique(X_col, return_inverse=True)\n\n if issubclass(X_col.dtype.type, np.floating):\n uniques = uniques.astype(np.float64, copy=False)\n uniques = uniques.astype(np.unicode_, copy=False)\n\n mapping = np.fromiter((categories.get(val, -1) for val in uniques), dtype=np.int64, count=len(uniques))\n encoded = mapping[indexes]\n\n if (mapping < 0).any():\n if nonmissings is not None:\n 
encoded_tmp = np.zeros(len(nonmissings), dtype=np.int64)\n np.place(encoded_tmp, nonmissings, encoded)\n bad = np.full(len(nonmissings), None, dtype=np.object_)\n np.place(bad, encoded_tmp < 0, uniques[indexes[encoded < 0]])\n encoded = encoded_tmp\n else:\n bad = np.full(len(encoded), None, dtype=np.object_)\n unknowns = encoded < 0\n np.place(bad, unknowns, uniques[indexes[unknowns]])\n else:\n bad = None\n if nonmissings is not None:\n encoded_tmp = np.zeros(len(nonmissings), dtype=np.int64)\n np.place(encoded_tmp, nonmissings, encoded)\n encoded = encoded_tmp\n\n return encoded, bad\n\ndef _encode_pandas_categorical_initial(X_col, pd_categories, is_ordered, processing):\n # called under: fit\n\n if processing == 'nominal':\n if is_ordered:\n msg = \"nominal type invalid for ordered pandas.CategoricalDtype\"\n _log.error(msg)\n raise ValueError(msg)\n elif processing == 'ordinal':\n if not is_ordered:\n msg = \"ordinal type invalid for unordered pandas.CategoricalDtype\"\n _log.error(msg)\n raise ValueError(msg)\n elif processing is None or processing == 'auto':\n pass\n elif processing == 'nominal_prevalence' or processing == 'nominal_alphabetical':\n # TODO: we could instead handle this by re-ordering the pandas pd_categories. Someone might want to construct it quickly but then override the pd_categories\n msg = f\"{processing} type invalid for pandas.CategoricalDtype\"\n _log.error(msg)\n raise ValueError(msg)\n else:\n if isinstance(processing, str):\n # don't allow strings to get to the for loop below\n msg = f\"{processing} type invalid for pandas.CategoricalDtype\"\n _log.error(msg)\n raise ValueError(msg)\n\n n_items = 0\n n_ordinals = 0\n n_continuous = 0\n try:\n for item in processing:\n n_items += 1\n if isinstance(item, str):\n n_ordinals += 1\n elif isinstance(item, float) or isinstance(item, int) or isinstance(item, np.floating) or isinstance(item, np.integer):\n n_continuous += 1\n except TypeError:\n msg = f\"{processing} type invalid for pandas.CategoricalDtype\"\n _log.error(msg)\n raise ValueError(msg)\n\n if n_continuous == n_items:\n msg = \"continuous type invalid for pandas.CategoricalDtype\"\n _log.error(msg)\n raise ValueError(msg)\n elif n_ordinals == n_items:\n if not is_ordered:\n msg = \"ordinal type invalid for unordered pandas.CategoricalDtype\"\n _log.error(msg)\n raise ValueError(msg)\n\n # TODO: instead of throwing, we could match the ordinal values with the pandas pd_categories and\n # report the rest as bad items. 
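# A small stand-alone sketch (not the library's exact code) of the predict-time encoding
# pattern in _encode_categorical_existing above: np.unique returns the unique values plus
# an inverse index, so the usually-small unique set is mapped through the fit-time category
# dict once and fanned back out to every sample; any value missing from the dict becomes -1
# and is reported back as "bad".
import numpy as np

categories = {'high': 3, 'low': 1, 'medium': 2}           # built at fit time
X_col = np.array(['low', 'high', 'oops', 'low'])
uniques, indexes = np.unique(X_col, return_inverse=True)
mapping = np.fromiter((categories.get(val, -1) for val in uniques),
                      dtype=np.int64, count=len(uniques))
encoded = mapping[indexes]
print(encoded)                                            # [ 1  3 -1  1]
print(uniques[indexes[encoded < 0]])                      # ['oops'] -> unseen category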
For now though, just assume it's bad to specify this\n msg = \"cannot specify ordinal categories for a pandas.CategoricalDtype which already has categories\"\n _log.error(msg)\n raise ValueError(msg)\n else:\n msg = f\"{processing} type invalid for pandas.CategoricalDtype\"\n _log.error(msg)\n raise ValueError(msg)\n\n categories = dict(zip(pd_categories, count(1)))\n X_col = X_col.astype(dtype=np.int64, copy=False) # we'll need int64 for calling C++ anyways\n X_col = X_col + 1\n return X_col, categories\n\ndef _encode_pandas_categorical_existing(X_col, pd_categories, categories):\n # called under: predict\n\n # TODO: add special case handling if there is only 1 sample to make that faster\n # if we have just 1 sample, we can avoid making the mapping below\n\n mapping = np.fromiter((categories.get(val, -1) for val in pd_categories), dtype=np.int64, count=len(pd_categories))\n\n if len(mapping) <= len(categories):\n mapping_cmp = np.arange(1, len(mapping) + 1, dtype=np.int64)\n if np.array_equal(mapping, mapping_cmp):\n X_col = X_col.astype(dtype=np.int64, copy=False) # avoid overflows for np.int8\n X_col = X_col + 1\n return X_col, None\n else:\n mapping_cmp = np.arange(1, len(categories) + 1, dtype=np.int64)\n if np.array_equal(mapping[0:len(mapping_cmp)], mapping_cmp):\n unknowns = len(categories) <= X_col\n bad = np.full(len(X_col), None, dtype=np.object_)\n bad[unknowns] = pd_categories[X_col[unknowns]]\n X_col = X_col.astype(dtype=np.int64, copy=False) # avoid overflows for np.int8\n X_col = X_col + 1\n X_col[unknowns] = -1\n return X_col, bad\n\n mapping = np.insert(mapping, 0, 0)\n encoded = mapping[X_col + 1]\n\n bad = None\n unknowns = encoded < 0\n if unknowns.any():\n bad = np.full(len(X_col), None, dtype=np.object_)\n bad[unknowns] = pd_categories[X_col[unknowns]]\n\n return encoded, bad\n\ndef _process_continuous(X_col, nonmissings):\n # called under: fit or predict\n\n if issubclass(X_col.dtype.type, np.floating):\n X_col = X_col.astype(dtype=np.float64, copy=False)\n return X_col, None\n elif issubclass(X_col.dtype.type, np.integer) or X_col.dtype.type is np.bool_:\n X_col = X_col.astype(dtype=np.float64)\n if nonmissings is not None:\n X_col_tmp = np.full(len(nonmissings), np.nan, dtype=np.float64)\n np.place(X_col_tmp, nonmissings, X_col)\n X_col = X_col_tmp\n\n return X_col, None\n else:\n # we either have an np.object_ or np.unicode_/np.str_\n try:\n floats = X_col.astype(dtype=np.float64)\n bad = None\n except (TypeError, ValueError):\n # we get a TypeError whenever we have an np.object_ array and numpy attempts to call float(), but the \n # object doesn't have a __float__ function. We get a ValueError when either a str object inside an \n # np.object_ array or when an np.unicode_ array attempts to convert a string to a float and fails\n\n n_samples = len(X_col)\n bad = np.full(n_samples, None, dtype=np.object_)\n floats = np.zeros(n_samples, dtype=np.float64)\n for idx in range(n_samples):\n one_item_array = X_col[idx:idx + 1] # slice one item at a time keeping as an np.ndarray\n try:\n # use .astype(..) instead of float(..) to ensure identical conversion results\n floats[idx] = one_item_array.astype(dtype=np.float64)\n except TypeError:\n # use .astype instead of str(one_item_array) here to ensure identical string categories\n one_str_array = one_item_array.astype(dtype=np.unicode_)\n try:\n # use .astype(..) instead of float(..) 
to ensure identical conversion results\n floats[idx] = one_str_array.astype(dtype=np.float64)\n except ValueError:\n bad.itemset(idx, one_str_array.item())\n except ValueError:\n bad.itemset(idx, one_item_array.item())\n\n # bad.any() would fail to work if bad was allowed to be either None or False, but None\n # values in X_col should always be identified as missing by our caller, and False should be successfully \n # converted to 0.0 above, so neither should end up in the bad array other than non-bad indicators\n bad = bad if bad.any() else None\n\n if nonmissings is not None:\n floats_tmp = np.full(len(nonmissings), np.nan, dtype=np.float64)\n np.place(floats_tmp, nonmissings, floats)\n floats = floats_tmp\n\n if bad is not None:\n bad_tmp = np.full(len(nonmissings), None, dtype=np.object_)\n np.place(bad_tmp, nonmissings, bad)\n bad = bad_tmp\n\n return floats, bad\n\ndef _process_ndarray(X_col, nonmissings, categories, processing, min_unique_continuous):\n if processing == 'continuous':\n # called under: fit or predict\n X_col, bad = _process_continuous(X_col, nonmissings)\n return 'continuous', X_col, None, bad\n elif processing == 'nominal':\n if categories is None:\n # called under: fit\n X_col, categories = _process_column_initial(X_col, nonmissings, None, None)\n return 'nominal', X_col, categories, None\n else:\n # called under: predict\n X_col, bad = _encode_categorical_existing(X_col, nonmissings, categories)\n return 'nominal', X_col, categories, bad\n elif processing == 'ordinal':\n if categories is None:\n # called under: fit\n # It's an error since we need to also provide the ordinal definition during fit\n msg = \"ordinal category definition missing for ordinal type\"\n _log.error(msg)\n raise ValueError(msg)\n else:\n # called under: predict\n X_col, bad = _encode_categorical_existing(X_col, nonmissings, categories)\n return 'ordinal', X_col, categories, bad\n elif processing is None or processing == 'auto':\n # called under: fit\n X_col, categories = _process_column_initial(X_col, nonmissings, None, min_unique_continuous)\n return 'continuous' if categories is None else 'nominal', X_col, categories, None\n elif processing == 'nominal_prevalence' or processing == 'nominal_alphabetical':\n # called under: fit\n X_col, categories = _process_column_initial(X_col, nonmissings, processing, None)\n return 'nominal', X_col, categories, None\n elif processing == 'quantile' or processing == 'quantile_humanized' or processing == 'uniform' or processing == 'winsorized':\n # called under: fit\n X_col, bad = _process_continuous(X_col, nonmissings)\n return 'continuous', X_col, None, bad\n elif isinstance(processing, int):\n # called under: fit\n X_col, categories = _process_column_initial(X_col, nonmissings, None, processing)\n return 'continuous' if categories is None else 'nominal', X_col, categories, None\n elif processing == 'ignore':\n # called under: fit or predict\n X_col, categories = _process_column_initial(X_col, nonmissings, None, None)\n mapping = np.empty(len(categories) + 1, dtype=np.object_)\n mapping.itemset(0, None)\n for category, idx in categories.items():\n mapping.itemset(idx, category)\n bad = mapping[X_col]\n return 'ignore', None, None, bad\n elif isinstance(processing, str):\n # called under: fit\n\n # don't allow strings to get to the np.array conversion below\n msg = f\"{processing} type invalid\"\n _log.error(msg)\n raise ValueError(msg)\n else:\n # called under: fit\n\n n_items = 0\n n_ordinals = 0\n n_continuous = 0\n try:\n for item in processing:\n 
n_items += 1\n if isinstance(item, str):\n n_ordinals += 1\n elif isinstance(item, float) or isinstance(item, int) or isinstance(item, np.floating) or isinstance(item, np.integer):\n n_continuous += 1\n except TypeError:\n msg = f\"{processing} type invalid\"\n _log.error(msg)\n raise TypeError(msg)\n\n if n_continuous == n_items:\n # if n_items == 0 then it must be continuous since we can have zero cut points, but not zero ordinal categories\n X_col, bad = _process_continuous(X_col, nonmissings)\n return 'continuous', X_col, None, bad\n elif n_ordinals == n_items:\n categories = dict(zip(processing, count(1)))\n X_col, bad = _encode_categorical_existing(X_col, nonmissings, categories)\n return 'ordinal', X_col, categories, bad\n else:\n msg = f\"{processing} type invalid\"\n _log.error(msg)\n raise TypeError(msg)\n\ndef _reshape_1D_if_possible(col):\n if col.ndim != 1:\n # ignore dimensions that have just 1 item and assume the intent was to give us 1D\n is_found = False\n for n_items in col.shape:\n if n_items != 1:\n if is_found:\n msg = f\"Cannot reshape to 1D. Original shape was {col.shape}\"\n _log.error(msg)\n raise ValueError(msg)\n is_found = True\n col = col.reshape(-1)\n return col\n\ndef _process_numpy_column(X_col, categories, feature_type, min_unique_continuous):\n nonmissings = None\n\n if isinstance(X_col, ma.masked_array):\n mask = X_col.mask\n if mask is ma.nomask:\n X_col = X_col.data\n else:\n X_col = X_col.compressed()\n # it's legal for a mask to exist and yet have all valid entries in the mask, so check for this\n if len(X_col) != len(mask):\n nonmissings = ~mask\n\n if X_col.dtype.type is np.object_:\n if _pandas_installed:\n # pandas also has the pd.NA value that indicates missing. If Pandas is available though\n # we can use it's function that checks for pd.NA, np.nan, and None\n nonmissings2 = pd.notna(X_col)\n else:\n # X_col == X_col is a check for nan that works even with mixed types, since nan != nan\n nonmissings2 = np.logical_and(X_col != _none_ndarray, X_col == X_col)\n if not nonmissings2.all():\n X_col = X_col[nonmissings2]\n if nonmissings is None:\n nonmissings = nonmissings2\n else:\n # it's a little weird and possibly dangerous to place inside the array being read,\n # but algorithmically this is the fastest thing to do, and it seems to work..\n np.place(nonmissings, nonmissings, nonmissings2)\n\n return _process_ndarray(X_col, nonmissings, categories, feature_type, min_unique_continuous)\n\ndef _process_pandas_column(X_col, categories, feature_type, min_unique_continuous):\n if isinstance(X_col.dtype, np.dtype):\n if issubclass(X_col.dtype.type, np.floating) or issubclass(X_col.dtype.type, np.integer) or X_col.dtype.type is np.bool_:\n X_col = X_col.values\n return _process_ndarray(X_col, None, categories, feature_type, min_unique_continuous)\n elif X_col.dtype.type is np.object_:\n nonmissings = None\n if X_col.hasnans:\n # if hasnans is true then there is definetly a real missing value in there and not just a mask\n nonmissings = X_col.notna().values\n X_col = X_col.dropna()\n X_col = X_col.values\n return _process_ndarray(X_col, nonmissings, categories, feature_type, min_unique_continuous)\n elif isinstance(X_col.dtype, pd.CategoricalDtype):\n # unlike other missing value types, we get back -1's for missing here, so no need to drop them\n X_col = X_col.values\n is_ordered = X_col.ordered\n pd_categories = X_col.categories.values.astype(dtype=np.unicode_, copy=False)\n X_col = X_col.codes\n\n if feature_type == 'ignore':\n pd_categories = 
pd_categories.astype(dtype=np.object_)\n pd_categories = np.insert(pd_categories, 0, None)\n bad = pd_categories[X_col + 1]\n return None, None, bad, 'ignore'\n else:\n if categories is None:\n # called under: fit\n X_col, categories = _encode_pandas_categorical_initial(X_col, pd_categories, is_ordered, feature_type)\n bad = None\n else:\n # called under: predict\n X_col, bad = _encode_pandas_categorical_existing(X_col, pd_categories, categories)\n\n return 'ordinal' if is_ordered else 'nominal', X_col, categories, bad\n elif issubclass(X_col.dtype.type, np.integer) or X_col.dtype.type is np.bool_:\n # this handles Int8Dtype to Int64Dtype, UInt8Dtype to UInt64Dtype, and BooleanDtype\n nonmissings = None\n if X_col.hasnans:\n # if hasnans is true then there is definetly a real missing value in there and not just a mask\n nonmissings = X_col.notna().values\n X_col = X_col.dropna()\n X_col = X_col.values\n X_col = X_col.astype(dtype=X_col.dtype.type, copy=False)\n return _process_ndarray(X_col, nonmissings, categories, feature_type, min_unique_continuous)\n\n # TODO: implement pd.SparseDtype\n # TODO: implement pd.StringDtype both the numpy and arrow versions\n # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.StringDtype.html#pandas.StringDtype\n msg = f\"{type(X_col.dtype)} not supported\"\n _log.error(msg)\n raise TypeError(msg)\n\ndef _process_scipy_column(X_col, categories, feature_type, min_unique_continuous):\n X_col = X_col.toarray().reshape(-1)\n\n nonmissings = None\n if X_col.dtype.type is np.object_:\n if _pandas_installed:\n # pandas also has the pd.NA value that indicates missing. If Pandas is available though\n # we can use it's function that checks for pd.NA, np.nan, and None\n nonmissings = pd.notna(X_col)\n else:\n # X_col == X_col is a check for nan that works even with mixed types, since nan != nan\n nonmissings = np.logical_and(X_col != _none_ndarray, X_col == X_col)\n\n if nonmissings.all():\n nonmissings = None\n else:\n X_col = X_col[nonmissings]\n\n return _process_ndarray(X_col, nonmissings, categories, feature_type, min_unique_continuous)\n\ndef _process_dict_column(X_col, categories, feature_type, min_unique_continuous):\n if isinstance(X_col, np.ndarray): # this includes ma.masked_array\n pass\n elif _pandas_installed and isinstance(X_col, pd.Series):\n return _process_pandas_column(X_col, categories, feature_type, min_unique_continuous)\n elif _pandas_installed and isinstance(X_col, pd.DataFrame):\n if X_col.shape[1] == 1:\n X_col = X_col.iloc[:, 0]\n return _process_pandas_column(X_col, categories, feature_type, min_unique_continuous)\n elif X_col.shape[0] == 1:\n X_col = X_col.astype(np.object_, copy=False).values.reshape(-1)\n else:\n msg = f\"Cannot reshape to 1D. Original shape was {X_col.shape}\"\n _log.error(msg)\n raise ValueError(msg)\n elif _scipy_installed and isinstance(X_col, sp.sparse.spmatrix):\n if X_col.shape[1] == 1 or X_col.shape[0] == 1:\n return _process_scipy_column(X_col, categories, feature_type, min_unique_continuous)\n else:\n msg = f\"Cannot reshape to 1D. Original shape was {X_col.shape}\"\n _log.error(msg)\n raise ValueError(msg)\n elif isinstance(X_col, list) or isinstance(X_col, tuple):\n X_col = np.array(X_col, dtype=np.object_)\n elif isinstance(X_col, str):\n # don't allow strings to get to the np.array conversion below\n X_col_tmp = np.empty(shape=1, dtype=np.object_)\n X_col_tmp.itemset(0, X_col)\n X_col = X_col_tmp\n else:\n try:\n # we don't support iterables that get exhausted on their first examination. 
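# A simplified sketch (assumed example data) of the pandas.CategoricalDtype path handled
# above: .codes is -1 for missing and 0..n-1 for the categories, so the +1 shift applied at
# fit time reserves index 0 for the missing bin used elsewhere in this module.
import numpy as np
import pandas as pd

col = pd.Series(['b', 'a', None, 'b'], dtype=pd.CategoricalDtype(['a', 'b']))
codes = col.values.codes.astype(np.int64)
print(codes)       # [ 1  0 -1  1]
print(codes + 1)   # [ 2  1  0  2] -> 0 is the missing bin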
This condition\n # should be detected though in clean_X where we get the length or bin_native where we check the\n # number of samples on the 2nd run through the generator\n X_col = list(X_col)\n X_col = np.array(X_col, dtype=np.object_)\n except TypeError:\n # if our item isn't iterable, assume it has just 1 item and we'll check below if that's consistent\n X_col_tmp = np.empty(shape=1, dtype=np.object_)\n X_col_tmp.itemset(0, X_col)\n X_col = X_col_tmp\n\n X_col = _reshape_1D_if_possible(X_col)\n return _process_numpy_column(X_col, categories, feature_type, min_unique_continuous)\n\ndef unify_columns(X, requests, feature_names_in, feature_types=None, min_unique_continuous=3, go_fast=False):\n # If the requests paramter contains a categories dictionary, then that same categories object is guaranteed to\n # be yielded back to the caller. This guarantee can be used to rapidly identify which request is being \n # yielded by using the id(categories) along with the feature_idx\n\n if isinstance(X, np.ndarray): # this includes ma.masked_array\n if issubclass(X.dtype.type, np.complexfloating):\n msg = \"Complex data not supported\"\n _log.error(msg)\n raise TypeError(msg)\n elif issubclass(X.dtype.type, np.void):\n msg = \"np.void data not supported\"\n _log.error(msg)\n raise TypeError(msg)\n\n # TODO: in the future special case this to make single samples faster at predict time\n\n if X.ndim == 1:\n X = np.expand_dims(X, axis=0)\n elif X.ndim != 2:\n msg = f\"X cannot have {X.ndim} dimensions\"\n _log.error(msg)\n raise ValueError(msg)\n\n n_cols = X.shape[1]\n col_map = None\n if n_cols != len(feature_names_in):\n # during fit time unify_feature_names would only allow us to get here if this was legal, which requires \n # feature_types to not be None. During predict time feature_types_in cannot be None, but we need \n # to check for legality on the dimensions of X\n keep_cols = np.fromiter((val != 'ignore' for val in feature_types), dtype=np.bool_, count=len(feature_types))\n if n_cols != keep_cols.sum():\n # called under: predict\n msg = f\"The model has {len(feature_types)} features, but X has {n_cols} columns\"\n _log.error(msg)\n raise ValueError(msg)\n col_map = np.empty(len(feature_types), dtype=np.int64)\n np.place(col_map, keep_cols, np.arange(len(feature_types), dtype=np.int64))\n\n # TODO: I'm not sure that simply checking X.flags.c_contiguous handles all the situations that we'd want\n # to know about some data. If we recieved a transposed array that was C ordered how would that look?\n # so read up on this more\n # https://numpy.org/doc/stable/reference/arrays.ndarray.html#internal-memory-layout-of-an-ndarray\n # https://numpy.org/doc/stable/reference/arrays.interface.html\n # memoryview \n\n # TODO: create a C++ transposer that takes the stride length between items, so we can pass in 1 for bytes\n # 2 for int16, 4 for int32, 8 for int64 and special case those sizes to be fast. We can then also transpose\n # np.object_ and np.unicode by passing in whatever lengths those are, which we can get from numpy reliably\n # Inisde C++ we can use a templated function that takes the stride length or 0, so we'll get compiled\n # versions that specialize the 1,2,4,8 sizes, and use memcpy to make the cell copies. 
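# An assumed, simplified illustration of the col_map trick used a little above: when some
# features are typed 'ignore' and X omits those columns, np.place writes increasing column
# indexes only into the kept slots, so col_map[feature_idx] recovers the matching column of
# X for every feature the model actually uses (the 'ignore' slots stay uninitialized and
# are never read).
import numpy as np

feature_types = ['continuous', 'ignore', 'nominal', 'ignore', 'continuous']
keep_cols = np.fromiter((val != 'ignore' for val in feature_types),
                        dtype=np.bool_, count=len(feature_types))
col_map = np.empty(len(feature_types), dtype=np.int64)
np.place(col_map, keep_cols, np.arange(len(feature_types), dtype=np.int64))
print(col_map[[0, 2, 4]])   # [0 1 2] -> column positions inside the narrower X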
memcpy is an\n # intrinsic that'll optimize down to avoid loops when possible, so that should give us fast results.\n #\n # For some reason numpy really sucks at transposing data and asfortranarray makes it slower, so let's do it ourselves.\n # Allocate an empty fortran array here in python and have C++ fill it. Then we can keep all the\n # rest of the code below the same since it'll just be accessed internally more efficiently.\n #if go_fast and X.flags.c_contiguous:\n # # called under: predict\n # # during predict we don't care as much about memory consumption, so speed it by transposing everything\n # X = np.asfortranarray(X)\n\n for feature_idx, categories in requests:\n col_idx = feature_idx if col_map is None else col_map[feature_idx]\n X_col = X[:, col_idx]\n feature_type = None if feature_types is None else feature_types[feature_idx]\n feature_type_in, X_col, categories, bad = _process_numpy_column(X_col, categories, feature_type, min_unique_continuous)\n yield feature_type_in, X_col, categories, bad\n elif _pandas_installed and isinstance(X, pd.DataFrame):\n names_original = X.columns\n names_dict = dict(zip(map(str, names_original), count()))\n n_cols = len(names_original)\n if len(names_dict) != n_cols:\n # this can happen if for instance one column is \"0\" and annother is int(0)\n # Pandas also allows duplicate labels by default:\n # https://pandas.pydata.org/docs/user_guide/duplicates.html#duplicates-disallow\n # we can tollerate duplicate labels here, provided none of them are being used by our model\n for name, n_count in Counter(map(str, names_original)).items():\n if n_count != 1:\n names_dict.remove(name)\n\n if feature_types is None:\n if any(feature_name_in not in names_dict for feature_name_in in feature_names_in):\n names_dict = None\n else:\n if any(feature_name_in not in names_dict for feature_name_in, feature_type in zip(feature_names_in, feature_types) if feature_type != 'ignore'):\n names_dict = None\n\n if names_dict is None:\n if n_cols == len(feature_names_in):\n names_dict = dict(zip(feature_names_in, count()))\n else:\n # during fit time unify_feature_names would only allow us to get here if this was legal, which requires \n # feature_types to not be None. During predict time feature_types_in cannot be None, but we need \n # to check for legality on the dimensions of X\n names_dict = dict(zip((feature_name_in for feature_name_in, feature_type in zip(feature_names_in, feature_types) if feature_type != 'ignore'), count()))\n if n_cols != len(names_dict):\n msg = f\"The model has {len(feature_types)} features, but X has {n_cols} columns\"\n _log.error(msg)\n raise ValueError(msg)\n\n # Pandas also sometimes uses a dense 2D ndarray instead of per column 1D ndarrays, which would benefit from \n # transposing, but accessing the BlockManager is currently unsupported behavior. 
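# A sketch (simplified, assuming del-style removal) of the duplicate-label guard above:
# DataFrame labels are stringified, and any stringified label that occurs more than once is
# dropped from the lookup so the model can never silently bind to an ambiguous column.
from collections import Counter
from itertools import count
import pandas as pd

df = pd.DataFrame([[1, 2, 3]], columns=['a', 0, '0'])     # 0 and '0' collide as strings
names_dict = dict(zip(map(str, df.columns), count()))
for name, n_count in Counter(map(str, df.columns)).items():
    if n_count != 1:
        del names_dict[name]                              # keep only unambiguous labels
print(names_dict)                                         # {'a': 0}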
They are also planning to eliminate\n # the BlockManager in Pandas2, so not much benefit in special casing this while they move in that direction\n # https://uwekorn.com/2020/05/24/the-one-pandas-internal.html\n\n for feature_idx, categories in requests:\n col_idx = names_dict[feature_names_in[feature_idx]]\n X_col = X.iloc[:, col_idx]\n feature_type = None if feature_types is None else feature_types[feature_idx]\n feature_type_in, X_col, categories, bad = _process_pandas_column(X_col, categories, feature_type, min_unique_continuous)\n yield feature_type_in, X_col, categories, bad\n elif _scipy_installed and isinstance(X, sp.sparse.spmatrix):\n n_cols = X.shape[1]\n\n col_map = None\n if n_cols != len(feature_names_in):\n # during fit time unify_feature_names would only allow us to get here if this was legal, which requires \n # feature_types to not be None. During predict time feature_types_in cannot be None, but we need \n # to check for legality on the dimensions of X\n keep_cols = np.fromiter((val != 'ignore' for val in feature_types), dtype=np.bool_, count=len(feature_types))\n if n_cols != keep_cols.sum():\n msg = f\"The model has {len(feature_types)} features, but X has {n_cols} columns\"\n _log.error(msg)\n raise ValueError(msg)\n col_map = np.empty(len(feature_types), dtype=np.int64)\n np.place(col_map, keep_cols, np.arange(len(feature_types), dtype=np.int64))\n\n for feature_idx, categories in requests:\n col_idx = feature_idx if col_map is None else col_map[feature_idx]\n X_col = X.getcol(col_idx)\n feature_type = None if feature_types is None else feature_types[feature_idx]\n feature_type_in, X_col, categories, bad = _process_scipy_column(X_col, categories, feature_type, min_unique_continuous)\n yield feature_type_in, X_col, categories, bad\n elif isinstance(X, dict):\n for feature_idx, categories in requests:\n X_col = X[feature_names_in[feature_idx]]\n feature_type = None if feature_types is None else feature_types[feature_idx]\n feature_type_in, X_col, categories, bad = _process_dict_column(X_col, categories, feature_type, min_unique_continuous)\n yield feature_type_in, X_col, categories, bad\n else:\n msg = \"internal error\"\n _log.error(msg)\n raise ValueError(msg)\n\ndef unify_feature_names(X, feature_names_given=None, feature_types_given=None):\n # called under: fit\n\n if isinstance(X, np.ndarray): # this includes ma.masked_array\n X_names = None\n n_cols = X.shape[0] if X.ndim == 1 else X.shape[1]\n elif _pandas_installed and isinstance(X, pd.DataFrame):\n X_names = list(map(str, X.columns))\n n_cols = len(X_names)\n elif _scipy_installed and isinstance(X, sp.sparse.spmatrix):\n X_names = None\n n_cols = X.shape[1]\n elif isinstance(X, dict):\n X_names = list(map(str, X.keys()))\n # there is no natural order for dictionaries, but we want a consistent order, so sort them by string\n # python uses unicode code points for sorting, which is what we want for cross-language equivalent results\n X_names.sort()\n n_cols = len(X_names)\n else:\n msg = \"internal error\"\n _log.error(msg)\n raise ValueError(msg)\n\n n_ignored = 0\n if feature_types_given is not None:\n n_ignored = sum(1 for feature_type_given in feature_types_given if feature_type_given == 'ignore')\n\n if feature_names_given is None:\n if feature_types_given is not None:\n if len(feature_types_given) != n_cols and len(feature_types_given) != n_cols + n_ignored:\n msg = f\"There are {len(feature_types_given)} feature_types, but X has {n_cols} columns\"\n _log.error(msg)\n raise ValueError(msg)\n n_cols = 
len(feature_types_given)\n\n feature_names_in = X_names\n if X_names is None:\n feature_names_in = []\n # this isn't used other than to indicate new names need to be created\n feature_types_given = ['ignore'] * n_cols \n else:\n n_final = len(feature_names_given)\n if feature_types_given is not None:\n n_final = len(feature_types_given)\n if n_final != len(feature_names_given) and n_final != len(feature_names_given) + n_ignored:\n msg = f\"There are {n_final} feature_types and {len(feature_names_given)} feature_names which is a mismatch\"\n _log.error(msg)\n raise ValueError(msg)\n\n feature_names_in = list(map(str, feature_names_given))\n\n if X_names is None:\n # ok, need to use position indexing\n if n_final != n_cols and n_final != n_cols + n_ignored:\n msg = f\"There are {n_final} features, but X has {n_cols} columns\"\n _log.error(msg)\n raise ValueError(msg)\n else:\n # we might be indexing by name\n names_used = feature_names_in\n if feature_types_given is not None and len(feature_names_in) == len(feature_types_given):\n names_used = [feature_name_in for feature_name_in, feature_type_given in zip(feature_names_in, feature_types_given) if feature_type_given != 'ignore']\n\n X_names_unique = set(name for name, n_count in Counter(X_names).items() if n_count == 1)\n if any(name not in X_names_unique for name in names_used):\n # ok, need to use position indexing\n if n_final != n_cols and n_final != n_cols + n_ignored:\n msg = f\"There are {n_final} features, but X has {n_cols} columns\"\n _log.error(msg)\n raise ValueError(msg)\n\n if feature_types_given is not None:\n if len(feature_types_given) == len(feature_names_in):\n if len(feature_names_in) - n_ignored != len(set(feature_name_in for feature_name_in, feature_type_given in zip(feature_names_in, feature_types_given) if feature_type_given != 'ignore')):\n msg = \"cannot have duplicate feature names\"\n _log.error(msg)\n raise ValueError(msg)\n\n return feature_names_in\n\n names_set = set(feature_names_in)\n\n names = []\n names_idx = 0\n feature_idx = 0\n for feature_type_given in feature_types_given:\n if feature_type_given == 'ignore':\n while True:\n # non-devs looking at our models will like 1 indexing better than 0 indexing\n # give 4 digits to the number so that anything below 9999 gets sorted in the right order in string format\n feature_idx += 1\n name = f\"feature_{feature_idx:04}\"\n if name not in names_set:\n break\n else:\n name = feature_names_in[names_idx]\n names_idx += 1\n names.append(name)\n\n feature_names_in = names\n\n if len(feature_names_in) != len(set(feature_names_in)):\n msg = \"cannot have duplicate feature names\"\n _log.error(msg)\n raise ValueError(msg)\n\n return feature_names_in\n\ndef clean_vector(vec, is_y_for_classification, param_name):\n # called under: fit\n\n if isinstance(vec, ma.masked_array):\n # do this before np.ndarray since ma.masked_array is a subclass of np.ndarray\n mask = vec.mask\n if mask is not ma.nomask:\n if mask.any():\n msg = f\"{param_name} cannot contain missing values\"\n _log.error(msg)\n raise ValueError(msg)\n vec = vec.data\n elif isinstance(vec, np.ndarray):\n pass\n elif _pandas_installed and isinstance(vec, pd.Series):\n if vec.hasnans:\n # if hasnans is true then there is definetly a real missing value in there and not just a mask\n msg = f\"{param_name} cannot contain missing values\"\n _log.error(msg)\n raise ValueError(msg)\n # this can result in be a non-numpy datatype, but we use astype below to ensure numpyness\n vec = vec.values\n elif _pandas_installed 
and isinstance(vec, pd.DataFrame):\n if vec.shape[1] == 1:\n vec = vec.iloc[:, 0]\n if vec.hasnans:\n # if hasnans is true then there is definetly a real missing value in there and not just a mask\n msg = f\"{param_name} cannot contain missing values\"\n _log.error(msg)\n raise ValueError(msg)\n # this can result in be a non-numpy datatype, but we use astype below to ensure numpyness\n vec = vec.values\n elif vec.shape[0] == 1:\n # transition to np.object_ first to detect any missing values\n vec = vec.astype(np.object_, copy=False).values\n else:\n msg = f\"{param_name} cannot be a multidimensional pandas.DataFrame\"\n _log.error(msg)\n raise ValueError(msg)\n elif _scipy_installed and isinstance(vec, sp.sparse.spmatrix):\n if vec.shape[0] == 1 or vec.shape[1] == 1:\n vec = vec.toarray()\n else:\n msg = f\"{param_name} cannot be a multidimensional scipy.sparse.spmatrix\"\n _log.error(msg)\n raise ValueError(msg)\n elif isinstance(vec, list) or isinstance(vec, tuple):\n # transition to np.object_ first to detect any missing values\n vec = np.array(vec, dtype=np.object_)\n elif callable(getattr(vec, '__array__', None)):\n vec = vec.__array__()\n elif isinstance(vec, str):\n msg = f\"{param_name} cannot be a single object\"\n _log.error(msg)\n raise TypeError(msg)\n else:\n try:\n vec = list(vec)\n except TypeError:\n msg = f\"{param_name} cannot be a single object\"\n _log.error(msg)\n raise TypeError(msg)\n # transition to np.object_ first to detect any missing values\n vec = np.array(vec, dtype=np.object_)\n\n vec = _reshape_1D_if_possible(vec)\n\n if vec.dtype.type is np.object_:\n if _pandas_installed:\n # pandas also has the pd.NA value that indicates missing. If Pandas is available though\n # we can use it's function that checks for pd.NA, np.nan, and None\n if pd.isna(vec).any():\n msg = f\"{param_name} cannot contain missing values\"\n _log.error(msg)\n raise ValueError(msg)\n else:\n # vec != vec is a check for nan that works even with mixed types, since nan != nan\n if (vec == _none_ndarray).any() or (vec != vec).any():\n msg = f\"{param_name} cannot contain missing values\"\n _log.error(msg)\n raise ValueError(msg)\n elif issubclass(vec.dtype.type, np.floating):\n if np.isnan(vec).any():\n msg = f\"{param_name} cannot contain missing values\"\n _log.error(msg)\n raise ValueError(msg)\n\n if is_y_for_classification:\n # Per scikit-learn, we need to accept y of list or numpy array that contains either strings or integers.\n # We want to serialize these models to/from JSON, and JSON allows us to differentiate between string\n # and integer types with just the JSON type, so that's nice. 
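# A minimal sketch of the no-pandas missing check used above: comparing an object array
# against an array-wrapped None (mirroring the module-level _none_ndarray) flags the None
# entries, and vec != vec flags NaN entries because NaN is the only value unequal to itself.
import numpy as np

_none_wrapped = np.array(None)            # wrapping avoids a scalar comparison
vec = np.array(['a', None, 1.5, float('nan')], dtype=np.object_)
has_missing = (vec == _none_wrapped).any() or (vec != vec).any()
print(has_missing)                        # True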
JSON also allows boolean types,\n # and that seems like a type someone might pass us for binary classification, so accept bools too.\n # https://scikit-learn.org/stable/developers/develop.html\n\n if issubclass(vec.dtype.type, np.integer):\n # this also handles pandas Int8Dtype to Int64Dtype, UInt8Dtype to UInt64Dtype\n # JSON has a number datatype, so we can preserve this information in JSON!\n dtype = np.int64\n elif issubclass(vec.dtype.type, np.bool_):\n # this also handles pandas BooleanDtype\n # JSON has a boolean datatype, so we can preserve this information in JSON!\n dtype = np.bool_\n elif issubclass(vec.dtype.type, np.object_):\n types = set(map(type, vec))\n if all(one_type is int or issubclass(one_type, np.integer) for one_type in types):\n # the vec.astype call below can fail if we're passed an unsigned np.uint64\n # array with big values, but we don't want to surprise anyone by converting to\n # strings in that special case, so throw if we're presented this unusual type\n dtype = np.int64\n elif all(one_type is bool or issubclass(one_type, np.bool_) for one_type in types):\n dtype = np.bool_\n else:\n dtype = np.unicode_\n else:\n dtype = np.unicode_\n else:\n dtype = np.float64\n\n return vec.astype(dtype, copy=False)\n\ndef clean_X(X):\n # called under: fit or predict\n\n if isinstance(X, np.ndarray): # this includes ma.masked_array\n return X, 1 if X.ndim == 1 else X.shape[0]\n elif _pandas_installed and isinstance(X, pd.DataFrame):\n return X, X.shape[0]\n elif _scipy_installed and isinstance(X, sp.sparse.spmatrix):\n return X, X.shape[0]\n elif isinstance(X, dict):\n for val in X.values():\n # we don't support iterators for dict, so len should work\n return X, len(val)\n return X, -1\n elif isinstance(X, list) or isinstance(X, tuple):\n is_copied = False\n elif callable(getattr(X, '__array__', None)):\n X = X.__array__()\n return X, 1 if X.ndim == 1 else X.shape[0]\n elif X is None:\n msg = \"X cannot be None\"\n _log.error(msg)\n raise TypeError(msg)\n elif isinstance(X, str):\n # str objects are iterable, so don't allow them to get to the list() conversion below\n msg = \"X cannot be a single str\"\n _log.error(msg)\n raise TypeError(msg)\n else:\n try:\n X = list(X)\n is_copied = True\n except TypeError:\n msg = \"X must be an iterable\"\n _log.error(msg)\n raise TypeError(msg)\n\n # for consistency with what the caller expects, we should mirror what np.array([[..], [..], .., [..]]) does\n # [1, 2, 3] is one sample with 3 features\n # [[1], [2], [3]] is three samples with 1 feature\n # [[1], [2], 3] is bug prone. 
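# An assumed tiny illustration of the shape convention spelled out in the comment above,
# mirroring what np.array itself does with nested lists.
import numpy as np

print(np.array([1, 2, 3]).shape)         # (3,)   -> one sample with 3 features
print(np.array([[1], [2], [3]]).shape)   # (3, 1) -> three samples with 1 feature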
You could argue that it has to be a single sample since\n # the 3 only makes sense in that context, but if the 2 value was removed it would change \n # from being a single sample with 3 features to being two samples with a single feature, \n # so force the user to have consistent inner lists/objects\n\n for idx in range(len(X)):\n sample = X[idx]\n if isinstance(sample, list) or isinstance(sample, tuple):\n pass\n elif isinstance(sample, ma.masked_array):\n # do this before np.ndarray since ma.masked_array is a subclass of np.ndarray\n if not is_copied:\n is_copied = True\n X = list(X)\n X[idx] = _reshape_1D_if_possible(sample.astype(np.object_, copy=False).filled(np.nan))\n elif isinstance(sample, np.ndarray):\n if sample.ndim == 1:\n pass\n else:\n if not is_copied:\n is_copied = True\n X = list(X)\n X[idx] = _reshape_1D_if_possible(sample)\n elif _pandas_installed and isinstance(sample, pd.Series):\n if not is_copied:\n is_copied = True\n X = list(X)\n X[idx] = sample.astype(np.object_, copy=False).values\n elif _pandas_installed and isinstance(sample, pd.DataFrame):\n if sample.shape[0] == 1 or sample.shape[1] == 1:\n if not is_copied:\n is_copied = True\n X = list(X)\n X[idx] = sample.astype(np.object_, copy=False).values.reshape(-1)\n else:\n msg = f\"Cannot reshape to 1D. Original shape was {sample.shape}\"\n _log.error(msg)\n raise ValueError(msg)\n elif _scipy_installed and isinstance(sample, sp.sparse.spmatrix):\n if sample.shape[0] == 1 or sample.shape[1] == 1:\n if not is_copied:\n is_copied = True\n X = list(X)\n X[idx] = sample.toarray().reshape(-1)\n else:\n msg = f\"Cannot reshape to 1D. Original shape was {sample.shape}\"\n _log.error(msg)\n raise ValueError(msg)\n elif isinstance(sample, str):\n break # this only legal if we have one sample\n else:\n try:\n sample = list(sample)\n if not is_copied:\n is_copied = True\n X = list(X)\n X[idx] = sample\n except TypeError:\n break # this only legal if we have one sample\n\n # leave these as np.object_ for now and we'll try to densify per column where we're more likely to \n # succeed in densification since columns should generally be a single type\n X = np.array(X, dtype=np.object_)\n return X, 1 if X.ndim == 1 else X.shape[0]\n\ndef _cut_continuous(native, X_col, processing, binning, max_bins, min_samples_bin):\n # called under: fit\n \n if processing != 'quantile' and processing != 'quantile_humanized' and processing != 'uniform' and processing != 'winsorized' and not isinstance(processing, list) and not isinstance(processing, np.ndarray):\n if isinstance(binning, list) or isinstance(binning, np.ndarray):\n msg = f\"illegal binning type {binning}\"\n _log.error(msg)\n raise ValueError(msg)\n processing = binning\n\n if processing == 'quantile':\n # one bin for missing, one bin for unknown, and # of cuts is one less again\n cuts = native.cut_quantile(X_col, min_samples_bin, 0, max_bins - 3)\n elif processing == 'quantile_humanized':\n # one bin for missing, one bin for unknown, and # of cuts is one less again\n cuts = native.cut_quantile(X_col, min_samples_bin, 1, max_bins - 3)\n elif processing == 'uniform':\n # one bin for missing, one bin for unknown, and # of cuts is one less again\n cuts = native.cut_uniform(X_col, max_bins - 3)\n elif processing == 'winsorized':\n # one bin for missing, one bin for unknown, and # of cuts is one less again\n cuts = native.cut_winsorized(X_col, max_bins - 3)\n elif isinstance(processing, np.ndarray):\n cuts = processing.astype(dtype=np.float64, copy=False)\n elif isinstance(processing, 
list):\n cuts = np.array(processing, dtype=np.float64)\n else:\n msg = f\"illegal binning type {processing}\"\n _log.error(msg)\n raise ValueError(msg)\n\n return cuts\n\nclass EBMPreprocessor(BaseEstimator, TransformerMixin):\n \"\"\" Transformer that preprocesses data to be ready before EBM. \"\"\"\n\n def __init__(\n self, feature_names=None, feature_types=None, max_bins=256, binning=\"quantile\", min_samples_bin=1, \n min_unique_continuous=3, epsilon=None, delta=None, composition=None, privacy_schema=None\n ):\n \"\"\" Initializes EBM preprocessor.\n\n Args:\n feature_names: Feature names as list.\n feature_types: Feature types as list, for example \"continuous\" or \"categorical\".\n max_bins: Max number of bins to process numeric features.\n binning: Strategy to compute bins: \"quantile\", \"quantile_humanized\", \"uniform\", or \"private\". \n min_samples_bin: minimum number of samples to put into a quantile or quantile_humanized bin\n min_unique_continuous: number of unique numbers required before a feature is considered continuous\n epsilon: Privacy budget parameter. Only applicable when binning is \"private\".\n delta: Privacy budget parameter. Only applicable when binning is \"private\".\n privacy_schema: User specified min/max values for numeric features as dictionary. Only applicable when binning is \"private\".\n \"\"\"\n self.feature_names = feature_names\n self.feature_types = feature_types\n self.max_bins = max_bins\n self.binning = binning\n self.min_samples_bin = min_samples_bin\n self.min_unique_continuous = min_unique_continuous\n self.epsilon = epsilon\n self.delta = delta\n self.composition = composition\n self.privacy_schema = privacy_schema\n\n def fit(self, X, y=None, sample_weight=None):\n \"\"\" Fits transformer to provided samples.\n\n Args:\n X: Numpy array for training samples.\n y: Unused. 
Only included for scikit-learn compatibility\n sample_weight: Per-sample weights\n\n Returns:\n Itself.\n \"\"\"\n\n X, n_samples = clean_X(X)\n if n_samples <= 0:\n msg = \"X has no samples\"\n _log.error(msg)\n raise ValueError(msg)\n\n if sample_weight is not None:\n sample_weight = clean_vector(sample_weight, False, \"sample_weight\")\n if n_samples != len(sample_weight):\n msg = f\"X has {n_samples} samples and sample_weight has {len(sample_weight)} samples\"\n _log.error(msg)\n raise ValueError(msg)\n\n min_weight = sample_weight.min() # NaN values are guaranteed to be the min if they exist\n # TODO: for now weights of zero are illegal, but in the future accept them\n if math.isnan(min_weight) or min_weight <= 0 or math.isinf(sample_weight.max()):\n msg = \"illegal sample_weight value\"\n _log.error(msg)\n raise ValueError(msg)\n else:\n # TODO: eliminate this eventually\n sample_weight = np.ones_like(y, dtype=np.float64)\n\n feature_names_in = unify_feature_names(X, self.feature_names, self.feature_types)\n n_features = len(feature_names_in)\n\n noise_scale = None # only applicable for private binning\n if self.binning == 'private':\n DPUtils.validate_eps_delta(self.epsilon, self.delta)\n if self.composition == 'classic':\n noise_scale = DPUtils.calc_classic_noise_multi(\n total_queries = n_features, \n target_epsilon = self.epsilon, \n delta = self.delta, \n sensitivity = np.max(sample_weight)\n )\n elif self.composition == 'gdp':\n noise_scale = DPUtils.calc_gdp_noise_multi(\n total_queries = n_features, \n target_epsilon = self.epsilon, \n delta = self.delta\n ) * np.max(sample_weight) # Alg Line 17\"\n else:\n raise NotImplementedError(f\"Unknown composition method provided: {self.composition}. Please use 'gdp' or 'classic'.\")\n\n feature_types_in = _none_list * n_features\n bins = _none_list * n_features\n bin_weights = _none_list * n_features\n min_vals = np.full(n_features, np.nan, dtype=np.float64)\n max_vals = np.full(n_features, np.nan, dtype=np.float64)\n histogram_cuts = _none_list * n_features\n histogram_counts = _none_list * n_features\n unique_counts = np.full(n_features, 0, dtype=np.int64)\n zero_counts = np.full(n_features, 0, dtype=np.int64)\n\n native = Native.get_native_singleton()\n is_privacy_warning = False\n for feature_idx, (feature_type_in, X_col, categories, bad) in enumerate(unify_columns(X, zip(range(n_features), repeat(None)), feature_names_in, self.feature_types, self.min_unique_continuous, False)):\n if n_samples != len(X_col):\n msg = \"The columns of X are mismatched in the number of of samples\"\n _log.error(msg)\n raise ValueError(msg)\n\n max_bins = self.max_bins # TODO: in the future allow this to be per-feature\n if max_bins < 3:\n raise ValueError(f\"max_bins was {max_bins}, but must be 3 or higher. One bin for missing, one bin for unknown, and one or more bins for the non-missing values.\")\n\n if not X_col.flags.c_contiguous:\n # X_col could be a slice that has a stride. 
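# A rough stand-in (deliberately NOT the native C++ cut_quantile) showing the bin-budget
# arithmetic enforced just above: of max_bins total bins, one is reserved for missing and
# one for unknown, leaving max_bins - 2 value bins and therefore max_bins - 3 interior cut
# points for a quantile-style binning.
import numpy as np

max_bins = 8
X_col = np.random.RandomState(0).uniform(size=1000)
n_cuts = max_bins - 3
cuts = np.quantile(X_col, np.linspace(0.0, 1.0, n_cuts + 2)[1:-1])
bin_idx = np.digitize(X_col, cuts) + 1           # 1..max_bins-2 (0 is the missing bin)
print(len(cuts), bin_idx.min(), bin_idx.max())   # 5 1 6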
We need contiguous for caling into C\n X_col = X_col.copy()\n\n feature_types_in[feature_idx] = feature_type_in\n if categories is None:\n # continuous feature\n if bad is not None:\n msg = f\"Feature {feature_names_in[feature_idx]} is indicated as continuous, but has non-numeric data\"\n _log.error(msg)\n raise ValueError(msg)\n\n if self.binning == 'private':\n if np.isnan(X_col).any():\n msg = \"missing values in X not supported for private binning\"\n _log.error(msg)\n raise ValueError(msg)\n\n bounds = None if self.privacy_schema is None else self.privacy_schema.get(feature_idx, None)\n if bounds is None:\n is_privacy_warning = True\n min_val = np.nanmin(X_col)\n max_val = np.nanmax(X_col)\n else:\n min_val = bounds[0]\n max_val = bounds[1]\n cuts, feature_bin_weights = DPUtils.private_numeric_binning(X_col, sample_weight, noise_scale, max_bins - 1, min_val, max_val)\n feature_bin_weights.append(0)\n feature_bin_weights = np.array(feature_bin_weights, dtype=np.float64)\n else:\n min_val = np.nanmin(X_col)\n max_val = np.nanmax(X_col)\n feature_type_given = None if self.feature_types is None else self.feature_types[feature_idx]\n cuts = _cut_continuous(native, X_col, feature_type_given, self.binning, max_bins, self.min_samples_bin)\n discretized = native.discretize(X_col, cuts)\n feature_bin_weights = np.bincount(discretized, weights=sample_weight, minlength=len(cuts) + 3)\n feature_bin_weights = feature_bin_weights.astype(np.float64, copy=False)\n\n n_cuts = native.get_histogram_cut_count(X_col)\n feature_histogram_cuts = native.cut_uniform(X_col, n_cuts)\n discretized = native.discretize(X_col, feature_histogram_cuts)\n feature_histogram_counts = np.bincount(discretized, minlength=len(feature_histogram_cuts) + 3)\n feature_histogram_counts = feature_histogram_counts.astype(np.int64, copy=False)\n\n histogram_cuts[feature_idx] = feature_histogram_cuts\n histogram_counts[feature_idx] = feature_histogram_counts\n\n X_col = X_col[~np.isnan(X_col)]\n unique_counts.itemset(feature_idx, len(np.unique(X_col)))\n zero_counts.itemset(feature_idx, len(X_col) - np.count_nonzero(X_col))\n\n bins[feature_idx] = cuts\n min_vals.itemset(feature_idx, min_val)\n max_vals.itemset(feature_idx, max_val)\n else:\n # categorical feature\n if bad is not None:\n msg = f\"Feature {feature_names_in[feature_idx]} has unrecognized ordinal values\"\n _log.error(msg)\n raise ValueError(msg)\n\n if self.binning == 'private':\n if np.count_nonzero(X_col) != len(X_col):\n msg = \"missing values in X not supported for private binning\"\n _log.error(msg)\n raise ValueError(msg)\n\n # TODO: clean up this hack that uses strings of the indexes\n keep_bins, old_feature_bin_weights = DPUtils.private_categorical_binning(X_col, sample_weight, noise_scale, max_bins - 1)\n unknown_weight = 0\n if keep_bins[-1] == 'DPOther':\n unknown_weight = old_feature_bin_weights[-1]\n keep_bins = keep_bins[:-1]\n old_feature_bin_weights = old_feature_bin_weights[:-1]\n\n keep_bins = keep_bins.astype(np.int64)\n keep_bins = dict(zip(keep_bins, old_feature_bin_weights))\n\n feature_bin_weights = np.empty(len(keep_bins) + 2, dtype=np.float64)\n feature_bin_weights[0] = 0\n feature_bin_weights[-1] = unknown_weight\n\n categories = list(map(tuple, map(reversed, categories.items())))\n categories.sort() # groupby requires sorted data\n\n new_categories = {}\n new_idx = 1\n for idx, category_iter in groupby(categories, lambda x: x[0]):\n bin_weight = keep_bins.get(idx, None)\n if bin_weight is not None:\n feature_bin_weights.itemset(new_idx, 
bin_weight)\n for _, category in category_iter:\n new_categories[category] = new_idx\n new_idx += 1\n\n categories = new_categories\n else:\n n_unique_indexes = 0 if len(categories) == 0 else max(categories.values())\n feature_bin_weights = np.bincount(X_col, weights=sample_weight, minlength=n_unique_indexes + 2)\n feature_bin_weights = feature_bin_weights.astype(np.float64, copy=False)\n unique_counts.itemset(feature_idx, len(categories))\n zero_indexes = _none_list * n_unique_indexes\n for category, idx in categories.items():\n try:\n val = float(category)\n if val == 0.0:\n zero_indexes[idx - 1] = True\n except ValueError:\n pass\n\n n_zeros = 0\n for idx, is_zero in enumerate(zero_indexes):\n if is_zero:\n n_zeros += np.count_nonzero(X_col == (idx + 1))\n zero_counts.itemset(feature_idx, n_zeros)\n\n bins[feature_idx] = categories\n bin_weights[feature_idx] = feature_bin_weights\n\n if is_privacy_warning:\n warn(\"Possible privacy violation: assuming min/max values per feature are public info. \"\n \"Pass a privacy schema with known public ranges per feature to avoid this warning.\")\n\n self.feature_names_in_ = feature_names_in\n self.feature_types_in_ = feature_types_in\n self.bins_ = bins\n self.bin_weights_ = bin_weights\n self.noise_scale_ = noise_scale\n self.min_vals_ = min_vals\n self.max_vals_ = max_vals\n self.histogram_cuts_ = histogram_cuts\n self.histogram_counts_ = histogram_counts\n self.unique_counts_ = unique_counts\n self.zero_counts_ = zero_counts\n self.has_fitted_ = True\n return self\n\n def transform(self, X):\n \"\"\" Transform on provided samples.\n\n Args:\n X: Numpy array for samples.\n\n Returns:\n Transformed numpy array.\n \"\"\"\n check_is_fitted(self, \"has_fitted_\")\n\n X, n_samples = clean_X(X)\n if n_samples <= 0:\n msg = \"X has no samples\"\n _log.error(msg)\n raise ValueError(msg)\n\n X_binned = np.empty((n_samples, len(self.feature_names_in_)), dtype=np.int64, order='F')\n\n native = Native.get_native_singleton()\n category_iter = (category if isinstance(category, dict) else None for category in self.bins_)\n requests = zip(count(), category_iter)\n cols = unify_columns(X, requests, self.feature_names_in_, self.feature_types_in_, None, False)\n for feature_idx, bins, (_, X_col, _, _) in zip(count(), self.bins_, cols):\n if n_samples != len(X_col):\n msg = \"The columns of X are mismatched in the number of of samples\"\n _log.error(msg)\n raise ValueError(msg)\n\n if not isinstance(bins, dict):\n # continuous feature\n\n if not X_col.flags.c_contiguous:\n # X_col could be a slice that has a stride. We need contiguous for caling into C\n X_col = X_col.copy()\n\n X_col = native.discretize(X_col, bins)\n\n X_binned[:, feature_idx] = X_col\n\n return X_binned\n\n def fit_transform(self, X, y=None, sample_weight=None):\n \"\"\" Fits and Transform on provided samples.\n\n Args:\n X: Numpy array for samples.\n y: Unused. 
Only included for scikit-learn compatibility\n sample_weight: Per-sample weights\n\n Returns:\n Transformed numpy array.\n \"\"\"\n\n X, _ = clean_X(X)\n return self.fit(X, y, sample_weight).transform(X)\n\ndef deduplicate_bins(bins):\n # calling this function before calling score_terms allows score_terms to operate more efficiently since it'll\n # be able to avoid re-binning data for pairs that have already been processed in mains or other pairs since we \n # use the id of the bins to identify feature data that was previously binned\n\n uniques = dict()\n for feature_idx in range(len(bins)):\n bin_levels = bins[feature_idx]\n highest_key = None\n for level_idx in range(len(bin_levels)):\n feature_bins = bin_levels[level_idx]\n if isinstance(feature_bins, dict):\n key = frozenset(feature_bins.items())\n else:\n key = tuple(feature_bins)\n existing = uniques.get(key, None)\n if existing is None:\n uniques[key] = feature_bins\n else:\n bin_levels[level_idx] = existing\n\n if highest_key != key:\n highest_key = key\n highest_idx = level_idx\n del bin_levels[highest_idx + 1:]\n\ndef construct_bins(\n X,\n sample_weight,\n feature_names_given, \n feature_types_given, \n max_bins_leveled, \n binning='quantile', \n min_samples_bin=1, \n min_unique_continuous=3, \n epsilon=None, \n delta=None, \n composition=None, \n privacy_schema=None,\n):\n is_mains = True\n for max_bins in max_bins_leveled:\n preprocessor = EBMPreprocessor(\n feature_names_given, \n feature_types_given, \n max_bins, \n binning, \n min_samples_bin, \n min_unique_continuous, \n epsilon, \n delta, \n composition, \n privacy_schema\n )\n preprocessor.fit(X, None, sample_weight)\n if is_mains:\n is_mains = False\n bins = preprocessor.bins_\n for feature_idx in range(len(bins)):\n bins[feature_idx] = [bins[feature_idx]]\n\n feature_names_in = preprocessor.feature_names_in_\n feature_types_in = preprocessor.feature_types_in_\n bin_weights = preprocessor.bin_weights_\n min_vals = preprocessor.min_vals_\n max_vals = preprocessor.max_vals_\n histogram_cuts = preprocessor.histogram_cuts_\n histogram_counts = preprocessor.histogram_counts_\n unique_counts = preprocessor.unique_counts_\n zero_counts = preprocessor.zero_counts_\n else:\n if feature_names_in != preprocessor.feature_names_in_:\n raise RuntimeError(\"Mismatched feature_names\")\n if feature_types_in != preprocessor.feature_types_in_:\n raise RuntimeError(\"Mismatched feature_types\")\n\n for bin_levels, feature_bins in zip(bins, preprocessor.bins_):\n bin_levels.append(feature_bins)\n\n deduplicate_bins(bins)\n return feature_names_in, feature_types_in, bins, bin_weights, min_vals, max_vals, histogram_cuts, histogram_counts, unique_counts, zero_counts\n\ndef bin_python(\n X,\n n_dimensions,\n bins,\n feature_names_in, \n feature_types_in, \n):\n X, n_samples = clean_X(X)\n if n_samples <= 0:\n msg = \"X has no samples\"\n _log.error(msg)\n raise ValueError(msg)\n\n X_binned = np.empty((n_samples, len(feature_names_in)), dtype=np.int64, order='F')\n\n native = Native.get_native_singleton()\n bin_iter = [bin_levels[-1 if len(bin_levels) < n_dimensions else n_dimensions - 1] for bin_levels in bins]\n category_iter = (category if isinstance(category, dict) else None for category in bin_iter)\n requests = zip(count(), category_iter)\n cols = unify_columns(X, requests, feature_names_in, feature_types_in, None, False)\n native_bin_counts = np.empty(len(feature_names_in), dtype=np.int64)\n for feature_idx, feature_bins, (_, X_col, _, bad) in zip(count(), bin_iter, cols):\n if 
n_samples != len(X_col):\n msg = \"The columns of X are mismatched in the number of of samples\"\n _log.error(msg)\n raise ValueError(msg)\n\n if isinstance(feature_bins, dict):\n # categorical feature\n n_bins = 1 if len(feature_bins) == 0 else max(feature_bins.values()) + 1\n else:\n # continuous feature\n \n if not X_col.flags.c_contiguous:\n # X_col could be a slice that has a stride. We need contiguous for caling into C\n X_col = X_col.copy()\n \n # the fix was to remove a tab for the line below\n X_col = native.discretize(X_col, feature_bins)\n n_bins = len(feature_bins) + 2\n\n if bad is not None:\n n_bins += 1\n X_col[bad != _none_ndarray] = n_bins - 1\n\n native_bin_counts.itemset(feature_idx, n_bins)\n X_binned[:, feature_idx] = X_col\n\n return X_binned, native_bin_counts\n\ndef bin_native(\n is_classification, \n feature_idxs, \n bins_iter,\n X, \n y, \n sample_weight, \n feature_names_in, \n feature_types_in, \n):\n # called under: fit\n\n _log.info(\"Creating native dataset\")\n\n X, n_samples = clean_X(X)\n if n_samples <= 0:\n msg = \"X has no samples to train on\"\n _log.error(msg)\n raise ValueError(msg)\n\n if is_classification:\n y = clean_vector(y, True, \"y\")\n # use pure alphabetical ordering for the classes. It's tempting to sort by frequency first\n # but that could lead to a lot of bugs if the # of categories is close and we flip the ordering\n # in two separate runs, which would flip the ordering of the classes within our score tensors.\n classes, y = np.unique(y, return_inverse=True)\n else:\n y = clean_vector(y, False, \"y\")\n classes = None\n\n if n_samples != len(y):\n msg = f\"X has {n_samples} samples and y has {len(y)} samples\"\n _log.error(msg)\n raise ValueError(msg)\n\n if sample_weight is not None:\n sample_weight = clean_vector(sample_weight, False, \"sample_weight\")\n if n_samples != len(sample_weight):\n msg = f\"X has {n_samples} samples and sample_weight has {len(sample_weight)} samples\"\n _log.error(msg)\n raise ValueError(msg)\n else:\n # TODO: eliminate this eventually\n sample_weight = np.ones_like(y, dtype=np.float64)\n\n native = Native.get_native_singleton()\n\n responses = []\n requests = []\n for request in zip(feature_idxs, bins_iter):\n responses.append(request)\n if not isinstance(request[1], dict):\n # continuous feature. Don't include the continuous definition\n request = (request[0], None)\n requests.append(request)\n\n native_bin_counts = []\n n_bytes = native.size_data_set_header(len(requests), 1, 1)\n for (feature_idx, feature_bins), (_, X_col, _, bad) in zip(responses, unify_columns(X, requests, feature_names_in, feature_types_in, None, False)):\n if n_samples != len(X_col):\n # re-check that that number of samples is identical since iterators can be used up by looking at them\n # this also protects us from badly behaved iterators from causing a segfault in C++ by returning an\n # unexpected number of items and thus a buffer overrun on the second pass through the data\n msg = \"The columns of X are mismatched in the number of of samples\"\n _log.error(msg)\n raise ValueError(msg)\n\n if not X_col.flags.c_contiguous:\n # X_col could be a slice that has a stride. 
We need contiguous for caling into C\n X_col = X_col.copy()\n\n if isinstance(feature_bins, dict):\n # categorical feature\n n_bins = 1 if len(feature_bins) == 0 else max(feature_bins.values()) + 1\n else:\n # continuous feature\n X_col = native.discretize(X_col, feature_bins)\n n_bins = len(feature_bins) + 2\n\n if bad is not None:\n n_bins += 1\n X_col[bad != _none_ndarray] = n_bins - 1\n\n native_bin_counts.append(n_bins)\n\n n_bytes += native.size_feature(feature_types_in[feature_idx] == 'nominal', n_bins, X_col)\n\n n_bytes += native.size_weight(sample_weight)\n if is_classification:\n n_bytes += native.size_classification_target(len(classes), y)\n else:\n n_bytes += native.size_regression_target(y)\n\n shared_dataset = RawArray('B', n_bytes)\n\n native.fill_data_set_header(len(requests), 1, 1, n_bytes, shared_dataset)\n\n for (feature_idx, feature_bins), n_bins, (_, X_col, _, bad) in zip(responses, native_bin_counts, unify_columns(X, requests, feature_names_in, feature_types_in, None, False)):\n if n_samples != len(X_col):\n # re-check that that number of samples is identical since iterators can be used up by looking at them\n # this also protects us from badly behaved iterators from causing a segfault in C++ by returning an\n # unexpected number of items and thus a buffer overrun on the second pass through the data\n msg = \"The columns of X are mismatched in the number of of samples\"\n _log.error(msg)\n raise ValueError(msg)\n\n if not X_col.flags.c_contiguous:\n # X_col could be a slice that has a stride. We need contiguous for caling into C\n X_col = X_col.copy()\n\n if not isinstance(feature_bins, dict):\n # continuous feature\n X_col = native.discretize(X_col, feature_bins)\n\n if bad is not None:\n X_col[bad != _none_ndarray] = n_bins - 1\n\n native.fill_feature(feature_types_in[feature_idx] == 'nominal', n_bins, X_col, n_bytes, shared_dataset)\n\n native.fill_weight(sample_weight, n_bytes, shared_dataset)\n if is_classification:\n native.fill_classification_target(len(classes), y, n_bytes, shared_dataset)\n else:\n native.fill_regression_target(y, n_bytes, shared_dataset)\n\n # TODO: use the unknowns array instead of using the last count bin in the rest of our code\n return shared_dataset, classes, np.array(native_bin_counts, dtype=np.int64)\n\ndef bin_native_by_dimension(\n is_classification, \n n_dimensions,\n bins,\n X, \n y, \n sample_weight, \n feature_names_in, \n feature_types_in, \n):\n # called under: fit\n\n feature_idxs = range(len(feature_names_in))\n bins_iter = []\n for feature_idx in feature_idxs:\n bin_levels = bins[feature_idx]\n feature_bins = bin_levels[-1 if len(bin_levels) < n_dimensions else n_dimensions - 1]\n bins_iter.append(feature_bins)\n\n return bin_native(\n is_classification, \n feature_idxs, \n bins_iter,\n X, \n y, \n sample_weight, \n feature_names_in, \n feature_types_in, \n )\n\ndef eval_terms(X, feature_names_in, feature_types_in, bins, feature_groups):\n # called under: predict\n\n # prior to calling this function, call deduplicate_bins which will eliminate extra work in this function\n\n # this generator function returns data in whatever order it thinks is most efficient. Normally for \n # mains it returns them in order, but pairs will be returned as their data completes and they can\n # be mixed in with mains. So, if we request data for [(0), (1), (2), (3), (4), (1, 3)] the return sequence\n # could be [(0), (1), (2), (3), (1, 3), (4)]. 
More complicated pair/triples return even more randomized ordering.\n # For additive models the results can be processed in any order, so this imposes no penalities on us.\n\n _log.info(\"eval_terms\")\n\n X, n_samples = clean_X(X)\n\n requests = []\n waiting = dict()\n for feature_group_idx, feature_idxs in enumerate(feature_groups):\n # the last position holds the feature_group_idx object\n # the first len(feature_idxs) items hold the binned data that we get back as it arrives\n requirements = _none_list * (len(feature_idxs) + 1)\n requirements[-1] = feature_group_idx\n for feature_idx in feature_idxs:\n bin_levels = bins[feature_idx]\n feature_bins = bin_levels[-1 if len(bin_levels) < len(feature_idxs) else len(feature_idxs) - 1]\n if isinstance(feature_bins, dict):\n # categorical feature\n request = (feature_idx, feature_bins)\n key = (feature_idx, id(feature_bins))\n else:\n # continuous feature\n request = (feature_idx, None)\n key = feature_idx\n waiting_list = waiting.get(key, None)\n if waiting_list is None:\n requests.append(request)\n waiting[key] = [requirements]\n else:\n waiting_list.append(requirements)\n\n native = Native.get_native_singleton()\n\n for (column_feature_idx, _), (_, X_col, column_categories, bad) in zip(requests, unify_columns(X, requests, feature_names_in, feature_types_in, None, True)):\n if n_samples != len(X_col):\n msg = \"The columns of X are mismatched in the number of of samples\"\n _log.error(msg)\n raise ValueError(msg)\n\n if column_categories is None:\n # continuous feature\n\n if bad is not None:\n # TODO: we could pass out a bool array instead of objects for this function only\n bad = bad != _none_ndarray\n\n if not X_col.flags.c_contiguous:\n # we requrested this feature, so at some point we're going to call discretize, \n # which requires contiguous memory\n X_col = X_col.copy()\n\n cuts_completed = dict()\n bin_levels = bins[column_feature_idx]\n for requirements in waiting[column_feature_idx]:\n if len(requirements) != 0:\n feature_group_idx = requirements[-1]\n feature_idxs = feature_groups[feature_group_idx]\n is_done = True\n for dimension_idx, term_feature_idx in enumerate(feature_idxs):\n if term_feature_idx == column_feature_idx:\n cuts = bin_levels[-1 if len(bin_levels) < len(feature_idxs) else len(feature_idxs) - 1]\n discretized = cuts_completed.get(id(cuts), None)\n if discretized is None:\n discretized = native.discretize(X_col, cuts)\n if bad is not None:\n discretized[bad] = -1\n\n cuts_completed[id(cuts)] = discretized\n requirements[dimension_idx] = discretized\n elif requirements[dimension_idx] is None:\n is_done = False\n\n if is_done:\n # the requirements can contain features with both categoricals or continuous\n binned_data = requirements[:-1]\n # clear references so that the garbage collector can free them\n requirements.clear()\n yield feature_group_idx, binned_data\n else:\n # categorical feature\n\n if bad is not None:\n # TODO: we could pass out a single bool (not an array) if these aren't continuous convertible\n pass # TODO: improve this handling\n\n for requirements in waiting[(column_feature_idx, id(column_categories))]:\n if len(requirements) != 0:\n feature_group_idx = requirements[-1]\n feature_idxs = feature_groups[feature_group_idx]\n is_done = True\n for dimension_idx, term_feature_idx in enumerate(feature_idxs):\n if term_feature_idx == column_feature_idx:\n # \"term_categories is column_categories\" since any term in the waiting_list must have\n # one of it's elements match this (feature_idx, categories) 
index, and all items in this\n # term need to have the same categories since they came from the same bin_level\n requirements[dimension_idx] = X_col\n elif requirements[dimension_idx] is None:\n is_done = False\n\n if is_done:\n # the requirements can contain features with both categoricals or continuous\n binned_data = requirements[:-1]\n # clear references so that the garbage collector can free them\n requirements.clear()\n yield feature_group_idx, binned_data\n\ndef ebm_decision_function(X, n_samples, feature_names_in, feature_types_in, bins, intercept, additive_terms, feature_groups):\n if type(intercept) is float or len(intercept) == 1:\n scores = np.full(n_samples, intercept, dtype=np.float64)\n else:\n scores = np.full((n_samples, len(intercept)), intercept, dtype=np.float64)\n\n for feature_group_idx, binned_data in eval_terms(X, feature_names_in, feature_types_in, bins, feature_groups):\n scores += additive_terms[feature_group_idx][tuple(binned_data)]\n\n return scores\n\ndef ebm_decision_function_and_explain(\n X, \n n_samples, \n feature_names_in, \n feature_types_in, \n bins, \n intercept, \n additive_terms, \n feature_groups\n):\n if type(intercept) is float or len(intercept) == 1:\n scores = np.full(n_samples, intercept, dtype=np.float64)\n explanations = np.empty((n_samples, len(feature_groups)), dtype=np.float64)\n else:\n # TODO: add a test for multiclass calls to ebm_decision_function_and_explain\n scores = np.full((n_samples, len(intercept)), intercept, dtype=np.float64)\n explanations = np.empty((n_samples, len(feature_groups), len(intercept)), dtype=np.float64)\n\n for feature_group_idx, binned_data in eval_terms(X, feature_names_in, feature_types_in, bins, feature_groups):\n term_scores = additive_terms[feature_group_idx][tuple(binned_data)]\n scores += term_scores\n explanations[:, feature_group_idx] = term_scores\n\n return scores, explanations\n\ndef get_counts_and_weights(X, sample_weight, feature_names_in, feature_types_in, bins, feature_groups):\n bin_counts = _none_list * len(feature_groups)\n bin_weights = _none_list * len(feature_groups)\n\n for feature_group_idx, binned_data in eval_terms(X, feature_names_in, feature_types_in, bins, feature_groups):\n features = feature_groups[feature_group_idx]\n multiple = 1\n dimensions = []\n for dimension_idx in range(len(features) - 1, -1, -1):\n feature_idx = features[dimension_idx]\n bin_levels = bins[feature_idx]\n feature_bins = bin_levels[-1 if len(bin_levels) < len(features) else len(features) - 1]\n if isinstance(feature_bins, dict):\n # categorical feature\n n_bins = 2 if len(feature_bins) == 0 else max(feature_bins.values()) + 2\n else:\n # continuous feature\n n_bins = len(feature_bins) + 3\n\n dimensions.append(n_bins)\n dim_data = binned_data[dimension_idx]\n dim_data = np.where(dim_data < 0, n_bins - 1, dim_data)\n if multiple == 1:\n flat_indexes = dim_data\n else:\n flat_indexes += dim_data * multiple\n multiple *= n_bins\n dimensions = tuple(reversed(dimensions))\n\n term_bin_counts = np.bincount(flat_indexes, minlength=multiple)\n term_bin_counts = term_bin_counts.astype(np.int64, copy=False)\n term_bin_counts = term_bin_counts.reshape(dimensions)\n\n bin_counts[feature_group_idx] = term_bin_counts\n\n if sample_weight is None:\n term_bin_weights = term_bin_counts.astype(np.float64)\n else:\n term_bin_weights = np.bincount(flat_indexes, weights=sample_weight, minlength=multiple)\n term_bin_weights = term_bin_weights.astype(np.float64, copy=False)\n term_bin_weights = 
term_bin_weights.reshape(dimensions)\n \n bin_weights[feature_group_idx] = term_bin_weights\n\n return bin_counts, bin_weights\n\ndef unify_data2(is_classification, X, y=None, sample_weight=None, feature_names=None, feature_types=None, missing_data_allowed=False, min_unique_continuous=3):\n _log.info(\"Unifying data\")\n\n X, n_samples = clean_X(X)\n if n_samples <= 0:\n msg = \"X has no samples\"\n _log.error(msg)\n raise ValueError(msg)\n\n classes = None\n if y is not None:\n if is_classification:\n y = clean_vector(y, True, \"y\")\n # use pure alphabetical ordering for the classes. It's tempting to sort by frequency first\n # but that could lead to a lot of bugs if the # of categories is close and we flip the ordering\n # in two separate runs, which would flip the ordering of the classes within our score tensors.\n classes, y = np.unique(y, return_inverse=True)\n else:\n y = clean_vector(y, False, \"y\")\n\n if n_samples != len(y):\n msg = f\"X has {n_samples} samples and y has {len(y)} samples\"\n _log.error(msg)\n raise ValueError(msg)\n\n if sample_weight is not None:\n sample_weight = clean_vector(sample_weight, False, \"sample_weight\")\n if n_samples != len(sample_weight):\n msg = f\"X has {n_samples} samples and sample_weight has {len(sample_weight)} samples\"\n _log.error(msg)\n raise ValueError(msg)\n\n feature_names_in = unify_feature_names(X, feature_names, feature_types)\n feature_types_in = _none_list * len(feature_names_in)\n\n # TODO: this could be made more efficient by storing continuous and categorical values in separate numpy arrays\n # and merging afterwards. Categoricals are going to share the same objects, but we don't want object\n # fragmentation for continuous values which generates a lot of garbage to collect later\n X_unified = np.empty((n_samples, len(feature_names_in)), dtype=np.object_, order='F')\n\n for feature_idx, (feature_type_in, X_col, categories, bad) in zip(count(), unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in, feature_types, min_unique_continuous, False)):\n if n_samples != len(X_col):\n msg = \"The columns of X are mismatched in the number of of samples\"\n _log.error(msg)\n raise ValueError(msg)\n\n feature_types_in[feature_idx] = feature_type_in\n if categories is None:\n # continuous feature\n if bad is not None:\n msg = f\"Feature {feature_names_in[feature_idx]} is indicated as continuous, but has non-numeric data\"\n _log.error(msg)\n raise ValueError(msg)\n\n if not missing_data_allowed and np.isnan(X_col).any():\n msg = f\"X cannot contain missing values\"\n _log.error(msg)\n raise ValueError(msg)\n\n X_unified[:, feature_idx] = X_col\n else:\n # categorical feature\n if bad is not None:\n msg = f\"Feature {feature_names_in[feature_idx]} has unrecognized ordinal values\"\n _log.error(msg)\n raise ValueError(msg)\n\n if not missing_data_allowed and np.count_nonzero(X_col) != len(X_col):\n msg = f\"X cannot contain missing values\"\n _log.error(msg)\n raise ValueError(msg)\n\n mapping = np.empty(len(categories) + 1, dtype=np.object_)\n mapping.itemset(0, np.nan)\n for category, idx in categories.items():\n mapping.itemset(idx, category)\n X_unified[:, feature_idx] = mapping[X_col]\n\n return X_unified, y, sample_weight, classes, feature_names_in, feature_types_in\n\ndef append_tensor(tensor, append_low=None, append_high=None):\n if append_low is None:\n if append_high is None:\n return tensor\n dim_slices = [slice(0, dim_len) for dim_len in tensor.shape]\n new_shape = [dim_len + int(is_high) for 
dim_len, is_high in zip(tensor.shape, append_high)]\n else:\n dim_slices = [slice(int(is_low), dim_len + int(is_low)) for dim_len, is_low in zip(tensor.shape, append_low)]\n if append_high is None:\n new_shape = [dim_len + int(is_low) for dim_len, is_low in zip(tensor.shape, append_low)]\n else:\n new_shape = [dim_len + int(is_low) + int(is_high) for dim_len, is_low, is_high in zip(tensor.shape, append_low, append_high)]\n\n if len(new_shape) != tensor.ndim:\n # multiclass\n new_shape.append(tensor.shape[-1])\n\n new_tensor = np.zeros(tuple(new_shape), dtype=tensor.dtype)\n new_tensor[tuple(dim_slices)] = tensor\n return new_tensor\n\ndef trim_tensor(tensor, trim_low=None, trim_high=None):\n if trim_low is None:\n if trim_high is None:\n return tensor\n dim_slices = [slice(0, -1 if is_high else None) for dim_len, is_high in zip(tensor.shape, trim_high)]\n else:\n if trim_high is None:\n dim_slices = [slice(int(is_low), None) for dim_len, is_low in zip(tensor.shape, trim_low)]\n else:\n dim_slices = [slice(int(is_low), -1 if is_high else None) for dim_len, is_low, is_high in zip(tensor.shape, trim_low, trim_high)]\n return tensor[tuple(dim_slices)]\n\ndef zero_tensor(tensor, zero_low=None, zero_high=None):\n entire_tensor = [slice(None) for _ in range(tensor.ndim)]\n if zero_low is not None:\n for dimension_idx, is_zero in enumerate(zero_low):\n if is_zero:\n dim_slices = entire_tensor.copy()\n dim_slices[dimension_idx] = 0\n tensor[tuple(dim_slices)] = 0\n if zero_high is not None:\n for dimension_idx, is_zero in enumerate(zero_high):\n if is_zero:\n dim_slices = entire_tensor.copy()\n dim_slices[dimension_idx] = -1\n tensor[tuple(dim_slices)] = 0\n\ndef make_boosting_weights(term_bin_weights):\n # TODO: replace this function with a bool array that we generate in bin_native.. this function will crash\n # if there are samples with zero weights\n bin_data_weights = []\n for term_weights in term_bin_weights:\n if term_weights[-1] == 0:\n bin_data_weights.append(term_weights[:-1])\n else:\n bin_data_weights.append(term_weights)\n return bin_data_weights\n\ndef restore_missing_value_zeros2(tensors, term_bin_weights):\n for tensor, weights in zip(tensors, term_bin_weights):\n n_dimensions = weights.ndim\n entire_tensor = [slice(None)] * n_dimensions\n lower = []\n higher = []\n for dimension_idx in range(n_dimensions):\n dim_slices = entire_tensor.copy()\n dim_slices[dimension_idx] = 0\n total_sum = np.sum(weights[tuple(dim_slices)])\n lower.append(True if total_sum == 0 else False)\n dim_slices[dimension_idx] = -1\n total_sum = np.sum(weights[tuple(dim_slices)])\n higher.append(True if total_sum == 0 else False)\n zero_tensor(tensor, lower, higher)\n\ndef after_boosting(feature_groups, tensors, feature_bin_weights):\n # TODO: this isn't a problem today since any unnamed categories in the mains and the pairs are the same\n # (they don't exist in the pairs today at all since DP-EBMs aren't pair enabled yet and we haven't\n # made the option for them in regular EBMs), but when we eventually go that way then we'll\n # need to examine the tensored term based bin weights to see what to do. 
Alternatively, we could\n # obtain this information from bin_native which would be cleaner since we only need it during boosting\n new_tensors=[]\n for feature_group_idx, feature_group in enumerate(feature_groups):\n higher = [feature_bin_weights[feature_idx][-1] == 0 for feature_idx in feature_group]\n new_tensors.append(append_tensor(tensors[feature_group_idx], None, higher))\n return new_tensors\n\ndef remove_last2(tensors, term_bin_weights):\n new_tensors=[]\n for idx, tensor, weights in zip(count(), tensors, term_bin_weights):\n n_dimensions = weights.ndim\n entire_tensor = [slice(None)] * n_dimensions\n higher = []\n for dimension_idx in range(n_dimensions):\n dim_slices = entire_tensor.copy()\n dim_slices[dimension_idx] = -1\n total_sum = np.sum(weights[tuple(dim_slices)])\n higher.append(True if total_sum == 0 else False)\n new_tensors.append(trim_tensor(tensor, None, higher))\n return new_tensors\n"
] | [
[
"numpy.nanmax",
"numpy.expand_dims",
"sklearn.utils.validation.check_is_fitted",
"numpy.nanmin",
"numpy.max",
"pandas.isna",
"numpy.place",
"numpy.where",
"pandas.notna",
"numpy.ones_like",
"numpy.unique",
"numpy.full",
"numpy.insert",
"numpy.count_nonzero",
"numpy.zeros",
"numpy.isnan",
"numpy.logical_and",
"numpy.array",
"numpy.array_equal",
"numpy.bincount",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |