repo_name
stringlengths 6
130
| hexsha
sequence | file_path
sequence | code
sequence | apis
sequence | possible_versions
list |
---|---|---|---|---|---|
speglich/devito | [
"f535a44dff12de2837eb6e3217a65ffb2d371cb8"
] | [
"tests/test_dimension.py"
] | [
"from itertools import product\n\nimport numpy as np\nfrom sympy import And\nimport pytest\n\nfrom conftest import skipif\nfrom devito import (ConditionalDimension, Grid, Function, TimeFunction, SparseFunction, # noqa\n Eq, Operator, Constant, Dimension, SubDimension, switchconfig,\n SubDomain, Lt, Le, Gt, Ge, Ne, Buffer)\nfrom devito.ir.iet import Expression, Iteration, FindNodes, retrieve_iteration_tree\nfrom devito.symbolics import indexify, retrieve_functions\nfrom devito.types import Array\n\n\nclass TestBufferedDimension(object):\n\n def test_multi_buffer(self):\n grid = Grid((3, 3))\n f = TimeFunction(name=\"f\", grid=grid)\n g = TimeFunction(name=\"g\", grid=grid, save=Buffer(7))\n\n op = Operator([Eq(f.forward, 1), Eq(g, f.forward)])\n op(time_M=3)\n # f looped all time_order buffer and is 1 everywhere\n assert np.allclose(f.data, 1)\n # g looped indices 0 to 3, rest is still 0\n assert np.allclose(g.data[0:4], 1)\n assert np.allclose(g.data[4:], 0)\n\n def test_multi_buffer_long_time(self):\n grid = Grid((3, 3))\n time = grid.time_dim\n f = TimeFunction(name=\"f\", grid=grid)\n g = TimeFunction(name=\"g\", grid=grid, save=Buffer(7))\n\n op = Operator([Eq(f.forward, time), Eq(g, time+1)])\n op(time_M=20)\n # f[0] is time=19, f[1] is time=20\n assert np.allclose(f.data[0], 19)\n assert np.allclose(f.data[1], 20)\n # g is time 15 to 21 (loop twice the 7 buffer then 15->21)\n for i in range(7):\n assert np.allclose(g.data[i], 14+i+1)\n\n\nclass TestSubDimension(object):\n\n def test_interior(self):\n \"\"\"\n Tests application of an Operator consisting of a single equation\n over the ``interior`` subdomain.\n \"\"\"\n grid = Grid(shape=(4, 4, 4))\n x, y, z = grid.dimensions\n\n interior = grid.interior\n\n u = TimeFunction(name='u', grid=grid)\n\n eqn = [Eq(u.forward, u + 2, subdomain=interior)]\n\n op = Operator(eqn)\n op.apply(time_M=2)\n assert np.all(u.data[1, 1:-1, 1:-1, 1:-1] == 6.)\n assert np.all(u.data[1, :, 0] == 0.)\n assert np.all(u.data[1, :, -1] 
== 0.)\n assert np.all(u.data[1, :, :, 0] == 0.)\n assert np.all(u.data[1, :, :, -1] == 0.)\n\n def test_domain_vs_interior(self):\n \"\"\"\n Tests application of an Operator consisting of two equations, one\n over the whole domain (default), and one over the ``interior`` subdomain.\n \"\"\"\n grid = Grid(shape=(4, 4, 4))\n x, y, z = grid.dimensions\n t = grid.stepping_dim # noqa\n\n interior = grid.interior\n\n u = TimeFunction(name='u', grid=grid) # noqa\n eqs = [Eq(u.forward, u + 1),\n Eq(u.forward, u.forward + 2, subdomain=interior)]\n\n op = Operator(eqs, opt='noop')\n trees = retrieve_iteration_tree(op)\n assert len(trees) == 2\n\n op.apply(time_M=1)\n assert np.all(u.data[1, 0, :, :] == 1)\n assert np.all(u.data[1, -1, :, :] == 1)\n assert np.all(u.data[1, :, 0, :] == 1)\n assert np.all(u.data[1, :, -1, :] == 1)\n assert np.all(u.data[1, :, :, 0] == 1)\n assert np.all(u.data[1, :, :, -1] == 1)\n assert np.all(u.data[1, 1:3, 1:3, 1:3] == 3)\n\n def test_subdim_middle(self):\n \"\"\"\n Tests that instantiating SubDimensions using the classmethod\n constructors works correctly.\n \"\"\"\n grid = Grid(shape=(4, 4, 4))\n x, y, z = grid.dimensions\n t = grid.stepping_dim # noqa\n\n u = TimeFunction(name='u', grid=grid) # noqa\n xi = SubDimension.middle(name='xi', parent=x,\n thickness_left=1,\n thickness_right=1)\n eqs = [Eq(u.forward, u + 1)]\n eqs = [e.subs(x, xi) for e in eqs]\n\n op = Operator(eqs)\n\n u.data[:] = 1.0\n op.apply(time_M=1)\n assert np.all(u.data[1, 0, :, :] == 1)\n assert np.all(u.data[1, -1, :, :] == 1)\n assert np.all(u.data[1, 1:3, :, :] == 2)\n\n def test_symbolic_size(self):\n \"\"\"Check the symbolic size of all possible SubDimensions is as expected.\"\"\"\n grid = Grid(shape=(4,))\n x, = grid.dimensions\n thickness = 4\n\n xleft = SubDimension.left(name='xleft', parent=x, thickness=thickness)\n assert xleft.symbolic_size == xleft.thickness.left[0]\n\n xi = SubDimension.middle(name='xi', parent=x,\n thickness_left=thickness, 
thickness_right=thickness)\n assert xi.symbolic_size == (x.symbolic_max - x.symbolic_min -\n xi.thickness.left[0] - xi.thickness.right[0] + 1)\n\n xright = SubDimension.right(name='xright', parent=x, thickness=thickness)\n assert xright.symbolic_size == xright.thickness.right[0]\n\n def test_bcs(self):\n \"\"\"\n Tests application of an Operator consisting of multiple equations\n defined over different sub-regions, explicitly created through the\n use of SubDimensions.\n \"\"\"\n grid = Grid(shape=(20, 20))\n x, y = grid.dimensions\n t = grid.stepping_dim\n thickness = 4\n\n u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)\n\n xleft = SubDimension.left(name='xleft', parent=x, thickness=thickness)\n xi = SubDimension.middle(name='xi', parent=x,\n thickness_left=thickness, thickness_right=thickness)\n xright = SubDimension.right(name='xright', parent=x, thickness=thickness)\n\n yi = SubDimension.middle(name='yi', parent=y,\n thickness_left=thickness, thickness_right=thickness)\n\n t_in_centre = Eq(u[t+1, xi, yi], 1)\n leftbc = Eq(u[t+1, xleft, yi], u[t+1, xleft+1, yi] + 1)\n rightbc = Eq(u[t+1, xright, yi], u[t+1, xright-1, yi] + 1)\n\n op = Operator([t_in_centre, leftbc, rightbc])\n\n op.apply(time_m=1, time_M=1)\n\n assert np.all(u.data[0, :, 0:thickness] == 0.)\n assert np.all(u.data[0, :, -thickness:] == 0.)\n assert all(np.all(u.data[0, i, thickness:-thickness] == (thickness+1-i))\n for i in range(thickness))\n assert all(np.all(u.data[0, -i, thickness:-thickness] == (thickness+2-i))\n for i in range(1, thickness + 1))\n assert np.all(u.data[0, thickness:-thickness, thickness:-thickness] == 1.)\n\n def test_flow_detection_interior(self):\n \"\"\"\n Test detection of flow directions when SubDimensions are used\n (in this test they are induced by the ``interior`` subdomain).\n\n Stencil uses values at new timestep as well as those at previous ones\n This forces an evaluation order onto x.\n Weights are:\n\n x=0 x=1 x=2 x=3\n t=N 2 
---3\n v /\n t=N+1 o--+----4\n\n Flow dependency should traverse x in the negative direction\n\n x=2 x=3 x=4 x=5 x=6\n t=0 0 --- 0 -- 1 -- 0\n v / v / v /\n t=1 44 -+--- 11 -+--- 2--+ -- 0\n \"\"\"\n grid = Grid(shape=(10, 10))\n x, y = grid.dimensions\n\n interior = grid.interior\n\n u = TimeFunction(name='u', grid=grid, save=10, time_order=1, space_order=0)\n\n step = Eq(u.forward, 2*u\n + 3*u.subs(x, x+x.spacing)\n + 4*u.forward.subs(x, x+x.spacing),\n subdomain=interior)\n op = Operator(step)\n\n u.data[0, 5, 5] = 1.0\n op.apply(time_M=0)\n assert u.data[1, 5, 5] == 2\n assert u.data[1, 4, 5] == 11\n assert u.data[1, 3, 5] == 44\n assert u.data[1, 2, 5] == 4*44\n assert u.data[1, 1, 5] == 4*4*44\n\n # This point isn't updated because of the `interior` selection\n assert u.data[1, 0, 5] == 0\n\n assert np.all(u.data[1, 6:, :] == 0)\n assert np.all(u.data[1, :, 0:5] == 0)\n assert np.all(u.data[1, :, 6:] == 0)\n\n @pytest.mark.parametrize('exprs,expected,', [\n # Carried dependence in both /t/ and /x/\n (['Eq(u[t+1, x, y], u[t+1, x-1, y] + u[t, x, y])'], 'y'),\n (['Eq(u[t+1, x, y], u[t+1, x-1, y] + u[t, x, y], subdomain=interior)'], 'i0y'),\n # Carried dependence in both /t/ and /y/\n (['Eq(u[t+1, x, y], u[t+1, x, y-1] + u[t, x, y])'], 'x'),\n (['Eq(u[t+1, x, y], u[t+1, x, y-1] + u[t, x, y], subdomain=interior)'], 'i0x'),\n # Carried dependence in /y/, leading to separate /y/ loops, one\n # going forward, the other backward\n (['Eq(u[t+1, x, y], u[t+1, x, y-1] + u[t, x, y], subdomain=interior)',\n 'Eq(u[t+1, x, y], u[t+1, x, y+1] + u[t, x, y], subdomain=interior)'], 'i0x'),\n ])\n def test_iteration_property_parallel(self, exprs, expected):\n \"\"\"Tests detection of sequental and parallel Iterations when applying\n equations over different subdomains.\"\"\"\n grid = Grid(shape=(20, 20))\n x, y = grid.dimensions # noqa\n t = grid.time_dim # noqa\n\n interior = grid.interior # noqa\n\n u = TimeFunction(name='u', grid=grid, save=10, time_order=1) # noqa\n\n # List 
comprehension would need explicit locals/globals mappings to eval\n for i, e in enumerate(list(exprs)):\n exprs[i] = eval(e)\n\n op = Operator(exprs, opt='noop')\n iterations = FindNodes(Iteration).visit(op)\n assert all(i.is_Sequential for i in iterations if i.dim.name != expected)\n assert all(i.is_Parallel for i in iterations if i.dim.name == expected)\n\n @skipif(['device'])\n @pytest.mark.parametrize('exprs,expected,', [\n # All parallel, the innermost Iteration gets vectorized\n (['Eq(u[time, x, yleft], u[time, x, yleft] + 1.)'], ['yleft']),\n # All outers are parallel, carried dependence in `yleft`, so the middle\n # Iteration over `x` gets vectorized\n (['Eq(u[time, x, yleft], u[time, x, yleft+1] + 1.)'], ['x']),\n # Only the middle Iteration is parallel, so no vectorization (the Iteration\n # is left non-vectorised for OpenMP parallelism)\n (['Eq(u[time+1, x, yleft], u[time, x, yleft+1] + u[time+1, x, yleft+1])'], [])\n ])\n def test_iteration_property_vector(self, exprs, expected):\n \"\"\"Tests detection of vector Iterations when using subdimensions.\"\"\"\n grid = Grid(shape=(20, 20))\n x, y = grid.dimensions # noqa\n time = grid.time_dim # noqa\n\n # The leftmost 10 elements\n yleft = SubDimension.left(name='yleft', parent=y, thickness=10) # noqa\n\n u = TimeFunction(name='u', grid=grid, save=10, time_order=0, space_order=1) # noqa\n\n # List comprehension would need explicit locals/globals mappings to eval\n for i, e in enumerate(list(exprs)):\n exprs[i] = eval(e)\n\n op = Operator(exprs, opt='simd')\n iterations = FindNodes(Iteration).visit(op)\n vectorized = [i.dim.name for i in iterations if i.is_Vectorized]\n assert set(vectorized) == set(expected)\n\n def test_subdimmiddle_parallel(self):\n \"\"\"\n Tests application of an Operator consisting of a subdimension\n defined over different sub-regions, explicitly created through the\n use of SubDimensions.\n \"\"\"\n grid = Grid(shape=(20, 20))\n x, y = grid.dimensions\n t = grid.stepping_dim\n 
thickness = 4\n\n u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)\n\n xi = SubDimension.middle(name='xi', parent=x,\n thickness_left=thickness, thickness_right=thickness)\n\n yi = SubDimension.middle(name='yi', parent=y,\n thickness_left=thickness, thickness_right=thickness)\n\n # a 5 point stencil that can be computed in parallel\n centre = Eq(u[t+1, xi, yi], u[t, xi, yi] + u[t, xi-1, yi]\n + u[t, xi+1, yi] + u[t, xi, yi-1] + u[t, xi, yi+1])\n\n u.data[0, 10, 10] = 1.0\n\n op = Operator([centre])\n\n iterations = FindNodes(Iteration).visit(op)\n assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim in [xi, yi])\n\n op.apply(time_m=0, time_M=0)\n\n assert np.all(u.data[1, 9:12, 10] == 1.0)\n assert np.all(u.data[1, 10, 9:12] == 1.0)\n\n # Other than those, it should all be 0\n u.data[1, 9:12, 10] = 0.0\n u.data[1, 10, 9:12] = 0.0\n assert np.all(u.data[1, :] == 0)\n\n def test_subdimleft_parallel(self):\n \"\"\"\n Tests application of an Operator consisting of a subdimension\n defined over different sub-regions, explicitly created through the\n use of SubDimensions.\n\n This tests that flow direction is not being automatically inferred\n from whether the subdimension is on the left or right boundary.\n \"\"\"\n grid = Grid(shape=(20, 20))\n x, y = grid.dimensions\n t = grid.stepping_dim\n thickness = 4\n\n u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)\n\n xl = SubDimension.left(name='xl', parent=x, thickness=thickness)\n\n yi = SubDimension.middle(name='yi', parent=y,\n thickness_left=thickness, thickness_right=thickness)\n\n # Can be done in parallel\n eq = Eq(u[t+1, xl, yi], u[t, xl, yi] + 1)\n\n op = Operator([eq])\n\n iterations = FindNodes(Iteration).visit(op)\n assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim in [xl, yi])\n\n op.apply(time_m=0, time_M=0)\n\n assert np.all(u.data[1, 0:thickness, 0:thickness] == 0)\n assert np.all(u.data[1, 0:thickness, 
-thickness:] == 0)\n assert np.all(u.data[1, 0:thickness, thickness:-thickness] == 1)\n assert np.all(u.data[1, thickness+1:, :] == 0)\n\n def test_subdimmiddle_notparallel(self):\n \"\"\"\n Tests application of an Operator consisting of a subdimension\n defined over different sub-regions, explicitly created through the\n use of SubDimensions.\n\n Different from ``test_subdimmiddle_parallel`` because an interior\n dimension cannot be evaluated in parallel.\n \"\"\"\n grid = Grid(shape=(20, 20))\n x, y = grid.dimensions\n t = grid.stepping_dim\n thickness = 4\n\n u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)\n\n xi = SubDimension.middle(name='xi', parent=x,\n thickness_left=thickness, thickness_right=thickness)\n\n yi = SubDimension.middle(name='yi', parent=y,\n thickness_left=thickness, thickness_right=thickness)\n\n # flow dependencies in x and y which should force serial execution\n # in reverse direction\n centre = Eq(u[t+1, xi, yi], u[t, xi, yi] + u[t+1, xi+1, yi+1])\n u.data[0, 10, 10] = 1.0\n\n op = Operator([centre])\n\n iterations = FindNodes(Iteration).visit(op)\n assert all(i.is_Affine and i.is_Sequential for i in iterations if i.dim == xi)\n assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim == yi)\n\n op.apply(time_m=0, time_M=0)\n\n for i in range(4, 11):\n assert u.data[1, i, i] == 1.0\n u.data[1, i, i] = 0.0\n\n assert np.all(u.data[1, :] == 0)\n\n def test_subdimleft_notparallel(self):\n \"\"\"\n Tests application of an Operator consisting of a subdimension\n defined over different sub-regions, explicitly created through the\n use of SubDimensions.\n\n This tests that flow direction is not being automatically inferred\n from whether the subdimension is on the left or right boundary.\n \"\"\"\n grid = Grid(shape=(20, 20))\n x, y = grid.dimensions\n t = grid.stepping_dim\n thickness = 4\n\n u = TimeFunction(name='u', save=None, grid=grid, space_order=1, time_order=0)\n\n xl = 
SubDimension.left(name='xl', parent=x, thickness=thickness)\n\n yi = SubDimension.middle(name='yi', parent=y,\n thickness_left=thickness, thickness_right=thickness)\n\n # Flows inward (i.e. forward) rather than outward\n eq = Eq(u[t+1, xl, yi], u[t+1, xl-1, yi] + 1)\n\n op = Operator([eq])\n\n iterations = FindNodes(Iteration).visit(op)\n assert all(i.is_Affine and i.is_Sequential for i in iterations if i.dim == xl)\n assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim == yi)\n\n op.apply(time_m=1, time_M=1)\n\n assert all(np.all(u.data[0, :thickness, thickness+i] == [1, 2, 3, 4])\n for i in range(12))\n assert np.all(u.data[0, thickness:] == 0)\n assert np.all(u.data[0, :, thickness+12:] == 0)\n\n def test_subdim_fd(self):\n \"\"\"\n Test that the FD shortcuts are handled correctly with SubDimensions\n \"\"\"\n grid = Grid(shape=(20, 20))\n x, y = grid.dimensions\n\n u = TimeFunction(name='u', save=None, grid=grid, space_order=1, time_order=1)\n u.data[:] = 2.\n\n # Flows inward (i.e. 
forward) rather than outward\n eq = [Eq(u.forward, u.dx + u.dy, subdomain=grid.interior)]\n\n op = Operator(eq)\n\n op.apply(time_M=0)\n\n assert np.all(u.data[1, -1, :] == 2.)\n assert np.all(u.data[1, :, 0] == 2.)\n assert np.all(u.data[1, :, -1] == 2.)\n assert np.all(u.data[1, 0, :] == 2.)\n assert np.all(u.data[1, 1:18, 1:18] == 0.)\n\n def test_arrays_defined_over_subdims(self):\n \"\"\"\n Check code generation when an Array uses a SubDimension.\n \"\"\"\n grid = Grid(shape=(3,))\n x, = grid.dimensions\n xi, = grid.interior.dimensions\n\n f = Function(name='f', grid=grid)\n a = Array(name='a', dimensions=(xi,), dtype=grid.dtype)\n op = Operator([Eq(a[xi], 1), Eq(f, f + a[xi + 1], subdomain=grid.interior)],\n openmp=False)\n assert len(op.parameters) == 6\n # neither `x_size` nor `xi_size` are expected here\n assert not any(i.name in ('x_size', 'xi_size') for i in op.parameters)\n # Try running it -- regardless of what it will produce, this should run\n # ie, this checks this error isn't raised:\n # \"ValueError: No value found for parameter xi_size\"\n op()\n\n\nclass TestConditionalDimension(object):\n\n \"\"\"\n A collection of tests to check the correct functioning of ConditionalDimensions.\n \"\"\"\n\n def test_basic(self):\n nt = 19\n grid = Grid(shape=(11, 11))\n time = grid.time_dim\n\n u = TimeFunction(name='u', grid=grid)\n assert(grid.stepping_dim in u.indices)\n\n u2 = TimeFunction(name='u2', grid=grid, save=nt)\n assert(time in u2.indices)\n\n factor = 4\n time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)\n usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,\n time_dim=time_subsampled)\n assert(time_subsampled in usave.indices)\n\n eqns = [Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.), Eq(usave, u)]\n op = Operator(eqns)\n op.apply(t_M=nt-2)\n assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))\n assert np.all([np.allclose(u2.data[i], i) for i in range(nt)])\n assert 
np.all([np.allclose(usave.data[i], i*factor)\n for i in range((nt+factor-1)//factor)])\n\n def test_basic_shuffles(self):\n \"\"\"\n Like ``test_basic``, but with different equation orderings. Nevertheless,\n we assert against the same exact values as in ``test_basic``, since we\n save `u`, not `u.forward`.\n \"\"\"\n nt = 19\n grid = Grid(shape=(11, 11))\n time = grid.time_dim\n\n u = TimeFunction(name='u', grid=grid)\n\n u2 = TimeFunction(name='u2', grid=grid, save=nt)\n\n factor = 4\n time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)\n usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,\n time_dim=time_subsampled)\n\n # Shuffle 1\n eqns = [Eq(usave, u), Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.)]\n op = Operator(eqns)\n op.apply(t_M=nt-2)\n assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))\n assert np.all([np.allclose(u2.data[i], i) for i in range(nt)])\n assert np.all([np.allclose(usave.data[i], i*factor)\n for i in range((nt+factor-1)//factor)])\n\n # Shuffle 2\n usave.data[:] = 0.\n u.data[:] = 0.\n u2.data[:] = 0.\n eqns = [Eq(u.forward, u + 1.), Eq(usave, u), Eq(u2.forward, u2 + 1.)]\n op = Operator(eqns)\n op.apply(t_M=nt-2)\n assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))\n assert np.all([np.allclose(u2.data[i], i) for i in range(nt)])\n assert np.all([np.allclose(usave.data[i], i*factor)\n for i in range((nt+factor-1)//factor)])\n\n def test_spacial_subsampling(self):\n \"\"\"\n Test conditional dimension for the spatial ones.\n This test saves u every two grid points :\n u2[x, y] = u[2*x, 2*y]\n \"\"\"\n nt = 19\n grid = Grid(shape=(12, 12))\n time = grid.time_dim\n\n u = TimeFunction(name='u', grid=grid, save=nt)\n assert(grid.time_dim in u.indices)\n\n # Creates subsampled spatial dimensions and accordine grid\n dims = tuple([ConditionalDimension(d.name+'sub', parent=d, factor=2)\n for d in u.grid.dimensions])\n grid2 = Grid((6, 6), dimensions=dims, time_dimension=time)\n u2 = 
TimeFunction(name='u2', grid=grid2, save=nt)\n assert(time in u2.indices)\n\n eqns = [Eq(u.forward, u + 1.), Eq(u2, u)]\n op = Operator(eqns)\n op.apply(time_M=nt-2)\n # Verify that u2[x,y]= u[2*x, 2*y]\n assert np.allclose(u.data[:-1, 0:-1:2, 0:-1:2], u2.data[:-1, :, :])\n\n def test_time_subsampling_fd(self):\n nt = 19\n grid = Grid(shape=(11, 11))\n x, y = grid.dimensions\n time = grid.time_dim\n\n factor = 4\n time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)\n usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,\n time_dim=time_subsampled, time_order=2)\n\n dx2 = [indexify(i) for i in retrieve_functions(usave.dt2.evaluate)]\n assert dx2 == [usave[time_subsampled - 1, x, y],\n usave[time_subsampled + 1, x, y],\n usave[time_subsampled, x, y]]\n\n def test_subsampled_fd(self):\n \"\"\"\n Test that the FD shortcuts are handled correctly with ConditionalDimensions\n \"\"\"\n grid = Grid(shape=(21, 21))\n time = grid.time_dim\n # Creates subsampled spatial dimensions and accordine grid\n dims = tuple([ConditionalDimension(d.name+'sub', parent=d, factor=2)\n for d in grid.dimensions])\n grid2 = Grid((6, 6), dimensions=dims, time_dimension=time)\n u2 = TimeFunction(name='u2', grid=grid2, space_order=2, time_order=1)\n u2.data.fill(2.)\n eqns = [Eq(u2.forward, u2.dx + u2.dy)]\n op = Operator(eqns)\n op.apply(time_M=0, x_M=11, y_M=11)\n # Verify that u2 contains subsampled fd values\n assert np.all(u2.data[0, :, :] == 2.)\n assert np.all(u2.data[1, 0, 0] == 0.)\n assert np.all(u2.data[1, -1, -1] == -40.)\n assert np.all(u2.data[1, 0, -1] == -20.)\n assert np.all(u2.data[1, -1, 0] == -20.)\n assert np.all(u2.data[1, 1:-1, 0] == 0.)\n assert np.all(u2.data[1, 0, 1:-1] == 0.)\n assert np.all(u2.data[1, 1:-1, -1] == -20.)\n assert np.all(u2.data[1, -1, 1:-1] == -20.)\n assert np.all(u2.data[1, 1:4, 1:4] == 0.)\n\n # This test generates an openmp loop form which makes older gccs upset\n @switchconfig(openmp=False)\n def 
test_nothing_in_negative(self):\n \"\"\"Test the case where when the condition is false, there is nothing to do.\"\"\"\n nt = 4\n grid = Grid(shape=(11, 11))\n time = grid.time_dim\n\n u = TimeFunction(name='u', save=nt, grid=grid)\n assert(grid.time_dim in u.indices)\n\n factor = 4\n time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)\n usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,\n time_dim=time_subsampled)\n assert(time_subsampled in usave.indices)\n\n eqns = [Eq(usave, u)]\n op = Operator(eqns)\n\n u.data[:] = 1.0\n usave.data[:] = 0.0\n op.apply(time_m=1, time_M=1)\n assert np.allclose(usave.data, 0.0)\n\n op.apply(time_m=0, time_M=0)\n assert np.allclose(usave.data, 1.0)\n\n def test_laplace(self):\n grid = Grid(shape=(20, 20, 20))\n x, y, z = grid.dimensions\n time = grid.time_dim\n t = grid.stepping_dim\n tsave = ConditionalDimension(name='tsave', parent=time, factor=2)\n\n u = TimeFunction(name='u', grid=grid, save=None, time_order=2)\n usave = TimeFunction(name='usave', grid=grid, time_dim=tsave,\n time_order=0, space_order=0)\n\n steps = []\n # save of snapshot\n steps.append(Eq(usave, u))\n # standard laplace-like thing\n steps.append(Eq(u[t+1, x, y, z],\n u[t, x, y, z] - u[t-1, x, y, z]\n + u[t, x-1, y, z] + u[t, x+1, y, z]\n + u[t, x, y-1, z] + u[t, x, y+1, z]\n + u[t, x, y, z-1] + u[t, x, y, z+1]))\n\n op = Operator(steps)\n\n u.data[:] = 0.0\n u.data[0, 10, 10, 10] = 1.0\n op.apply(time_m=0, time_M=0)\n assert np.sum(u.data[0, :, :, :]) == 1.0\n assert np.sum(u.data[1, :, :, :]) == 7.0\n assert np.all(usave.data[0, :, :, :] == u.data[0, :, :, :])\n\n def test_as_expr(self):\n nt = 19\n grid = Grid(shape=(11, 11))\n time = grid.time_dim\n\n u = TimeFunction(name='u', grid=grid)\n assert(grid.stepping_dim in u.indices)\n\n u2 = TimeFunction(name='u2', grid=grid, save=nt)\n assert(time in u2.indices)\n\n factor = 4\n time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)\n usave 
= TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,\n time_dim=time_subsampled)\n assert(time_subsampled in usave.indices)\n\n eqns = [Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.),\n Eq(usave, time_subsampled * u)]\n op = Operator(eqns)\n op.apply(t=nt-2)\n assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))\n assert np.all([np.allclose(u2.data[i], i) for i in range(nt)])\n assert np.all([np.allclose(usave.data[i], i*factor*i)\n for i in range((nt+factor-1)//factor)])\n\n def test_shifted(self):\n nt = 19\n grid = Grid(shape=(11, 11))\n time = grid.time_dim\n\n u = TimeFunction(name='u', grid=grid)\n assert(grid.stepping_dim in u.indices)\n\n u2 = TimeFunction(name='u2', grid=grid, save=nt)\n assert(time in u2.indices)\n\n factor = 4\n time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)\n usave = TimeFunction(name='usave', grid=grid, save=2, time_dim=time_subsampled)\n assert(time_subsampled in usave.indices)\n\n t_sub_shift = Constant(name='t_sub_shift', dtype=np.int32)\n\n eqns = [Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.),\n Eq(usave.subs(time_subsampled, time_subsampled - t_sub_shift), u)]\n op = Operator(eqns)\n\n # Starting at time_m=10, so time_subsampled - t_sub_shift is in range\n op.apply(time_m=10, time_M=nt-2, t_sub_shift=3)\n assert np.all(np.allclose(u.data[0], 8))\n assert np.all([np.allclose(u2.data[i], i - 10) for i in range(10, nt)])\n assert np.all([np.allclose(usave.data[i], 2+i*factor) for i in range(2)])\n\n def test_no_index(self):\n \"\"\"Test behaviour when the ConditionalDimension is used as a symbol in\n an expression.\"\"\"\n nt = 19\n grid = Grid(shape=(11, 11))\n time = grid.time_dim\n\n u = TimeFunction(name='u', grid=grid)\n assert(grid.stepping_dim in u.indices)\n\n v = Function(name='v', grid=grid)\n\n factor = 4\n time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)\n\n eqns = [Eq(u.forward, u + 1), Eq(v, v + u*u*time_subsampled)]\n op = Operator(eqns)\n 
op.apply(t_M=nt-2)\n assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))\n # expected result is 1024\n # v = u[0]**2 * 0 + u[4]**2 * 1 + u[8]**2 * 2 + u[12]**2 * 3 + u[16]**2 * 4\n # with u[t] = t\n # v = 16 * 1 + 64 * 2 + 144 * 3 + 256 * 4 = 1600\n assert np.all(np.allclose(v.data, 1600))\n\n def test_no_index_sparse(self):\n \"\"\"Test behaviour when the ConditionalDimension is used as a symbol in\n an expression over sparse data objects.\"\"\"\n grid = Grid(shape=(4, 4), extent=(3.0, 3.0))\n time = grid.time_dim\n\n f = TimeFunction(name='f', grid=grid, save=1)\n f.data[:] = 0.\n\n coordinates = [(0.5, 0.5), (0.5, 2.5), (2.5, 0.5), (2.5, 2.5)]\n sf = SparseFunction(name='sf', grid=grid, npoint=4, coordinates=coordinates)\n sf.data[:] = 1.\n sd = sf.dimensions[sf._sparse_position]\n\n # We want to write to `f` through `sf` so that we obtain the\n # following 4x4 grid (the '*' show the position of the sparse points)\n # We do that by emulating an injection\n #\n # 0 --- 0 --- 0 --- 0\n # | * | | * |\n # 0 --- 1 --- 1 --- 0\n # | | | |\n # 0 --- 1 --- 1 --- 0\n # | * | | * |\n # 0 --- 0 --- 0 --- 0\n\n radius = 1\n indices = [(i, i+radius) for i in sf._coordinate_indices]\n bounds = [i.symbolic_size - radius for i in grid.dimensions]\n\n eqs = []\n for e, i in enumerate(product(*indices)):\n args = [j > 0 for j in i]\n args.extend([j < k for j, k in zip(i, bounds)])\n condition = And(*args, evaluate=False)\n cd = ConditionalDimension('sfc%d' % e, parent=sd, condition=condition)\n index = [time] + list(i)\n eqs.append(Eq(f[index], f[index] + sf[cd]))\n\n op = Operator(eqs)\n op.apply(time=0)\n\n assert np.all(f.data[0, 1:-1, 1:-1] == 1.)\n assert np.all(f.data[0, 0] == 0.)\n assert np.all(f.data[0, -1] == 0.)\n assert np.all(f.data[0, :, 0] == 0.)\n assert np.all(f.data[0, :, -1] == 0.)\n\n def test_symbolic_factor(self):\n \"\"\"\n Test ConditionalDimension with symbolic factor (provided as a Constant).\n \"\"\"\n g = Grid(shape=(4, 4, 4))\n\n u = 
TimeFunction(name='u', grid=g, time_order=0)\n\n fact = Constant(name='fact', dtype=np.int32, value=4)\n tsub = ConditionalDimension(name='tsub', parent=g.time_dim, factor=fact)\n usave = TimeFunction(name='usave', grid=g, time_dim=tsub, save=4)\n\n op = Operator([Eq(u, u + 1), Eq(usave, u)])\n\n op.apply(time=7) # Use `fact`'s default value, 4\n assert np.all(usave.data[0] == 1)\n assert np.all(usave.data[1] == 5)\n\n u.data[:] = 0.\n op.apply(time=7, fact=2)\n assert np.all(usave.data[0] == 1)\n assert np.all(usave.data[1] == 3)\n assert np.all(usave.data[2] == 5)\n assert np.all(usave.data[3] == 7)\n\n def test_implicit_dims(self):\n \"\"\"\n Test ConditionalDimension as an implicit dimension for an equation.\n \"\"\"\n\n # This test makes an Operator that should create a vector of increasing\n # integers, but stop incrementing when a certain stop value is reached\n\n shape = (50,)\n stop_value = 20\n\n time = Dimension(name='time')\n f = TimeFunction(name='f', shape=shape, dimensions=[time])\n\n # The condition to stop incrementing\n cond = ConditionalDimension(name='cond',\n parent=time, condition=f[time] < stop_value)\n\n eqs = [Eq(f.forward, f), Eq(f.forward, f.forward + 1, implicit_dims=[cond])]\n op = Operator(eqs)\n op.apply(time_M=shape[0] - 2)\n\n # Make the same calculation in python to assert the result\n F = np.zeros(shape[0])\n for i in range(shape[0]):\n F[i] = i if i < stop_value else stop_value\n\n assert np.all(f.data == F)\n\n def test_stepping_dim_in_condition_lowering(self):\n \"\"\"\n Check that the compiler performs lowering on conditions\n with TimeDimensions and generates the expected code::\n\n if (g[t][x + 1][y + 1] <= 10){ if (g[t0][x + 1][y + 1] <= 10){\n ... 
--> ...\n } }\n\n This test increments a function by one at every timestep until it is\n less-or-equal to 10 (g<=10) while although operator runs for 13 timesteps.\n \"\"\"\n grid = Grid(shape=(4, 4))\n _, y = grid.dimensions\n\n ths = 10\n g = TimeFunction(name='g', grid=grid)\n\n ci = ConditionalDimension(name='ci', parent=y, condition=Le(g, ths))\n\n op = Operator(Eq(g.forward, g + 1, implicit_dims=ci))\n\n op.apply(time_M=ths+3)\n assert np.all(g.data[0, :, :] == ths)\n assert np.all(g.data[1, :, :] == ths + 1)\n assert 'if (g[t0][x + 1][y + 1] <= 10)\\n'\n '{\\n g[t1][x + 1][y + 1] = g[t0][x + 1][y + 1] + 1' in str(op.ccode)\n\n def test_expr_like_lowering(self):\n \"\"\"\n Test the lowering of an expr-like ConditionalDimension's condition.\n This test makes an Operator that should indexify and lower the condition\n passed in the Conditional Dimension\n \"\"\"\n\n grid = Grid(shape=(3, 3))\n g1 = Function(name='g1', grid=grid)\n g2 = Function(name='g2', grid=grid)\n\n g1.data[:] = 0.49\n g2.data[:] = 0.49\n x, y = grid.dimensions\n ci = ConditionalDimension(name='ci', parent=y, condition=Le((g1 + g2),\n 1.01*(g1 + g2)))\n\n f = Function(name='f', shape=grid.shape, dimensions=(x, ci))\n Operator(Eq(f, g1+g2)).apply()\n\n assert np.all(f.data[:] == g1.data[:] + g2.data[:])\n\n @pytest.mark.parametrize('setup_rel, rhs, c1, c2, c3, c4', [\n # Relation, RHS, c1 to c4 used as indexes in assert\n (Lt, 3, 2, 4, 4, -1), (Le, 2, 2, 4, 4, -1), (Ge, 3, 4, 6, 1, 4),\n (Gt, 2, 4, 6, 1, 4), (Ne, 5, 2, 6, 1, 2)\n ])\n def test_relational_classes(self, setup_rel, rhs, c1, c2, c3, c4):\n \"\"\"\n Test ConditionalDimension using conditions based on Relations over SubDomains.\n \"\"\"\n\n class InnerDomain(SubDomain):\n name = 'inner'\n\n def define(self, dimensions):\n return {d: ('middle', 2, 2) for d in dimensions}\n\n inner_domain = InnerDomain()\n grid = Grid(shape=(8, 8), subdomains=(inner_domain,))\n g = Function(name='g', grid=grid)\n g2 = Function(name='g2', 
grid=grid)\n\n for i in [g, g2]:\n i.data[:4, :4] = 1\n i.data[4:, :4] = 2\n i.data[4:, 4:] = 3\n i.data[:4, 4:] = 4\n\n xi, yi = grid.subdomains['inner'].dimensions\n\n cond = setup_rel(0.25*g + 0.75*g2, rhs, subdomain=grid.subdomains['inner'])\n ci = ConditionalDimension(name='ci', parent=yi, condition=cond)\n f = Function(name='f', shape=grid.shape, dimensions=(xi, ci))\n\n eq1 = Eq(f, 0.4*g + 0.6*g2)\n eq2 = Eq(f, 5)\n\n Operator([eq1, eq2]).apply()\n assert np.all(f.data[2:6, c1:c2] == 5.)\n assert np.all(f.data[:, c3:c4] < 5.)\n\n def test_from_cond_to_param(self):\n \"\"\"\n Test that Functions appearing in the condition of a ConditionalDimension\n but not explicitly in an Eq are actually part of the Operator input\n (stems from issue #1298).\n \"\"\"\n grid = Grid(shape=(8, 8))\n x, y = grid.dimensions\n\n g = Function(name='g', grid=grid)\n h = Function(name='h', grid=grid)\n ci = ConditionalDimension(name='ci', parent=y, condition=Lt(g, 2 + h))\n f = Function(name='f', shape=grid.shape, dimensions=(x, ci))\n\n for _ in range(5):\n # issue #1298 was non deterministic\n Operator(Eq(f, 5)).apply()\n\n @skipif('device')\n def test_no_fusion_simple(self):\n \"\"\"\n If ConditionalDimensions are present, then Clusters must not be fused so\n that ultimately Eqs get scheduled to different loop nests.\n \"\"\"\n grid = Grid(shape=(4, 4, 4))\n time = grid.time_dim\n\n f = TimeFunction(name='f', grid=grid)\n g = Function(name='g', grid=grid)\n h = Function(name='h', grid=grid)\n\n # No ConditionalDimensions yet. Will be fused and optimized\n eqns = [Eq(f.forward, f + 1),\n Eq(h, f + 1),\n Eq(g, f + 1)]\n\n op = Operator(eqns)\n\n exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)\n assert len(exprs) == 4\n assert exprs[1].expr.rhs is exprs[0].output\n assert exprs[2].expr.rhs is exprs[0].output\n assert exprs[3].expr.rhs is exprs[0].output\n\n # Now with a ConditionalDimension. 
No fusion, no optimization\n ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)\n\n eqns = [Eq(f.forward, f + 1),\n Eq(h, f + 1),\n Eq(g, f + 1, implicit_dims=[ctime])]\n\n op = Operator(eqns)\n exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)\n assert len(exprs) == 3\n assert exprs[1].expr.rhs is exprs[0].output\n assert exprs[2].expr.rhs is exprs[0].output\n exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)\n assert len(exprs) == 1\n\n @skipif('device')\n def test_no_fusion_convoluted(self):\n \"\"\"\n Conceptually like `test_no_fusion_simple`, but with more expressions\n and non-trivial data flow.\n \"\"\"\n grid = Grid(shape=(4, 4, 4))\n time = grid.time_dim\n\n f = TimeFunction(name='f', grid=grid)\n g = Function(name='g', grid=grid)\n h = Function(name='h', grid=grid)\n\n ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)\n\n eqns = [Eq(f.forward, f + 1),\n Eq(h, f + 1),\n Eq(g, f + 1, implicit_dims=[ctime]),\n Eq(f.forward, f + 1, implicit_dims=[ctime]),\n Eq(f.forward, f + 1),\n Eq(g, f + 1)]\n\n op = Operator(eqns)\n\n exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)\n assert len(exprs) == 3\n assert exprs[1].expr.rhs is exprs[0].output\n assert exprs[2].expr.rhs is exprs[0].output\n\n exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)\n assert len(exprs) == 3\n\n exprs = FindNodes(Expression).visit(op._func_table['bf2'].root)\n assert len(exprs) == 3\n assert exprs[1].expr.rhs is exprs[0].output\n assert exprs[2].expr.rhs is exprs[0].output\n\n\nclass TestMashup(object):\n\n \"\"\"\n Check the correct functioning of the compiler in presence of many Dimension types.\n \"\"\"\n\n def test_topofusion_w_subdims_conddims(self):\n \"\"\"\n Check that topological fusion works across guarded Clusters over different\n iteration spaces and in presence of anti-dependences.\n\n This test uses both SubDimensions (via SubDomains) and ConditionalDimensions.\n 
\"\"\"\n grid = Grid(shape=(4, 4, 4))\n time = grid.time_dim\n\n f = TimeFunction(name='f', grid=grid, time_order=2)\n g = TimeFunction(name='g', grid=grid, time_order=2)\n h = TimeFunction(name='h', grid=grid, time_order=2)\n fsave = TimeFunction(name='fsave', grid=grid, time_order=2, save=5)\n gsave = TimeFunction(name='gsave', grid=grid, time_order=2, save=5)\n\n ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)\n\n eqns = [Eq(f.forward, f + 1),\n Eq(g.forward, g + 1),\n Eq(fsave, f.dt2, implicit_dims=[ctime]),\n Eq(h, f + g, subdomain=grid.interior),\n Eq(gsave, g.dt2, implicit_dims=[ctime])]\n\n op = Operator(eqns)\n\n # Check generated code -- expect the gsave equation to be scheduled together\n # in the same loop nest with the fsave equation\n assert len(op._func_table) == 3\n\n exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)\n assert len(exprs) == 2\n assert exprs[0].write is f\n assert exprs[1].write is g\n\n exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)\n assert len(exprs) == 3\n assert exprs[1].write is fsave\n assert exprs[2].write is gsave\n\n exprs = FindNodes(Expression).visit(op._func_table['bf2'].root)\n assert len(exprs) == 1\n assert exprs[0].write is h\n\n def test_topofusion_w_subdims_conddims_v2(self):\n \"\"\"\n Like `test_topofusion_w_subdims_conddims` but with more SubDomains,\n so we expect fewer loop nests.\n \"\"\"\n grid = Grid(shape=(4, 4, 4))\n time = grid.time_dim\n\n f = TimeFunction(name='f', grid=grid, time_order=2)\n g = TimeFunction(name='g', grid=grid, time_order=2)\n h = TimeFunction(name='h', grid=grid, time_order=2)\n fsave = TimeFunction(name='fsave', grid=grid, time_order=2, save=5)\n gsave = TimeFunction(name='gsave', grid=grid, time_order=2, save=5)\n\n ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)\n\n eqns = [Eq(f.forward, f + 1, subdomain=grid.interior),\n Eq(g.forward, g + 1, subdomain=grid.interior),\n Eq(fsave, f.dt2, 
implicit_dims=[ctime]),\n Eq(h, f + g, subdomain=grid.interior),\n Eq(gsave, g.dt2, implicit_dims=[ctime])]\n\n op = Operator(eqns)\n\n # Check generated code -- expect the gsave equation to be scheduled together\n # in the same loop nest with the fsave equation\n assert len(op._func_table) == 2\n assert len(FindNodes(Expression).visit(op._func_table['bf0'].root)) == 3\n assert len(FindNodes(Expression).visit(op._func_table['bf1'].root)) == 2 + 1 # r0\n\n def test_topofusion_w_subdims_conddims_v3(self):\n \"\"\"\n Like `test_topofusion_w_subdims_conddims_v2` but with an extra anti-dependence,\n which causes scheduling over more loop nests.\n \"\"\"\n grid = Grid(shape=(4, 4, 4))\n time = grid.time_dim\n\n f = TimeFunction(name='f', grid=grid, time_order=2)\n g = TimeFunction(name='g', grid=grid, time_order=2)\n h = TimeFunction(name='h', grid=grid, time_order=2)\n fsave = TimeFunction(name='fsave', grid=grid, time_order=2, save=5)\n gsave = TimeFunction(name='gsave', grid=grid, time_order=2, save=5)\n\n ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)\n\n eqns = [Eq(f.forward, f + 1, subdomain=grid.interior),\n Eq(g.forward, g + 1, subdomain=grid.interior),\n Eq(fsave, f.dt2, implicit_dims=[ctime]),\n Eq(h, f.dt2.dx + g, subdomain=grid.interior),\n Eq(gsave, g.dt2, implicit_dims=[ctime])]\n\n op = Operator(eqns)\n\n # Check generated code -- expect the gsave equation to be scheduled together\n # in the same loop nest with the fsave equation\n assert len(op._func_table) == 3\n\n exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)\n assert len(exprs) == 2\n assert exprs[0].write is f\n assert exprs[1].write is g\n\n exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)\n assert len(exprs) == 3\n assert exprs[1].write is fsave\n assert exprs[2].write is gsave\n\n exprs = FindNodes(Expression).visit(op._func_table['bf2'].root)\n assert len(exprs) == 2\n assert exprs[1].write is h\n"
] | [
[
"numpy.all",
"numpy.sum",
"numpy.zeros",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Bartdoekemeijer/FLORIS | [
"be3c9fbb559354b5bf848792d84caff90606ed05",
"be3c9fbb559354b5bf848792d84caff90606ed05"
] | [
"tests/reg_tests/cumulative_curl_regression_test.py",
"floris/tools/optimization/yaw_optimization/yaw_optimization_base.py"
] | [
"# Copyright 2021 NREL\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\n# See https://floris.readthedocs.io for documentation\n\nimport numpy as np\n\nfrom floris.simulation import Floris\nfrom floris.simulation import Ct, power, axial_induction, average_velocity\nfrom tests.conftest import N_TURBINES, N_WIND_DIRECTIONS, N_WIND_SPEEDS, print_test_values, assert_results_arrays\n\nDEBUG = True\nVELOCITY_MODEL = \"cc\"\nDEFLECTION_MODEL = \"gauss\"\n\nbaseline = np.array(\n [\n # 8 m/s\n [\n [7.9803783, 0.7634300, 1695368.7987130, 0.2568077],\n [5.3585771, 0.8698045, 492577.1572448, 0.3195870],\n [4.9669368, 0.8944963, 379718.4177412, 0.3375933],\n ],\n # 9 m/s\n [\n [8.9779256, 0.7625731, 2413658.0981405, 0.2563676],\n [6.0299374, 0.8340431, 719536.2896199, 0.2963110],\n [5.5776779, 0.8570341, 560907.3153174, 0.3109458],\n ],\n # 10 m/s\n [\n [9.9754729, 0.7527803, 3306006.2306084, 0.2513940],\n [6.7205268, 0.8035032, 1012640.0064192, 0.2783602],\n [6.2110904, 0.8256933, 792936.0055473, 0.2912497],\n ],\n # 11 m/s\n [\n [10.9730201, 0.7304328, 4373596.1594956, 0.2404007],\n [7.4516751, 0.7774337, 1381908.5141696, 0.2641154],\n [6.8606000, 0.7978670, 1077836.5443900, 0.2752040],\n ]\n ]\n)\n\nyawed_baseline = np.array(\n [\n # 8 m/s\n [\n [7.9803783, 0.7605249, 1683956.5765064, 0.2548147],\n [5.4029703, 0.8670436, 505567.9316776, 0.3176841],\n [4.9742156, 0.8939700, 381463.8369041, 0.3371887],\n ],\n # 9 m/s\n [\n [8.9779256, 0.7596713, 2397236.5542849, 0.2543815],\n 
[6.0798421, 0.8317429, 739756.7356164, 0.2949042],\n [5.5879702, 0.8565074, 564477.6027506, 0.3105979],\n ],\n # 10 m/s\n [\n [9.9754729, 0.7499157, 3283591.8023665, 0.2494847],\n [6.7754450, 0.8012935, 1038201.4571916, 0.2771174],\n [6.2236744, 0.8251133, 798034.8027193, 0.2909027],\n ],\n # 11 m/s\n [\n [10.9730201, 0.7276532, 4344222.0129382, 0.2386508],\n [7.5103951, 0.7755790, 1413728.7289467, 0.2631345],\n [6.8746872, 0.7973002, 1084393.3749950, 0.2748890],\n ]\n ]\n)\n\nyaw_added_recovery_baseline = np.array(\n [\n # 8 m/s\n [\n [7.9803783, 0.7605249, 1683956.5765064, 0.2548147],\n [5.4219904, 0.8658607, 511133.7736997, 0.3168748],\n [4.9902533, 0.8928102, 385309.6126320, 0.3363008],\n ],\n # 9 m/s\n [\n [8.9779256, 0.7596713, 2397236.5542849, 0.2543815],\n [6.1011855, 0.8307591, 748404.6404163, 0.2943055],\n [5.6072171, 0.8555225, 571154.1495386, 0.3099490],\n ],\n # 10 m/s\n [\n [9.9754729, 0.7499157, 3283591.8023665, 0.2494847],\n [6.7984638, 0.8003672, 1048915.4794254, 0.2765986],\n [6.2452220, 0.8241201, 806765.4479110, 0.2903098],\n ],\n # 11 m/s\n [\n [10.9730201, 0.7276532, 4344222.0129382, 0.2386508],\n [7.5339320, 0.7749706, 1427833.3888763, 0.2628137],\n [6.8971848, 0.7963949, 1094864.8116422, 0.2743869],\n ],\n ]\n)\n\nsecondary_steering_baseline = np.array(\n [\n # 8 m/s\n [\n [7.9803783, 0.7605249, 1683956.5765064, 0.2548147],\n [5.4029709, 0.8670436, 505568.1176628, 0.3176840],\n [4.9791408, 0.8936138, 382644.8719082, 0.3369155],\n ],\n # 9 m/s\n [\n [8.9779256, 0.7596713, 2397236.5542849, 0.2543815],\n [6.0798429, 0.8317428, 739757.0246720, 0.2949042],\n [5.5938124, 0.8562085, 566504.2126629, 0.3104007],\n ],\n # 10 m/s\n [\n [9.9754729, 0.7499157, 3283591.8023665, 0.2494847],\n [6.7754458, 0.8012934, 1038201.8164555, 0.2771174],\n [6.2302537, 0.8248100, 800700.5867580, 0.2907215],\n ],\n # 11 m/s\n [\n [10.9730201, 0.7276532, 4344222.0129382, 0.2386508],\n [7.5103959, 0.7755790, 1413729.2052485, 0.2631345],\n [6.8817912, 0.7970143, 
1087699.9040360, 0.2747304],\n ],\n ]\n)\n\n\n# Note: compare the yawed vs non-yawed results. The upstream turbine\n# power should be lower in the yawed case. The following turbine\n# powers should higher in the yawed case.\n\n\ndef test_regression_tandem(sample_inputs_fixture):\n \"\"\"\n Tandem turbines\n \"\"\"\n sample_inputs_fixture.floris[\"wake\"][\"model_strings\"][\"velocity_model\"] = VELOCITY_MODEL\n sample_inputs_fixture.floris[\"wake\"][\"model_strings\"][\"deflection_model\"] = DEFLECTION_MODEL\n\n floris = Floris.from_dict(sample_inputs_fixture.floris)\n floris.initialize_domain()\n floris.steady_state_atmospheric_condition()\n\n n_turbines = floris.farm.n_turbines\n n_wind_speeds = floris.flow_field.n_wind_speeds\n n_wind_directions = floris.flow_field.n_wind_directions\n\n velocities = floris.flow_field.u\n yaw_angles = floris.farm.yaw_angles\n test_results = np.zeros((n_wind_directions, n_wind_speeds, n_turbines, 4))\n\n farm_avg_velocities = average_velocity(\n velocities,\n )\n farm_cts = Ct(\n velocities,\n yaw_angles,\n floris.farm.turbine_fCts,\n floris.farm.turbine_type_map,\n )\n farm_powers = power(\n floris.flow_field.air_density,\n velocities,\n yaw_angles,\n floris.farm.pPs,\n floris.farm.turbine_power_interps,\n floris.farm.turbine_type_map,\n )\n farm_axial_inductions = axial_induction(\n velocities,\n yaw_angles,\n floris.farm.turbine_fCts,\n floris.farm.turbine_type_map,\n )\n for i in range(n_wind_directions):\n for j in range(n_wind_speeds):\n for k in range(n_turbines):\n test_results[i, j, k, 0] = farm_avg_velocities[i, j, k]\n test_results[i, j, k, 1] = farm_cts[i, j, k]\n test_results[i, j, k, 2] = farm_powers[i, j, k]\n test_results[i, j, k, 3] = farm_axial_inductions[i, j, k]\n\n if DEBUG:\n print_test_values(\n farm_avg_velocities,\n farm_cts,\n farm_powers,\n farm_axial_inductions,\n )\n\n assert_results_arrays(test_results[0], baseline)\n\n\ndef test_regression_rotation(sample_inputs_fixture):\n \"\"\"\n Turbines in 
tandem and rotated.\n The result from 270 degrees should match the results from 360 degrees.\n\n Wind from the West (Left)\n\n ^\n |\n y\n\n 1|1 3\n |\n |\n |\n 0|0 2\n |----------|\n 0 1 x->\n\n\n Wind from the North (Top), rotated\n\n ^\n |\n y\n\n 1|3 2\n |\n |\n |\n 0|1 0\n |----------|\n 0 1 x->\n\n In 270, turbines 2 and 3 are waked. In 360, turbines 0 and 2 are waked.\n The test compares turbines 2 and 3 with 0 and 2 from 270 and 360.\n \"\"\"\n TURBINE_DIAMETER = 126.0\n\n sample_inputs_fixture.floris[\"wake\"][\"model_strings\"][\"velocity_model\"] = VELOCITY_MODEL\n sample_inputs_fixture.floris[\"wake\"][\"model_strings\"][\"deflection_model\"] = DEFLECTION_MODEL\n sample_inputs_fixture.floris[\"farm\"][\"layout_x\"] = [\n 0.0,\n 0.0,\n 5 * TURBINE_DIAMETER,\n 5 * TURBINE_DIAMETER,\n ]\n sample_inputs_fixture.floris[\"farm\"][\"layout_y\"] = [\n 0.0,\n 5 * TURBINE_DIAMETER,\n 0.0,\n 5 * TURBINE_DIAMETER\n ]\n sample_inputs_fixture.floris[\"flow_field\"][\"wind_directions\"] = [270.0, 360.0]\n sample_inputs_fixture.floris[\"flow_field\"][\"wind_speeds\"] = [8.0]\n\n floris = Floris.from_dict(sample_inputs_fixture.floris)\n floris.initialize_domain()\n floris.steady_state_atmospheric_condition()\n\n farm_avg_velocities = average_velocity(floris.flow_field.u)\n\n t0_270 = farm_avg_velocities[0, 0, 0] # upstream\n t1_270 = farm_avg_velocities[0, 0, 1] # upstream\n t2_270 = farm_avg_velocities[0, 0, 2] # waked\n t3_270 = farm_avg_velocities[0, 0, 3] # waked\n\n t0_360 = farm_avg_velocities[1, 0, 0] # waked\n t1_360 = farm_avg_velocities[1, 0, 1] # upstream\n t2_360 = farm_avg_velocities[1, 0, 2] # waked\n t3_360 = farm_avg_velocities[1, 0, 3] # upstream\n\n assert np.allclose(t0_270, t1_360)\n assert np.allclose(t1_270, t3_360)\n assert np.allclose(t2_270, t0_360)\n assert np.allclose(t3_270, t2_360)\n\n\ndef test_regression_yaw(sample_inputs_fixture):\n \"\"\"\n Tandem turbines with the upstream turbine yawed\n \"\"\"\n 
sample_inputs_fixture.floris[\"wake\"][\"model_strings\"][\"velocity_model\"] = VELOCITY_MODEL\n sample_inputs_fixture.floris[\"wake\"][\"model_strings\"][\"deflection_model\"] = DEFLECTION_MODEL\n\n floris = Floris.from_dict(sample_inputs_fixture.floris)\n\n yaw_angles = np.zeros((N_WIND_DIRECTIONS, N_WIND_SPEEDS, N_TURBINES))\n yaw_angles[:,:,0] = 5.0\n floris.farm.yaw_angles = yaw_angles\n\n floris.initialize_domain()\n floris.steady_state_atmospheric_condition()\n\n n_turbines = floris.farm.n_turbines\n n_wind_speeds = floris.flow_field.n_wind_speeds\n n_wind_directions = floris.flow_field.n_wind_directions\n\n velocities = floris.flow_field.u\n yaw_angles = floris.farm.yaw_angles\n test_results = np.zeros((n_wind_directions, n_wind_speeds, n_turbines, 4))\n\n farm_avg_velocities = average_velocity(\n velocities,\n )\n farm_cts = Ct(\n velocities,\n yaw_angles,\n floris.farm.turbine_fCts,\n floris.farm.turbine_type_map,\n )\n farm_powers = power(\n floris.flow_field.air_density,\n velocities,\n yaw_angles,\n floris.farm.pPs,\n floris.farm.turbine_power_interps,\n floris.farm.turbine_type_map,\n )\n farm_axial_inductions = axial_induction(\n velocities,\n yaw_angles,\n floris.farm.turbine_fCts,\n floris.farm.turbine_type_map,\n )\n for i in range(n_wind_directions):\n for j in range(n_wind_speeds):\n for k in range(n_turbines):\n test_results[i, j, k, 0] = farm_avg_velocities[i, j, k]\n test_results[i, j, k, 1] = farm_cts[i, j, k]\n test_results[i, j, k, 2] = farm_powers[i, j, k]\n test_results[i, j, k, 3] = farm_axial_inductions[i, j, k]\n\n if DEBUG:\n print_test_values(\n farm_avg_velocities,\n farm_cts,\n farm_powers,\n farm_axial_inductions,\n )\n\n assert_results_arrays(test_results[0], yawed_baseline)\n\n\ndef test_regression_yaw_added_recovery(sample_inputs_fixture):\n \"\"\"\n Tandem turbines with the upstream turbine yawed and yaw added recovery\n correction enabled\n \"\"\"\n\n 
sample_inputs_fixture.floris[\"wake\"][\"model_strings\"][\"velocity_model\"] = VELOCITY_MODEL\n sample_inputs_fixture.floris[\"wake\"][\"model_strings\"][\"deflection_model\"] = DEFLECTION_MODEL\n\n sample_inputs_fixture.floris[\"wake\"][\"enable_transverse_velocities\"] = True\n sample_inputs_fixture.floris[\"wake\"][\"enable_secondary_steering\"] = False\n sample_inputs_fixture.floris[\"wake\"][\"enable_yaw_added_recovery\"] = True\n\n floris = Floris.from_dict(sample_inputs_fixture.floris)\n\n yaw_angles = np.zeros((N_WIND_DIRECTIONS, N_WIND_SPEEDS, N_TURBINES))\n yaw_angles[:,:,0] = 5.0\n floris.farm.yaw_angles = yaw_angles\n \n floris.initialize_domain()\n floris.steady_state_atmospheric_condition()\n\n n_turbines = floris.farm.n_turbines\n n_wind_speeds = floris.flow_field.n_wind_speeds\n n_wind_directions = floris.flow_field.n_wind_directions\n\n velocities = floris.flow_field.u\n yaw_angles = floris.farm.yaw_angles\n test_results = np.zeros((n_wind_directions, n_wind_speeds, n_turbines, 4))\n\n farm_avg_velocities = average_velocity(\n velocities,\n )\n farm_cts = Ct(\n velocities,\n yaw_angles,\n floris.farm.turbine_fCts,\n floris.farm.turbine_type_map,\n )\n farm_powers = power(\n floris.flow_field.air_density,\n velocities,\n yaw_angles,\n floris.farm.pPs,\n floris.farm.turbine_power_interps,\n floris.farm.turbine_type_map,\n )\n farm_axial_inductions = axial_induction(\n velocities,\n yaw_angles,\n floris.farm.turbine_fCts,\n floris.farm.turbine_type_map,\n )\n for i in range(n_wind_directions):\n for j in range(n_wind_speeds):\n for k in range(n_turbines):\n test_results[i, j, k, 0] = farm_avg_velocities[i, j, k]\n test_results[i, j, k, 1] = farm_cts[i, j, k]\n test_results[i, j, k, 2] = farm_powers[i, j, k]\n test_results[i, j, k, 3] = farm_axial_inductions[i, j, k]\n\n if DEBUG:\n print_test_values(\n farm_avg_velocities,\n farm_cts,\n farm_powers,\n farm_axial_inductions,\n )\n\n assert_results_arrays(test_results[0], 
yaw_added_recovery_baseline)\n\n\ndef test_regression_secondary_steering(sample_inputs_fixture):\n \"\"\"\n Tandem turbines with the upstream turbine yawed and secondary steering enabled\n \"\"\"\n\n sample_inputs_fixture.floris[\"wake\"][\"model_strings\"][\"velocity_model\"] = VELOCITY_MODEL\n sample_inputs_fixture.floris[\"wake\"][\"model_strings\"][\"deflection_model\"] = DEFLECTION_MODEL\n\n sample_inputs_fixture.floris[\"wake\"][\"enable_transverse_velocities\"] = True\n sample_inputs_fixture.floris[\"wake\"][\"enable_secondary_steering\"] = True\n sample_inputs_fixture.floris[\"wake\"][\"enable_yaw_added_recovery\"] = False\n\n floris = Floris.from_dict(sample_inputs_fixture.floris)\n\n yaw_angles = np.zeros((N_WIND_DIRECTIONS, N_WIND_SPEEDS, N_TURBINES))\n yaw_angles[:,:,0] = 5.0\n floris.farm.yaw_angles = yaw_angles\n \n floris.initialize_domain()\n floris.steady_state_atmospheric_condition()\n\n n_turbines = floris.farm.n_turbines\n n_wind_speeds = floris.flow_field.n_wind_speeds\n n_wind_directions = floris.flow_field.n_wind_directions\n\n velocities = floris.flow_field.u\n yaw_angles = floris.farm.yaw_angles\n test_results = np.zeros((n_wind_directions, n_wind_speeds, n_turbines, 4))\n\n farm_avg_velocities = average_velocity(\n velocities,\n )\n farm_cts = Ct(\n velocities,\n yaw_angles,\n floris.farm.turbine_fCts,\n floris.farm.turbine_type_map,\n )\n farm_powers = power(\n floris.flow_field.air_density,\n velocities,\n yaw_angles,\n floris.farm.pPs,\n floris.farm.turbine_power_interps,\n floris.farm.turbine_type_map,\n )\n farm_axial_inductions = axial_induction(\n velocities,\n yaw_angles,\n floris.farm.turbine_fCts,\n floris.farm.turbine_type_map,\n )\n for i in range(n_wind_directions):\n for j in range(n_wind_speeds):\n for k in range(n_turbines):\n test_results[i, j, k, 0] = farm_avg_velocities[i, j, k]\n test_results[i, j, k, 1] = farm_cts[i, j, k]\n test_results[i, j, k, 2] = farm_powers[i, j, k]\n test_results[i, j, k, 3] = 
farm_axial_inductions[i, j, k]\n\n if DEBUG:\n print_test_values(\n farm_avg_velocities,\n farm_cts,\n farm_powers,\n farm_axial_inductions,\n )\n\n assert_results_arrays(test_results[0], secondary_steering_baseline)\n\n\ndef test_regression_small_grid_rotation(sample_inputs_fixture):\n \"\"\"\n Where wake models are masked based on the x-location of a turbine, numerical precision\n can cause masking to fail unexpectedly. For example, in the configuration here one of\n the turbines has these delta x values;\n\n [[4.54747351e-13 4.54747351e-13 4.54747351e-13 4.54747351e-13 4.54747351e-13]\n [4.54747351e-13 4.54747351e-13 4.54747351e-13 4.54747351e-13 4.54747351e-13]\n [4.54747351e-13 4.54747351e-13 4.54747351e-13 4.54747351e-13 4.54747351e-13]\n [4.54747351e-13 4.54747351e-13 4.54747351e-13 4.54747351e-13 4.54747351e-13]\n [4.54747351e-13 4.54747351e-13 4.54747351e-13 4.54747351e-13 4.54747351e-13]]\n\n and therefore the masking statement is False when it should be True. This causes the current\n turbine to be affected by its own wake. 
This test requires that at least in this particular\n configuration the masking correctly filters grid points.\n \"\"\"\n sample_inputs_fixture.floris[\"wake\"][\"model_strings\"][\"velocity_model\"] = VELOCITY_MODEL\n sample_inputs_fixture.floris[\"wake\"][\"model_strings\"][\"deflection_model\"] = DEFLECTION_MODEL\n X, Y = np.meshgrid(\n 6.0 * 126.0 * np.arange(0, 5, 1),\n 6.0 * 126.0 * np.arange(0, 5, 1)\n )\n X = X.flatten()\n Y = Y.flatten()\n\n sample_inputs_fixture.floris[\"farm\"][\"layout_x\"] = X\n sample_inputs_fixture.floris[\"farm\"][\"layout_y\"] = Y\n\n floris = Floris.from_dict(sample_inputs_fixture.floris)\n floris.initialize_domain()\n floris.steady_state_atmospheric_condition()\n\n # farm_avg_velocities = average_velocity(floris.flow_field.u)\n velocities = floris.flow_field.u\n yaw_angles = floris.farm.yaw_angles\n\n farm_powers = power(\n floris.flow_field.air_density,\n velocities,\n yaw_angles,\n floris.farm.pPs,\n floris.farm.turbine_power_interps,\n floris.farm.turbine_type_map,\n )\n\n # A \"column\" is oriented parallel to the wind direction\n # Columns 1 - 4 should have the same power profile\n # Column 5 leading turbine is completely unwaked\n # and the rest of the turbines have a partial wake from their immediate upstream turbine\n assert np.allclose(farm_powers[2,0,0:5], farm_powers[2,0,5:10])\n assert np.allclose(farm_powers[2,0,0:5], farm_powers[2,0,10:15])\n assert np.allclose(farm_powers[2,0,0:5], farm_powers[2,0,15:20])\n assert np.allclose(farm_powers[2,0,20], farm_powers[2,0,0])\n assert np.allclose(farm_powers[2,0,21], farm_powers[2,0,21:25])\n",
"# Copyright 2022 NREL\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\n# See https://floris.readthedocs.io for documentation\n\n\nimport copy\nfrom time import perf_counter as timerpc\n\nimport numpy as np\nimport pandas as pd\n\nfrom .yaw_optimization_tools import (\n derive_downstream_turbines,\n find_layout_symmetry,\n)\n\n\nclass YawOptimization:\n \"\"\"\n YawOptimization is a subclass of :py:class:`floris.tools.optimization.scipy.\n Optimization` that is used to optimize the yaw angles of all turbines in a Floris\n Farm for a single set of inflow conditions using the SciPy optimize package.\n \"\"\"\n\n def __init__(\n self,\n fi,\n minimum_yaw_angle=0.0,\n maximum_yaw_angle=25.0,\n yaw_angles_baseline=None,\n x0=None,\n turbine_weights=None,\n normalize_control_variables=False,\n calc_baseline_power=True,\n exclude_downstream_turbines=True,\n exploit_layout_symmetry=True,\n verify_convergence=False,\n ):\n \"\"\"\n Instantiate YawOptimization object with a FlorisInterface object\n and assign parameter values.\n\n Args:\n fi (:py:class:`~.tools.floris_interface.FlorisInterface`):\n Interface used to interact with the Floris object.\n minimum_yaw_angle (float or ndarray): Minimum constraint on yaw\n angle (deg). If a single value specified, assumes this value\n for all turbines. If a 1D array is specified, assumes these\n limits for each turbine specifically, but uniformly across\n all atmospheric conditions. 
If a 2D array, limits are specific\n both to the turbine and to the atmospheric condition.\n Defaults to 0.0.\n maximum_yaw_angle (float or ndarray): Maximum constraint on yaw\n angle (deg). If a single value specified, assumes this value\n for all turbines. If a 1D array is specified, assumes these\n limits for each turbine specifically, but uniformly across\n all atmospheric conditions. If a 2D array, limits are specific\n both to the turbine and to the atmospheric condition.\n Defaults to 25.0.\n yaw_angles_baseline (iterable, optional): The baseline yaw\n angles used to calculate the initial and baseline power\n production in the wind farm and used to normalize the cost\n function. If none are specified, this variable is set equal\n to the current yaw angles in floris. Note that this variable\n need not meet the yaw constraints specified in self.bnds,\n yet a warning is raised if it does to inform the user.\n Defaults to None.\n x0 (iterable, optional): The initial guess for the optimization\n problem. These values must meet the constraints specified\n in self.bnds. Note that, if exclude_downstream_turbines=True,\n the initial guess for any downstream turbines are ignored\n since they are not part of the optimization. Instead, the yaw\n angles for those turbines are 0.0 if that meets the lower and\n upper bound, or otherwise as close to 0.0 as feasible. If no\n values for x0 are specified, x0 is set to be equal to zeros\n wherever feasible (w.r.t. the bounds), and equal to the\n average of its lower and upper bound for all non-downstream\n turbines otherwise. Defaults to None.\n turbine_weights (iterable, optional): weighing terms that allow\n the user to emphasize power gains at particular turbines or\n completely ignore power gains from other turbines. The array\n of turbine powers from floris is multiplied with this array\n in the calculation of the objective function. 
If None, this\n is an array with all values 1.0 and length equal to the\n number of turbines. Defaults to None.\n calc_init_power (bool, optional): If True, calculates initial\n wind farm power for each set of wind conditions. Defaults to\n True.\n exclude_downstream_turbines (bool, optional): If True,\n automatically finds and excludes turbines that are most\n downstream from the optimization problem. This significantly\n reduces computation time at no loss in performance. The yaw\n angles of these downstream turbines are fixed to 0.0 deg if\n the yaw bounds specified in self.bnds allow that, or otherwise\n are fixed to the lower or upper yaw bound, whichever is closer\n to 0.0. Defaults to False.\n verify_convergence (bool, optional): specifies whether the found\n optimal yaw angles will be checked for accurately convergence.\n With large farms, especially when using SciPy or other global\n optimization methods, solutions do not always converge and\n turbines that should have a 0.0 deg actually have a 1.0 deg\n angle, for example. By enabling this function, the final yaw\n angles are compared to their baseline values one-by-one for\n the turbines to make sure no such convergence issues arise.\n Defaults to False.\n \"\"\"\n\n # Save turbine object to self\n self.fi = fi.copy()\n self.nturbs = len(self.fi.layout_x)\n\n # # Check floris options\n # if self.fi.floris.flow_field.n_wind_speeds > 1:\n # raise NotImplementedError(\n # \"Optimizer currently does not support more than one wind\" +\n # \" speed. 
Please assign FLORIS a single wind speed.\"\n # )\n\n # Initialize optimizer\n self.verify_convergence = verify_convergence\n if yaw_angles_baseline is not None:\n yaw_angles_baseline = self._unpack_variable(yaw_angles_baseline)\n self.yaw_angles_baseline = yaw_angles_baseline\n else:\n b = self.fi.floris.farm.yaw_angles\n self.yaw_angles_baseline = self._unpack_variable(b)\n if np.any(np.abs(b) > 0.0):\n print(\n \"INFO: Baseline yaw angles were not specified and were derived from the floris object.\"\n )\n print(\n \"INFO: The inherent yaw angles in the floris object are not all 0.0 degrees.\"\n )\n\n # Set optimization bounds\n self.minimum_yaw_angle = self._unpack_variable(minimum_yaw_angle)\n self.maximum_yaw_angle = self._unpack_variable(maximum_yaw_angle)\n\n # Set initial condition for optimization\n if x0 is not None:\n self.x0 = self._unpack_variable(x0)\n else:\n self.x0 = self._unpack_variable(0.0)\n for ti in range(self.nturbs):\n yaw_lb = self.minimum_yaw_angle[:, 0, ti]\n yaw_ub = self.maximum_yaw_angle[:, 0, ti]\n idx = (yaw_lb > 0.0) | (yaw_ub < 0.0)\n self.x0[idx, 0, ti] = (yaw_lb[idx] + yaw_ub[idx]) / 2.0\n\n # Check inputs for consistency\n if np.any(self.yaw_angles_baseline < self.minimum_yaw_angle):\n print(\"INFO: yaw_angles_baseline exceed lower bound constraints.\")\n if np.any(self.yaw_angles_baseline > self.maximum_yaw_angle):\n print(\"INFO: yaw_angles_baseline exceed upper bound constraints.\")\n if np.any(self.x0 < self.minimum_yaw_angle):\n raise ValueError(\"Initial guess x0 exceeds lower bound constraints.\")\n if np.any(self.x0 > self.maximum_yaw_angle):\n raise ValueError(\"Initial guess x0 exceeds upper bound constraints.\")\n\n # Define turbine weighing terms\n if turbine_weights is None:\n self.turbine_weights = self._unpack_variable(1.0)\n else:\n self.turbine_weights = self._unpack_variable(turbine_weights)\n\n # Save remaining user options to self\n self.normalize_variables = normalize_control_variables\n 
self.calc_baseline_power = calc_baseline_power\n self.exclude_downstream_turbines = exclude_downstream_turbines\n self.exploit_layout_symmetry = exploit_layout_symmetry\n\n # Prepare for optimization and calculate baseline powers (if applic.)\n self._initialize()\n self._calculate_baseline_farm_power()\n\n # Initialize optimal yaw angles and cost function as baseline values\n self._yaw_angles_opt_subset = copy.deepcopy(self._yaw_angles_baseline_subset)\n self._farm_power_opt_subset = copy.deepcopy(self._farm_power_baseline_subset)\n self._yaw_lbs = copy.deepcopy(self._minimum_yaw_angle_subset)\n self._yaw_ubs = copy.deepcopy(self._maximum_yaw_angle_subset)\n\n # Private methods\n\n def _initialize(self):\n # Derive layout symmetry, if applicable\n self._derive_layout_symmetry()\n\n # Reduce optimization problem as much as possible\n self._reduce_control_problem()\n\n # Normalize optimization variables\n if self.normalize_variables:\n self._normalize_control_problem()\n\n def _unpack_variable(self, variable, subset=False):\n \"\"\"Take a variable, can be either a float, a list equal in\n length to the number of turbines, or an ndarray. It then\n upsamples this value so that it always matches the dimensions\n (self.nconds, self.nturbs).\n \"\"\"\n # Deal with full vs. subset dimensions\n nturbs = self.nturbs\n if subset:\n nturbs = np.shape(self._x0_subset.shape[2])\n\n # Then process maximum yaw angle\n if isinstance(variable, (int, float)):\n # If single value, copy over to all turbines\n variable = np.tile(variable, (nturbs))\n\n variable = np.array(variable, dtype=float)\n if len(np.shape(variable)) == 1:\n # If one-dimensional array, copy over to all atmos. 
conditions\n variable = np.tile(\n variable,\n (\n self.fi.floris.flow_field.n_wind_directions,\n self.fi.floris.flow_field.n_wind_speeds,\n 1\n )\n )\n\n if len(np.shape(variable)) == 2:\n raise UserWarning(\"Variable input must have shape (n_wind_directions, n_wind_speeds, nturbs)\")\n\n return variable\n\n def _reduce_control_problem(self):\n \"\"\"\n This function reduces the control problem by eliminating turbines\n of which the yaw angles need not be optimized, either because of a\n user-specified set of bounds (where bounds[i][0] == bounds[i][1]),\n or alternatively turbines that are far downstream in the wind farm\n and of which the wake does not impinge other turbines, if\n exclude_downstream_turbines == True. This function also reduces\n the optimization problem by exploiting layout symmetry, if\n exploit_layout_symmetry == True.\n \"\"\"\n # Initialize which turbines to optimize for\n self.turbs_to_opt = (self.maximum_yaw_angle - self.minimum_yaw_angle >= 0.001)\n\n # Initialize subset variables as full set\n self.fi_subset = self.fi.copy()\n nwinddirections_subset = copy.deepcopy(self.fi.floris.flow_field.n_wind_directions)\n minimum_yaw_angle_subset = copy.deepcopy(self.minimum_yaw_angle)\n maximum_yaw_angle_subset = copy.deepcopy(self.maximum_yaw_angle)\n x0_subset = copy.deepcopy(self.x0)\n turbs_to_opt_subset = copy.deepcopy(self.turbs_to_opt)\n turbine_weights_subset = copy.deepcopy(self.turbine_weights)\n yaw_angles_template_subset = self._unpack_variable(0.0)\n yaw_angles_baseline_subset = copy.deepcopy(self.yaw_angles_baseline)\n\n # Define which turbines to optimize for\n if self.exclude_downstream_turbines:\n for iw, wd in enumerate(self.fi.floris.flow_field.wind_directions):\n # Remove turbines from turbs_to_opt that are downstream\n downstream_turbines = derive_downstream_turbines(self.fi, wd)\n downstream_turbines = np.array(downstream_turbines, dtype=int)\n self.turbs_to_opt[iw, 0, downstream_turbines] = False\n turbs_to_opt_subset = 
copy.deepcopy(self.turbs_to_opt) # Update\n\n # Reduce optimization problem through layout symmetry\n if (self.exploit_layout_symmetry) & (self._sym_df is not None):\n # Reinitialize floris with subset of wind directions\n wd_array = self.fi.floris.flow_field.wind_directions\n wind_direction_subset = wd_array[self._sym_mapping_reduce]\n self.fi_subset.reinitialize(wind_directions=wind_direction_subset)\n\n # Reduce control variables\n red_map = self._sym_mapping_reduce\n nwinddirections_subset = len(wind_direction_subset)\n minimum_yaw_angle_subset = minimum_yaw_angle_subset[red_map, :, :]\n maximum_yaw_angle_subset = maximum_yaw_angle_subset[red_map, :, :]\n x0_subset = x0_subset[red_map, :, :]\n turbs_to_opt_subset = turbs_to_opt_subset[red_map, :, :]\n turbine_weights_subset = turbine_weights_subset[red_map, :, :]\n yaw_angles_template_subset = yaw_angles_template_subset[red_map, :, :]\n yaw_angles_baseline_subset = yaw_angles_baseline_subset[red_map, :, :]\n\n # Set up a template yaw angles array with default solutions. 
The default\n # solutions are either 0.0 or the allowable yaw angle closest to 0.0 deg.\n # This solution addresses both downstream turbines, minimizing their abs.\n # yaw offset, and additionally fixing equality-constrained turbines to\n # their appropriate yaw angle.\n idx = (minimum_yaw_angle_subset > 0.0) | (maximum_yaw_angle_subset < 0.0)\n if np.any(idx):\n # Find bounds closest to 0.0 deg\n combined_bounds = np.concatenate(\n (\n np.expand_dims(minimum_yaw_angle_subset, axis=3),\n np.expand_dims(maximum_yaw_angle_subset, axis=3)\n ),\n axis=3\n )\n # Overwrite all values that are not allowed to be 0.0 with bound value closest to zero\n ids_closest = np.expand_dims(np.argmin(np.abs(combined_bounds), axis=3), axis=3)\n yaw_mb = np.squeeze(np.take_along_axis(combined_bounds, ids_closest, axis=3))\n yaw_angles_template_subset[idx] = yaw_mb[idx]\n\n # Save all subset variables to self\n self._nwinddirections_subset = nwinddirections_subset\n self._minimum_yaw_angle_subset = minimum_yaw_angle_subset\n self._maximum_yaw_angle_subset = maximum_yaw_angle_subset\n self._x0_subset = x0_subset\n self._turbs_to_opt_subset = turbs_to_opt_subset\n self._turbine_weights_subset = turbine_weights_subset\n self._yaw_angles_template_subset = yaw_angles_template_subset\n self._yaw_angles_baseline_subset = yaw_angles_baseline_subset\n\n def _normalize_control_problem(self):\n \"\"\"\n This private function normalizes variables for the optimization\n problem, specifically the initial condition x0 and the bounds.\n Normalization can improve optimization performance when using common\n optimization methods such as the SciPy Optimization Toolbox.\n \"\"\"\n lb = np.min(self._minimum_yaw_angle_subset)\n ub = np.max(self._maximum_yaw_angle_subset)\n self._normalization_length = (ub - lb)\n self._x0_subset_norm = self._x0_subset / self._normalization_length\n self._minimum_yaw_angle_subset_norm = self._minimum_yaw_angle_subset / self._normalization_length\n 
self._maximum_yaw_angle_subset_norm = self._maximum_yaw_angle_subset / self._normalization_length\n\n def _calculate_farm_power(self, yaw_angles=None, wd_array=None, turbine_weights=None):\n \"\"\"\n Calculate the wind farm power production assuming the predefined\n probability distribution (self.unc_options/unc_pmf), with the\n appropriate weighing terms, and for a specific set of yaw angles.\n\n Args:\n yaw_angles ([iteratible]): Array or list of yaw angles in degrees.\n\n Returns:\n farm_power (float): Weighted wind farm power.\n \"\"\"\n # Unpack all variables, whichever are defined.\n fi_subset = copy.deepcopy(self.fi_subset)\n if wd_array is None:\n wd_array = fi_subset.floris.flow_field.wind_directions\n if yaw_angles is None:\n yaw_angles = self._yaw_angles_baseline_subset\n if turbine_weights is None:\n turbine_weights = self._turbine_weights_subset\n\n # Ensure format [incompatible with _subset notation]\n yaw_angles = self._unpack_variable(yaw_angles, subset=True)\n\n # # Correct wind direction definition: 270 deg is from left, cw positive\n # wd_array = wrap_360(wd_array)\n\n # Calculate solutions\n turbine_power = np.zeros_like(self._minimum_yaw_angle_subset[:, 0, :])\n fi_subset.reinitialize(wind_directions=wd_array)\n fi_subset.calculate_wake(yaw_angles=yaw_angles)\n turbine_power = fi_subset.get_turbine_powers()\n\n # Multiply with turbine weighing terms\n turbine_power_weighted = np.multiply(turbine_weights, turbine_power)\n farm_power_weighted = np.sum(turbine_power_weighted, axis=2)\n return farm_power_weighted\n\n def _calculate_baseline_farm_power(self):\n \"\"\"\n Calculate the weighted wind farm power under the baseline turbine yaw\n angles.\n \"\"\"\n if self.calc_baseline_power:\n P = self._calculate_farm_power(self._yaw_angles_baseline_subset)\n self._farm_power_baseline_subset = P\n self.farm_power_baseline = self._unreduce_variable(P)\n\n def _derive_layout_symmetry(self):\n \"\"\"Derive symmetry lines in the wind farm layout and use 
that\n to reduce the optimization problem by 50 %.\n \"\"\"\n self._sym_df = None # Default option\n if self.exploit_layout_symmetry:\n # Check symmetry of bounds & turbine_weights\n if np.unique(self.minimum_yaw_angle, axis=0).shape[0] > 1:\n print(\"minimum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.maximum_yaw_angle, axis=0).shape[0] > 1:\n print(\"maximum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.maximum_yaw_angle, axis=0).shape[0] > 1:\n print(\"maximum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.turbine_weights, axis=0).shape[0] > 1:\n print(\"turbine_weights is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n # Check if turbine_weights are consistently 1.0 everywhere\n if np.any(np.abs(self.turbine_weights - 1.0) > 0.001):\n print(\"turbine_weights are not uniformly 1.0.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n x = self.fi.layout_x\n y = self.fi.layout_y\n df = find_layout_symmetry(x=x, y=y)\n\n # If no axes of symmetry, exit function\n if df.shape[0] <= 0:\n print(\"Wind farm layout in floris is not symmetrical.\")\n print(\"Exploitation of symmetry has been disabled.\")\n return\n\n wd_array = self.fi.floris.flow_field.wind_directions\n sym_step = df.iloc[0][\"wd_range\"][1]\n if ((not 0.0 in wd_array) or(not sym_step in wd_array)):\n print(\"Floris wind direction array does not \" +\n \"intersect {:.1f} and {:.1f}.\".format(0.0, sym_step))\n print(\"Exploitation of symmetry has been disabled.\")\n return\n\n ids_minimal = (wd_array >= 0.0) & (wd_array < sym_step)\n wd_array_min = wd_array[ids_minimal]\n wd_array_remn = np.remainder(wd_array, sym_step)\n\n if not np.all([(x in wd_array_min) for x in 
wd_array_remn]):\n print(\"Wind direction array appears irregular.\")\n print(\"Exploitation of symmetry has been disabled.\")\n\n self._sym_mapping_extrap = np.array(\n [np.where(np.abs(x - wd_array_min) < 0.0001)[0][0] \n for x in wd_array_remn], dtype=int)\n \n self._sym_mapping_reduce = copy.deepcopy(ids_minimal)\n self._sym_df = df\n\n return\n\n def _unreduce_variable(self, variable):\n # Check if needed to un-reduce at all, if not, return directly\n if not self.exploit_layout_symmetry:\n return variable\n \n if self._sym_df is None:\n return variable\n\n # Apply operation on right dimension\n ndims = len(np.shape(variable))\n if ndims == 1:\n full_array = variable[self._sym_mapping_extrap]\n elif ndims == 2:\n full_array = variable[self._sym_mapping_extrap, :]\n elif ndims == 3:\n # First upsample to full wind rose\n full_array = variable[self._sym_mapping_extrap, :, :]\n\n # Now process turbine mapping\n wd_array = self.fi.floris.flow_field.wind_directions\n for ii, dfrow in self._sym_df.iloc[1::].iterrows():\n ids = (\n (wd_array >= dfrow[\"wd_range\"][0]) &\n (wd_array < dfrow[\"wd_range\"][1])\n )\n tmap = np.argsort(dfrow[\"turbine_mapping\"])\n full_array[ids, :, :] = full_array[ids, :, :][:, :, tmap]\n else:\n raise UserWarning(\"Unknown data shape.\")\n\n return full_array\n\n def _finalize(self, farm_power_opt_subset=None, yaw_angles_opt_subset=None):\n # Process final solutions\n if farm_power_opt_subset is None:\n farm_power_opt_subset = self._farm_power_opt_subset\n if yaw_angles_opt_subset is None:\n yaw_angles_opt_subset = self._yaw_angles_opt_subset\n\n # Now verify solutions for convergence, if necessary\n if self.verify_convergence:\n yaw_angles_opt_subset, farm_power_opt_subset = (\n self._verify_solutions_for_convergence(\n farm_power_opt_subset,\n yaw_angles_opt_subset\n )\n )\n\n # Finalization step for optimization: undo reduction step\n self.farm_power_opt = self._unreduce_variable(farm_power_opt_subset)\n self.yaw_angles_opt = 
self._unreduce_variable(yaw_angles_opt_subset)\n\n # Produce output table\n ti = np.min(self.fi.floris.flow_field.turbulence_intensity)\n df_list = []\n num_wind_directions = len(self.fi.floris.flow_field.wind_directions)\n for ii, wind_speed in enumerate(self.fi.floris.flow_field.wind_speeds):\n df_list.append(pd.DataFrame({\n \"wind_direction\": self.fi.floris.flow_field.wind_directions,\n \"wind_speed\": wind_speed * np.ones(num_wind_directions),\n \"turbulence_intensity\": ti * np.ones(num_wind_directions),\n \"yaw_angles_opt\": [yaw_angles for yaw_angles in self.yaw_angles_opt[:, ii, :]],\n \"farm_power_opt\": self.farm_power_opt[:, ii],\n \"farm_power_baseline\": self.farm_power_baseline[:, ii],\n }))\n df_opt = pd.concat(df_list, axis=0)\n\n return df_opt\n\n def _verify_solutions_for_convergence(\n self,\n farm_power_opt_subset,\n yaw_angles_opt_subset,\n min_yaw_offset=0.01,\n min_power_gain_for_yaw=0.02,\n verbose=True,\n ):\n \"\"\"\n This function verifies whether the found solutions (yaw_angles_opt)\n have any nonzero yaw angles that are actually a result of incorrect\n converge. By evaluating the power production by setting each turbine's\n yaw angle to 0.0 deg, one by one, we verify that the found\n optimal values do in fact lead to a nonzero power production gain.\n\n Args:\n farm_power_opt_subset (iteratible): Array with the optimal wind\n farm power values (i.e., farm powers with yaw_angles_opt_subset).\n yaw_angles_opt_subset (iteratible): Array with the optimal yaw angles\n for all turbines in the farm (or for all the to-be-optimized\n turbines in the farm). The yaw angles in this array will be\n verified.\n min_yaw_offset (float, optional): Values that differ by less than\n this amount compared to the baseline value will be assumed to be\n too small to make any notable difference. Therefore, for practical\n reasons, the value is overwritten by its baseline value (which\n typically is 0.0 deg). 
Defaults to 0.10.\n min_power_gain_for_yaw (float, optional): The minimum percentage\n uplift a turbine must create in the farm power production for its\n yaw offset to be considered non negligible. Set to 0.0 to ignore\n this criteria. Defaults to 0.02 (implying 0.02%).\n verbose (bool, optional): Print to console. Defaults to False.\n Returns:\n x_opt (iteratible): Array with the optimal yaw angles, possibly\n with certain values being set to 0.0 deg as they were found\n to be a result of incorrect convergence. If the optimization\n has perfectly converged, x_opt will be identical to the user-\n provided input yaw_angles_opt.\n \"\"\"\n\n print(\"Verifying convergence of the found optimal yaw angles.\")\n\n # Start timer\n start_time = timerpc()\n\n # Define variables locally\n yaw_angles_opt_subset = np.array(yaw_angles_opt_subset, copy=True)\n yaw_angles_baseline_subset = self._yaw_angles_baseline_subset\n farm_power_baseline_subset = self._farm_power_baseline_subset\n turbs_to_opt_subset = self._turbs_to_opt_subset\n\n # Round small nonzero yaw angles to zero\n ydiff = np.abs(yaw_angles_opt_subset - yaw_angles_baseline_subset)\n ids = np.where((ydiff < min_yaw_offset) & (ydiff > 0.0))\n if len(ids[0]) > 0:\n if verbose:\n print(\"Rounding {:d} insignificant yaw angles to their \" +\n \"baseline value.\".format(len(ids)))\n yaw_angles_opt_subset[ids] = yaw_angles_baseline_subset[ids]\n ydiff[ids] = 0.0\n\n # Turbines to test whether their angles sufficiently improve farm power\n ids = np.where((turbs_to_opt_subset) & (ydiff > min_yaw_offset))\n\n # Define situations that need to be calculated and find farm power.\n # Each situation basically contains the exact same conditions as the\n # baseline conditions and optimal yaw angles, besides for a single\n # turbine for which its yaw angle was set to its baseline value (\n # typically 0.0 deg). 
This way, we investigate whether the yaw offset\n # of that turbine really adds significant uplift to the farm power\n # production.\n\n # For each turbine in the farm, reset its values to baseline. Thus,\n # we copy the atmospheric conditions n_turbs times and for each\n # copy of atmospheric conditions, we reset that turbine's yaw angle\n # to its baseline value for all conditions.\n n_turbs = len(self.fi.layout_x)\n sp = (n_turbs, 1, 1) # Tile shape for matrix expansion\n wd_array_nominal = self.fi_subset.floris.flow_field.wind_directions\n n_wind_directions = len(wd_array_nominal)\n yaw_angles_verify = np.tile(yaw_angles_opt_subset, sp)\n yaw_angles_bl_verify = np.tile(yaw_angles_baseline_subset, sp)\n turbine_id_array = np.zeros(np.shape(yaw_angles_verify)[0], dtype=int)\n for ti in range(n_turbs):\n ids = ti * n_wind_directions + np.arange(n_wind_directions)\n yaw_angles_verify[ids, :, ti] = yaw_angles_bl_verify[ids, :, ti]\n turbine_id_array[ids] = ti\n\n # Now evaluate all situations\n farm_power_baseline_verify = np.tile(farm_power_baseline_subset, (n_turbs, 1))\n farm_power = self._calculate_farm_power(\n yaw_angles=yaw_angles_verify,\n wd_array=np.tile(wd_array_nominal, n_turbs),\n turbine_weights=np.tile(self._turbs_to_opt_subset, sp)\n )\n\n # Calculate power uplift for optimal solutions\n uplift_o = 100 * (\n np.tile(farm_power_opt_subset, (n_turbs, 1)) /\n farm_power_baseline_verify - 1.0\n )\n\n # Calculate power uplift for all cases we evaluated\n uplift_n = 100.0 * (farm_power / farm_power_baseline_verify - 1.0)\n\n # Check difference in uplift, where each row represents a different\n # situation (i.e., where one turbine was set to its baseline yaw angle\n # instead of its optimal yaw angle).\n dp = uplift_o - uplift_n\n ids_to_simplify = np.where(dp < min_power_gain_for_yaw)\n ids_to_simplify = (\n np.remainder(ids_to_simplify[0], n_wind_directions), # Wind direction identifier\n ids_to_simplify[1], # Wind speed identifier\n 
turbine_id_array[ids_to_simplify[0]], # Turbine identifier\n )\n\n # Overwrite yaw angles that insufficiently increased farm power with baseline values\n yaw_angles_opt_subset[ids_to_simplify] = (\n yaw_angles_baseline_subset[ids_to_simplify]\n )\n\n n = len(ids_to_simplify[0])\n if n > 0:\n # Yaw angles notably changed: recalculate farm powers\n farm_power_opt_subset_new = (\n self._calculate_farm_power(yaw_angles_opt_subset)\n )\n\n if verbose:\n # Calculate old uplift for all conditions\n dP_old = 100.0 * (\n farm_power_opt_subset /\n farm_power_baseline_subset\n ) - 100.0\n\n # Calculate new uplift for all conditions\n dP_new = 100.0 * (\n farm_power_opt_subset_new /\n farm_power_baseline_subset\n ) - 100.0\n\n # Calculate differences in power uplift\n diff_uplift = dP_old - dP_new\n ids_max_loss = np.where(np.nanmax(diff_uplift) == diff_uplift)\n jj = (ids_max_loss[0][0], ids_max_loss[1][0])\n ws_array_nominal = self.fi_subset.floris.flow_field.wind_speeds\n print(\n \"Nullified the optimal yaw offset for {:d}\".format(n) +\n \" conditions and turbines.\"\n )\n print(\n \"Simplifying the yaw angles for these conditions lead \" +\n \"to a maximum change in wake-steering power uplift from \"\n + \"{:.5f}% to {:.5f}% at \".format(dP_old[jj], dP_new[jj])\n + \" WD = {:.1f} deg and WS = {:.1f} m/s.\".format(\n wd_array_nominal[jj[0]], ws_array_nominal[jj[1]],\n )\n )\n\n t = timerpc() - start_time\n print(\n \"Time spent to verify the convergence of the optimal \" +\n \"yaw angles: {:.3f} s.\".format(t)\n )\n\n # Return optimal solutions to the user\n farm_power_opt_subset = farm_power_opt_subset_new\n\n return yaw_angles_opt_subset, farm_power_opt_subset\n\n # Supporting functions\n def _norm(self, val, x1, x2):\n \"\"\"\n Normalize a variable to a value range.\n\n Args:\n val ([float]): Value to normalize.\n x1 ([float]): Normalization lower bound.\n x2 ([float]): Normalization upper bound.\n\n Returns:\n val_norm: Normalized variable.\n \"\"\"\n return (val - 
x1) / (x2 - x1)\n\n def _unnorm(self, val_norm, x1, x2):\n \"\"\"\n Unnormalize a variable to a value range.\n\n Args:\n val_norm ([float]): Normalized value.\n x1 ([float]): Normalization lower bound.\n x2 ([float]): Normalization upper bound.\n\n Returns:\n val: Unnormalized variable.\n \"\"\"\n return np.array(val_norm) * (x2 - x1) + x1"
] | [
[
"numpy.arange",
"numpy.array",
"numpy.zeros",
"numpy.allclose"
],
[
"numpy.nanmax",
"numpy.take_along_axis",
"numpy.expand_dims",
"numpy.all",
"numpy.max",
"numpy.zeros_like",
"numpy.any",
"numpy.where",
"numpy.unique",
"numpy.arange",
"pandas.concat",
"numpy.multiply",
"numpy.min",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.abs",
"numpy.tile",
"numpy.ones",
"numpy.shape",
"numpy.remainder"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
MarcusJones/kaggle_petfinder_adoption | [
"2d745b48405f4d4211b523eae272b9169fcf9fa2",
"2d745b48405f4d4211b523eae272b9169fcf9fa2"
] | [
"reference_kernels/kernel.py",
"reference_kernels/FORK EDA, PCA + Simple LGBM on KFold Technique.py"
] | [
"import gc\r\nimport glob\r\nimport json\r\nimport matplotlib.pyplot as plt\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport scipy as sp\r\nimport lightgbm as lgb\r\n\r\nfrom collections import Counter\r\nfrom functools import partial\r\nfrom math import sqrt\r\nfrom joblib import Parallel, delayed\r\nfrom tqdm import tqdm\r\nfrom PIL import Image\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.model_selection import StratifiedKFold\r\nfrom sklearn.metrics import cohen_kappa_score, mean_squared_error\r\nfrom sklearn.metrics import confusion_matrix as sk_cmatrix\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.decomposition import SparsePCA, TruncatedSVD, LatentDirichletAllocation, NMF\r\n\r\n# basic datasets\r\ntrain = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')\r\ntest = pd.read_csv('../input/petfinder-adoption-prediction/test/test.csv')\r\nsample_submission = pd.read_csv('../input/petfinder-adoption-prediction/test/sample_submission.csv')\r\nlabels_breed = pd.read_csv('../input/petfinder-adoption-prediction/breed_labels.csv')\r\nlabels_state = pd.read_csv('../input/petfinder-adoption-prediction/color_labels.csv')\r\nlabels_color = pd.read_csv('../input/petfinder-adoption-prediction/state_labels.csv')\r\n\r\ntrain_image_files = sorted(glob.glob('../input/petfinder-adoption-prediction/train_images/*.jpg'))\r\ntrain_metadata_files = sorted(glob.glob('../input/petfinder-adoption-prediction/train_metadata/*.json'))\r\ntrain_sentiment_files = sorted(glob.glob('../input/petfinder-adoption-prediction/train_sentiment/*.json'))\r\ntest_image_files = sorted(glob.glob('../input/petfinder-adoption-prediction/test_images/*.jpg'))\r\ntest_metadata_files = sorted(glob.glob('../input/petfinder-adoption-prediction/test_metadata/*.json'))\r\ntest_sentiment_files = sorted(glob.glob('../input/petfinder-adoption-prediction/test_sentiment/*.json'))\r\n\r\n# extract datasets\r\n# 
https://www.kaggle.com/christofhenkel/extract-image-features-from-pretrained-nn\r\ntrain_img_features = pd.read_csv('../input/extract-image-features-from-pretrained-nn/train_img_features.csv')\r\ntest_img_features = pd.read_csv('../input/extract-image-features-from-pretrained-nn/test_img_features.csv')\r\n\r\n# img_features columns set names\r\ncol_names =[\"PetID\"] + [\"{}_img_feature\".format(_) for _ in range(256)]\r\ntrain_img_features.columns = col_names\r\ntest_img_features.columns = col_names\r\n\r\n# ref: https://www.kaggle.com/wrosinski/baselinemodeling\r\nclass PetFinderParser(object):\r\n \r\n def __init__(self, debug=False):\r\n \r\n self.debug = debug\r\n self.sentence_sep = ' '\r\n \r\n # Does not have to be extracted because main DF already contains description\r\n self.extract_sentiment_text = False\r\n \r\n \r\n def open_metadata_file(self, filename):\r\n \"\"\"\r\n Load metadata file.\r\n \"\"\"\r\n with open(filename, 'r') as f:\r\n metadata_file = json.load(f)\r\n return metadata_file\r\n \r\n def open_sentiment_file(self, filename):\r\n \"\"\"\r\n Load sentiment file.\r\n \"\"\"\r\n with open(filename, 'r') as f:\r\n sentiment_file = json.load(f)\r\n return sentiment_file\r\n \r\n def open_image_file(self, filename):\r\n \"\"\"\r\n Load image file.\r\n \"\"\"\r\n image = np.asarray(Image.open(filename))\r\n return image\r\n \r\n def parse_sentiment_file(self, file):\r\n \"\"\"\r\n Parse sentiment file. 
Output DF with sentiment features.\r\n \"\"\"\r\n \r\n file_sentiment = file['documentSentiment']\r\n file_entities = [x['name'] for x in file['entities']]\r\n file_entities = self.sentence_sep.join(file_entities)\r\n\r\n if self.extract_sentiment_text:\r\n file_sentences_text = [x['text']['content'] for x in file['sentences']]\r\n file_sentences_text = self.sentence_sep.join(file_sentences_text)\r\n file_sentences_sentiment = [x['sentiment'] for x in file['sentences']]\r\n \r\n file_sentences_sentiment = pd.DataFrame.from_dict(\r\n file_sentences_sentiment, orient='columns').sum()\r\n file_sentences_sentiment = file_sentences_sentiment.add_prefix('document_').to_dict()\r\n \r\n file_sentiment.update(file_sentences_sentiment)\r\n \r\n df_sentiment = pd.DataFrame.from_dict(file_sentiment, orient='index').T\r\n if self.extract_sentiment_text:\r\n df_sentiment['text'] = file_sentences_text\r\n \r\n df_sentiment['entities'] = file_entities\r\n df_sentiment = df_sentiment.add_prefix('sentiment_')\r\n \r\n return df_sentiment\r\n \r\n def parse_metadata_file(self, file):\r\n \"\"\"\r\n Parse metadata file. 
Output DF with metadata features.\r\n \"\"\"\r\n \r\n file_keys = list(file.keys())\r\n \r\n if 'labelAnnotations' in file_keys:\r\n file_annots = file['labelAnnotations'][:int(len(file['labelAnnotations']) * 0.3)]\r\n file_top_score = np.asarray([x['score'] for x in file_annots]).mean()\r\n file_top_desc = [x['description'] for x in file_annots]\r\n else:\r\n file_top_score = np.nan\r\n file_top_desc = ['']\r\n \r\n file_colors = file['imagePropertiesAnnotation']['dominantColors']['colors']\r\n file_crops = file['cropHintsAnnotation']['cropHints']\r\n\r\n file_color_score = np.asarray([x['score'] for x in file_colors]).mean()\r\n file_color_pixelfrac = np.asarray([x['pixelFraction'] for x in file_colors]).mean()\r\n\r\n file_crop_conf = np.asarray([x['confidence'] for x in file_crops]).mean()\r\n \r\n if 'importanceFraction' in file_crops[0].keys():\r\n file_crop_importance = np.asarray([x['importanceFraction'] for x in file_crops]).mean()\r\n else:\r\n file_crop_importance = np.nan\r\n\r\n df_metadata = {\r\n 'annots_score': file_top_score,\r\n 'color_score': file_color_score,\r\n 'color_pixelfrac': file_color_pixelfrac,\r\n 'crop_conf': file_crop_conf,\r\n 'crop_importance': file_crop_importance,\r\n 'annots_top_desc': self.sentence_sep.join(file_top_desc)\r\n }\r\n \r\n df_metadata = pd.DataFrame.from_dict(df_metadata, orient='index').T\r\n df_metadata = df_metadata.add_prefix('metadata_')\r\n \r\n return df_metadata\r\n \r\n\r\n# Helper function for parallel data processing:\r\ndef extract_additional_features(pet_id, mode='train'):\r\n \r\n sentiment_filename = '../input/petfinder-adoption-prediction/{}_sentiment/{}.json'.format(mode, pet_id)\r\n try:\r\n sentiment_file = pet_parser.open_sentiment_file(sentiment_filename)\r\n df_sentiment = pet_parser.parse_sentiment_file(sentiment_file)\r\n df_sentiment['PetID'] = pet_id\r\n except FileNotFoundError:\r\n df_sentiment = []\r\n\r\n dfs_metadata = []\r\n metadata_filenames = 
sorted(glob.glob('../input/petfinder-adoption-prediction/{}_metadata/{}*.json'.format(mode, pet_id)))\r\n if len(metadata_filenames) > 0:\r\n for f in metadata_filenames:\r\n metadata_file = pet_parser.open_metadata_file(f)\r\n df_metadata = pet_parser.parse_metadata_file(metadata_file)\r\n df_metadata['PetID'] = pet_id\r\n dfs_metadata.append(df_metadata)\r\n dfs_metadata = pd.concat(dfs_metadata, ignore_index=True, sort=False)\r\n dfs = [df_sentiment, dfs_metadata]\r\n \r\n return dfs\r\n\r\ndef agg_features(df_metadata, df_sentiment):\r\n # Extend aggregates and improve column naming\r\n aggregates = ['mean', \"median\", 'sum', \"var\", \"std\", \"min\", \"max\", \"nunique\"]\r\n \r\n metadata_desc = df_metadata.groupby(['PetID'])['metadata_annots_top_desc'].unique()\r\n metadata_desc = metadata_desc.reset_index()\r\n metadata_desc['metadata_annots_top_desc'] = metadata_desc['metadata_annots_top_desc'].apply(lambda x: ' '.join(x))\r\n \r\n prefix = 'metadata'\r\n metadata_gr = df_metadata.drop(['metadata_annots_top_desc'], axis=1)\r\n for i in metadata_gr.columns:\r\n if 'PetID' not in i:\r\n metadata_gr[i] = metadata_gr[i].astype(float)\r\n metadata_gr = metadata_gr.groupby(['PetID']).agg(aggregates)\r\n metadata_gr.columns = pd.Index(['{}_{}_{}'.format(prefix, c[0], c[1].upper()) for c in metadata_gr.columns.tolist()])\r\n metadata_gr = metadata_gr.reset_index()\r\n \r\n sentiment_desc = df_sentiment.groupby(['PetID'])['sentiment_entities'].unique()\r\n sentiment_desc = sentiment_desc.reset_index()\r\n sentiment_desc['sentiment_entities'] = sentiment_desc['sentiment_entities'].apply(lambda x: ' '.join(x))\r\n \r\n prefix = 'sentiment'\r\n sentiment_gr = df_sentiment.drop(['sentiment_entities'], axis=1)\r\n for i in sentiment_gr.columns:\r\n if 'PetID' not in i:\r\n sentiment_gr[i] = sentiment_gr[i].astype(float)\r\n sentiment_gr = sentiment_gr.groupby(['PetID']).agg(aggregates)\r\n sentiment_gr.columns = pd.Index(['{}_{}_{}'.format(\r\n prefix, c[0], 
c[1].upper()) for c in sentiment_gr.columns.tolist()])\r\n sentiment_gr = sentiment_gr.reset_index()\r\n \r\n return sentiment_gr, metadata_gr, metadata_desc, sentiment_desc\r\n\r\n\r\ndef breed_features(df, _labels_breed):\r\n breed_main = df[['Breed1']].merge(_labels_breed, how='left', left_on='Breed1', right_on='BreedID', suffixes=('', '_main_breed'))\r\n breed_main = breed_main.iloc[:, 2:]\r\n breed_main = breed_main.add_prefix('main_breed_')\r\n \r\n breed_second = df[['Breed2']].merge(_labels_breed, how='left', left_on='Breed2', right_on='BreedID', suffixes=('', '_second_breed'))\r\n breed_second = breed_second.iloc[:, 2:]\r\n breed_second = breed_second.add_prefix('second_breed_')\r\n \r\n return breed_main, breed_second\r\n\r\n\r\ndef impact_coding(data, feature, target='y'):\r\n '''\r\n In this implementation we get the values and the dictionary as two different steps.\r\n This is just because initially we were ignoring the dictionary as a result variable.\r\n \r\n In this implementation the KFolds use shuffling. 
If you want reproducibility the cv \r\n could be moved to a parameter.\r\n '''\r\n n_folds = 20\r\n n_inner_folds = 10\r\n impact_coded = pd.Series()\r\n \r\n oof_default_mean = data[target].mean() # Gobal mean to use by default (you could further tune this)\r\n kf = KFold(n_splits=n_folds, shuffle=True)\r\n oof_mean_cv = pd.DataFrame()\r\n split = 0\r\n for infold, oof in kf.split(data[feature]):\r\n impact_coded_cv = pd.Series()\r\n kf_inner = KFold(n_splits=n_inner_folds, shuffle=True)\r\n inner_split = 0\r\n inner_oof_mean_cv = pd.DataFrame()\r\n oof_default_inner_mean = data.iloc[infold][target].mean()\r\n for infold_inner, oof_inner in kf_inner.split(data.iloc[infold]):\r\n # The mean to apply to the inner oof split (a 1/n_folds % based on the rest)\r\n oof_mean = data.iloc[infold_inner].groupby(by=feature)[target].mean()\r\n impact_coded_cv = impact_coded_cv.append(data.iloc[infold].apply(\r\n lambda x: oof_mean[x[feature]]\r\n if x[feature] in oof_mean.index\r\n else oof_default_inner_mean\r\n , axis=1))\r\n\r\n # Also populate mapping (this has all group -> mean for all inner CV folds)\r\n inner_oof_mean_cv = inner_oof_mean_cv.join(pd.DataFrame(oof_mean), rsuffix=inner_split, how='outer')\r\n inner_oof_mean_cv.fillna(value=oof_default_inner_mean, inplace=True)\r\n inner_split += 1\r\n\r\n # Also populate mapping\r\n oof_mean_cv = oof_mean_cv.join(pd.DataFrame(inner_oof_mean_cv), rsuffix=split, how='outer')\r\n oof_mean_cv.fillna(value=oof_default_mean, inplace=True)\r\n split += 1\r\n \r\n impact_coded = impact_coded.append(data.iloc[oof].apply(\r\n lambda x: inner_oof_mean_cv.loc[x[feature]].mean()\r\n if x[feature] in inner_oof_mean_cv.index\r\n else oof_default_mean\r\n , axis=1))\r\n\r\n return impact_coded, oof_mean_cv.mean(axis=1), oof_default_mean \r\n \r\n \r\ndef frequency_encoding(df, col_name):\r\n new_name = \"{}_counts\".format(col_name)\r\n new_col_name = \"{}_freq\".format(col_name)\r\n grouped = 
df.groupby(col_name).size().reset_index(name=new_name)\r\n df = df.merge(grouped, how = \"left\", on = col_name)\r\n df[new_col_name] = df[new_name]/df[new_name].count()\r\n del df[new_name]\r\n return df\r\n \r\n\r\n# FROM: https://www.kaggle.com/myltykritik/simple-lgbm-image-features\r\n\r\n# The following 3 functions have been taken from Ben Hamner's github repository\r\n# https://github.com/benhamner/Metrics\r\ndef confusion_matrix(rater_a, rater_b, min_rating=None, max_rating=None):\r\n \"\"\"\r\n Returns the confusion matrix between rater's ratings\r\n \"\"\"\r\n assert(len(rater_a) == len(rater_b))\r\n if min_rating is None:\r\n min_rating = min(rater_a + rater_b)\r\n if max_rating is None:\r\n max_rating = max(rater_a + rater_b)\r\n num_ratings = int(max_rating - min_rating + 1)\r\n conf_mat = [[0 for i in range(num_ratings)]\r\n for j in range(num_ratings)]\r\n for a, b in zip(rater_a, rater_b):\r\n conf_mat[a - min_rating][b - min_rating] += 1\r\n return conf_mat\r\n\r\n\r\ndef histogram(ratings, min_rating=None, max_rating=None):\r\n \"\"\"\r\n Returns the counts of each type of rating that a rater made\r\n \"\"\"\r\n if min_rating is None:\r\n min_rating = min(ratings)\r\n if max_rating is None:\r\n max_rating = max(ratings)\r\n num_ratings = int(max_rating - min_rating + 1)\r\n hist_ratings = [0 for x in range(num_ratings)]\r\n for r in ratings:\r\n hist_ratings[r - min_rating] += 1\r\n return hist_ratings\r\n\r\n\r\ndef quadratic_weighted_kappa(y, y_pred):\r\n \"\"\"\r\n Calculates the quadratic weighted kappa\r\n axquadratic_weighted_kappa calculates the quadratic weighted kappa\r\n value, which is a measure of inter-rater agreement between two raters\r\n that provide discrete numeric ratings. Potential values range from -1\r\n (representing complete disagreement) to 1 (representing complete\r\n agreement). 
A kappa value of 0 is expected if all agreement is due to\r\n chance.\r\n quadratic_weighted_kappa(rater_a, rater_b), where rater_a and rater_b\r\n each correspond to a list of integer ratings. These lists must have the\r\n same length.\r\n The ratings should be integers, and it is assumed that they contain\r\n the complete range of possible ratings.\r\n quadratic_weighted_kappa(X, min_rating, max_rating), where min_rating\r\n is the minimum possible rating, and max_rating is the maximum possible\r\n rating\r\n \"\"\"\r\n rater_a = y\r\n rater_b = y_pred\r\n min_rating=None\r\n max_rating=None\r\n rater_a = np.array(rater_a, dtype=int)\r\n rater_b = np.array(rater_b, dtype=int)\r\n assert(len(rater_a) == len(rater_b))\r\n if min_rating is None:\r\n min_rating = min(min(rater_a), min(rater_b))\r\n if max_rating is None:\r\n max_rating = max(max(rater_a), max(rater_b))\r\n conf_mat = confusion_matrix(rater_a, rater_b,\r\n min_rating, max_rating)\r\n num_ratings = len(conf_mat)\r\n num_scored_items = float(len(rater_a))\r\n\r\n hist_rater_a = histogram(rater_a, min_rating, max_rating)\r\n hist_rater_b = histogram(rater_b, min_rating, max_rating)\r\n\r\n numerator = 0.0\r\n denominator = 0.0\r\n\r\n for i in range(num_ratings):\r\n for j in range(num_ratings):\r\n expected_count = (hist_rater_a[i] * hist_rater_b[j]\r\n / num_scored_items)\r\n d = pow(i - j, 2.0) / pow(num_ratings - 1, 2.0)\r\n numerator += d * conf_mat[i][j] / num_scored_items\r\n denominator += d * expected_count / num_scored_items\r\n\r\n return (1.0 - numerator / denominator)\r\n\r\nclass OptimizedRounder(object):\r\n def __init__(self):\r\n self.coef_ = 0\r\n\r\n def _kappa_loss(self, coef, X, y):\r\n X_p = np.copy(X)\r\n for i, pred in enumerate(X_p):\r\n if pred < coef[0]:\r\n X_p[i] = 0\r\n elif pred >= coef[0] and pred < coef[1]:\r\n X_p[i] = 1\r\n elif pred >= coef[1] and pred < coef[2]:\r\n X_p[i] = 2\r\n elif pred >= coef[2] and pred < coef[3]:\r\n X_p[i] = 3\r\n else:\r\n X_p[i] = 4\r\n\r\n 
ll = quadratic_weighted_kappa(y, X_p)\r\n return -ll\r\n\r\n def fit(self, X, y):\r\n loss_partial = partial(self._kappa_loss, X=X, y=y)\r\n initial_coef = [0.5, 1.5, 2.5, 3.5]\r\n self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')\r\n\r\n def predict(self, X, coef):\r\n X_p = np.copy(X)\r\n for i, pred in enumerate(X_p):\r\n if pred < coef[0]:\r\n X_p[i] = 0\r\n elif pred >= coef[0] and pred < coef[1]:\r\n X_p[i] = 1\r\n elif pred >= coef[1] and pred < coef[2]:\r\n X_p[i] = 2\r\n elif pred >= coef[2] and pred < coef[3]:\r\n X_p[i] = 3\r\n else:\r\n X_p[i] = 4\r\n return X_p\r\n\r\n def coefficients(self):\r\n return self.coef_['x']\r\n \r\n \r\ndef rmse(actual, predicted):\r\n return sqrt(mean_squared_error(actual, predicted))\r\n \r\n\r\ndef train_lightgbm(X_train, X_test, params, n_splits, num_rounds, verbose_eval, early_stop):\r\n kfold = StratifiedKFold(n_splits=n_splits, random_state=1337)\r\n oof_train = np.zeros((X_train.shape[0]))\r\n oof_test = np.zeros((X_test.shape[0], n_splits))\r\n \r\n i = 0\r\n for train_index, valid_index in kfold.split(X_train, X_train['AdoptionSpeed'].values):\r\n \r\n X_tr = X_train.iloc[train_index, :]\r\n X_val = X_train.iloc[valid_index, :]\r\n \r\n y_tr = X_tr['AdoptionSpeed'].values\r\n X_tr = X_tr.drop(['AdoptionSpeed'], axis=1)\r\n \r\n y_val = X_val['AdoptionSpeed'].values\r\n X_val = X_val.drop(['AdoptionSpeed'], axis=1)\r\n \r\n print('\\ny_tr distribution: {}'.format(Counter(y_tr)))\r\n \r\n d_train = lgb.Dataset(X_tr, label=y_tr)\r\n d_valid = lgb.Dataset(X_val, label=y_val)\r\n watchlist = [d_train, d_valid]\r\n \r\n print('training LGB:')\r\n model = lgb.train(params,\r\n train_set=d_train,\r\n num_boost_round=num_rounds,\r\n valid_sets=watchlist,\r\n verbose_eval=verbose_eval,\r\n early_stopping_rounds=early_stop)\r\n \r\n val_pred = model.predict(X_val, num_iteration=model.best_iteration)\r\n test_pred = model.predict(X_test, num_iteration=model.best_iteration)\r\n \r\n 
oof_train[valid_index] = val_pred\r\n oof_test[:, i] = test_pred\r\n \r\n i += 1\r\n \r\n return oof_train, oof_test\r\n \r\n\r\npet_parser = PetFinderParser() \r\n \r\ndef main():\r\n \r\n train_pet_ids = train.PetID.unique()\r\n test_pet_ids = test.PetID.unique()\r\n \r\n dfs_train = Parallel(n_jobs=6, verbose=1)(\r\n delayed(extract_additional_features)(i, mode='train') for i in train_pet_ids)\r\n \r\n train_dfs_sentiment = [x[0] for x in dfs_train if isinstance(x[0], pd.DataFrame)]\r\n train_dfs_metadata = [x[1] for x in dfs_train if isinstance(x[1], pd.DataFrame)]\r\n \r\n train_dfs_sentiment = pd.concat(train_dfs_sentiment, ignore_index=True, sort=False)\r\n train_dfs_metadata = pd.concat(train_dfs_metadata, ignore_index=True, sort=False)\r\n \r\n dfs_test = Parallel(n_jobs=6, verbose=1)(\r\n delayed(extract_additional_features)(i, mode='test') for i in test_pet_ids)\r\n \r\n test_dfs_sentiment = [x[0] for x in dfs_test if isinstance(x[0], pd.DataFrame)]\r\n test_dfs_metadata = [x[1] for x in dfs_test if isinstance(x[1], pd.DataFrame)]\r\n \r\n test_dfs_sentiment = pd.concat(test_dfs_sentiment, ignore_index=True, sort=False)\r\n test_dfs_metadata = pd.concat(test_dfs_metadata, ignore_index=True, sort=False)\r\n \r\n train_sentiment_gr, train_metadata_gr, train_metadata_desc, train_sentiment_desc = agg_features(train_dfs_metadata, train_dfs_sentiment) \r\n test_sentiment_gr, test_metadata_gr, test_metadata_desc, test_sentiment_desc = agg_features(test_dfs_metadata, test_dfs_sentiment) \r\n \r\n train_proc = train.copy()\r\n for tr in [train_sentiment_gr, train_metadata_gr, train_metadata_desc, train_sentiment_desc]:\r\n train_proc = train_proc.merge(tr, how='left', on='PetID')\r\n \r\n test_proc = test.copy()\r\n for ts in [test_sentiment_gr, test_metadata_gr, test_metadata_desc, test_sentiment_desc]:\r\n test_proc = test_proc.merge(\r\n ts, how='left', on='PetID')\r\n\r\n train_proc = pd.merge(train_proc, train_img_features, on=\"PetID\")\r\n test_proc = 
pd.merge(test_proc, test_img_features, on=\"PetID\")\r\n \r\n train_breed_main, train_breed_second = breed_features(train_proc, labels_breed)\r\n train_proc = pd.concat([train_proc, train_breed_main, train_breed_second], axis=1)\r\n \r\n test_breed_main, test_breed_second = breed_features(test_proc, labels_breed)\r\n test_proc = pd.concat([test_proc, test_breed_main, test_breed_second], axis=1)\r\n \r\n X = pd.concat([train_proc, test_proc], ignore_index=True, sort=False)\r\n column_types = X.dtypes\r\n\r\n int_cols = column_types[column_types == 'int']\r\n float_cols = column_types[column_types == 'float']\r\n cat_cols = column_types[column_types == 'object']\r\n \r\n X_temp = X.copy()\r\n\r\n text_columns = ['Description', 'metadata_annots_top_desc', 'sentiment_entities']\r\n categorical_columns = ['main_breed_BreedName', 'second_breed_BreedName']\r\n\r\n to_drop_columns = ['PetID', 'Name', 'RescuerID']\r\n \r\n rescuer_count = X.groupby(['RescuerID'])['PetID'].count().reset_index()\r\n rescuer_count.columns = ['RescuerID', 'RescuerID_COUNT']\r\n \r\n X_temp = X_temp.merge(rescuer_count, how='left', on='RescuerID')\r\n \r\n for i in categorical_columns:\r\n X_temp.loc[:, i] = pd.factorize(X_temp.loc[:, i])[0]\r\n \r\n X_text = X_temp[text_columns]\r\n\r\n for i in X_text.columns:\r\n X_text.loc[:, i] = X_text.loc[:, i].fillna('<MISSING>')\r\n \r\n n_components = 5\r\n text_features = []\r\n\r\n\r\n # Generate text features:\r\n for i in X_text.columns:\r\n \r\n # Initialize decomposition methods:\r\n print('generating features from: {}'.format(i))\r\n svd_ = TruncatedSVD(\r\n n_components=n_components, random_state=1337)\r\n nmf_ = NMF(\r\n n_components=n_components, random_state=1337)\r\n \r\n tfidf_col = TfidfVectorizer().fit_transform(X_text.loc[:, i].values)\r\n svd_col = svd_.fit_transform(tfidf_col)\r\n svd_col = pd.DataFrame(svd_col)\r\n svd_col = svd_col.add_prefix('SVD_{}_'.format(i))\r\n \r\n nmf_col = nmf_.fit_transform(tfidf_col)\r\n nmf_col = 
pd.DataFrame(nmf_col)\r\n nmf_col = nmf_col.add_prefix('NMF_{}_'.format(i))\r\n \r\n text_features.append(svd_col)\r\n text_features.append(nmf_col)\r\n \r\n \r\n # Combine all extracted features:\r\n text_features = pd.concat(text_features, axis=1)\r\n \r\n # Concatenate with main DF:\r\n X_temp = pd.concat([X_temp, text_features], axis=1)\r\n \r\n # Remove raw text columns:\r\n for i in X_text.columns:\r\n X_temp = X_temp.drop(i, axis=1)\r\n \r\n X_temp[\"name_length\"] = X_temp.Name[X_temp.Name.isnull()].map(lambda x: len(str(x)))\r\n X_temp[\"name_length\"] = X_temp.Name.map(lambda x: len(str(x)))\r\n X_temp = X_temp.drop(to_drop_columns, axis=1)\r\n \r\n # Split into train and test again:\r\n X_train = X_temp.loc[np.isfinite(X_temp.AdoptionSpeed), :]\r\n X_test = X_temp.loc[~np.isfinite(X_temp.AdoptionSpeed), :]\r\n \r\n # Remove missing target column from test:\r\n X_test = X_test.drop(['AdoptionSpeed'], axis=1)\r\n \r\n \r\n print('X_train shape: {}'.format(X_train.shape))\r\n print('X_test shape: {}'.format(X_test.shape))\r\n \r\n assert X_train.shape[0] == train.shape[0]\r\n assert X_test.shape[0] == test.shape[0]\r\n \r\n \r\n # Check if columns between the two DFs are the same:\r\n train_cols = X_train.columns.tolist()\r\n train_cols.remove('AdoptionSpeed')\r\n \r\n test_cols = X_test.columns.tolist()\r\n \r\n np.random.seed(13)\r\n \r\n categorical_features = [\"Type\", \"Breed1\", \"Breed2\", \"Color1\" ,\"Color2\", \"Color3\", \"State\"]\r\n \r\n impact_coding_map = {}\r\n for f in categorical_features:\r\n print(\"Impact coding for {}\".format(f))\r\n X_train[\"impact_encoded_{}\".format(f)], impact_coding_mapping, default_coding = impact_coding(X_train, f, target=\"AdoptionSpeed\")\r\n impact_coding_map[f] = (impact_coding_mapping, default_coding)\r\n mapping, default_mean = impact_coding_map[f]\r\n X_test[\"impact_encoded_{}\".format(f)] = X_test.apply(lambda x: mapping[x[f]] if x[f] in mapping\r\n else default_mean, axis=1)\r\n\r\n for cat in 
categorical_features:\r\n X_train = frequency_encoding(X_train, cat)\r\n X_test = frequency_encoding(X_test, cat)\r\n\r\n params = {'application': 'regression',\r\n 'boosting': 'gbdt',\r\n 'metric': 'rmse',\r\n 'num_leaves': 70,\r\n 'max_depth': 9,\r\n 'learning_rate': 0.01,\r\n 'bagging_fraction': 0.85,\r\n 'feature_fraction': 0.8,\r\n 'min_split_gain': 0.02,\r\n 'min_child_samples': 150,\r\n 'min_child_weight': 0.02,\r\n 'lambda_l2': 0.0475,\r\n 'verbosity': -1,\r\n 'data_random_seed': 17}\r\n\r\n # Additional parameters:\r\n early_stop = 500\r\n verbose_eval = 100\r\n num_rounds = 10000\r\n n_splits = 5\r\n \r\n oof_train, oof_test = train_lightgbm(X_train, X_test, params, n_splits, num_rounds, verbose_eval, early_stop)\r\n optR = OptimizedRounder()\r\n optR.fit(oof_train, X_train['AdoptionSpeed'].values)\r\n coefficients = optR.coefficients()\r\n pred_test_y_k = optR.predict(oof_train, coefficients)\r\n print(\"\\nValid Counts = \", Counter(X_train['AdoptionSpeed'].values))\r\n print(\"Predicted Counts = \", Counter(pred_test_y_k))\r\n print(\"Coefficients = \", coefficients)\r\n qwk = quadratic_weighted_kappa(X_train['AdoptionSpeed'].values, pred_test_y_k)\r\n print(\"QWK = \", qwk)\r\n \r\n # Manually adjusted coefficients:\r\n coefficients_ = coefficients.copy()\r\n \r\n coefficients_[0] = 1.645\r\n coefficients_[1] = 2.115\r\n coefficients_[3] = 2.84\r\n \r\n train_predictions = optR.predict(oof_train, coefficients_).astype(int)\r\n print('train pred distribution: {}'.format(Counter(train_predictions)))\r\n \r\n test_predictions = optR.predict(oof_test.mean(axis=1), coefficients_)\r\n print('test pred distribution: {}'.format(Counter(test_predictions)))\r\n \r\n # Generate submission:\r\n submission = pd.DataFrame({'PetID': test['PetID'].values, 'AdoptionSpeed': test_predictions.astype(np.int32)})\r\n submission.head()\r\n submission.to_csv('submission.csv', index=False)\r\n \r\n\r\nif __name__ == '__main__':\r\n main()",
"# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.2'\n# jupytext_version: 1.0.4\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown] {\"_uuid\": \"6f06de1b48e35853f80eb1f3384baae8f8536b3c\"}\n# <h1><center><font size=\"6\">Santander EDA, PCA and Light GBM Classification Model</font></center></h1>\n#\n# <img src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/4/4a/Another_new_Santander_bank_-_geograph.org.uk_-_1710962.jpg/640px-Another_new_Santander_bank_-_geograph.org.uk_-_1710962.jpg\"></img>\n#\n# <br>\n# <b>\n# In this challenge, Santander invites Kagglers to help them identify which customers will make a specific transaction in the future, irrespective of the amount of money transacted. The data provided for this competition has the same structure as the real data they have available to solve this problem. \n# The data is anonimyzed, each row containing 200 numerical values identified just with a number.</b>\n#\n# <b>Inspired by Jiwei Liu's Kernel. 
I added Data Augmentation Segment to my kernel</b>\n#\n# <pre>\n# <a id='0'><b>Content</b></a>\n# - <a href='#1'><b>Import the Data</b></a>\n# - <a href='#11'><b>Data Exploration</b></a> \n# - <a href='#2'><b>Check for the missing values</b></a> \n# - <a href='#3'><b>Visualizing the Satendar Customer Transactions Data</b></a> \n# - <a href='#31'><b>Check for Class Imbalance</b></a> \n# - <a href='#32'><b>Distribution of Mean and Standard Deviation</b></a> \n# - <a href='#33'><b>Distribution of Skewness</b></a> \n# - <a href='#34'><b>Distribution of Kurtosis</b></a> \n# - <a href='#4'><b>Principal Component Analysis</b></a>\n# - <a href='#41'><b>Kernel PCA</b></a>\n# - <a href = \"#16\"><b>Data Augmentation</b></a>\n# - <a href='#6'><b>Build the Light GBM Model</b></a></pre>\n\n# %% {\"_cell_guid\": \"b1076dfc-b9ad-4769-8c92-a6c4dae69d19\", \"_uuid\": \"8f2839f25d086af736a60e9eeb907d3b93b6e0e5\"}\nimport numpy as np\nimport pandas as pd\nimport lightgbm as lgb\nimport matplotlib\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import StratifiedKFold,KFold\nimport warnings\nfrom six.moves import urllib\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nwarnings.filterwarnings('ignore')\n# %matplotlib inline\nplt.style.use('seaborn')\nfrom scipy.stats import norm, skew\n\n# %% [markdown] {\"_uuid\": \"d150ae0e24acf7d0107ec64ccea13d9745ce45fc\"}\n# <a id=1><pre><b>Import the Data</b></pre></a>\n\n# %% {\"_cell_guid\": \"79c7e3d0-c299-4dcb-8224-4455121ee9b0\", \"_uuid\": \"d629ff2d2480ee46fbb7e2d37f6b5fab8052498a\"}\n#Load the Data\ntrain = pd.read_csv('../input/train.csv')\ntest = pd.read_csv('../input/test.csv')\nfeatures = [c for c in train.columns if c not in ['ID_code', 'target']]\n\n# %% [markdown] {\"_uuid\": \"e711ea5576a8672fce378ede726be247aa789ef1\"}\n# <a id=11><pre><b>Data Exploration</b></pre></a>\n\n# %% {\"_uuid\": 
\"0ad0660223a680a8cc777c7526258759fface7a6\"}\ntrain.describe()\n\n# %% {\"_uuid\": \"217907a226a7e9425b4445805cde80c5de4feaca\"}\ntrain.info()\n\n# %% {\"_uuid\": \"90ca407e625a961a635fde6a21c9f524f024d654\"}\ntrain.shape\n\n# %% {\"_uuid\": \"089309dd0b32db21b44152f4bb15b2c7765dfd87\"}\ntrain.head(5)\n\n# %% [markdown] {\"_uuid\": \"3548150c4ae4ccd847d84baea5cba641f4fdc0bb\"}\n# <a id=2><b><pre>Check for the Missing Values.</pre></b></a> \n\n# %% {\"_uuid\": \"906ec8c811e2d415d47c7f67d8ac23bed0d8699b\"}\n#Check for Missing Values after Concatination\n\nobs = train.isnull().sum().sort_values(ascending = False)\npercent = round(train.isnull().sum().sort_values(ascending = False)/len(train)*100, 2)\npd.concat([obs, percent], axis = 1,keys= ['Number of Observations', 'Percent'])\n\n# %% [markdown] {\"_uuid\": \"bfe81109ea380b1210a3a6d50547058a4ee0e9b5\"}\n# <pre>There are no missing values in the dataset</pre>\n\n# %% [markdown] {\"_uuid\": \"8d28011134ff59dc25080e743e028bb487b8c366\"}\n# <pre><a id = 3><b>Visualizing the Satendar Customer Transactions Data</b></a></pre>\n\n# %% [markdown] {\"_uuid\": \"6abbb24cafc26afb4c6f8c52ab6b0353e2698f2e\"}\n# <pre><a id = 31 ><b>Check for Class Imbalance</b></a></pre>\n\n# %% {\"_uuid\": \"ada8973ebb427bbf9934a911095c1338b9036b35\"}\ntarget = train['target']\ntrain = train.drop([\"ID_code\", \"target\"], axis=1)\nsns.set_style('whitegrid')\nsns.countplot(target)\n\n# %% [markdown] {\"_uuid\": \"9bcb709f47ab634bd7ebaa7a9f0574d571e2b30e\"}\n# <pre><a id = 32 ><b>Distribution of Mean and Standard Deviation</b></a></pre>\n#\n# <pre>EDA Reference : https://www.kaggle.com/gpreda/santander-eda-and-prediction</pre>\n\n# %% {\"_uuid\": \"60077579a9b2e2b92119d2cebbf29c301c3ee279\"}\nplt.figure(figsize=(16,6))\nplt.title(\"Distribution of mean values per row in the train and test set\")\nsns.distplot(train[features].mean(axis=1),color=\"black\", kde=True,bins=120, label='train')\nsns.distplot(test[features].mean(axis=1),color=\"red\", 
kde=True,bins=120, label='test')\nplt.legend()\nplt.show()\n\n# %% [markdown] {\"_uuid\": \"c5f90ed3f3e3a6c21fd21e7891dd131a981e1f24\"}\n# <pre>Let's check the distribution of the mean of values per columns in the train and test datasets.</pre>\n\n# %% {\"_uuid\": \"4589fe2bb6b38c8f490057b6c2734aa1c8cf57a5\"}\nplt.figure(figsize=(16,6))\nplt.title(\"Distribution of mean values per column in the train and test set\")\nsns.distplot(train[features].mean(axis=0),color=\"black\", kde=True,bins=120, label='train')\nsns.distplot(test[features].mean(axis=0),color=\"red\", kde=True,bins=120, label='test')\nplt.legend();plt.show()\n\n# %% [markdown] {\"_uuid\": \"17a1f1bd380a50f59f2293071f1fd1cb85d4cace\"}\n# <pre>Distribution for Standard Deviation</pre>\n\n# %% {\"_uuid\": \"1119bbd9854b60c53eff0f5c024df241cf99a4ff\"}\nplt.figure(figsize=(16,6))\nplt.title(\"Distribution of std values per rows in the train and test set\")\nsns.distplot(train[features].std(axis=1),color=\"blue\",kde=True,bins=120, label='train')\nsns.distplot(test[features].std(axis=1),color=\"green\", kde=True,bins=120, label='test')\nplt.legend(); plt.show()\n\n# %% [markdown] {\"_uuid\": \"2e23ffd37c255be7b01aab8ef6b25d0bd4d2563f\"}\n# <pre>Let's check the distribution of the standard deviation of values per columns in the train and test datasets.</pre>\n\n# %% {\"_uuid\": \"734b96fd6a8aba302513797962498c906e299653\"}\nplt.figure(figsize=(16,6))\nplt.title(\"Distribution of mean values per column in the train and test set\")\nsns.distplot(train[features].mean(axis=0),color=\"blue\", kde=True,bins=120, label='train')\nsns.distplot(test[features].mean(axis=0),color=\"green\", kde=True,bins=120, label='test')\nplt.legend();plt.show()\n\n# %% [markdown] {\"_uuid\": \"1200ca154b1928043b67fb114d7d0eb93bfbd7e7\"}\n# <pre>Let's check now the distribution of the mean value per row in the train dataset, grouped by value of target</pre>\n\n# %% {\"_uuid\": \"802622e99a858e7e1be8a56a0dcb32c217769736\"}\nt0 = 
train.loc[target == 0]\nt1 = train.loc[target == 1]\nplt.figure(figsize=(16,6))\nplt.title(\"Distribution of mean values per row in the train set\")\nsns.distplot(t0[features].mean(axis=1),color=\"red\", kde=True,bins=120, label='target = 0')\nsns.distplot(t1[features].mean(axis=1),color=\"green\", kde=True,bins=120, label='target = 1')\nplt.legend(); plt.show()\n\n# %% [markdown] {\"_uuid\": \"bae148d9255104a14c07b0075dbe67084039ada9\"}\n# <pre>Let's check now the distribution of the mean values per columns in the train and test datasets.</pre>\n\n# %% {\"_uuid\": \"5778c9b5a5b82264a02907471c98aba55e753cf9\"}\nt0 = train.loc[target == 0]\nt1 = train.loc[target == 1]\nplt.figure(figsize=(16,6))\nplt.title(\"Distribution of mean values per column in the train set\")\nsns.distplot(t0[features].mean(axis=0),color=\"red\", kde=True,bins=120, label='target = 0')\nsns.distplot(t1[features].mean(axis=0),color=\"green\", kde=True,bins=120, label='target = 1')\nplt.legend(); plt.show()\n\n# %% [markdown] {\"_uuid\": \"dfe2e017dbe64a93c707785b77a2f018c55d2a92\"}\n# <pre>Let's check now the distribution of the standard deviation per row in the train dataset, grouped by value of target</pre>\n\n# %% {\"_uuid\": \"03d83a9f09460a7e0e64de7cff618fb903511eb5\"}\nt0 = train.loc[target == 0]\nt1 = train.loc[target == 1]\nplt.figure(figsize=(16,6))\nplt.title(\"Distribution of standard deviation values per row in the train set\")\nsns.distplot(t0[features].std(axis=1),color=\"blue\", kde=True,bins=120, label='target = 0')\nsns.distplot(t1[features].std(axis=1),color=\"red\", kde=True,bins=120, label='target = 1')\nplt.legend(); plt.show()\n\n# %% [markdown] {\"_uuid\": \"0796b8aa04186d551ae3d92d28e18a548dc09e51\"}\n# <pre>Let's check now the distribution of standard deviation per columns in the train and test datasets.</pre>\n\n# %% {\"_uuid\": \"8fe584abb584e77e654eb6c768b42eeafda6b784\"}\nt0 = train.loc[target == 0]\nt1 = train.loc[target == 
1]\nplt.figure(figsize=(16,6))\nplt.title(\"Distribution of standard deviation values per column in the train set\")\nsns.distplot(t0[features].std(axis=0),color=\"blue\", kde=True,bins=120, label='target = 0')\nsns.distplot(t1[features].std(axis=0),color=\"red\", kde=True,bins=120, label='target = 1')\nplt.legend(); plt.show()\n\n# %% [markdown] {\"_uuid\": \"61fb22a8fdac069232e1584d97e02ca6348c7eea\"}\n# <pre><a id = 33 ><b>Distribution of Skewness</b></a></pre>\n#\n# <pre>Let's see now the distribution of skewness on rows in train separated for values of target 0 and 1. We found the distribution is left skewed</pre>\n\n# %% {\"_uuid\": \"a353fcf6b2ce7db7d6c693a2761bc8ac0e005309\"}\nt0 = train.loc[target == 0]\nt1 = train.loc[target == 1]\nplt.figure(figsize=(16,6))\nplt.title(\"Distribution of skew values per row in the train set\")\nsns.distplot(t0[features].skew(axis=1),color=\"red\", kde=True,bins=120, label='target = 0')\nsns.distplot(t1[features].skew(axis=1),color=\"blue\", kde=True,bins=120, label='target = 1')\nplt.legend(); plt.show()\n\n# %% [markdown] {\"_uuid\": \"3a0d204c325a9b78ff5b242e3b23043645040499\"}\n# <pre>Let's see now the distribution of skewness on columns in train separated for values of target 0 and 1.</pre>\n\n# %% {\"_uuid\": \"e47c1c00db66e3f43c65efad776bd2bcbea8117d\"}\nt0 = train.loc[target == 0]\nt1 = train.loc[target == 1]\nplt.figure(figsize=(16,6))\nplt.title(\"Distribution of skew values per column in the train set\")\nsns.distplot(t0[features].skew(axis=0),color=\"red\", kde=True,bins=120, label='target = 0')\nsns.distplot(t1[features].skew(axis=0),color=\"blue\", kde=True,bins=120, label='target = 1')\nplt.legend(); plt.show()\n\n# %% [markdown] {\"_uuid\": \"52dc95b188e82d5e55503348b8db57abfb385ca2\"}\n# <pre><a id = 34 ><b>Distribution of Kurtosis</b></a></pre>\n\n# %% [markdown] {\"_uuid\": \"b3d635fc2ccd5d0ad662413ccff46e062a01a13c\"}\n# <pre>Let's see now the distribution of kurtosis on rows in train separated for 
values of target 0 and 1. We found the distribution to be Leptokurtic</pre>\n\n# %% {\"_uuid\": \"a0785f3344f18166d838b50ecfb05901ad2180c8\"}\nt0 = train.loc[target == 0]\nt1 = train.loc[target == 1]\nplt.figure(figsize=(16,6))\nplt.title(\"Distribution of kurtosis values per row in the train set\")\nsns.distplot(t0[features].kurtosis(axis=1),color=\"red\", kde=True,bins=120, label='target = 0')\nsns.distplot(t1[features].kurtosis(axis=1),color=\"green\", kde=True,bins=120, label='target = 1')\nplt.legend(); plt.show()\n\n# %% [markdown] {\"_kg_hide-input\": true, \"_kg_hide-output\": true, \"_uuid\": \"736f0bde864b3bf327be491a0d820593415aa3f5\"}\n# <pre>Let's see now the distribution of kurtosis on columns in train separated for values of target 0 and 1.</pre>\n\n# %% {\"_uuid\": \"8b72cdd5a6f9b1db419fdd35e44974e219a9d376\"}\nt0 = train.loc[target == 0]\nt1 = train.loc[target == 1]\nplt.figure(figsize=(16,6))\nplt.title(\"Distribution of kurtosis values per column in the train set\")\nsns.distplot(t0[features].kurtosis(axis=0),color=\"red\", kde=True,bins=120, label='target = 0')\nsns.distplot(t1[features].kurtosis(axis=0),color=\"green\", kde=True,bins=120, label='target = 1')\nplt.legend(); plt.show()\n\n# %% [markdown] {\"_uuid\": \"374e9be094d1adaf17888cb16aea2f10093edd9e\"}\n# <a id=4><pre><b>Principal Component Analysis to check Dimentionality Reduction<b></pre></a>\n\n# %% {\"_uuid\": \"0af73d37cc75d3685fcb5f8c2702ad8758070b94\"}\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler()\ntrain_scaled = scaler.fit_transform(train) \nPCA_train_x = PCA(2).fit_transform(train_scaled)\nplt.scatter(PCA_train_x[:, 0], PCA_train_x[:, 1], c=target, cmap=\"copper_r\")\nplt.axis('off')\nplt.colorbar()\nplt.show()\n\n# %% [markdown] {\"_uuid\": \"2482fcb3497bcc3b7fe7f27256e408ff98324de2\"}\n# <pre><a id = 41><b>Kernel PCA (Since the Graph above doesn't represent meaningful analysis)</b></a></pre>\n\n# %% 
{\"_uuid\": \"9206e909ab4be625c94811af6bd0b676f626de22\"}\nfrom sklearn.decomposition import KernelPCA\n\nlin_pca = KernelPCA(n_components = 2, kernel=\"linear\", fit_inverse_transform=True)\nrbf_pca = KernelPCA(n_components = 2, kernel=\"rbf\", gamma=0.0433, fit_inverse_transform=True)\nsig_pca = KernelPCA(n_components = 2, kernel=\"sigmoid\", gamma=0.001, coef0=1, fit_inverse_transform=True)\n\n\nplt.figure(figsize=(11, 4))\nfor subplot, pca, title in ((131, lin_pca, \"Linear kernel\"), (132, rbf_pca, \"RBF kernel, $\\gamma=0.04$\"), \n (133, sig_pca, \"Sigmoid kernel, $\\gamma=10^{-3}, r=1$\")):\n \n PCA_train_x = PCA(2).fit_transform(train_scaled)\n plt.subplot(subplot)\n plt.title(title, fontsize=14)\n plt.scatter(PCA_train_x[:, 0], PCA_train_x[:, 1], c=target, cmap=\"nipy_spectral_r\")\n plt.xlabel(\"$z_1$\", fontsize=18)\n if subplot == 131:\n plt.ylabel(\"$z_2$\", fontsize=18, rotation=0)\n plt.grid(True)\n\nplt.show()\n\n\n# %% [markdown] {\"_uuid\": \"5b7a96339294daeedba94abaee4fbe6f16e69f2e\"}\n# <pre>Since PCA hasn't been useful, I decided to proceed with the existing dataset</pre>\n\n# %% [markdown] {\"_uuid\": \"96861473dd6cb2de3377a47684ece1714e1ab072\"}\n# <pre><a id = 16><b>Data Augmentation</b></a></pre>\n\n# %% {\"_uuid\": \"dfd26c446ff80f323791fbdbbbf158d355ee7267\"}\ndef augment(x,y,t=2):\n xs,xn = [],[]\n for i in range(t):\n mask = y>0\n x1 = x[mask].copy()\n ids = np.arange(x1.shape[0])\n for c in range(x1.shape[1]):\n np.random.shuffle(ids)\n x1[:,c] = x1[ids][:,c]\n xs.append(x1)\n\n for i in range(t//2):\n mask = y==0\n x1 = x[mask].copy()\n ids = np.arange(x1.shape[0])\n for c in range(x1.shape[1]):\n np.random.shuffle(ids)\n x1[:,c] = x1[ids][:,c]\n xn.append(x1)\n\n xs = np.vstack(xs)\n xn = np.vstack(xn)\n ys = np.ones(xs.shape[0])\n yn = np.zeros(xn.shape[0])\n x = np.vstack([x,xs,xn])\n y = np.concatenate([y,ys,yn])\n return x,y\n\n\n# %% [markdown] {\"_uuid\": \"a37f046be743d0086a2fc6094d78d7b9cab78055\"}\n# <pre><a id = 6><b>Build 
the Light GBM Model</b></a></pre>\n\n# %% {\"_uuid\": \"d418b9c44ef2f96b02db44d70aacbca61fe0952f\"}\nparam = {\n 'bagging_freq': 5,\n 'bagging_fraction': 0.335,\n 'boost_from_average':'false',\n 'boost': 'gbdt',\n 'feature_fraction': 0.041,\n 'learning_rate': 0.0083,\n 'max_depth': -1,\n 'metric':'auc',\n 'min_data_in_leaf': 80,\n 'min_sum_hessian_in_leaf': 10.0,\n 'num_leaves': 13,\n 'num_threads': 8,\n 'tree_learner': 'serial',\n 'objective': 'binary', \n 'verbosity': -1\n}\n\n# %% {\"_uuid\": \"fc22f099688ce4928a44f1c68cd16d6b8473e207\"}\ntrain.shape\n\n# %% {\"_uuid\": \"8b4f1d5f4aef4730673a8a6bbb2e828c2f92e2a5\"}\nnum_folds = 11\nfeatures = [c for c in train.columns if c not in ['ID_code', 'target']]\n\nfolds = KFold(n_splits=num_folds, random_state=2319)\noof = np.zeros(len(train))\ngetVal = np.zeros(len(train))\npredictions = np.zeros(len(target))\nfeature_importance_df = pd.DataFrame()\n\nprint('Light GBM Model')\nfor fold_, (trn_idx, val_idx) in enumerate(folds.split(train.values, target.values)):\n \n X_train, y_train = train.iloc[trn_idx][features], target.iloc[trn_idx]\n X_valid, y_valid = train.iloc[val_idx][features], target.iloc[val_idx]\n \n X_tr, y_tr = augment(X_train.values, y_train.values)\n X_tr = pd.DataFrame(X_tr)\n \n print(\"Fold idx:{}\".format(fold_ + 1))\n trn_data = lgb.Dataset(X_tr, label=y_tr)\n val_data = lgb.Dataset(X_valid, label=y_valid)\n \n clf = lgb.train(param, trn_data, 1000000, valid_sets = [trn_data, val_data], verbose_eval=5000, early_stopping_rounds = 4000)\n oof[val_idx] = clf.predict(train.iloc[val_idx][features], num_iteration=clf.best_iteration)\n getVal[val_idx]+= clf.predict(train.iloc[val_idx][features], num_iteration=clf.best_iteration) / folds.n_splits\n \n fold_importance_df = pd.DataFrame()\n fold_importance_df[\"feature\"] = features\n fold_importance_df[\"importance\"] = clf.feature_importance()\n fold_importance_df[\"fold\"] = fold_ + 1\n feature_importance_df = pd.concat([feature_importance_df, 
fold_importance_df], axis=0)\n \n predictions += clf.predict(test[features], num_iteration=clf.best_iteration) / folds.n_splits\n\nprint(\"CV score: {:<8.5f}\".format(roc_auc_score(target, oof)))\n\n# %% {\"_uuid\": \"f9dc76139cb15edf957be0a8400e6de33c14e655\"}\ncols = (feature_importance_df[[\"feature\", \"importance\"]]\n .groupby(\"feature\")\n .mean()\n .sort_values(by=\"importance\", ascending=False)[:1000].index)\nbest_features = feature_importance_df.loc[feature_importance_df.feature.isin(cols)]\n\nplt.figure(figsize=(14,26))\nsns.barplot(x=\"importance\", y=\"feature\", data=best_features.sort_values(by=\"importance\",ascending=False))\nplt.title('LightGBM Features (averaged over folds)')\nplt.tight_layout()\nplt.savefig('lgbm_importances.png')\n\n# %% {\"_uuid\": \"137cf3c3924422e1a15ac63f4e259b86db86c2c5\"}\nnum_sub = 26\nprint('Saving the Submission File')\nsub = pd.DataFrame({\"ID_code\": test.ID_code.values})\nsub[\"target\"] = predictions\nsub.to_csv('submission{}.csv'.format(num_sub), index=False)\ngetValue = pd.DataFrame(getVal)\ngetValue.to_csv(\"Validation_kfold.csv\")\n"
] | [
[
"pandas.merge",
"pandas.Series",
"numpy.asarray",
"sklearn.model_selection.KFold",
"pandas.DataFrame",
"sklearn.metrics.mean_squared_error",
"pandas.read_csv",
"sklearn.model_selection.StratifiedKFold",
"numpy.copy",
"numpy.zeros",
"sklearn.feature_extraction.text.TfidfVectorizer",
"pandas.concat",
"pandas.factorize",
"scipy.optimize.minimize",
"pandas.DataFrame.from_dict",
"numpy.array",
"sklearn.decomposition.TruncatedSVD",
"sklearn.decomposition.NMF",
"numpy.random.seed",
"numpy.isfinite"
],
[
"matplotlib.pyplot.legend",
"sklearn.metrics.roc_auc_score",
"sklearn.model_selection.KFold",
"pandas.DataFrame",
"numpy.concatenate",
"pandas.read_csv",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"numpy.zeros",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"pandas.concat",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.show",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.scatter",
"numpy.random.shuffle",
"numpy.ones",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.StandardScaler",
"sklearn.decomposition.KernelPCA",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
bstellato/cvxpy | [
"c954bcfd14f9b131bd55d5c1028e667297a53f76"
] | [
"cvxpy/cvxcore/tests/python/364A_scripts/act_management.py"
] | [
"from cvxpy import Maximize, Problem, Variable, hstack, vstack\nimport numpy as np\nimport time\n\n\n# Create two scalar optimization variables.\n\nANSWERS = []\nTIME = 0\n\nA = np.array([ [1, 2, 0, 1], \\\n[0, 0, 3, 1], \\\n[0, 3, 1, 1], \\\n[2, 1, 2, 5], \\\n[1, 0, 3, 2] ])\n\nA_star = hstack(A,A)\n\nc_max = np.array([100] * 5)\n\np = np.array([3, 2, 7, 6])\np_disc = np.array([2, 1, 4, 2])\n\np_star = vstack(p, p_disc)\n\nq = np.array([4, 10, 5, 10])\n\nx_star = Variable( 8 )\nconstraints = [ A_star * x_star <= c_max, x_star >= 0 ]\nfor i in range(4):\n\tconstraints.append( x_star[i] >= q[i] )\n\nobjective = Maximize(p_star.T * x_star)\n\nprob = Problem(objective, constraints)\ntic = time.time()\nANSWERS.append( prob.solve() ) \ntoc = time.time()\nTIME += toc - tic \n\n\nx = np.array( [0] * 4)\nfor i in range(4):\n\tx[i] = x_star.value[i] + x_star.value[4 + i]\n\n\npass #print \"Optimal revenue:\", result \npass #print \"Optimal activity levels:\", x\n\naverage_rate = np.array([0] * 4)\n\nfor i in range(4):\n\taverage_rate[i] = (x_star.value[i] * p_star.value[i] + x_star.value[i + 4] * p_star.value[i + 4]) / x[i]\n\npass #print \"Average rate:\", average_rate"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
titania7777/3D-ResNets-PyTorch | [
"45921588bcf70f1b4ace424e754c48c0b5501ad6"
] | [
"main.py"
] | [
"from pathlib import Path\nimport json\nimport random\nimport os\n\nimport numpy as np\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD, lr_scheduler\nimport torch.multiprocessing as mp\nimport torch.distributed as dist\nfrom torch.backends import cudnn\nimport torchvision\n\nfrom opts import parse_opts\nfrom model import (generate_model, load_pretrained_model, make_data_parallel,\n get_fine_tuning_parameters)\nfrom mean import get_mean_std\nfrom spatial_transforms import (Compose, Normalize, Resize, CenterCrop,\n CornerCrop, MultiScaleCornerCrop,\n RandomResizedCrop, RandomHorizontalFlip,\n ToTensor, ScaleValue, ColorJitter,\n PickFirstChannels)\nfrom temporal_transforms import (LoopPadding, TemporalRandomCrop,\n TemporalCenterCrop, TemporalEvenCrop,\n SlidingWindow, TemporalSubsampling)\nfrom temporal_transforms import Compose as TemporalCompose\nfrom dataset import get_training_data, get_validation_data, get_inference_data\nfrom utils import Logger, worker_init_fn, get_lr\nfrom training import train_epoch\nfrom validation import val_epoch\nimport inference\n\n\ndef json_serial(obj):\n if isinstance(obj, Path):\n return str(obj)\n\n\ndef get_opt():\n opt = parse_opts()\n\n if opt.root_path is not None:\n opt.video_path = opt.root_path / opt.video_path\n opt.annotation_path = opt.root_path / opt.annotation_path\n opt.result_path = opt.root_path / opt.result_path\n if opt.resume_path is not None:\n opt.resume_path = opt.root_path / opt.resume_path\n if opt.pretrain_path is not None:\n opt.pretrain_path = opt.root_path / opt.pretrain_path\n\n if opt.pretrain_path is not None:\n opt.n_finetune_classes = opt.n_classes\n opt.n_classes = opt.n_pretrain_classes\n\n if opt.output_topk <= 0:\n opt.output_topk = opt.n_classes\n\n if opt.inference_batch_size == 0:\n opt.inference_batch_size = opt.batch_size\n\n opt.arch = '{}-{}'.format(opt.model, opt.model_depth)\n opt.begin_epoch = 1\n opt.mean, opt.std = get_mean_std(opt.value_scale, 
dataset=opt.mean_dataset)\n opt.n_input_channels = 3\n if opt.input_type == 'flow':\n opt.n_input_channels = 2\n opt.mean = opt.mean[:2]\n opt.std = opt.std[:2]\n\n if opt.distributed:\n opt.dist_rank = int(os.environ[\"OMPI_COMM_WORLD_RANK\"])\n\n if opt.dist_rank == 0:\n print(opt)\n with (opt.result_path / 'opts.json').open('w') as opt_file:\n json.dump(vars(opt), opt_file, default=json_serial)\n else:\n print(opt)\n with (opt.result_path / 'opts.json').open('w') as opt_file:\n json.dump(vars(opt), opt_file, default=json_serial)\n\n return opt\n\n\ndef resume_model(resume_path, arch, model):\n print('loading checkpoint {} model'.format(resume_path))\n checkpoint = torch.load(resume_path, map_location='cpu')\n assert arch == checkpoint['arch']\n\n if hasattr(model, 'module'):\n model.module.load_state_dict(checkpoint['state_dict'])\n else:\n model.load_state_dict(checkpoint['state_dict'])\n\n return model\n\n\ndef resume_train_utils(resume_path, begin_epoch, optimizer, scheduler):\n print('loading checkpoint {} train utils'.format(resume_path))\n checkpoint = torch.load(resume_path, map_location='cpu')\n\n begin_epoch = checkpoint['epoch'] + 1\n if optimizer is not None and 'optimizer' in checkpoint:\n optimizer.load_state_dict(checkpoint['optimizer'])\n if scheduler is not None and 'scheduler' in checkpoint:\n scheduler.load_state_dict(checkpoint['scheduler'])\n\n return begin_epoch, optimizer, scheduler\n\n\ndef get_normalize_method(mean, std, no_mean_norm, no_std_norm):\n if no_mean_norm:\n if no_std_norm:\n return Normalize([0, 0, 0], [1, 1, 1])\n else:\n return Normalize([0, 0, 0], std)\n else:\n if no_std_norm:\n return Normalize(mean, [1, 1, 1])\n else:\n return Normalize(mean, std)\n\n\ndef get_train_utils(opt, model_parameters):\n assert opt.train_crop in ['random', 'corner', 'center']\n spatial_transform = []\n if opt.train_crop == 'random':\n spatial_transform.append(\n RandomResizedCrop(\n opt.sample_size, (opt.train_crop_min_scale, 1.0),\n 
(opt.train_crop_min_ratio, 1.0 / opt.train_crop_min_ratio)))\n elif opt.train_crop == 'corner':\n scales = [1.0]\n scale_step = 1 / (2**(1 / 4))\n for _ in range(1, 5):\n scales.append(scales[-1] * scale_step)\n spatial_transform.append(MultiScaleCornerCrop(opt.sample_size, scales))\n elif opt.train_crop == 'center':\n spatial_transform.append(Resize(opt.sample_size))\n spatial_transform.append(CenterCrop(opt.sample_size))\n normalize = get_normalize_method(opt.mean, opt.std, opt.no_mean_norm,\n opt.no_std_norm)\n if not opt.no_hflip:\n spatial_transform.append(RandomHorizontalFlip())\n if opt.colorjitter:\n spatial_transform.append(ColorJitter())\n spatial_transform.append(ToTensor())\n if opt.input_type == 'flow':\n spatial_transform.append(PickFirstChannels(n=2))\n spatial_transform.append(ScaleValue(opt.value_scale))\n spatial_transform.append(normalize)\n spatial_transform = Compose(spatial_transform)\n\n assert opt.train_t_crop in ['random', 'center']\n temporal_transform = []\n if opt.sample_t_stride > 1:\n temporal_transform.append(TemporalSubsampling(opt.sample_t_stride))\n if opt.train_t_crop == 'random':\n temporal_transform.append(TemporalRandomCrop(opt.sample_duration))\n elif opt.train_t_crop == 'center':\n temporal_transform.append(TemporalCenterCrop(opt.sample_duration))\n temporal_transform = TemporalCompose(temporal_transform)\n\n train_data = get_training_data(opt.video_path, opt.annotation_path,\n opt.dataset, opt.input_type, opt.file_type,\n spatial_transform, temporal_transform)\n if opt.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_data)\n else:\n train_sampler = None\n train_loader = torch.utils.data.DataLoader(train_data,\n batch_size=opt.batch_size,\n shuffle=(train_sampler is None),\n num_workers=opt.n_threads,\n pin_memory=True,\n sampler=train_sampler,\n worker_init_fn=worker_init_fn)\n\n if opt.is_master_node:\n train_logger = Logger(opt.result_path / 'train.log',\n ['epoch', 'loss', 'acc', 
'lr'])\n train_batch_logger = Logger(\n opt.result_path / 'train_batch.log',\n ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])\n else:\n train_logger = None\n train_batch_logger = None\n\n if opt.nesterov:\n dampening = 0\n else:\n dampening = opt.dampening\n optimizer = SGD(model_parameters,\n lr=opt.learning_rate,\n momentum=opt.momentum,\n dampening=dampening,\n weight_decay=opt.weight_decay,\n nesterov=opt.nesterov)\n\n assert opt.lr_scheduler in ['plateau', 'multistep']\n assert not (opt.lr_scheduler == 'plateau' and opt.no_val)\n if opt.lr_scheduler == 'plateau':\n scheduler = lr_scheduler.ReduceLROnPlateau(\n optimizer, 'min', patience=opt.plateau_patience)\n else:\n scheduler = lr_scheduler.MultiStepLR(optimizer,\n opt.multistep_milestones)\n\n return (train_loader, train_sampler, train_logger, train_batch_logger,\n optimizer, scheduler)\n\n\ndef get_val_utils(opt):\n normalize = get_normalize_method(opt.mean, opt.std, opt.no_mean_norm,\n opt.no_std_norm)\n spatial_transform = [\n Resize(opt.sample_size),\n CenterCrop(opt.sample_size),\n ToTensor()\n ]\n if opt.input_type == 'flow':\n spatial_transform.append(PickFirstChannels(n=2))\n spatial_transform.extend([ScaleValue(opt.value_scale), normalize])\n spatial_transform = Compose(spatial_transform)\n\n temporal_transform = []\n if opt.sample_t_stride > 1:\n temporal_transform.append(TemporalSubsampling(opt.sample_t_stride))\n temporal_transform.append(\n TemporalEvenCrop(opt.sample_duration, opt.n_val_samples))\n temporal_transform = TemporalCompose(temporal_transform)\n\n val_data, collate_fn = get_validation_data(opt.video_path,\n opt.annotation_path, opt.dataset,\n opt.input_type, opt.file_type,\n spatial_transform,\n temporal_transform)\n if opt.distributed:\n val_sampler = torch.utils.data.distributed.DistributedSampler(\n val_data, shuffle=False)\n else:\n val_sampler = None\n val_loader = torch.utils.data.DataLoader(val_data,\n batch_size=(opt.batch_size //\n opt.n_val_samples),\n shuffle=False,\n 
num_workers=opt.n_threads,\n pin_memory=True,\n sampler=val_sampler,\n worker_init_fn=worker_init_fn,\n collate_fn=collate_fn)\n\n if opt.is_master_node:\n val_logger = Logger(opt.result_path / 'val.log',\n ['epoch', 'loss', 'acc'])\n else:\n val_logger = None\n\n return val_loader, val_logger\n\n\ndef get_inference_utils(opt):\n assert opt.inference_crop in ['center', 'nocrop']\n\n normalize = get_normalize_method(opt.mean, opt.std, opt.no_mean_norm,\n opt.no_std_norm)\n\n spatial_transform = [Resize(opt.sample_size)]\n if opt.inference_crop == 'center':\n spatial_transform.append(CenterCrop(opt.sample_size))\n spatial_transform.append(ToTensor())\n if opt.input_type == 'flow':\n spatial_transform.append(PickFirstChannels(n=2))\n spatial_transform.extend([ScaleValue(opt.value_scale), normalize])\n spatial_transform = Compose(spatial_transform)\n\n temporal_transform = []\n if opt.sample_t_stride > 1:\n temporal_transform.append(TemporalSubsampling(opt.sample_t_stride))\n temporal_transform.append(\n SlidingWindow(opt.sample_duration, opt.inference_stride))\n temporal_transform = TemporalCompose(temporal_transform)\n\n inference_data, collate_fn = get_inference_data(\n opt.video_path, opt.annotation_path, opt.dataset, opt.input_type,\n opt.file_type, opt.inference_subset, spatial_transform,\n temporal_transform)\n\n inference_loader = torch.utils.data.DataLoader(\n inference_data,\n batch_size=opt.inference_batch_size,\n shuffle=False,\n num_workers=opt.n_threads,\n pin_memory=True,\n worker_init_fn=worker_init_fn,\n collate_fn=collate_fn)\n\n return inference_loader, inference_data.class_names\n\n\ndef save_checkpoint(save_file_path, epoch, arch, model, optimizer, scheduler):\n if hasattr(model, 'module'):\n model_state_dict = model.module.state_dict()\n else:\n model_state_dict = model.state_dict()\n save_states = {\n 'epoch': epoch,\n 'arch': arch,\n 'state_dict': model_state_dict,\n 'optimizer': optimizer.state_dict(),\n 'scheduler': scheduler.state_dict()\n 
}\n torch.save(save_states, save_file_path)\n\n\ndef main_worker(index, opt):\n random.seed(opt.manual_seed)\n np.random.seed(opt.manual_seed)\n torch.manual_seed(opt.manual_seed)\n\n if index >= 0 and opt.device.type == 'cuda':\n opt.device = torch.device(f'cuda:{index}')\n\n if opt.distributed:\n opt.dist_rank = opt.dist_rank * opt.ngpus_per_node + index\n dist.init_process_group(backend='nccl',\n init_method=opt.dist_url,\n world_size=opt.world_size,\n rank=opt.dist_rank)\n opt.batch_size = int(opt.batch_size / opt.ngpus_per_node)\n opt.n_threads = int(\n (opt.n_threads + opt.ngpus_per_node - 1) / opt.ngpus_per_node)\n opt.is_master_node = not opt.distributed or opt.dist_rank == 0\n\n model = generate_model(opt)\n if opt.batchnorm_sync:\n assert opt.distributed, 'SyncBatchNorm only supports DistributedDataParallel.'\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n if opt.pretrain_path:\n model = load_pretrained_model(model, opt.pretrain_path, opt.model,\n opt.n_finetune_classes)\n if opt.resume_path is not None:\n model = resume_model(opt.resume_path, opt.arch, model)\n model = make_data_parallel(model, opt.distributed, opt.device)\n\n if opt.pretrain_path:\n parameters = get_fine_tuning_parameters(model, opt.ft_begin_module)\n else:\n parameters = model.parameters()\n\n if opt.is_master_node:\n print(model)\n\n criterion = CrossEntropyLoss().to(opt.device)\n\n if not opt.no_train:\n (train_loader, train_sampler, train_logger, train_batch_logger,\n optimizer, scheduler) = get_train_utils(opt, parameters)\n if opt.resume_path is not None:\n opt.begin_epoch, optimizer, scheduler = resume_train_utils(\n opt.resume_path, opt.begin_epoch, optimizer, scheduler)\n if opt.overwrite_milestones:\n scheduler.milestones = opt.multistep_milestones\n if not opt.no_val:\n val_loader, val_logger = get_val_utils(opt)\n\n if opt.tensorboard and opt.is_master_node:\n from torch.utils.tensorboard import SummaryWriter\n if opt.begin_epoch == 1:\n tb_writer = 
SummaryWriter(log_dir=opt.result_path)\n else:\n tb_writer = SummaryWriter(log_dir=opt.result_path,\n purge_step=opt.begin_epoch)\n else:\n tb_writer = None\n\n prev_val_loss = None\n for i in range(opt.begin_epoch, opt.n_epochs + 1):\n if not opt.no_train:\n if opt.distributed:\n train_sampler.set_epoch(i)\n current_lr = get_lr(optimizer)\n train_epoch(i, train_loader, model, criterion, optimizer,\n opt.device, current_lr, train_logger,\n train_batch_logger, tb_writer, opt.distributed)\n\n if i % opt.checkpoint == 0 and opt.is_master_node:\n save_file_path = opt.result_path / 'save_{}.pth'.format(i)\n save_checkpoint(save_file_path, i, opt.arch, model, optimizer,\n scheduler)\n\n if not opt.no_val:\n prev_val_loss = val_epoch(i, val_loader, model, criterion,\n opt.device, val_logger, tb_writer,\n opt.distributed)\n\n if not opt.no_train and opt.lr_scheduler == 'multistep':\n scheduler.step()\n elif not opt.no_train and opt.lr_scheduler == 'plateau':\n scheduler.step(prev_val_loss)\n\n if opt.inference:\n inference_loader, inference_class_names = get_inference_utils(opt)\n inference_result_path = opt.result_path / '{}.json'.format(\n opt.inference_subset)\n\n inference.inference(inference_loader, model, inference_result_path,\n inference_class_names, opt.inference_no_average,\n opt.output_topk)\n\n\nif __name__ == '__main__':\n opt = get_opt()\n\n opt.device = torch.device('cpu' if opt.no_cuda else 'cuda')\n if not opt.no_cuda:\n cudnn.benchmark = True\n if opt.accimage:\n torchvision.set_image_backend('accimage')\n\n opt.ngpus_per_node = torch.cuda.device_count()\n if opt.distributed:\n opt.world_size = opt.ngpus_per_node * opt.world_size\n mp.spawn(main_worker, nprocs=opt.ngpus_per_node, args=(opt,))\n else:\n main_worker(-1, opt)"
] | [
[
"torch.optim.lr_scheduler.MultiStepLR",
"torch.nn.CrossEntropyLoss",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.distributed.init_process_group",
"numpy.random.seed",
"torch.load",
"torch.utils.data.distributed.DistributedSampler",
"torch.manual_seed",
"torch.multiprocessing.spawn",
"torch.utils.data.DataLoader",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm",
"torch.utils.tensorboard.SummaryWriter",
"torch.optim.SGD",
"torch.device",
"torch.cuda.device_count",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
IntoxicatedDING/stdn | [
"1bb9555114c762b09ad65eb16c59b134e1dccb56",
"1bb9555114c762b09ad65eb16c59b134e1dccb56"
] | [
"data/voc0712.py",
"util/anchor.py"
] | [
"\"\"\"VOC Dataset Classes\n\nOriginal author: Francisco Massa\nhttps://github.com/fmassa/vision/blob/voc_dataset/torchvision/datasets/voc.py\n\nUpdated by: Ellis Brown, Max deGroot\n\"\"\"\n'''\nAdapted from https://github.com/amdegroot/ssd.pytorch\n'''\nfrom .config import HOME\nimport os.path as osp\nimport sys\nimport torch\nimport torch.utils.data as data\nimport cv2\nimport numpy as np\nif sys.version_info[0] == 2:\n import xml.etree.cElementTree as ET\nelse:\n import xml.etree.ElementTree as ET\n\nVOC_CLASSES = ( # always index 0\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\n\n# note: if you used our download scripts, this should be right\nVOC_ROOT = osp.join(HOME, \"dataset/VOC0712/VOCdevkit/\")\n\n\nclass VOCAnnotationTransform(object):\n \"\"\"Transforms a VOC annotation into a Tensor of bbox coords and label index\n Initilized with a dictionary lookup of classnames to indexes\n\n Arguments:\n class_to_ind (dict, optional): dictionary lookup of classnames -> indexes\n (default: alphabetic indexing of VOC's 20 classes)\n keep_difficult (bool, optional): keep difficult instances or not\n (default: False)\n height (int): height\n width (int): width\n \"\"\"\n\n def __init__(self, class_to_ind=None, keep_difficult=False):\n self.class_to_ind = class_to_ind or dict(\n zip(VOC_CLASSES, range(len(VOC_CLASSES))))\n self.keep_difficult = keep_difficult\n\n def __call__(self, target, width, height):\n \"\"\"\n Arguments:\n target (annotation) : the target annotation to be made usable\n will be an ET.Element\n Returns:\n a list containing lists of bounding boxes [bbox coords, class name]\n \"\"\"\n res = []\n for obj in target.iter('object'):\n difficult = int(obj.find('difficult').text) == 1\n if not self.keep_difficult and difficult:\n continue\n name = obj.find('name').text.lower().strip()\n bbox = 
obj.find('bndbox')\n\n pts = ['xmin', 'ymin', 'xmax', 'ymax']\n bndbox = []\n for i, pt in enumerate(pts):\n cur_pt = int(bbox.find(pt).text) - 1\n # scale height or width\n cur_pt = cur_pt / width if i % 2 == 0 else cur_pt / height\n bndbox.append(cur_pt)\n label_idx = self.class_to_ind[name]\n bndbox.append(label_idx)\n res += [bndbox] # [xmin, ymin, xmax, ymax, label_ind]\n # img_id = target.find('filename').text[:-4]\n\n return res # [[xmin, ymin, xmax, ymax, label_ind], ... ]\n\n\nclass VOCDetection(data.Dataset):\n \"\"\"VOC Detection Dataset Object\n\n input is image, target is annotation\n\n Arguments:\n root (string): filepath to VOCdevkit folder.\n image_set (string): imageset to use (eg. 'train', 'val', 'test')\n transform (callable, optional): transformation to perform on the\n input image\n target_transform (callable, optional): transformation to perform on the\n target `annotation`\n (eg: take in caption string, return tensor of word indices)\n dataset_name (string, optional): which dataset to load\n (default: 'VOC2007')\n \"\"\"\n\n def __init__(self, root,\n # image_sets=[('2007', 'trainval'), ('2012', 'trainval')],\n image_sets=[('2007', 'trainval')],\n transform=None, target_transform=VOCAnnotationTransform(),\n dataset_name='VOC0712'):\n self.root = root\n self.image_set = image_sets\n self.transform = transform\n self.target_transform = target_transform\n self.name = dataset_name\n self._annopath = osp.join('%s', 'Annotations', '%s.xml')\n self._imgpath = osp.join('%s', 'JPEGImages', '%s.jpg')\n self.ids = list()\n for (year, name) in image_sets:\n rootpath = osp.join(self.root, 'VOC' + year)\n for line in open(osp.join(rootpath, 'ImageSets', 'Main', name + '.txt')):\n self.ids.append((rootpath, line.strip()))\n\n def __getitem__(self, index):\n im, gt, h, w = self.pull_item(index)\n\n return im, gt\n\n def __len__(self):\n return len(self.ids)\n\n def pull_item(self, index):\n img_id = self.ids[index]\n\n target = ET.parse(self._annopath % 
img_id).getroot()\n img = cv2.imread(self._imgpath % img_id)\n height, width, channels = img.shape\n\n if self.target_transform is not None:\n target = self.target_transform(target, width, height)\n\n if self.transform is not None:\n target = np.array(target)\n img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])\n # to rgb\n img = img[:, :, (2, 1, 0)]\n # img = img.transpose(2, 0, 1)\n target = np.hstack((boxes, np.expand_dims(labels, axis=1)))\n return torch.from_numpy(img).permute(2, 0, 1), target, height, width\n # return torch.from_numpy(img), target, height, width\n\n def pull_image(self, index):\n '''Returns the original image object at index in PIL form\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to show\n Return:\n PIL img\n '''\n img_id = self.ids[index]\n return cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)\n\n def pull_anno(self, index):\n '''Returns the original annotation of image at index\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to get annotation of\n Return:\n list: [img_id, [(label, bbox coords),...]]\n eg: ('001718', [('dog', (96, 13, 438, 332))])\n '''\n img_id = self.ids[index]\n anno = ET.parse(self._annopath % img_id).getroot()\n gt = self.target_transform(anno, 1, 1)\n return img_id[1], gt\n\n def pull_tensor(self, index):\n '''Returns the original image at an index in tensor form\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to show\n Return:\n tensorized version of img, squeezed\n '''\n return torch.Tensor(self.pull_image(index)).unsqueeze_(0)\n",
"'''\nAdapted from https://github.com/amdegroot/ssd.pytorch\n'''\nfrom __future__ import division\nfrom math import sqrt as sqrt\nfrom itertools import product as product\nimport torch\nfrom data import VOC\n\n\nclass Anchor:\n\n def __init__(self, cfg=VOC):\n self.image_size = cfg['image_size']\n # number of priors for feature map location (either 4 or 6)\n self.num_priors = len(cfg['feature_maps'])\n self.feature_maps = cfg['feature_maps']\n self.min_sizes = cfg['min_sizes']\n self.max_sizes = cfg['max_sizes']\n self.steps = cfg['steps']\n self.aspect_ratio = cfg['aspect_ratio']\n\n def generate(self):\n anchors = []\n num = []\n for k, f in enumerate(self.feature_maps):\n cnt = 0\n for i, j in product(range(f), repeat=2):\n f_k = self.image_size / self.steps[k]\n # unit center x,y\n cx = (j + 0.5) / f_k\n cy = (i + 0.5) / f_k\n\n # aspect_ratio: 1\n # rel size: min_size\n s_k = self.min_sizes[k]/self.image_size\n anchors += [cx, cy, s_k, s_k]\n\n # aspect_ratio: 1\n # rel size: sqrt(s_k * s_(k+1))\n s_k_prime = sqrt(s_k * (self.max_sizes[k]/self.image_size))\n anchors += [cx, cy, s_k_prime, s_k_prime]\n cnt += 2\n # rest of aspect ratios\n for ar in self.aspect_ratio:\n anchors += [cx, cy, s_k*sqrt(ar), s_k/sqrt(ar)]\n anchors += [cx, cy, s_k/sqrt(ar), s_k*sqrt(ar)]\n cnt += 2\n num.append(cnt)\n # back to torch land\n output = torch.Tensor(anchors).view(-1, 4)\n output.clamp_(max=1, min=0)\n return output, num\n\n\nif __name__ == '__main__':\n import numpy as np\n anchor = Anchor()\n output, num = anchor.generate()\n print(num)\n print(output.size())\n torch.masked_select()"
] | [
[
"numpy.array",
"numpy.expand_dims",
"torch.from_numpy"
],
[
"torch.masked_select",
"torch.Tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hazananayurt/viref | [
"f7b5a2278d9211104ea2293077e2b85d7466d63a"
] | [
"viref/model.py"
] | [
"import torch\nfrom torch import optim\nfrom torch.nn import Parameter\nimport torch.nn.functional as F\n\nclass Encoder(torch.nn.Module):\n\tdef __init__(self, input_size, hidden_size, num_layers, dropout):\n\t\tsuper(Encoder, self).__init__()\n\t\t\n\t\tself.input_size = input_size\n\t\tself.hidden_size = hidden_size\n\t\tself.num_layers = num_layers\n\t\t\n\t\tself.lstm = torch.nn.LSTM(input_size=input_size,\n\t\t\t\t\t\t\t\t hidden_size=hidden_size,\n\t\t\t\t\t\t\t\t num_layers=num_layers,\n\t\t\t\t\t\t\t\t batch_first=True,\n\t\t\t\t\t\t\t\t dropout=dropout)\n\t\t\n\t\t\n\tdef forward(self, features, scale_weights, h0, c0):\n\t\tscaled_features = []\n\t\tfor feature_idx, feature in enumerate(features):\n\t\t\tscaled_features.append(feature*scale_weights[:, feature_idx].unsqueeze(1).unsqueeze(1))\n\t\tinp = torch.cat(scaled_features, dim=2)\n\t\tout, (hn, cn) = self.lstm(inp, (h0, c0))\n\t\treturn out, (hn, cn)\n\n\nclass Decoder(torch.nn.Module):\n\tdef __init__(self, input_size, hidden_size, num_layers, dropout):\n\t\tsuper(Decoder, self).__init__()\n\t\t\n\t\tself.input_size = input_size\n\t\tself.hidden_size = hidden_size\n\t\tself.num_layers = num_layers\n\t\t\n\t\tself.lstm = torch.nn.LSTM(input_size=input_size,\n\t\t\t\t\t\t\t\t hidden_size=hidden_size,\n\t\t\t\t\t\t\t\t num_layers=num_layers,\n\t\t\t\t\t\t\t\t batch_first=True,\n\t\t\t\t\t\t\t\t dropout=dropout)\n\t\t\n\t\t\n\tdef forward(self, inp, h0, c0):\n\t\tout, (hn, cn) = self.lstm(inp, (h0, c0))\n\t\treturn out, (hn, cn)\n\t\t\n\t\t\nclass FF1(torch.nn.Module):\n\tdef __init__(self, hidden_size, num_features):\n\t\tsuper(FF1, self).__init__()\n\t\t\n\t\tself.fc1 = torch.nn.Linear(hidden_size, hidden_size)\n\t\tself.fc2 = torch.nn.Linear(hidden_size, int(hidden_size/2))\n\t\tself.fc3 = torch.nn.Linear(int(hidden_size/2), num_features)\n\t\tself.softmax = torch.nn.Softmax(dim=2)\n\t\t\n\tdef forward(self, decoder_out):\n\t\tbatch_size = decoder_out.size(0)\n\t\tout = decoder_out\n\t\tout = 
out.contiguous()\n\t\tout = out.view(-1, out.size(2))\n\t\tout = self.fc1(out)\n\t\tout = F.relu(out)\n\t\tout = self.fc2(out)\n\t\tout = F.relu(out)\n\t\tout = self.fc3(out)\n\t\tout = out.view(batch_size, -1, out.size(1))\n\t\tout = self.softmax(out)\n\t\treturn out\n\t\t\n\t\t\n\nclass FF2(torch.nn.Module):\n\tdef __init__(self, hidden_size, num_layers, decoder_output_size):\n\t\tsuper(FF2, self).__init__()\n\t\t\n\t\tself.fc1 = torch.nn.Linear(hidden_size*(num_layers+1), hidden_size*2)\n\t\tself.fc2 = torch.nn.Linear(hidden_size*2, hidden_size)\n\t\tself.fc3 = torch.nn.Linear(hidden_size, decoder_output_size)\n\t\t\n\t\t\n\tdef forward(self, decoder_hidden_state, attended_features):\n\t\tbatch_size = decoder_hidden_state.size(0)\n\t\tinp2 = attended_features.permute(1, 0, 2).contiguous().view(batch_size, -1)\n\t\tinp = torch.cat([decoder_hidden_state, inp2], dim=1)\n\t\tout = self.fc1(inp)\n\t\tout = F.relu(out)\n\t\tout = self.fc2(out)\n\t\tout = F.relu(out)\n\t\tout = self.fc3(out)\n\t\tout = F.log_softmax(out, dim=1)\n\t\treturn out\n\n\t\t\nclass Model(torch.nn.Module):\n\tdef __init__(self, encoder_input_size, decoder_input_size, decoder_output_size, hidden_size, num_layers, num_features, dropout):\n\t\tsuper(Model, self).__init__()\n\t\t\n\t\tself.encoder_input_size = encoder_input_size\n\t\tself.decoder_input_size = decoder_input_size\n\t\tself.decoder_output_size = decoder_output_size\n\t\tself.hidden_size = hidden_size\n\t\tself.num_layers = num_layers\n\t\t\n\t\tself.encoder = Encoder(encoder_input_size, hidden_size, num_layers, dropout)\n\t\tself.decoder = Decoder(decoder_input_size, hidden_size, num_layers, dropout)\n\n\t\tself.ff1 = FF1(hidden_size, num_features)\n\t\tself.ff2 = FF2(hidden_size, num_layers, decoder_output_size)\n\t\t\n\t\t\n\tdef forward(self, features, initial_scale_weights, decoder_input, h0, c0):\n\t\tbatch_size = features[0].shape[0]\n\t\t_, (hn, cn) = self.encoder(features, initial_scale_weights, h0, c0)\n\t\tdecoder_out, _ = 
self.decoder(decoder_input, hn, cn)\n\t\tout = self.ff1(decoder_out)\n\t\tfinal_out_list = []\n\t\tfor i in range(int(out.size(1))):\n\t\t\tscale_weights = out[:, i, :]\n\t\t\t#print(scale_weights)\n\t\t\t_, (hn, _) = self.encoder(features, scale_weights, h0, c0)\n\t\t\tout_i = self.ff2(decoder_out[:, i, :], hn)\n\t\t\tfinal_out_list.append(out_i.unsqueeze(0))\n\t\tout = torch.cat(final_out_list, dim=0)\n\t\tout = out.permute(1, 0, 2)\n\t\treturn out\n\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.functional.log_softmax",
"torch.cat",
"torch.nn.LSTM",
"torch.nn.Linear",
"torch.nn.functional.relu"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
XiaoJake/DS-Net | [
"8400da1bd7c7b1ccf4d5c6782b86372957e79a6b"
] | [
"network/model_zoo.py"
] | [
"# -*- coding:utf-8 -*-\n# author: Hong Fangzhou\n# @file: model_zoo.py\n# @time: 2020/09/26 17:05\n\nfrom .modules import BEV_Unet\nfrom .modules import PointNet\nfrom .modules import spconv_unet\nfrom .modules import pytorch_meanshift\nfrom .loss import instance_losses\nfrom .loss import lovasz_losses\nfrom utils.evaluate_panoptic import init_eval, eval_one_scan_w_fname, eval_one_scan_vps\nfrom utils.evaluate_panoptic import printResults, valid_xentropy_ids, class_lut\nfrom utils import clustering\nfrom utils import common_utils\nfrom utils.common_utils import grp_range_torch, parallel_FPS, SemKITTI2train\nfrom scipy.optimize import linear_sum_assignment\n\nimport io\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch_scatter\nimport numpy as np\nimport numba as nb\nimport multiprocessing\nfrom scipy import stats as s\nfrom sklearn.metrics import confusion_matrix as cm\nfrom easydict import EasyDict\nimport time\nimport os\nimport pickle\nfrom sklearn.cluster import MeanShift\nfrom sklearn import manifold, datasets\nfrom scipy import stats as s\nfrom utils import common_utils\nfrom utils.config import global_args\nimport spconv\n\nclass PolarBaseClass(nn.Module):\n def __init__(self, cfg):\n super(PolarBaseClass, self).__init__()\n self.ignore_label = cfg.DATA_CONFIG.DATALOADER.CONVERT_IGNORE_LABEL\n self.pt_pooling = cfg.MODEL.MODEL_FN.PT_POOLING\n self.max_pt = cfg.MODEL.MODEL_FN.MAX_PT_PER_ENCODE\n self.pt_selection = cfg.MODEL.MODEL_FN.PT_SELECTION\n if 'FEATURE_COMPRESSION' in cfg.MODEL.MODEL_FN.keys():\n self.fea_compre = cfg.MODEL.MODEL_FN.FEATURE_COMPRESSION\n else:\n self.fea_compre = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]\n self.grid_size = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE\n\n if self.pt_pooling == 'max':\n self.pool_dim = cfg.MODEL.VFE.OUT_CHANNEL\n\n if self.fea_compre is not None:\n self.fea_compression = nn.Sequential(\n nn.Linear(self.pool_dim, self.fea_compre),\n nn.ReLU()\n ).cuda()\n self.pt_fea_dim = 
self.fea_compre\n\n def voxelize(self, inputs):\n grid_ind = inputs['grid']\n pt_fea = inputs['pt_fea']\n\n pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).cuda() for i in pt_fea]\n grid_ind_ten = [torch.from_numpy(i[:, :2]).cuda() for i in grid_ind]\n\n pt_fea = pt_fea_ten\n xy_ind = grid_ind_ten\n\n # concate everything\n cat_pt_ind = []\n for i_batch in range(len(xy_ind)):\n cat_pt_ind.append(F.pad(xy_ind[i_batch],(1,0),'constant',value = i_batch))\n\n cat_pt_fea = torch.cat(pt_fea,dim = 0)\n cat_pt_ind = torch.cat(cat_pt_ind,dim = 0)\n pt_num = cat_pt_ind.shape[0]\n\n # shuffle the data\n cur_dev = pt_fea[0].get_device()\n shuffled_ind = torch.randperm(pt_num,device = cur_dev)\n cat_pt_fea = cat_pt_fea[shuffled_ind,:]\n cat_pt_ind = cat_pt_ind[shuffled_ind,:]\n\n # unique xy grid index\n unq, unq_inv, unq_cnt = torch.unique(cat_pt_ind,return_inverse=True, return_counts=True, dim=0)\n unq = unq.type(torch.int64)\n\n # subsample pts\n if self.pt_selection == 'random':\n grp_ind = grp_range_torch(unq_cnt,cur_dev)[torch.argsort(torch.argsort(unq_inv))] # convert the array that is in the order of grid to the order of cat_pt_feature\n remain_ind = grp_ind < self.max_pt # randomly sample max_pt points inside a grid\n elif self.pt_selection == 'farthest':\n unq_ind = np.split(np.argsort(unq_inv.detach().cpu().numpy()), np.cumsum(unq_cnt.detach().cpu().numpy()[:-1]))\n remain_ind = np.zeros((pt_num,),dtype = np.bool)\n np_cat_fea = cat_pt_fea.detach().cpu().numpy()[:,:3]\n pool_in = []\n for i_inds in unq_ind:\n if len(i_inds) > self.max_pt:\n pool_in.append((np_cat_fea[i_inds,:],self.max_pt))\n if len(pool_in) > 0:\n pool = multiprocessing.Pool(multiprocessing.cpu_count())\n FPS_results = pool.starmap(parallel_FPS, pool_in)\n pool.close()\n pool.join()\n count = 0\n for i_inds in unq_ind:\n if len(i_inds) <= self.max_pt:\n remain_ind[i_inds] = True\n else:\n remain_ind[i_inds[FPS_results[count]]] = True\n count += 1\n\n cat_pt_fea = cat_pt_fea[remain_ind,:]\n 
cat_pt_ind = cat_pt_ind[remain_ind,:]\n unq_inv = unq_inv[remain_ind]\n unq_cnt = torch.clamp(unq_cnt,max=self.max_pt)\n\n # process feature\n processed_cat_pt_fea = self.vfe_model(cat_pt_fea)\n #TODO: maybe use pointnet to extract features inside each grid and each grid share the same parameters instead of apply pointnet to global point clouds?\n # This kind of global pointnet is more memory efficient cause otherwise we will have to alloc [480 x 360 x 32 x 64 x C] tensor in order to apply pointnet to each grid\n\n if self.pt_pooling == 'max':\n pooled_data = torch_scatter.scatter_max(processed_cat_pt_fea, unq_inv, dim=0)[0] # choose the max feature for each grid\n else: raise NotImplementedError\n\n if self.fea_compre:\n processed_pooled_data = self.fea_compression(pooled_data)\n else:\n processed_pooled_data = pooled_data\n\n # stuff pooled data into 4D tensor\n out_data_dim = [len(pt_fea),self.grid_size[0],self.grid_size[1],self.pt_fea_dim]\n out_data = torch.zeros(out_data_dim, dtype=torch.float32).to(cur_dev)\n out_data[unq[:,0],unq[:,1],unq[:,2],:] = processed_pooled_data\n out_data = out_data.permute(0,3,1,2)\n\n del pt_fea, xy_ind\n\n return out_data, grid_ind\n\n def voxelize_spconv(self, inputs, grid_name='grid', pt_fea_name='pt_fea'):\n grid_ind = inputs[grid_name]\n pt_fea = inputs[pt_fea_name]\n\n pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).cuda() for i in pt_fea]\n grid_ind_ten = [torch.from_numpy(i).cuda() for i in grid_ind]\n\n pt_fea = pt_fea_ten\n xy_ind = grid_ind_ten\n\n # concate everything\n cat_pt_ind = []\n for i_batch in range(len(xy_ind)):\n cat_pt_ind.append(F.pad(xy_ind[i_batch],(1,0),'constant',value = i_batch))\n\n cat_pt_fea = torch.cat(pt_fea,dim = 0)\n cat_pt_ind = torch.cat(cat_pt_ind,dim = 0)\n pt_num = cat_pt_ind.shape[0]\n\n # shuffle the data\n cur_dev = pt_fea[0].get_device()\n shuffled_ind = torch.randperm(pt_num,device = cur_dev)\n cat_pt_fea = cat_pt_fea[shuffled_ind,:]\n cat_pt_ind = 
cat_pt_ind[shuffled_ind,:]\n\n # unique xy grid index\n unq, unq_inv, unq_cnt = torch.unique(cat_pt_ind,return_inverse=True, return_counts=True, dim=0)\n unq = unq.type(torch.int64)\n\n # subsample pts\n if self.pt_selection == 'random':\n grp_ind = grp_range_torch(unq_cnt,cur_dev)[torch.argsort(torch.argsort(unq_inv))] # convert the array that is in the order of grid to the order of cat_pt_feature\n remain_ind = grp_ind < self.max_pt # randomly sample max_pt points inside a grid\n elif self.pt_selection == 'farthest':\n unq_ind = np.split(np.argsort(unq_inv.detach().cpu().numpy()), np.cumsum(unq_cnt.detach().cpu().numpy()[:-1]))\n remain_ind = np.zeros((pt_num,),dtype = np.bool)\n np_cat_fea = cat_pt_fea.detach().cpu().numpy()[:,:3]\n pool_in = []\n for i_inds in unq_ind:\n if len(i_inds) > self.max_pt:\n pool_in.append((np_cat_fea[i_inds,:],self.max_pt))\n if len(pool_in) > 0:\n pool = multiprocessing.Pool(multiprocessing.cpu_count())\n FPS_results = pool.starmap(parallel_FPS, pool_in)\n pool.close()\n pool.join()\n count = 0\n for i_inds in unq_ind:\n if len(i_inds) <= self.max_pt:\n remain_ind[i_inds] = True\n else:\n remain_ind[i_inds[FPS_results[count]]] = True\n count += 1\n\n cat_pt_fea = cat_pt_fea[remain_ind,:]\n cat_pt_ind = cat_pt_ind[remain_ind,:]\n unq_inv = unq_inv[remain_ind]\n unq_cnt = torch.clamp(unq_cnt,max=self.max_pt)\n\n # process feature\n processed_cat_pt_fea = self.vfe_model(cat_pt_fea)\n #TODO: maybe use pointnet to extract features inside each grid and each grid share the same parameters instead of apply pointnet to global point clouds?\n # This kind of global pointnet is more memory efficient cause otherwise we will have to alloc [480 x 360 x 32 x 64 x C] tensor in order to apply pointnet to each grid\n\n if self.pt_pooling == 'max':\n pooled_data = torch_scatter.scatter_max(processed_cat_pt_fea, unq_inv, dim=0)[0] # choose the max feature for each grid\n else: raise NotImplementedError\n\n if self.fea_compre:\n processed_pooled_data = 
self.fea_compression(pooled_data)\n else:\n processed_pooled_data = pooled_data\n\n # stuff pooled data into 4D tensor\n # out_data_dim = [len(pt_fea),self.grid_size[0],self.grid_size[1],self.pt_fea_dim]\n # out_data = torch.zeros(out_data_dim, dtype=torch.float32).to(cur_dev)\n # out_data[unq[:,0],unq[:,1],unq[:,2],:] = processed_pooled_data\n # out_data = out_data.permute(0,3,1,2)\n\n del pt_fea, xy_ind\n\n return unq, processed_pooled_data\n\n def calc_sem_label(self, sem_logits, inputs, need_add_one=True):\n vox_pred_labels = torch.argmax(sem_logits, dim=1)\n vox_pred_labels = vox_pred_labels.cpu().detach().numpy()\n grid_ind = inputs['grid']\n pt_pred_labels = []\n for i in range(len(grid_ind)):\n if need_add_one:\n pt_pred_labels.append(vox_pred_labels[i, grid_ind[i][:, 0], grid_ind[i][:, 1], grid_ind[i][:, 2]] + 1)\n else:\n pt_pred_labels.append(vox_pred_labels[i, grid_ind[i][:, 0], grid_ind[i][:, 1], grid_ind[i][:, 2]])\n return pt_pred_labels\n\n def calc_sem_label_point_logits(self, sem_logits, inputs, need_add_one=True):\n pts_pred_labels = torch.argmax(sem_logits, dim=1)\n pts_pred_labels = pts_pred_labels.cpu().detach().numpy()\n grid_ind = inputs['grid']\n pt_pred_labels = []\n for i in range(len(grid_ind)):\n if need_add_one:\n pt_pred_labels.append(pts_pred_labels + 1)\n else:\n pt_pred_labels.append(pts_pred_labels)\n return pt_pred_labels\n\n def update_evaluator(self, evaluator, sem_preds, ins_preds, inputs):\n for i in range(len(sem_preds)):\n eval_one_scan_w_fname(evaluator, inputs['pt_labs'][i].reshape(-1),\n inputs['pt_ins_labels'][i].reshape(-1),\n sem_preds[i], ins_preds[i], inputs['pcd_fname'][i])\n\n def update_evaluator_multi_frames(self, evaluator, sem_preds, ins_preds, inputs):\n for i in range(len(sem_preds)):\n eval_one_scan_w_fname(evaluator, inputs['pt_labs'][i][inputs['mask_np'][i].reshape(-1) == 0].reshape(-1),\n inputs['pt_ins_labels'][i][inputs['mask_np'][i].reshape(-1) == 0].reshape(-1),\n 
sem_preds[i][inputs['mask_np'][i].reshape(-1) == 0], ins_preds[i][inputs['mask_np'][i].reshape(-1) == 0], inputs['pcd_fname'][i])\n\n def forward(self, x):\n raise NotImplementedError\n\nclass PolarSpconv(PolarBaseClass):\n def __init__(self, cfg):\n super(PolarSpconv, self).__init__(cfg)\n self.backbone = getattr(spconv_unet, cfg.MODEL.BACKBONE.NAME)(cfg)\n self.sem_head = getattr(spconv_unet, cfg.MODEL.SEM_HEAD.NAME)(cfg)\n self.vfe_model = getattr(PointNet, cfg.MODEL.VFE.NAME)(cfg)\n\n if cfg.MODEL.SEM_LOSS == 'Lovasz_loss':\n self.sem_loss_lovasz = lovasz_losses.lovasz_softmax\n if cfg.DATA_CONFIG.DATASET_NAME.startswith('SemanticKitti'):\n weights = torch.zeros(20, dtype=torch.float)\n weights[0] = 1.0\n weights[1] = 2.293\n weights[2] = 85.756\n weights[3] = 71.511\n weights[4] = 31.596\n weights[5] = 35.624\n weights[6] = 74.761\n weights[7] = 88.722\n weights[8] = 96.389\n weights[9] = 1.00\n weights[10] = 6.362\n weights[11] = 1.00\n weights[12] = 20.387\n weights[13] = 1.00\n weights[14] = 1.363\n weights[15] = 1.00\n weights[16] = 14.214\n weights[17] = 1.263\n weights[18] = 25.936\n weights[19] = 61.896\n else:\n raise NotImplementedError\n self.sem_loss = torch.nn.CrossEntropyLoss(weight=weights.cuda(), ignore_index=0)\n else:\n raise NotImplementedError\n\n def calc_loss(self, sem_logits, inputs, need_minus_one=True):\n if need_minus_one:\n vox_label = SemKITTI2train(inputs['vox_label']).type(torch.LongTensor).cuda()\n else:\n vox_label = inputs['vox_label'].type(torch.LongTensor).cuda()\n\n sem_loss = self.sem_loss_lovasz(torch.nn.functional.softmax(sem_logits), vox_label,ignore=self.ignore_label) + self.sem_loss(sem_logits,vox_label)\n\n loss = sem_loss\n\n ret_dict = {}\n ret_dict['sem_loss'] = sem_loss\n ret_dict['loss'] = loss\n\n return ret_dict\n\n def forward(self, batch, is_test=False, before_merge_evaluator=None, after_merge_evaluator=None, require_cluster=True):\n coor, feature_3d = self.voxelize_spconv(batch)\n sem_fea, _ = 
self.backbone(feature_3d, coor, len(batch['grid']))\n sem_logits = self.sem_head(sem_fea)\n loss_dict = self.calc_loss(sem_logits, batch, need_minus_one=False)\n\n if is_test:\n pt_sem_preds = self.calc_sem_label(sem_logits, batch, need_add_one=False)\n pt_ins_ids_preds = [np.zeros_like(pt_sem_preds[i]) for i in range(len(pt_sem_preds))]\n merged_sem_preds = pt_sem_preds\n if 'mask' in batch:\n self.update_evaluator_multi_frames(after_merge_evaluator, merged_sem_preds, pt_ins_ids_preds, batch)\n else:\n self.update_evaluator(after_merge_evaluator, merged_sem_preds, pt_ins_ids_preds, batch)\n loss_dict['sem_preds'] = merged_sem_preds\n loss_dict['ins_preds'] = pt_ins_ids_preds\n loss_dict['ins_num'] = 0\n\n return loss_dict\n\nclass PolarOffset(PolarBaseClass):\n def __init__(self, cfg, need_create_model=True):\n super(PolarOffset, self).__init__(cfg)\n self.ins_loss_name = cfg.MODEL.INS_LOSS\n self.ins_embedding_dim = cfg.MODEL.INS_HEAD.EMBEDDING_CHANNEL\n if not need_create_model:\n return\n self.backbone = getattr(BEV_Unet, cfg.MODEL.BACKBONE.NAME)(cfg)\n self.sem_head = getattr(BEV_Unet, cfg.MODEL.SEM_HEAD.NAME)(cfg)\n self.ins_head = getattr(BEV_Unet, cfg.MODEL.INS_HEAD.NAME)(cfg)\n self.vfe_model = getattr(PointNet, cfg.MODEL.VFE.NAME)(cfg)\n\n self.ins_loss = getattr(instance_losses, cfg.MODEL.INS_LOSS)\n if cfg.MODEL.SEM_LOSS == 'Lovasz_loss':\n self.sem_loss_lovasz = lovasz_losses.lovasz_softmax\n self.sem_loss = torch.nn.CrossEntropyLoss(ignore_index=cfg.DATA_CONFIG.DATALOADER.CONVERT_IGNORE_LABEL)\n else:\n raise NotImplementedError\n\n self.cluster_fn_wrapper = getattr(clustering, cfg.MODEL.POST_PROCESSING.CLUSTER_ALGO)\n self.cluster_fn = self.cluster_fn_wrapper(cfg)\n\n self.merge_func_name = cfg.MODEL.POST_PROCESSING.MERGE_FUNC\n\n def calc_loss(self, sem_logits, pred_offsets, inputs, need_minus_one=True):\n if need_minus_one:\n vox_label = SemKITTI2train(inputs['vox_label']).type(torch.LongTensor).cuda()\n else:\n vox_label = 
inputs['vox_label'].type(torch.LongTensor).cuda()\n\n pt_valid = [torch.from_numpy(i).cuda() for i in inputs['pt_valid']]\n if self.ins_loss_name.find('semantic_centroids') != -1:\n offset_loss_list = self.ins_loss(pred_offsets, inputs['pt_ins_labels'], pt_valid, gt_semantic_label=inputs['pt_labs'])\n elif self.ins_loss_name.find('embedding_contrastive_loss') != -1:\n offset_loss_list = self.ins_loss(pred_offsets, inputs['pt_ins_labels'], pt_valid, gt_semantic_label=inputs['pt_labs'], xyz=inputs['pt_cart_xyz'])\n elif self.ins_loss_name.find('embedding_discriminative') != -1:\n offset_loss_list = self.ins_loss(pred_offsets, inputs['pt_ins_labels'], pt_valid)\n else:\n pt_offsets = [torch.from_numpy(i).cuda() for i in inputs['pt_offsets']]\n offset_loss_list = self.ins_loss(pred_offsets, pt_offsets, pt_valid)\n\n sem_loss = self.sem_loss_lovasz(torch.nn.functional.softmax(sem_logits), vox_label,ignore=self.ignore_label) + self.sem_loss(sem_logits,vox_label)\n #if self.ins_loss_name == 'embedding_contrastive_loss':\n # loss = 5 * sem_loss + sum(offset_loss_list)\n #else:\n loss = sem_loss + sum(offset_loss_list)\n\n ret_dict = {}\n ret_dict['offset_loss_list'] = offset_loss_list\n ret_dict['sem_loss'] = sem_loss\n ret_dict['loss'] = loss\n\n return ret_dict\n\n def clustering(self, sem_preds, pred_offsets, inputs):\n grid_ind = inputs['grid']\n pt_cart_xyz = inputs['pt_cart_xyz']\n pt_pred_offsets = [pred_offsets[i].detach().cpu().numpy().reshape(-1, self.ins_embedding_dim) for i in range(len(pred_offsets))]\n pt_pred_valid = []\n for i in range(len(grid_ind)):\n pt_pred_valid.append(np.isin(sem_preds[i], valid_xentropy_ids).reshape(-1))\n pred_ins_ids = self.cluster_fn(pt_cart_xyz, pt_pred_offsets, pt_pred_valid)\n return pred_ins_ids\n\n def merge_ins_sem(self, sem_preds, pred_ins_ids, logits=None, inputs=None):\n merged_sem_preds = []\n for i in range(len(sem_preds)):\n if self.merge_func_name == 'merge_ins_sem':\n 
merged_sem_preds.append(common_utils.merge_ins_sem(sem_preds[i], pred_ins_ids[i]))\n elif self.merge_func_name == 'merge_ins_sem_logits_size_based':\n merged_sem_preds.append(common_utils.merge_ins_sem_logits_size_based(sem_preds[i], pred_ins_ids[i], i, logits, inputs))\n elif self.merge_func_name == 'none':\n merged_sem_preds.append(sem_preds[i])\n return merged_sem_preds\n\n def forward(self, batch, is_test=False, before_merge_evaluator=None, after_merge_evaluator=None, require_cluster=True):\n out_data, grid_ind = self.voxelize(batch)\n sem_fea, ins_fea = self.backbone(out_data)\n sem_logits = self.sem_head(sem_fea)\n pred_offsets, _ = self.ins_head(ins_fea, grid_ind)\n loss_dict = self.calc_loss(sem_logits, pred_offsets, batch)\n\n if is_test:\n pt_sem_preds = self.calc_sem_label(sem_logits, batch)\n if require_cluster:\n pt_ins_ids_preds = self.clustering(pt_sem_preds, pred_offsets, batch)\n else:\n pt_ins_ids_preds = [np.zeros_like(pt_sem_preds[i]) for i in range(len(pt_sem_preds))]\n if require_cluster:\n merged_sem_preds = self.merge_ins_sem(pt_sem_preds, pt_ins_ids_preds)\n else:\n merged_sem_preds = pt_sem_preds\n if before_merge_evaluator != None:\n self.update_evaluator(before_merge_evaluator, pt_sem_preds, pt_ins_ids_preds, batch)\n if after_merge_evaluator != None:\n self.update_evaluator(after_merge_evaluator, merged_sem_preds, pt_ins_ids_preds, batch)\n\n loss_dict['sem_preds'] = merged_sem_preds\n loss_dict['ins_preds'] = pt_ins_ids_preds\n\n return loss_dict\n\nclass PolarOffsetSpconv(PolarOffset):\n def __init__(self, cfg):\n super(PolarOffsetSpconv, self).__init__(cfg, need_create_model=False)\n self.backbone = getattr(spconv_unet, cfg.MODEL.BACKBONE.NAME)(cfg)\n self.sem_head = getattr(spconv_unet, cfg.MODEL.SEM_HEAD.NAME)(cfg)\n self.ins_head = getattr(spconv_unet, cfg.MODEL.INS_HEAD.NAME)(cfg)\n self.vfe_model = getattr(PointNet, cfg.MODEL.VFE.NAME)(cfg)\n\n self.ins_loss = getattr(instance_losses, cfg.MODEL.INS_LOSS)\n if cfg.MODEL.SEM_LOSS 
== 'Lovasz_loss':\n self.sem_loss_lovasz = lovasz_losses.lovasz_softmax\n if cfg.DATA_CONFIG.DATASET_NAME.startswith('SemanticKitti'):\n weights = torch.zeros(20, dtype=torch.float)\n weights[0] = 1.0\n weights[1] = 2.293\n weights[2] = 85.756\n weights[3] = 71.511\n weights[4] = 31.596\n weights[5] = 35.624\n weights[6] = 74.761\n weights[7] = 88.722\n weights[8] = 96.389\n weights[9] = 1.00\n weights[10] = 6.362\n weights[11] = 1.00\n weights[12] = 20.387\n weights[13] = 1.00\n weights[14] = 1.363\n weights[15] = 1.00\n weights[16] = 14.214\n weights[17] = 1.263\n weights[18] = 25.936\n weights[19] = 61.896\n else:\n raise NotImplementedError\n self.sem_loss = torch.nn.CrossEntropyLoss(weight=weights.cuda(), ignore_index=0)\n else:\n raise NotImplementedError\n\n cluster_fn_wrapper = getattr(clustering, cfg.MODEL.POST_PROCESSING.CLUSTER_ALGO)\n self.cluster_fn = cluster_fn_wrapper(cfg)\n self.is_fix_semantic = False\n\n self.merge_func_name = cfg.MODEL.POST_PROCESSING.MERGE_FUNC\n\n def fix_semantic_parameters(self):\n fix_list = [self.backbone, self.sem_head, self.vfe_model, self.fea_compression]\n for mod in fix_list:\n for p in mod.parameters():\n p.requires_grad = False\n self.is_fix_semantic = True\n\n def forward(self, batch, is_test=False, before_merge_evaluator=None, after_merge_evaluator=None, require_cluster=True, require_merge=True):\n if self.is_fix_semantic:\n with torch.no_grad():\n coor, feature_3d = self.voxelize_spconv(batch)\n sem_fea, ins_fea = self.backbone(feature_3d, coor, len(batch['grid']))\n sem_logits = self.sem_head(sem_fea)\n else:\n coor, feature_3d = self.voxelize_spconv(batch)\n sem_fea, ins_fea = self.backbone(feature_3d, coor, len(batch['grid']))\n sem_logits = self.sem_head(sem_fea)\n pred_offsets, _ = self.ins_head(ins_fea, batch)\n loss_dict = self.calc_loss(sem_logits, pred_offsets, batch, need_minus_one=False)\n\n if is_test:\n pt_sem_preds = self.calc_sem_label(sem_logits, batch, need_add_one=False)\n if require_cluster:\n 
pt_ins_ids_preds = self.clustering(pt_sem_preds, pred_offsets, batch)\n else:\n pt_ins_ids_preds = [np.zeros_like(pt_sem_preds[i]) for i in range(len(pt_sem_preds))]\n if require_merge:\n merged_sem_preds = self.merge_ins_sem(pt_sem_preds, pt_ins_ids_preds, sem_logits, batch)\n else:\n merged_sem_preds = pt_sem_preds\n if before_merge_evaluator != None:\n if 'mask' in batch:\n self.update_evaluator_multi_frames(before_merge_evaluator, pt_sem_preds, pt_ins_ids_preds, batch)\n else:\n self.update_evaluator(before_merge_evaluator, pt_sem_preds, pt_ins_ids_preds, batch)\n if after_merge_evaluator != None:\n if 'mask' in batch:\n self.update_evaluator_multi_frames(after_merge_evaluator, merged_sem_preds, pt_ins_ids_preds, batch)\n else:\n self.update_evaluator(after_merge_evaluator, merged_sem_preds, pt_ins_ids_preds, batch)\n\n loss_dict['sem_preds'] = merged_sem_preds\n loss_dict['ins_preds'] = pt_ins_ids_preds\n loss_dict['ins_num'] = np.unique(pt_ins_ids_preds[0]).shape[0]\n\n return loss_dict\n\nclass PolarOffsetSpconvPytorchMeanshift(PolarOffsetSpconv):\n def __init__(self, cfg):\n super(PolarOffsetSpconvPytorchMeanshift, self).__init__(cfg)\n self.pytorch_meanshift = pytorch_meanshift.PytorchMeanshift(cfg, self.ins_loss, self.cluster_fn)\n self.is_fix_semantic_instance = False\n\n def fix_semantic_instance_parameters(self):\n fix_list = [self.backbone, self.sem_head, self.vfe_model, self.fea_compression, self.ins_head]\n for mod in fix_list:\n for p in mod.parameters():\n p.requires_grad = False\n self.is_fix_semantic_instance = True\n\n def forward(self, batch, is_test=False, before_merge_evaluator=None, after_merge_evaluator=None, require_cluster=True, require_merge=True):\n if self.is_fix_semantic_instance:\n with torch.no_grad():\n coor, feature_3d = self.voxelize_spconv(batch)\n sem_fea, ins_fea = self.backbone(feature_3d, coor, len(batch['grid']))\n sem_logits = self.sem_head(sem_fea)\n pred_offsets, ins_fea_list = self.ins_head(ins_fea, batch)\n else:\n if 
self.is_fix_semantic:\n with torch.no_grad():\n coor, feature_3d = self.voxelize_spconv(batch)\n sem_fea, ins_fea = self.backbone(feature_3d, coor, len(batch['grid']))\n sem_logits = self.sem_head(sem_fea)\n else:\n coor, feature_3d = self.voxelize_spconv(batch)\n sem_fea, ins_fea = self.backbone(feature_3d, coor, len(batch['grid']))\n sem_logits = self.sem_head(sem_fea)\n pred_offsets, ins_fea_list = self.ins_head(ins_fea, batch)\n loss_dict = self.calc_loss(sem_logits, pred_offsets, batch, need_minus_one=False)\n valid = batch['pt_valid']\n valid = [v.reshape(-1) for v in valid]\n if is_test:\n pt_sem_preds = self.calc_sem_label(sem_logits, batch, need_add_one=False)\n valid = []\n for i in range(len(batch['grid'])):\n valid.append(np.isin(pt_sem_preds[i], valid_xentropy_ids).reshape(-1))\n if self.pytorch_meanshift.data_mode == 'offset':\n embedding = [offset + torch.from_numpy(xyz).cuda() for offset, xyz in zip(pred_offsets, batch['pt_cart_xyz'])]\n else:\n raise NotImplementedError\n batch['ins_fea_list'] = ins_fea_list\n pt_ins_ids_preds, meanshift_loss, bandwidth_weight_summary = self.pytorch_meanshift(batch['pt_cart_xyz'], embedding, valid, batch, need_cluster=is_test)\n\n loss_dict['bandwidth_weight_summary'] = bandwidth_weight_summary\n loss_dict['meanshift_loss'] = meanshift_loss\n loss_dict['offset_loss_list'] += meanshift_loss\n loss_dict['loss'] += sum(meanshift_loss)\n\n if is_test:\n if require_cluster:\n merged_sem_preds = self.merge_ins_sem(pt_sem_preds, pt_ins_ids_preds)\n else:\n merged_sem_preds = pt_sem_preds\n # if before_merge_evaluator != None:\n # self.update_evaluator(before_merge_evaluator, pt_sem_preds, pt_ins_ids_preds, batch)\n # if after_merge_evaluator != None:\n # self.update_evaluator(after_merge_evaluator, merged_sem_preds, pt_ins_ids_preds, batch)\n if before_merge_evaluator != None:\n if 'mask' in batch:\n self.update_evaluator_multi_frames(before_merge_evaluator, pt_sem_preds, pt_ins_ids_preds, batch)\n else:\n 
self.update_evaluator(before_merge_evaluator, pt_sem_preds, pt_ins_ids_preds, batch)\n if after_merge_evaluator != None:\n if 'mask' in batch:\n self.update_evaluator_multi_frames(after_merge_evaluator, merged_sem_preds, pt_ins_ids_preds, batch)\n else:\n self.update_evaluator(after_merge_evaluator, merged_sem_preds, pt_ins_ids_preds, batch)\n\n if 'mask' in batch:\n loss_dict['sem_preds'] = [m[batch['mask_np'][i].reshape(-1) == 0] for i, m in enumerate(merged_sem_preds)]\n loss_dict['ins_preds'] = [p[batch['mask_np'][i].reshape(-1) == 0] for i, p in enumerate(pt_ins_ids_preds)]\n else:\n loss_dict['sem_preds'] = merged_sem_preds\n loss_dict['ins_preds'] = pt_ins_ids_preds\n loss_dict['ins_num'] = np.unique(pt_ins_ids_preds[0]).shape[0]\n\n return loss_dict\n\nclass PolarOffsetSpconvPytorchMeanshiftTrackingMultiFrames(PolarOffsetSpconvPytorchMeanshift):\n def __init__(self, cfg):\n super(PolarOffsetSpconvPytorchMeanshiftTrackingMultiFrames, self).__init__(cfg)\n self.is_init = False\n self.before_ins_ids_preds = None\n self.before_valid_preds = None\n self.before_seq = None\n\n def update_evaluator_multi_frames(self, evaluator, sem_preds, ins_preds, inputs, window_k):\n assert len(sem_preds) == 1\n for i in range(len(sem_preds)):\n eval_one_scan_vps(evaluator, inputs['pt_labs'][i][inputs['mask_np'][i].reshape(-1) == 0].reshape(-1),\n inputs['pt_ins_labels'][i][inputs['mask_np'][i].reshape(-1) == 0].reshape(-1),\n sem_preds[i][inputs['mask_np'][i].reshape(-1) == 0].reshape(-1),\n ins_preds[i].reshape(-1), window_k)\n\n def forward(self, batch, is_test=False, merge_evaluator_list=None, merge_evaluator_window_k_list=None, require_cluster=True, require_merge=True):\n assert is_test\n if self.is_fix_semantic_instance:\n with torch.no_grad():\n coor, feature_3d = self.voxelize_spconv(batch)\n sem_fea, ins_fea = self.backbone(feature_3d, coor, len(batch['grid']))\n sem_logits = self.sem_head(sem_fea)\n pred_offsets, ins_fea_list = self.ins_head(ins_fea, batch)\n else:\n 
if self.is_fix_semantic:\n with torch.no_grad():\n coor, feature_3d = self.voxelize_spconv(batch)\n sem_fea, ins_fea = self.backbone(feature_3d, coor, len(batch['grid']))\n sem_logits = self.sem_head(sem_fea)\n else:\n coor, feature_3d = self.voxelize_spconv(batch)\n sem_fea, ins_fea = self.backbone(feature_3d, coor, len(batch['grid']))\n sem_logits = self.sem_head(sem_fea)\n pred_offsets, ins_fea_list = self.ins_head(ins_fea, batch)\n loss_dict = self.calc_loss(sem_logits, pred_offsets, batch, need_minus_one=False)\n valid = batch['pt_valid']\n if is_test:\n pt_sem_preds = self.calc_sem_label(sem_logits, batch, need_add_one=False)\n valid = []\n for i in range(len(batch['grid'])):\n valid.append(np.isin(pt_sem_preds[i], valid_xentropy_ids).reshape(-1))\n if self.pytorch_meanshift.data_mode == 'offset':\n embedding = [offset + torch.from_numpy(xyz).cuda() for offset, xyz in zip(pred_offsets, batch['pt_cart_xyz'])]\n else:\n raise NotImplementedError\n batch['ins_fea_list'] = ins_fea_list\n pt_ins_ids_preds, meanshift_loss, bandwidth_weight_summary = self.pytorch_meanshift(batch['pt_cart_xyz'], embedding, valid, batch, need_cluster=is_test)\n\n loss_dict['bandwidth_weight_summary'] = bandwidth_weight_summary\n loss_dict['meanshift_loss'] = meanshift_loss\n loss_dict['offset_loss_list'] += meanshift_loss\n loss_dict['loss'] += sum(meanshift_loss)\n\n if is_test:\n if require_cluster:\n merged_sem_preds = self.merge_ins_sem(pt_sem_preds, pt_ins_ids_preds)\n else:\n merged_sem_preds = pt_sem_preds\n\n cur_pcd_fname = batch['pcd_fname'][0]\n cur_pcd_seq = cur_pcd_fname.split('/')[-3]\n if self.before_seq == None:\n self.before_seq = cur_pcd_seq\n elif self.before_seq != cur_pcd_seq:\n self.before_seq = cur_pcd_seq\n self.is_init = False\n\n ins_preds_tracking, matching_list = self.tracking_test(valid, pt_ins_ids_preds, batch)\n loss_dict['ins_preds'] = ins_preds_tracking\n loss_dict['matching_list'] = matching_list\n\n if merge_evaluator_list is not None:\n for 
evaluator, window_k in zip(merge_evaluator_list, merge_evaluator_window_k_list):\n self.update_evaluator_multi_frames(evaluator, merged_sem_preds, ins_preds_tracking, batch, window_k)\n\n loss_dict['sem_preds'] = [m[batch['mask_np'][i].reshape(-1) == 0] for i, m in enumerate(merged_sem_preds)]\n loss_dict['ins_num'] = np.unique(ins_preds_tracking[0]).shape[0]\n\n return loss_dict\n\n def matching(self, after_ins_ids_gt, after_valid_gt, after_ins_ids_preds, after_valid_preds):\n offset = 2**32\n\n x_inst_in_cl_mask = after_valid_preds.reshape(-1)\n y_inst_in_cl_mask = after_valid_gt.reshape(-1)\n \n x_inst_in_cl = after_ins_ids_preds.reshape(-1) * x_inst_in_cl_mask.astype(np.int64)\n y_inst_in_cl = after_ins_ids_gt.reshape(-1) * y_inst_in_cl_mask.astype(np.int64)\n\n unique_pred, counts_pred = np.unique(x_inst_in_cl[x_inst_in_cl > 0], return_counts=True)\n id2idx_pred = {id: idx for idx, id in enumerate(unique_pred)}\n matched_pred = np.array([False] * unique_pred.shape[0])\n\n unique_gt, counts_gt = np.unique(y_inst_in_cl[y_inst_in_cl > 0], return_counts=True)\n id2idx_gt = {id: idx for idx, id in enumerate(unique_gt)}\n matched_gt = np.array([False] * unique_gt.shape[0])\n\n valid_combos = np.logical_and(x_inst_in_cl > 0, y_inst_in_cl > 0)\n offset_combo = x_inst_in_cl[valid_combos] + offset * y_inst_in_cl[valid_combos]\n unique_combo, counts_combo = np.unique(offset_combo, return_counts=True)\n\n gt_labels = unique_combo // offset\n pred_labels = unique_combo % offset\n gt_areas = np.array([counts_gt[id2idx_gt[id]] for id in gt_labels])\n pred_areas = np.array([counts_pred[id2idx_pred[id]] for id in pred_labels])\n intersections = counts_combo\n unions = gt_areas + pred_areas - intersections\n ious = intersections.astype(np.float) / unions.astype(np.float)\n\n tp_indexes = ious > 0.5\n\n return pred_labels[tp_indexes], gt_labels[tp_indexes]\n\n def tracking_test(self, pred_valid, pred_ins_ids, batch):\n batch_size = len(pred_valid)\n assert batch_size == 1\n 
ins_ids_tracking_list = []\n matching_list = []\n for b in range(batch_size):\n after_mask = batch['mask_np'][b].reshape(-1) == 0\n after_ins_ids_preds = pred_ins_ids[b][after_mask]\n after_valid_preds = pred_valid[b][after_mask]\n \n after_valid_ins_ids_preds = after_ins_ids_preds[after_valid_preds].reshape(-1)\n after_unique_ins_ids_preds, after_unique_ins_ids_preds_counts = np.unique(after_valid_ins_ids_preds, return_counts=True)\n # after_unique_ins_ids_preds = after_unique_ins_ids_preds[after_unique_ins_ids_preds_counts > min_points].reshape(-1)\n if after_unique_ins_ids_preds.shape[0] == 0:\n self.is_init = False\n return [after_ins_ids_preds], matching_list\n\n if not self.is_init:\n self.is_init = True\n self.before_ins_ids_preds = after_ins_ids_preds\n self.before_valid_preds = after_valid_preds\n return [after_ins_ids_preds], matching_list\n\n before_mask = batch['mask_np'][b].reshape(-1) == 1\n cur_before_ins_ids_preds = pred_ins_ids[b][before_mask]\n cur_before_valid_preds = pred_valid[b][before_mask]\n\n cur_before_labels, before_labels = self.matching(\n self.before_ins_ids_preds, self.before_valid_preds,\n cur_before_ins_ids_preds, cur_before_valid_preds\n )\n cur2before_dict = {c:b for c,b in zip(cur_before_labels, before_labels)}\n\n ins_ids_tracking = np.zeros_like(after_ins_ids_preds)\n cur_max = np.max(self.before_ins_ids_preds)\n for au in after_unique_ins_ids_preds:\n if au in cur2before_dict:\n ins_ids_tracking[after_ins_ids_preds == au] = cur2before_dict[au]\n else:\n cur_max += 1\n ins_ids_tracking[after_ins_ids_preds == au] = cur_max\n ins_ids_tracking_list.append(ins_ids_tracking)\n\n self.before_ins_ids_preds = ins_ids_tracking\n self.before_valid_preds = after_valid_preds\n\n return ins_ids_tracking_list, matching_list\n"
] | [
[
"torch.nn.functional.softmax",
"torch.cat",
"torch.randperm",
"torch.zeros",
"numpy.max",
"torch.unique",
"numpy.zeros_like",
"torch.no_grad",
"torch.nn.CrossEntropyLoss",
"torch.clamp",
"numpy.unique",
"torch.from_numpy",
"torch.argsort",
"numpy.zeros",
"torch.nn.functional.pad",
"numpy.isin",
"torch.nn.Linear",
"numpy.logical_and",
"numpy.array",
"torch.nn.ReLU",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vinitra/keras-onnx | [
"17d86705f566bee56307abd13a60b79776a58c0e"
] | [
"applications/nightly_build/test_deep_speech.py"
] | [
"###############################################################################\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n###############################################################################\nimport os\nimport sys\nimport unittest\nimport keras2onnx\nimport numpy as np\nfrom keras2onnx.proto import keras\nfrom onnxconverter_common.onnx_ex import get_maximum_opset_supported\nfrom os.path import dirname, abspath\nsys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../tests/'))\nfrom test_utils import run_keras_and_ort, test_level_0\nK = keras.backend\n\nActivation = keras.layers.Activation\nAveragePooling2D = keras.layers.AveragePooling2D\nAdd = keras.layers.Add\nBatchNormalization = keras.layers.BatchNormalization\nconcatenate = keras.layers.concatenate\nConv2D = keras.layers.Conv2D\nDense = keras.layers.Dense\nDropout = keras.layers.Dropout\nEmbedding = keras.layers.Embedding\nFlatten = keras.layers.Flatten\nGlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D\nInput = keras.layers.Input\nLambda = keras.layers.Lambda\nLeakyReLU = keras.layers.LeakyReLU\nMaxPooling2D = keras.layers.MaxPooling2D\nmultiply = keras.layers.multiply\nPermute = keras.layers.Permute\nReshape = keras.layers.Reshape\nUpSampling2D = keras.layers.UpSampling2D\nZeroPadding2D = keras.layers.ZeroPadding2D\n\nSequential = keras.models.Sequential\nModel = keras.models.Model\nlayers = keras.layers\n\n\n# Model from https://github.com/rolczynski/Automatic-Speech-Recognition\nclass TestDeepSpeech(unittest.TestCase):\n\n def setUp(self):\n self.model_files = []\n\n def tearDown(self):\n for fl in self.model_files:\n os.remove(fl)\n\n @unittest.skipIf(get_maximum_opset_supported() < 11,\n \"Deep speech conversion need opset >= 11.\")\n def test_deep_speech(self):\n K.clear_session()\n input_dim = 20\n output_dim = 10\n context = 7\n units = 1024\n 
dropouts = (0.1, 0.1, 0)\n\n # Define input tensor [batch, time, features]\n input_tensor = layers.Input([None, input_dim], name='X')\n\n # Add 4th dimension [batch, time, frequency, channel]\n x = layers.Lambda(keras.backend.expand_dims,\n arguments=dict(axis=-1))(input_tensor)\n # Fill zeros around time dimension\n x = layers.ZeroPadding2D(padding=(context, 0))(x)\n # Convolve signal in time dim\n receptive_field = (2 * context + 1, input_dim)\n x = layers.Conv2D(filters=units, kernel_size=receptive_field)(x)\n # Squeeze into 3rd dim array\n x = layers.Lambda(keras.backend.squeeze, arguments=dict(axis=2))(x)\n # Add non-linearity\n x = layers.ReLU(max_value=20)(x)\n # Use dropout as regularization\n x = layers.Dropout(rate=dropouts[0])(x)\n\n # 2nd and 3rd FC layers do a feature extraction base on a narrow\n # context of convolutional layer\n x = layers.TimeDistributed(layers.Dense(units))(x)\n x = layers.ReLU(max_value=20)(x)\n x = layers.Dropout(rate=dropouts[1])(x)\n\n x = layers.TimeDistributed(layers.Dense(units))(x)\n x = layers.ReLU(max_value=20)(x)\n x = layers.Dropout(rate=dropouts[2])(x)\n\n # Use recurrent layer to have a broader context\n x = layers.Bidirectional(layers.LSTM(units, return_sequences=True),\n merge_mode='sum')(x)\n\n # Return at each time step logits along characters. 
Then CTC\n # computation is more stable, in contrast to the softmax.\n output_tensor = layers.TimeDistributed(layers.Dense(output_dim))(x)\n model = keras.Model(input_tensor, output_tensor, name='DeepSpeech')\n data = np.random.rand(2, 3, input_dim).astype(np.float32)\n expected = model.predict(data)\n onnx_model = keras2onnx.convert_keras(model, model.name)\n self.assertTrue(\n run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))\n\n @unittest.skipIf(get_maximum_opset_supported() < 11,\n \"Deep speech conversion need opset >= 11.\")\n def test_deep_speech_2(self):\n K.clear_session()\n input_dim = 20\n output_dim = 10\n rnn_units = 800\n # Define input tensor [batch, time, features]\n input_tensor = layers.Input([None, input_dim], name='X')\n\n # Add 4th dimension [batch, time, frequency, channel]\n x = layers.Lambda(keras.backend.expand_dims,\n arguments=dict(axis=-1))(input_tensor)\n x = layers.Conv2D(filters=32,\n kernel_size=[11, 41],\n strides=[2, 2],\n padding='same',\n use_bias=False,\n name='conv_1')(x)\n x = layers.BatchNormalization(name='conv_1_bn')(x)\n x = layers.ReLU(name='conv_1_relu')(x)\n\n x = layers.Conv2D(filters=32,\n kernel_size=[11, 21],\n strides=[1, 2],\n padding='same',\n use_bias=False,\n name='conv_2')(x)\n x = layers.BatchNormalization(name='conv_2_bn')(x)\n x = layers.ReLU(name='conv_2_relu')(x)\n # We need to squeeze to 3D tensor. 
Thanks to the stride in frequency\n # domain, we reduce the number of features four times for each channel.\n x = layers.Reshape([-1, input_dim//4*32])(x)\n\n for i in [1, 2, 3, 4, 5]:\n recurrent = layers.GRU(units=rnn_units,\n activation='tanh',\n recurrent_activation='sigmoid',\n use_bias=True,\n return_sequences=True,\n reset_after=True,\n name='gru_'+str(i))\n x = layers.Bidirectional(recurrent,\n name='bidirectional'+str(i),\n merge_mode='concat')(x)\n x = layers.Dropout(rate=0.5)(x) if i < 5 else x # Only between\n\n # Return at each time step logits along characters. Then CTC\n # computation is more stable, in contrast to the softmax.\n x = layers.TimeDistributed(layers.Dense(units=rnn_units*2), name='dense_1')(x)\n x = layers.ReLU(name='dense_1_relu')(x)\n x = layers.Dropout(rate=0.5)(x)\n output_tensor = layers.TimeDistributed(layers.Dense(units=output_dim),\n name='dense_2')(x)\n\n model = keras.Model(input_tensor, output_tensor, name='DeepSpeech2')\n data = np.random.rand(2, 3, input_dim).astype(np.float32)\n expected = model.predict(data)\n onnx_model = keras2onnx.convert_keras(model, model.name)\n self.assertTrue(\n run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
schmocker/Pyjamas | [
"52a72d6e8b915f77a2194d4e7d53c46d0ec28c17"
] | [
"Models/Electricity_Market/Tiers/V001/model.py"
] | [
"from pyjamas_core import Supermodel\nfrom pyjamas_core.util import Input, Output, Property\nfrom datetime import datetime, timedelta\nfrom Models._utils.time import datetime2utc_time, utc_time2datetime\nimport numpy as np\nfrom pytz import timezone\nimport json\nfrom scipy.interpolate import griddata\nimport pandas as pd\nimport requests\nimport os\n\n# define the model class and inherit from class \"Supermodel\"\nclass Model(Supermodel):\n # model constructor\n def __init__(self, id, name: str):\n # instantiate supermodel\n super(Model, self).__init__(id, name)\n\n # define inputs\n self.inputs['stock_ex_price'] = Input(name='Stock exchange price', unit='€/J', info=\"stock exchange price\")\n self.inputs['distnet_costs'] = Input(name='Distribution network cost', unit='{-, €/J}', info=\"distribution network cost\")\n self.inputs['service_cost'] = Input(name='Service cost', unit='€/J', info=\"service cost\")\n self.inputs['taxes'] = Input(name='Taxes', unit='€/J', info=\"taxes\")\n self.inputs['futures'] = Input(name='Futures', unit='s', info=\"Futures\")\n\n # define outputs\n self.outputs['el_rate'] = Output(name='Electricity rate', unit='€/J', info='electricity rate')\n self.outputs['times'] = Output(name='Times', unit='s', info='Times')\n self.outputs['y_scaling'] = Output(name='Scaling of y axis', unit='', info='Scaling of y axis')\n self.outputs['y_unit'] = Output(name='Unit of y axis', unit='', info='Unit of y axis')\n self.outputs['y_label'] = Output(name='y label', unit='', info='Label of y axis')\n\n # define properties\n ET_def = {\"location\": [\"Baden\"],\n \"border\": [[-1.0, -0.5, -0.2, 0.0, 0.2, 0.5, 1.0]],\n \"weight\": [[-0.3, -0.6, -0.8, 1.1, 1.3, 1.5]]}\n NT_def = {\"location\": [\"Baden\"],\n \"border\": [[-1.0, -0.8, -0.5, 0.0, 0.4, 0.8, 1.0]],\n \"weight\": [[-0.5, -0.6, -0.8, 1.2, 1.5, 1.8]]}\n ET_def = json.dumps(ET_def)\n NT_def = json.dumps(NT_def)\n self.properties['weight_ET'] = Property(default=ET_def, data_type=str, name='energy 
tiers', unit='-',\n info='borders and weights of energy tiers', example=ET_def)\n self.properties['weight_NT'] = Property(default=NT_def, data_type=str, name='net tiers', unit='-',\n info='borders and weights of net tiers', example=NT_def)\n self.properties[\"scaling\"] = Property(default=1, data_type=float, name='Scaling factor', unit='-',\n info='Scaling factor for y axis', example='3.6e9')\n self.properties[\"y_unit\"] = Property(default='€/MWh', data_type=str, name='unit of y label', unit='-',\n info='Unit of label for y axis', example='[€/MWh]')\n self.properties[\"y_labeling\"] = Property(default='Price', data_type=str, name='y label', unit='-',\n info='Label for y axis', example='Price [€/MWh]')\n\n # define persistent variables\n self.weight_ET = None\n self.weight_NT = None\n self.y_scaling = None\n self.y_unit = None\n self.y_labeling = None\n\n async def func_birth(self):\n pass\n\n async def func_amend(self, keys=[]):\n\n if 'weight_ET' in keys:\n weight_ET_i = self.get_property('weight_ET')\n self.weight_ET = json.loads(weight_ET_i)\n\n if 'weight_NT' in keys:\n weight_NT_i = self.get_property('weight_NT')\n self.weight_NT = json.loads(weight_NT_i)\n\n if 'scaling' in keys:\n self.y_scaling = self.get_property(\"scaling\")\n\n if 'y_unit' in keys:\n self.y_unit = self.get_property(\"y_unit\")\n\n if 'y_labeling' in keys:\n self.y_labeling = self.get_property(\"y_labeling\")\n\n async def func_peri(self, prep_to_peri=None):\n\n # locations information\n loc_tiers = self.weight_ET['location']\n\n # read prices\n stock_prices_input = await self.get_input('stock_ex_price')\n\n # read distribution costs\n dn_costs_input = await self.get_input('distnet_costs')\n loc_distnet = dn_costs_input['distribution_networks']\n len_loc_distnet = len(loc_distnet)\n\n # DLK\n DLK_val = await self.get_input('service_cost')\n\n # Abgaben\n abgaben_val = await self.get_input('taxes')\n\n # electricity rate\n el_rate = []\n border_tiers = []\n border_val = []\n ET_val = []\n 
NT_val = []\n MP_val = []\n for nt in range(0, len_loc_distnet):\n\n # compare location of distribution network with tiers locations, in it?\n if loc_distnet[nt] in loc_tiers:\n idx = loc_tiers.index(loc_distnet[nt])\n else: # if not in list, take default values\n idx = 0\n\n # distribution cost\n dist_costs = dn_costs_input['costs'][nt]\n\n # read and determine borders and tiers\n border_tiers_i = self.det_border_tiers(idx)\n\n # stock prices\n stock_prices = stock_prices_input['prices'][nt]\n\n el_rate_i = []\n for i_mt in range(0, len(stock_prices)):\n # stock price\n mt = stock_prices[i_mt]\n # electricity rate\n el_rate_ii = np.multiply(mt, border_tiers_i['ET_tiers']) + np.multiply(dist_costs, border_tiers_i['NT_tiers']) + DLK_val + abgaben_val\n el_rate_ii = el_rate_ii.tolist()\n el_rate_i.append(el_rate_ii)\n\n el_rate.append(el_rate_i)\n border_tiers.append(border_tiers_i)\n\n border_val_i = []\n ET_val_i = []\n NT_val_i = []\n border_i = border_tiers[nt]\n len_border_i = len(border_i[\"borders\"])\n\n for ni in range(0, len_border_i):\n if ni == 0:\n border_val_i.append(border_i[\"borders\"][ni])\n ET_val_i.append(border_i[\"ET_tiers\"][ni])\n NT_val_i.append(border_i[\"NT_tiers\"][ni])\n if ni == (len_border_i-1):\n border_val_i.append(border_i[\"borders\"][ni])\n ET_val_i.append(border_i[\"ET_tiers\"][ni-1])\n NT_val_i.append(border_i[\"NT_tiers\"][ni-1])\n if ((ni>0) & (ni<(len_border_i-1))):\n border_val_i.append(border_i[\"borders\"][ni])\n ET_val_i.append(border_i[\"ET_tiers\"][ni-1])\n NT_val_i.append(border_i[\"NT_tiers\"][ni-1])\n border_val_i.append(border_i[\"borders\"][ni])\n ET_val_i.append(border_i[\"ET_tiers\"][ni])\n NT_val_i.append(border_i[\"NT_tiers\"][ni])\n\n\n border_val.append(border_val_i)\n ET_val.append(ET_val_i)\n NT_val.append(NT_val_i)\n\n MP_val_data = []\n for mi in range(0, el_rate_i.__len__()):\n MP_val_i = [];\n for ni in range(0, len_border_i):\n erate_i = el_rate_i[mi]\n if ni == 0:\n MP_val_i.append(erate_i[ni])\n if ni 
== (len_border_i - 1):\n MP_val_i.append(erate_i[ni-1])\n if ((ni > 0) & (ni < (len_border_i - 1))):\n MP_val_i.append(erate_i[ni-1])\n MP_val_i.append(erate_i[ni])\n\n MP_val_data.append(MP_val_i)\n MP_val.append(MP_val_data)\n\n\n tier_val = [ET_val, NT_val]\n border_lines = {\"borders\": border_val,\n \"tiers\": [\"ET Tiers\", \"NT Tiers\"],\n \"tier_values\": tier_val,\n \"prices\": MP_val}\n\n output = {'Stao_ID': loc_distnet,\n 'values': el_rate,\n 'borders': border_tiers,\n 'border_lines': border_lines\n }\n\n # set output\n self.set_output(\"el_rate\", output)\n self.set_output(\"times\", await self.get_input('futures'))\n self.set_output(\"y_scaling\", self.y_scaling)\n self.set_output(\"y_unit\", self.y_unit)\n self.set_output(\"y_label\", self.y_labeling)\n\n def det_border_tiers(self, it):\n\n # read borders\n ET_border = self.weight_ET[\"border\"][it]\n NT_border = self.weight_NT[\"border\"][it]\n\n ET_border = np.array(ET_border)\n NT_border = np.array(NT_border)\n\n # merge\n borders = np.append(ET_border, NT_border)\n borders = np.unique(borders)\n\n # read tiers\n ET_tiers_orig = self.weight_ET[\"weight\"][it]\n NT_tiers_orig = self.weight_NT[\"weight\"][it]\n\n # create tiers corresponding to border\n ind_ET = 0\n ind_NT = 0\n ET_tiers = np.array(ET_tiers_orig[ind_ET])\n NT_tiers = np.array(NT_tiers_orig[ind_NT])\n for it in range(1, len(borders) - 1):\n\n # ET\n if ET_border[ind_ET+1] <= borders[it]:\n ind_ET = ind_ET + 1\n ET_tiers = np.append(ET_tiers, ET_tiers_orig[ind_ET])\n else:\n ET_tiers = np.append(ET_tiers, ET_tiers_orig[ind_ET])\n\n # NT\n if NT_border[ind_NT+1] <= borders[it]:\n ind_NT = ind_NT + 1\n NT_tiers = np.append(NT_tiers, NT_tiers_orig[ind_NT])\n else:\n NT_tiers = np.append(NT_tiers, NT_tiers_orig[ind_NT])\n\n #print(it)\n\n # return dict\n border_tiers = {'borders': borders.tolist(),\n 'ET_tiers': ET_tiers.tolist(),\n 'NT_tiers': NT_tiers.tolist()}\n\n return border_tiers\n\nif __name__ == \"__main__\":\n\n # input\n 
stock_ex_price = {'distribution_networks': ['Baden', 'Brugg'],\n 'prices': [[1, 2, 3], [1.1, 2.2, 3.3]]}\n distnet_costs = {'distribution_networks': stock_ex_price['distribution_networks'],\n 'costs': [100, 111]}\n DLK = [0.5]\n abgaben = [0.25]\n\n # properties\n ET = {\"location\": ['Baden', 'Brugg'],\n \"border\": [[-1., -0.8, -0.3, 0., 0.3, 0.8, 1.], [-1., -0.85, -0.35, 0., 0.35, 0.85, 1.]],\n \"weight\": [[-2., -1.25, -0.75, 0.75, 1.25, 2.], [-2., -1.3, -0.8, 0.8, 1.3, 2.]]}\n NT = {\"location\": ['Baden', 'Brugg'],\n \"border\": [[-1., -0.7, -0.4, 0., 0.4, 0.7, 1.], [-1., -0.75, -0.45, 0., 0.45, 0.75, 1.]],\n \"weight\": [[-1.75, -1., -0.5, 0.5, 1., 1.75], [-1.8, -1.05, -0.55, 0.55, 1.05, 1.8]]}\n ET = json.dumps(ET)\n NT = json.dumps(NT)\n\n inputs = {'stock_ex_price': stock_ex_price,\n 'distnet_costs': distnet_costs,\n 'service_cost': DLK,\n 'taxes': abgaben}\n props = {'weight_ET': ET,\n 'weight_NT': NT}\n\n outputs = Model.test(inputs, props)\n"
] | [
[
"numpy.append",
"numpy.array",
"numpy.multiply",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Leon-Francis/Script-role-emotion-recognition | [
"e80b8366f1e868b6611c149ad18945a994784b3d"
] | [
"train.py"
] | [
"import torch\nimport tqdm\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\nfrom torch import optim, nn\nfrom dataset import Script_dataset\nfrom config import TrainingConfig, CONFIG_PATH\nfrom model import BaseModel\nfrom tools import logging, get_time\nfrom datetime import datetime\nimport os\nfrom shutil import copyfile\nimport copy\nimport math\n\ndef save_config(path):\n copyfile(CONFIG_PATH, path + r'/config.txt')\n\ndef build_dataset():\n train_dataset_orig = Script_dataset(train_data=True, full_train_mode=False)\n test_dataset_orig = Script_dataset(train_data=False, full_train_mode=False)\n\n train_data = DataLoader(train_dataset_orig,\n batch_size=TrainingConfig.batch_size,\n shuffle=True,\n num_workers=4)\n test_data = DataLoader(test_dataset_orig,\n batch_size=TrainingConfig.batch_size,\n shuffle=False,\n num_workers=4)\n\n return train_data, test_data, train_dataset_orig.tokenizer\n\ndef train(train_data, model, criterion, optimizer):\n model.train()\n loss_mean = 0.0\n for train_features, train_labels in train_data:\n input_ids = train_features['input_ids'].to(TrainingConfig.train_device)\n token_type_ids = train_features['token_type_ids'].to(TrainingConfig.train_device)\n attention_mask = train_features['attention_mask'].to(TrainingConfig.train_device)\n\n outputs = model(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)\n\n loss_love = criterion(outputs['love'], train_labels['love'].view(-1, 1).to(TrainingConfig.train_device))\n loss_joy = criterion(outputs['joy'], train_labels['joy'].view(-1, 1).to(TrainingConfig.train_device))\n loss_fright = criterion(outputs['fright'], train_labels['fright'].view(-1, 1).to(TrainingConfig.train_device))\n loss_anger = criterion(outputs['anger'], train_labels['anger'].view(-1, 1).to(TrainingConfig.train_device))\n loss_fear = criterion(outputs['fear'], train_labels['fear'].view(-1, 1).to(TrainingConfig.train_device))\n loss_sorrow = 
criterion(outputs['sorrow'], train_labels['sorrow'].view(-1, 1).to(TrainingConfig.train_device))\n loss = loss_love + loss_joy + loss_fright + loss_anger + loss_fear + loss_sorrow\n\n loss_mean += loss.item()\n if loss.item() > TrainingConfig.skip_loss:\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n return loss_mean / len(train_data)\n\n\[email protected]_grad()\ndef evaluate(test_data, model, criterion):\n model.eval()\n loss_mean = 0.0\n score = 0.0\n total = 0\n for test_features, test_labels in test_data:\n input_ids = test_features['input_ids'].to(TrainingConfig.train_device)\n token_type_ids = test_features['token_type_ids'].to(TrainingConfig.train_device)\n attention_mask = test_features['attention_mask'].to(TrainingConfig.train_device)\n\n outputs = model(input_ids=input_ids,token_type_ids=token_type_ids,attention_mask=attention_mask)\n\n loss_love = criterion(outputs['love'], test_labels['love'].view(-1, 1).to(TrainingConfig.train_device))\n loss_joy = criterion(outputs['joy'], test_labels['joy'].view(-1, 1).to(TrainingConfig.train_device))\n loss_fright = criterion(outputs['fright'], test_labels['fright'].view(-1, 1).to(TrainingConfig.train_device))\n loss_anger = criterion(outputs['anger'], test_labels['anger'].view(-1, 1).to(TrainingConfig.train_device))\n loss_fear = criterion(outputs['fear'], test_labels['fear'].view(-1, 1).to(TrainingConfig.train_device))\n loss_sorrow = criterion(outputs['sorrow'], test_labels['sorrow'].view(-1, 1).to(TrainingConfig.train_device))\n loss = loss_love + loss_joy + loss_fright + loss_anger + loss_fear + loss_sorrow\n\n loss_mean += loss.item()\n\n for key, value in outputs.items():\n score += torch.sum((outputs[key].sigmoid().squeeze(1) * 3 - test_labels[key].to(TrainingConfig.train_device) * 3) ** 2).item()\n\n total += test_labels['love'].size()[0]\n\n return loss_mean / len(test_data), 1 / (1 + math.sqrt(score / total / 6))\n\n\nif __name__ == \"__main__\":\n logging('Using cuda device gpu: ' + 
str(TrainingConfig.cuda_idx))\n cur_dir = TrainingConfig.output_dir + '/train_model/' + datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")\n cur_models_dir = cur_dir + '/models'\n if not os.path.isdir(cur_dir):\n os.makedirs(cur_dir)\n os.makedirs(cur_models_dir)\n\n logging('Saving into directory ' + cur_dir)\n save_config(cur_dir)\n\n logging('preparing data...')\n train_data, test_data, tokenizer = build_dataset()\n\n logging('init models, optimizer, criterion...')\n model = BaseModel(tokenizer).to(TrainingConfig.train_device)\n\n optimizer = optim.AdamW([{\n 'params': model.bert.parameters(),\n 'lr': TrainingConfig.Bert_lr\n }, {\n 'params': model.out_love.parameters()\n }, {\n 'params': model.out_joy.parameters()\n }, {\n 'params': model.out_fright.parameters()\n }, {\n 'params': model.out_anger.parameters()\n }, {\n 'params': model.out_fear.parameters()\n }, {\n 'params': model.out_sorrow.parameters()\n }],\n lr=TrainingConfig.lr,\n betas=TrainingConfig.betas,\n eps=TrainingConfig.eps,\n weight_decay=TrainingConfig.weight_decay)\n\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n mode='min',\n factor=0.95,\n patience=3,\n verbose=True,\n min_lr=3e-9)\n warmup_scheduler = optim.lr_scheduler.LambdaLR(optimizer,\n lr_lambda=lambda ep: 1e-2\n if ep < 3 else 1.0)\n\n criterion = nn.BCEWithLogitsLoss().to(TrainingConfig.train_device)\n\n logging('Start training...')\n best_score = 0.0\n temp_path = cur_models_dir + f'/temp_model.pt'\n for ep in range(TrainingConfig.epoch):\n logging(f'epoch {ep} start train')\n train_loss = train(train_data, model, criterion, optimizer)\n logging(f'epoch {ep} start evaluate')\n evaluate_loss, score = evaluate(test_data, model, criterion)\n if score > best_score:\n best_score = score\n best_path = cur_models_dir + f'/best_score_{get_time()}_{score:.5f}.pt'\n best_state = copy.deepcopy(model.state_dict())\n\n if ep > 3 and best_score > TrainingConfig.save_score_limit and best_state != None:\n logging(f'saving best model 
score {best_score:.5f} in {temp_path}')\n torch.save(best_state, temp_path)\n\n if ep < 4:\n warmup_scheduler.step(ep)\n else:\n scheduler.step(evaluate_loss, epoch=ep)\n\n logging(\n f'epoch {ep} done! train_loss {train_loss:.5f} evaluate_loss {evaluate_loss:.5f} \\n'\n f'score {score:.5f} now best_score {best_score:.5f}')\n\n if best_score > TrainingConfig.save_score_limit and best_state != None:\n logging(f'saving best model score {best_score:.5f} in {best_path}')\n torch.save(best_state, best_path)"
] | [
[
"torch.optim.lr_scheduler.LambdaLR",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.utils.data.DataLoader",
"torch.nn.BCEWithLogitsLoss",
"torch.no_grad",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
granttremblay/aoide | [
"ea25bdf92013f7dc3b254e261039c43e697ee901"
] | [
"aoide/make_sky_mask.py"
] | [
"#!/usr/bin/env python\n'''\nAoide | Reduction & Analysis of MUSE observations\n-------------------------------------------------\nDr. Grant R. Tremblay | Harvard-Smithsonian Center for Astrophysics\ngrant.tremblay @ cfa.harvard.edu\n\nSee the README associated with this repository for documentation & examples.\n'''\n\n\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom astropy.io import fits as pyfits\n\n\"\"\"\nInteractive Masking of a FITS-file. The FITS-file must be provided upon\ncreating a new instance. If no mask is provided, the routine will create one\nfrom scratch. Otherwise, the supplied mask will be modified.\nThe masking porcess is carried out using the mouse: An area to mask is selected\nby moving the mouse over it while pressing the left button. To unmask an area,\nuse the right button. The cuts might be changed by clicking on the wheel.\nNote that plt.show() must be called after an instance of MaskFrame has been\ncreated!\n\n\"\"\"\n\n\nclass MaskFrame:\n \"\"\"\n Initiate an instance\n \"\"\"\n\n def __init__(self, image, mask_name, cuts=(0, 10), extension=0):\n fits_ima = pyfits.open(image)\n self.true_arr = fits_ima[extension].data\n if len(self.true_arr.shape) == 3:\n self.true_arr = self.true_arr[0, :]\n fits_ima.close()\n self.mask_name = mask_name\n self.extension = extension\n\n if os.path.exists(mask_name):\n self.in_mask = pyfits.open(mask_name, mode='update')\n self.mask = self.in_mask[0].data\n else:\n self.in_mask = None\n self.mask = np.zeros(self.true_arr.shape, dtype='Int16')\n\n self.plot_arr = self.true_arr + (self.mask * 1e9)\n\n self.lo_cut = cuts[0]\n self.hi_cut = cuts[1]\n\n self.fig = plt.figure(figsize=(8,8))\n self.ax = self.fig.add_subplot(111)\n self.ax.set_title('LEFT: Mask | RIGHT: Unmask | Wheel: Change cuts')\n self.im = self.ax.imshow(\n self.true_arr, origin='lower', interpolation='nearest', cmap='magma')\n\n self.update()\n\n self.xM = []\n 
self.yM = []\n\n self._connect()\n\n \"\"\"\n Connect the button_***_events to the corresponding methods\n \"\"\"\n\n def _connect(self):\n self.ax.figure.canvas.mpl_connect('button_press_event', self.__on)\n self.ax.figure.canvas.mpl_connect('button_release_event', self.__off)\n\n \"\"\"\n The actions that are carried out when a mouse button is pressed:\n \"\"\"\n\n def __on(self, event):\n if event.button == 2:\n print('Current cut levels are: {}, {}'.format(\n self.lo_cut, self.hi_cut))\n new_c = input('Enter new cut levels as low,high e.g. 0,20: ')\n self.lo_cut = float(new_c.split(',')[0])\n self.hi_cut = float(new_c.split(',')[1])\n self.update()\n else:\n if event.inaxes != self.ax.axes:\n print('Out of bounds!')\n return\n self.xM.append(int(round(event.xdata)))\n self.yM.append(int(round(event.ydata)))\n\n \"\"\"\n The actions that are carried out when a mouse button is released.\n \"\"\"\n\n def __off(self, event):\n if event.inaxes != self.ax.axes:\n print('Out of bounds!')\n return\n else:\n self.xM.append(int(round(event.xdata)))\n self.yM.append(int(round(event.ydata)))\n\n if len(self.xM) == 2:\n if event.button == 1:\n self.mask[min(self.yM):max(self.yM) + 1,\n min(self.xM):max(self.xM) + 1] = 1\n elif event.button == 3:\n self.mask[min(self.yM):max(self.yM) + 1,\n min(self.xM):max(self.xM) + 1] = 0\n\n self.plot_arr = self.true_arr + (self.mask * 1e9)\n self.update()\n\n self.xM = []\n self.yM = []\n\n \"\"\"\n This method updates the graphical interface:\n \"\"\"\n\n def update(self):\n self.im.set_data(self.plot_arr[:, :])\n self.im.set_clim(vmin=self.lo_cut, vmax=self.hi_cut)\n self.im.axes.figure.canvas.draw()\n\n \"\"\"\n Save the mask under the filename specified in FrameMask.__init__\n Note that unlike the other methods, this method must be called explicitely\n \"\"\"\n\n def save_mask(self):\n extension = self.extension\n if self.in_mask == None:\n maskHDU = pyfits.PrimaryHDU(self.mask)\n maskHDU.writeto(self.mask_name, overwrite=True)\n 
else:\n self.in_mask[0].data = self.mask\n self.in_mask.flush()\n\n\n\ndef main():\n if len(sys.argv) == 3:\n make_mask = MaskFrame(sys.argv[1], sys.argv[2])\n elif len(sys.argv) == 4:\n make_mask = MaskFrame(\n sys.argv[1], sys.argv[2], extension=int(sys.argv[3]))\n plt.show()\n make_mask.save_mask()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
serge-m/jina | [
"9c9af3cd2982daabc75dd3d3e2f380e17c21aac0"
] | [
"tests/unit/executors/evaluators/rank/test_recall.py"
] | [
"import numpy as np\nimport pytest\n\nfrom jina.executors.evaluators.rank.recall import RecallEvaluator\n\n\[email protected](\n 'eval_at, expected',\n [\n (0, 0.0),\n (1, 0.2),\n (2, 0.4),\n (3, 0.4),\n (5, 0.4),\n (100, 0.4)\n ]\n)\ndef test_recall_evaluator(eval_at, expected):\n matches_ids = [0, 1, 2, 3, 4]\n\n desired_ids = [1, 0, 20, 30, 40]\n\n evaluator = RecallEvaluator(eval_at=eval_at)\n assert evaluator.evaluate(actual=matches_ids, desired=desired_ids) == expected\n assert evaluator._running_stats._n == 1\n np.testing.assert_almost_equal(evaluator.mean, expected)\n\n\[email protected](\n 'eval_at, expected_first',\n [\n (0, 0.0),\n (1, 0.2),\n (2, 0.4),\n (3, 0.4),\n (5, 0.4),\n (100, 0.4)\n ]\n)\ndef test_recall_evaluator_average(eval_at, expected_first):\n matches_ids = [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]\n\n desired_ids = [[1, 0, 20, 30, 40], [-1, -1, -1, -1, -1], [-1, -1, -1, -1, -1]]\n\n evaluator = RecallEvaluator(eval_at=eval_at)\n assert evaluator.evaluate(actual=matches_ids[0], desired=desired_ids[0]) == expected_first\n assert evaluator.evaluate(actual=matches_ids[1], desired=desired_ids[1]) == 0.0\n assert evaluator.evaluate(actual=matches_ids[2], desired=desired_ids[2]) == 0.0\n assert evaluator._running_stats._n == 3\n np.testing.assert_almost_equal(evaluator.mean, expected_first / 3)\n\n\ndef test_recall_evaluator_no_matches():\n matches_ids = []\n\n desired_ids = [1, 0, 20, 30, 40]\n\n evaluator = RecallEvaluator(eval_at=2)\n assert evaluator.evaluate(actual=matches_ids, desired=desired_ids) == 0.0\n assert evaluator._running_stats._n == 1\n np.testing.assert_almost_equal(evaluator.mean, 0.0)\n"
] | [
[
"numpy.testing.assert_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HaowenWeiJohn/RealityNavigationRealTimeInference | [
"cef6906d939f56c88ea38e4394f13f35f055e3d9"
] | [
"utils/data_utils.py"
] | [
"import os\nfrom pathlib import Path\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.interpolate import interp1d\nfrom scipy.signal import resample\n\nfrom utils.sig_proc_utils import notch_filter, baseline_correction\n\n\ndef window_slice(data, window_size, stride, channel_mode='channel_last'):\n assert len(data.shape) == 2\n if channel_mode == 'channel_first':\n data = np.transpose(data)\n elif channel_mode == 'channel_last':\n pass\n else:\n raise Exception('Unsupported channel mode')\n assert window_size <= len(data)\n assert stride > 0\n rtn = np.expand_dims(data, axis=0) if window_size == len(data) else []\n for i in range(window_size, len(data), stride):\n rtn.append(data[i - window_size:i])\n return np.array(rtn)\n\n\ndef modify_indice_to_cover(i1, i2, coverage, tolerance=3):\n assert i1 < i2\n assert abs(coverage - (i2 - i1)) <= tolerance\n is_modifying_i1 = True\n if i2 - i1 > coverage:\n while i2 - i1 != coverage:\n if is_modifying_i1:\n i1 += 1\n else:\n i2 -= 1\n print('Modified')\n\n elif i2 - i1 < coverage:\n while i2 - i1 != coverage:\n if is_modifying_i1:\n i1 -= 1\n else:\n i2 += 1\n print('Modified')\n\n return i1, i2\n\n\n\ndef interp_negative(y):\n idx = y < 0\n x = np.arange(len(y))\n y_interp = np.copy(y)\n y_interp[idx] = np.interp(x[idx], x[~idx], y[~idx])\n return y_interp\n\n\ndef clutter_removal(cur_frame, clutter, signal_clutter_ratio):\n if clutter is None:\n clutter = cur_frame\n else:\n clutter = signal_clutter_ratio * clutter + (1 - signal_clutter_ratio) * cur_frame\n return cur_frame - clutter, clutter\n\n\ndef integer_one_hot(a, num_classes):\n a = a.astype(int)\n return np.squeeze(np.eye(num_classes)[a.reshape(-1)]).astype(int)\n\n\ndef corrupt_frame_padding(time_series_data, min_threshold=np.NINF, max_threshold=np.PINF, frame_channel_first=True):\n if not frame_channel_first:\n time_series_data = np.moveaxis(time_series_data, -1, 0)\n\n if np.min(time_series_data[0]) < min_threshold or 
np.max(time_series_data[0]) > max_threshold:\n print('error: first frame is broken')\n return\n\n if np.min(time_series_data[-1]) < min_threshold or np.max(time_series_data[-1]) > max_threshold:\n print('error: last frame is broken')\n return\n\n broken_frame_counter = 0\n\n # check first and last frame\n for frame_index in range(1, len(time_series_data) - 1):\n data = np.squeeze(time_series_data[frame_index], axis=-1)\n if np.min(time_series_data[frame_index]) < min_threshold or np.max(\n time_series_data[frame_index]) > max_threshold:\n # find broken frame, padding with frame +1 and frame -1\n broken_frame_before = time_series_data[frame_index - 1]\n broken_frame = time_series_data[frame_index]\n broken_frame_next = time_series_data[frame_index + 1]\n if np.min(time_series_data[frame_index + 1]) >= min_threshold and np.max(\n time_series_data[frame_index + 1]) < max_threshold:\n time_series_data[frame_index] = (time_series_data[frame_index - 1] + time_series_data[\n frame_index + 1]) * 0.5\n broken_frame_counter += 1\n print('find broken frame at index:', frame_index, ' interpolate by the frame before and after.')\n else:\n time_series_data[frame_index] = time_series_data[frame_index - 1]\n print('find two continues broken frames at index: ', frame_index, ', equalize with previous frame.')\n\n if not frame_channel_first:\n time_series_data = np.moveaxis(time_series_data, 0, -1)\n\n print('pad broken frame: ', broken_frame_counter)\n return time_series_data\n\n\ndef time_series_static_clutter_removal(time_series_data, init_clutter=None, signal_clutter_ratio=0.1,\n frame_channel_first=True):\n if not frame_channel_first:\n time_series_data = np.moveaxis(time_series_data, -1, 0)\n\n clutter = None\n if init_clutter:\n clutter = init_clutter\n else: # using first two frames as the init_clutter\n clutter = (time_series_data[0] + time_series_data[1]) * 0.5\n\n for frame_index in range(0, len(time_series_data)):\n clutter_removal_frame, clutter = clutter_removal(\n 
cur_frame=time_series_data[frame_index],\n clutter=clutter,\n signal_clutter_ratio=signal_clutter_ratio)\n\n time_series_data[frame_index] = clutter_removal_frame\n\n if not frame_channel_first:\n time_series_data = np.moveaxis(time_series_data, 0, -1)\n\n return time_series_data\n\ndef is_broken_frame(frame, min_threshold=np.NINF, max_threshold=np.PINF):\n if np.min(frame) < min_threshold or np.max(frame) > max_threshold:\n return True\n else:\n return False\n\n\ndef levenshtein_ratio_and_distance(s, t, ratio_calc=False):\n \"\"\" levenshtein_ratio_and_distance:\n Calculates levenshtein distance between two strings.\n If ratio_calc = True, the function computes the\n levenshtein distance ratio of similarity between two strings\n For all i and j, distance[i,j] will contain the Levenshtein\n distance between the first i characters of s and the\n first j characters of t\n \"\"\"\n # Initialize matrix of zeros\n rows = len(s) + 1\n cols = len(t) + 1\n distance = np.zeros((rows, cols), dtype=int)\n\n # Populate matrix of zeros with the indeces of each character of both strings\n for i in range(1, rows):\n for k in range(1, cols):\n distance[i][0] = i\n distance[0][k] = k\n\n # Iterate over the matrix to compute the cost of deletions,insertions and/or substitutions\n for col in range(1, cols):\n for row in range(1, rows):\n if s[row - 1] == t[col - 1]:\n cost = 0 # If the characters are the same in the two strings in a given position [i,j] then the cost is 0\n else:\n # In order to align the results with those of the Python Levenshtein package, if we choose to calculate the ratio\n # the cost of a substitution is 2. 
If we calculate just distance, then the cost of a substitution is 1.\n if ratio_calc == True:\n cost = 2\n else:\n cost = 1\n distance[row][col] = min(distance[row - 1][col] + 1, # Cost of deletions\n distance[row][col - 1] + 1, # Cost of insertions\n distance[row - 1][col - 1] + cost) # Cost of substitutions\n if ratio_calc == True:\n # Computation of the Levenshtein Distance Ratio\n Ratio = ((len(s) + len(t)) - distance[row][col]) / (len(s) + len(t))\n return Ratio\n else:\n # print(distance) # Uncomment if you want to see the matrix showing how the algorithm computes the cost of deletions,\n # insertions and/or substitutions\n # This is the minimum number of edits needed to convert string a to string b\n return \"The strings are {} edits away\".format(distance[row][col])\n\ndef replace_special(target_str: str, replacement_dict):\n for special, replacement in replacement_dict.items():\n # print('replacing ' + special)\n target_str = target_str.replace(special, replacement)\n return target_str"
] | [
[
"numpy.expand_dims",
"numpy.min",
"numpy.squeeze",
"numpy.eye",
"numpy.max",
"numpy.copy",
"numpy.interp",
"numpy.moveaxis",
"numpy.transpose",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
stefanv/dipy | [
"4d4518861a796502826f053c17161487db126487"
] | [
"doc/examples/tractography_clustering.py"
] | [
"\"\"\" \n\n=============================\nTractography Clustering\n=============================\n\nOverview\n========\n\n**This example gives a tour of clustering related features of dipy.**\n\nFirst import the necessary modules\n----------------------------------\n\n``numpy`` is for numerical computation\n\n\"\"\"\n\nimport numpy as np\n\nimport time\n\nfrom nibabel import trackvis as tv\n\nfrom dipy.tracking import metrics as tm\nfrom dipy.tracking import distances as td\nfrom dipy.io import pickles as pkl\nfrom dipy.viz import fvtk\n\n\n#fname='/home/user/Data_Backup/Data/PBC/pbc2009icdm/brain1/brain1_scan1_fiber_track_mni.trk'\n#fname='/home/user/Data/PBC/pbc2009icdm/brain1/brain1_scan1_fiber_track_mni.trk'\nfrom dipy.data import get_data\n\nfname=get_data('fornix')\nprint(fname)\n\n\"\"\"\nLoad Trackvis file for *Fornix*:\n\"\"\"\n\nstreams,hdr=tv.read(fname)\n\n\"\"\"\nCopy tracks:\n\"\"\"\n\nT=[i[0] for i in streams]\n\n#T=T[:1000]\n\n\"\"\"\nDownsample tracks to just 3 points:\n\"\"\"\n\ntracks=[tm.downsample(t,3) for t in T]\n\n\"\"\"\nDelete unnecessary data:\n\"\"\"\n\ndel streams,hdr\n\n\"\"\"\nPerform Local Skeleton Clustering (LSC) with a 5mm threshold:\n\"\"\"\n\nnow=time.clock()\nC=td.local_skeleton_clustering(tracks,d_thr=5)\nprint('Done in %.2f s' % (time.clock()-now,))\n\n\n\"\"\"\nReduce the number of points for faster visualization using the ``approx_polygon_track`` algorithm which retains points depending on how much they are need to define the shape of the track:\n\"\"\"\n\nT=[td.approx_polygon_track(t) for t in T]\n\n\"\"\"\nShow the initial *Fornix* dataset:\n\"\"\"\n\nr=fvtk.ren()\nfvtk.add(r,fvtk.line(T,fvtk.white,opacity=1))\n#fvtk.show(r)\nfvtk.record(r,n_frames=1,out_path='fornix_initial',size=(600,600))\n\n\"\"\"\n.. 
figure:: fornix_initial1000000.png\n :align: center\n\n **Initial Fornix dataset**.\n\"\"\"\n\n\"\"\"\nShow the *Fornix* after clustering (with random bundle colors):\n\"\"\"\n\nfvtk.clear(r)\ncolors=np.zeros((len(T),3))\nfor c in C:\n color=np.random.rand(1,3)\n for i in C[c]['indices']:\n colors[i]=color\nfvtk.add(r,fvtk.line(T,colors,opacity=1))\n#fvtk.show(r)\nfvtk.record(r,n_frames=1,out_path='fornix_clust',size=(600,600))\n\n\"\"\"\n.. figure:: fornix_clust1000000.png\n :align: center\n\n **Showing the different clusters with random colors**.\n\n\"\"\"\n\n\"\"\"\nCalculate some statistics about the clusters\n\"\"\"\n\nlens=[len(C[c]['indices']) for c in C]\nprint('max %d min %d' %(max(lens), min(lens)))\nprint('singletons %d ' % lens.count(1))\nprint('doubletons %d' % lens.count(2))\nprint('tripletons %d' % lens.count(3))\n\n\"\"\"\nFind and display the skeleton of most representative tracks in each cluster:\n\"\"\"\n\nskeleton=[]\n\nfvtk.clear(r)\n\nfor c in C:\n \n bundle=[T[i] for i in C[c]['indices']]\n si,s=td.most_similar_track_mam(bundle,'avg') \n skeleton.append(bundle[si])\n fvtk.label(r,text=str(len(bundle)),pos=(bundle[si][-1]),scale=(2,2,2))\n\nfvtk.add(r,fvtk.line(skeleton,colors,opacity=1))\n#fvtk.show(r)\nfvtk.record(r,n_frames=1,out_path='fornix_most',size=(600,600))\n\n\"\"\"\n.. figure:: fornix_most1000000.png\n :align: center\n\n **Showing skeleton with the most representative tracks as the skeletal representation**.\n \n The numbers are depicting the number of tracks in each cluster. This is a very compact way to see the underlying\n structures an alternative would be to draw the representative tracks with different widths.\n \n\"\"\"\n\n\"\"\"\nSave the skeleton information in the dictionary. 
Now try to play with different thresholds LSC and check the different results.\nTry it with your datasets and gives us some feedback.\n\n\"\"\"\n\nfor (i,c) in enumerate(C): \n C[c]['most']=skeleton[i]\n \nfor c in C: \n print('Keys in bundle %d' % c)\n print(C[c].keys())\n print('Shape of skeletal track (%d, %d) ' % C[c]['most'].shape)\n\npkl.save_pickle('skeleton_fornix.pkl',C)\n\n\n\n\n"
] | [
[
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dougalsutherland/cvxpy | [
"34349b5e41c124a6a1e32426e68af95b5044498c"
] | [
"cvxpy/reductions/solvers/qp_solvers/qp_solver.py"
] | [
"\"\"\"\nCopyright 2017 Robin Verschueren\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom cvxpy.atoms.affine.add_expr import AddExpression\nfrom cvxpy.atoms.affine.binary_operators import MulExpression\nfrom cvxpy.atoms.quad_form import QuadForm\nfrom cvxpy.constraints import NonPos, Zero\nfrom cvxpy.problems.objective import Minimize\nfrom cvxpy.reductions import InverseData\nfrom cvxpy.reductions.solvers.conic_solvers.conic_solver import ConicSolver\nfrom cvxpy.reductions.solvers.solver import Solver\nfrom cvxpy.reductions.utilities import are_args_affine\nimport cvxpy.settings as s\n\n\ndef is_stuffed_qp_objective(objective):\n \"\"\"QPSolver requires objectives to be stuffed in the following way.\n \"\"\"\n expr = objective.expr\n return (type(expr) == AddExpression\n and len(expr.args) == 2\n and type(expr.args[0]) == QuadForm\n and type(expr.args[1]) == MulExpression\n and expr.args[1].is_affine())\n\n\nclass QpSolver(Solver):\n \"\"\"\n A QP solver interface.\n \"\"\"\n\n def accepts(self, problem):\n return (type(problem.objective) == Minimize\n and is_stuffed_qp_objective(problem.objective)\n and all(type(c) == Zero or type(c) == NonPos\n for c in problem.constraints)\n and are_args_affine(problem.constraints))\n\n def apply(self, problem):\n \"\"\"\n Construct QP problem data stored in a dictionary.\n The QP has the following form\n\n minimize 1/2 x' P x + q' x\n subject to A x = b\n F x <= g\n\n \"\"\"\n 
inverse_data = InverseData(problem)\n\n obj = problem.objective\n # quadratic part of objective is x.T * P * x but solvers expect\n # 0.5*x.T * P * x.\n P = 2*obj.expr.args[0].args[1].value\n q = obj.expr.args[1].args[0].value.flatten()\n\n # Get number of variables\n n = problem.size_metrics.num_scalar_variables\n\n # TODO(akshayka): This dependence on ConicSolver is hacky; something\n # should change here.\n eq_cons = [c for c in problem.constraints if type(c) == Zero]\n if eq_cons:\n eq_coeffs = list(zip(*[ConicSolver.get_coeff_offset(con.expr)\n for con in eq_cons]))\n A = sp.vstack(eq_coeffs[0])\n b = - np.concatenate(eq_coeffs[1])\n else:\n A, b = sp.csr_matrix((0, n)), -np.array([])\n\n ineq_cons = [c for c in problem.constraints if type(c) == NonPos]\n if ineq_cons:\n ineq_coeffs = list(zip(*[ConicSolver.get_coeff_offset(con.expr)\n for con in ineq_cons]))\n F = sp.vstack(ineq_coeffs[0])\n g = - np.concatenate(ineq_coeffs[1])\n else:\n F, g = sp.csr_matrix((0, n)), -np.array([])\n\n # Create dictionary with problem data\n variables = problem.variables()[0]\n data = {}\n data[s.P] = sp.csc_matrix(P)\n data[s.Q] = q\n data[s.A] = sp.csc_matrix(A)\n data[s.B] = b\n data[s.F] = sp.csc_matrix(F)\n data[s.G] = g\n data[s.BOOL_IDX] = [t[0] for t in variables.boolean_idx]\n data[s.INT_IDX] = [t[0] for t in variables.integer_idx]\n data['n_var'] = n\n data['n_eq'] = A.shape[0]\n data['n_ineq'] = F.shape[0]\n\n inverse_data.sorted_constraints = ineq_cons + eq_cons\n\n # Add information about integer variables\n inverse_data.is_mip = \\\n len(data[s.BOOL_IDX]) > 0 or len(data[s.INT_IDX]) > 0\n\n return data, inverse_data\n"
] | [
[
"scipy.sparse.csc_matrix",
"scipy.sparse.csr_matrix",
"numpy.concatenate",
"scipy.sparse.vstack",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
jpjuvo/RSNA-MICCAI-Brain-Tumor-Classification | [
"a8a4e9257b7475bc328870504edd18fdd9ec9d2f",
"a8a4e9257b7475bc328870504edd18fdd9ec9d2f",
"a8a4e9257b7475bc328870504edd18fdd9ec9d2f"
] | [
"src/seg_model_utils/torchio_transforms.py",
"src/seg_model_utils/visualization.py",
"src/train_2d_cnn_fold.py"
] | [
"import torch\nimport torchio as tio\nimport numpy as np\n\ndef load_tio_image(fn):\n \"\"\"\n ScalarImage(shape: (c, w, h, d))\n dtype: torch.DoubleTensor\n \"\"\"\n arr = np.load(fn).swapaxes(0,3)\n return tio.ScalarImage(tensor=arr)\n\ndef arr_2_tio_image(arr):\n \"\"\"\n ScalarImage(shape: (c, w, h, d))\n dtype: torch.DoubleTensor\n \"\"\"\n arr = arr.swapaxes(0,3)\n return tio.ScalarImage(tensor=arr)\n\ndef load_tio_seg_image(fn):\n \"\"\"\n LabelMap(shape: (c, w, h, d))\n dtype: torch.FloatTensor\n \n Intensity transforms are not applied to these images.\n Nearest neighbor interpolation is always used to resample label maps.\n \"\"\"\n if fn is None:\n return None\n if not os.path.exists(fn):\n return None\n arr = (np.expand_dims(np.load(fn),3).swapaxes(0,3) > 0).astype(np.float32)\n return tio.LabelMap(tensor=arr)\n\ndef arr_2_tio_seg_image(arr):\n \"\"\"\n LabelMap(shape: (c, w, h, d))\n dtype: torch.FloatTensor\n \n Intensity transforms are not applied to these images.\n Nearest neighbor interpolation is always used to resample label maps.\n \"\"\"\n if arr is None:\n return None\n arr = (np.expand_dims(arr,3).swapaxes(0,3) > 0).astype(np.float32)\n return tio.LabelMap(tensor=arr)\n\ndef load_tio_subject(image_fn:str, label:int, seg_fn=None):\n return tio.Subject(\n rgb_image=load_tio_image(image_fn),\n segmentation=load_tio_seg_image(seg_fn),\n label=int(label),\n name=os.path.basename(image_fn).split('.')[0])",
"\"\"\"\nSee notebook 5 for example use of show_mri_sample()\n\"\"\"\nimport glob\nimport os\nimport random\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils.data import Dataset\nimport matplotlib.pyplot as plt\nimport cv2\nimport scipy.ndimage as ndimage\n\n\ndef make_bg_transparent(im, bg_th=0.0, set_to_color=None):\n # create transparency alpha channel\n # convert image to RGBA\n if len(im.shape) == 3:\n alpha_c = (np.sum(im[:,:,:],axis=2) > bg_th).astype(im.dtype)\n c1,c2,c3 = cv2.split(im)\n else:\n alpha_c = (im[:,:] > bg_th).astype(im.dtype)\n c1,c2,c3 = im.copy(), im.copy(), im.copy()\n if set_to_color is not None:\n zeros = np.zeros_like(c1)\n if set_to_color == 'green':\n merged = np.stack([zeros,c2,zeros,alpha_c], axis=-1)\n elif set_to_color == 'red':\n merged = np.stack([c1,zeros,zeros,alpha_c], axis=-1)\n elif set_to_color == 'royalblue':\n merged = np.stack([c1,zeros,zeros,alpha_c], axis=-1)\n elif set_to_color == 'violet':\n merged = np.stack([c1,zeros,c3,alpha_c], axis=-1)\n elif set_to_color == 'yellow':\n merged = np.stack([c1,c2,zeros,alpha_c], axis=-1)\n else:\n merged = np.stack([c1,c2,c3,alpha_c], axis=-1)\n return merged\n\ndef to_3d_points(im, th=1e-6, downsample=5):\n xs,ys,ds = [],[],[]\n if len(im.shape) == 4:\n im3d = np.sum(im,axis=3)\n else:\n im3d = im\n depth,width,height = im3d.shape\n step_vol = downsample**3\n for x in range(0, width - downsample, downsample):\n for y in range(0, height - downsample, downsample):\n for d in range(0, depth - downsample, downsample):\n if (np.sum(im3d[d:d+downsample, x:x+downsample, y:y+downsample]) / step_vol) > th:\n xs.append(x + (downsample//2))\n ys.append(y + (downsample//2))\n ds.append(d + (downsample//2))\n return np.array(xs), np.array(ys), np.array(ds)\n\ndef adjust_saturation(img, sat_scale=0.3):\n hsv_im = cv2.cvtColor((img * 255).astype(np.uint8), cv2.COLOR_RGB2HSV)\n (h, s, v) = cv2.split(hsv_im)\n s = s*sat_scale\n s = np.clip(s,0,255)\n hsv_im = 
np.stack([h,s,v],axis=2).astype(np.uint8)\n return cv2.cvtColor(hsv_im, cv2.COLOR_HSV2RGB) / 255.\n\ndef show_mri_sample(sample, pred_mask=None, pred_lbl=None, seg_downsample=None, save_fn=None):\n \"\"\" Plot sample in three projections \"\"\"\n plt.close('all')\n \n alpha=0.5\n image_alpha=1.0\n \n ims = sample['image'].numpy()\n means = sample['mean'].numpy()\n stds = sample['std'].numpy()\n segs = sample['segmentation'].numpy() if 'segmentation' in sample else None\n \n # add batch dims if missing\n if ims.ndim == 4:\n ims = np.expand_dims(ims, 0)\n means = np.expand_dims(means, 0)\n stds = np.expand_dims(stds, 0)\n if segs is not None:\n segs = np.expand_dims(segs, 0)\n \n n_images = len(ims)\n n_root = int(np.ceil(np.sqrt(n_images)))\n n_cols = n_root * 2\n n_rows = n_root * 2\n # special case fix to get with correct with small bs\n if n_images == 2:\n n_rows = 2\n \n fig_scale = 2\n f = plt.figure(figsize=(fig_scale*n_cols,fig_scale*n_rows))\n \n # Read additional meta from batch\n brats_ids = [sample['BraTSID']] if n_images == 1 else sample['BraTSID']\n labels = None\n if 'label' in sample:\n labels = [sample['label']] if n_images == 1 else sample['label']\n \n def _subplot_index(index, row_off, col_off):\n startrow = (index * 2)//n_cols\n startcol = (index * 2)%n_cols\n return (2*startrow+row_off)*n_cols + (startcol + col_off) + 1\n \n for index in range(n_images):\n im = ims[index]\n seg = segs[index]\n seg = np.swapaxes(seg, 0,3)\n # upsample seg back to original size if it has been downsampled\n if seg_downsample is not None:\n seg = seg.repeat(seg_downsample, axis=0).repeat(seg_downsample, axis=1).repeat(seg_downsample, axis=2)\n \n # Normalize images for visualization\n im = np.swapaxes(im, 0,3) # swap depth and chan axes\n im = (im * stds[index]) + means[index]\n \n title = f'BraTSID: {brats_ids[index]}'\n if labels is not None:\n title += f', GT-MGMT:{labels[index]}'\n if pred_lbl is not None:\n title += 
f'\\nPred-MGMT:{float(pred_lbl[index][0]):.3f}'\n \n d,x,y,c = im.shape\n \n coronal_ax = f.add_subplot(n_rows,n_cols, _subplot_index(index,0,0))\n coronal_ax.set_title(title + ' - coronal', fontsize=8)\n coronal_ax.imshow(make_bg_transparent(adjust_saturation(im[::-1,x//2,:,:])), alpha=image_alpha)\n \n sagittal_ax = f.add_subplot(n_rows,n_cols,_subplot_index(index,0,1))\n sagittal_ax.set_title(title + ' - sagittal', fontsize=8)\n sagittal_ax.get_yaxis().set_visible(False)\n sagittal_ax.imshow(make_bg_transparent(adjust_saturation(im[::-1,:,y//2,:])), alpha=image_alpha)\n \n axial_ax = f.add_subplot(n_rows,n_cols,_subplot_index(index,1,0))\n axial_ax.set_title(title + ' - axial', fontsize=8)\n axial_ax.imshow(make_bg_transparent(adjust_saturation(im[d//2,:,:,:])), alpha=image_alpha)\n \n proj_ax = f.add_subplot(n_rows, n_cols, _subplot_index(index,1,1), projection='3d')\n proj_ax.scatter(*to_3d_points(im), color='gray', alpha=0.015, s=5, depthshade=False)\n proj_ax.set_title(f'Green=GT-tumor, Red=Pred-tumor\\n{title}', fontsize=6)\n proj_ax.set_xticks([]) \n proj_ax.set_yticks([]) \n proj_ax.set_zticks([])\n \n if seg is not None:\n for seg_chan, color in zip(range(seg.shape[3]),['green']):\n coronal_ax.imshow(make_bg_transparent(seg[::-1,x//2,:,seg_chan], set_to_color=color), alpha=alpha)\n sagittal_ax.imshow(make_bg_transparent(seg[::-1,:,y//2,seg_chan], set_to_color=color), alpha=alpha)\n axial_ax.imshow(make_bg_transparent(seg[d//2,:,:,seg_chan], set_to_color=color), alpha=alpha)\n proj_ax.scatter(*to_3d_points(seg[:,:,:,seg_chan]), color=color, s=5, alpha=0.05)\n \n if pred_mask is not None:\n pred = np.swapaxes(pred_mask[index].cpu().numpy(), 0,3)\n pred = np.clip(pred, 0, 1.)\n # upsample seg back to original size if it has been downsampled\n if seg_downsample is not None:\n pred = pred.repeat(seg_downsample, axis=0).repeat(seg_downsample, axis=1).repeat(seg_downsample, axis=2)\n for seg_chan, color in zip(range(pred.shape[3]),['red']):\n 
coronal_ax.imshow(make_bg_transparent(pred[::-1,x//2,:, seg_chan], set_to_color=color, bg_th=0.5), alpha=alpha)\n sagittal_ax.imshow(make_bg_transparent(pred[::-1,:,y//2, seg_chan], set_to_color=color, bg_th=0.5), alpha=alpha)\n axial_ax.imshow(make_bg_transparent(pred[d//2,:,:, seg_chan], set_to_color=color, bg_th=0.5), alpha=alpha)\n proj_ax.scatter(*to_3d_points(pred[:,:,:,seg_chan], th=0.5), color=color, s=5, alpha=0.05)\n \n # draw axial lines\n coronal_ax.plot([0,x-1],[d//2,d//2],'--',color='white', linewidth=1) # coronal horizontal\n coronal_ax.plot([x//2,x//2],[0,d-1],'--',color='white', linewidth=1) # coronal vertical\n sagittal_ax.plot([0,y-1],[d//2,d//2],'--',color='white', linewidth=1) # sagittal horizontal\n sagittal_ax.plot([y//2,y//2],[0,d-1],'--',color='white', linewidth=1) # sagittal vertical\n axial_ax.plot([0,y-1],[x//2,x//2],'--',color='white', linewidth=1) # axial horizontal\n axial_ax.plot([x//2,x//2],[0,y-1],'--',color='white', linewidth=1) # axial vertical\n \n plt.subplots_adjust(left=0.00,top=1.,right=1.,bottom=0.00, wspace=0.15, hspace=0.15)\n \n bbox = f.get_window_extent().transformed(f.dpi_scale_trans.inverted())\n width, height = bbox.width*f.dpi, bbox.height*f.dpi\n width *= 1.05\n height *= 1.05\n #if n_images == 2:\n # n_rows = 2\n \n for row in range(0, n_rows,2):\n if n_images == 2 and row > 0:\n break\n for col in range(0, n_cols,2):\n different_color = (row//2) % 2 == (col//2) % 2\n color = (1,1,1) if different_color else (0.8,0.8,0.8)\n \n f.patches.extend([\n plt.Rectangle(\n (width * col / n_cols, height * (n_rows - row - 2) / n_rows), \n width / max(1,n_cols//2), \n height / max(1,n_rows//2),\n fill=True, \n color=color, \n zorder=-1, # below axes\n alpha=0.5,\n transform=None, \n figure=f)\n ])\n \n if save_fn is not None:\n plt.savefig(save_fn, transparent=False)\n else:\n plt.show()",
"import glob\nimport os\nimport random\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset\nimport matplotlib.pyplot as plt\nimport cv2\nimport scipy.ndimage as ndimage\nimport torch.optim as optim\nimport time\nimport shutil\nfrom sklearn.metrics import roc_curve, auc\nfrom argparse import ArgumentParser, Namespace\n\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nimport math\nfrom functools import partial\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nimport torchio as tio\nfrom tqdm.auto import tqdm\nfrom clf_model_utils.miccai_2d_dataset import MICCAI2DDataset\n\nimport json\nimport wandb\n\nimport fastai\nfrom fastai.vision.all import *\nfrom fastai.data.core import DataLoaders\nfrom fastai.callback.all import *\nfrom fastai.callback.wandb import WandbCallback\nimport torch.nn.functional as F\nfrom timm import create_model\nfrom fastai.vision.learner import _update_first_layer\nfrom fastai.vision.learner import _add_norm\n\nLOG_WANDB = False\n\n# This is modified from https://libauc.org/\nclass AUCMLoss(torch.nn.Module):\n \"\"\"\n AUCM Loss with squared-hinge function: a novel loss function to directly optimize AUROC\n \n inputs:\n margin: margin term for AUCM loss, e.g., m in [0, 1]\n imratio: imbalance ratio, i.e., the ratio of number of postive samples to number of total samples\n outputs:\n loss value \n \n Reference: \n Yuan, Z., Yan, Y., Sonka, M. and Yang, T., \n Large-scale Robust Deep AUC Maximization: A New Surrogate Loss and Empirical Studies on Medical Image Classification. 
\n International Conference on Computer Vision (ICCV 2021)\n Link:\n https://arxiv.org/abs/2012.03173\n \"\"\"\n def __init__(self, margin=1.0, imratio=None, device=None):\n super(AUCMLoss, self).__init__()\n if not device:\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n else:\n self.device = device \n self.margin = margin\n self.p = imratio\n # https://discuss.pytorch.org/t/valueerror-cant-optimize-a-non-leaf-tensor/21751\n self.a = torch.zeros(1, dtype=torch.float32, device=self.device, requires_grad=True).to(self.device) #cuda()\n self.b = torch.zeros(1, dtype=torch.float32, device=self.device, requires_grad=True).to(self.device) #.cuda()\n self.alpha = torch.zeros(1, dtype=torch.float32, device=self.device, requires_grad=True).to(self.device) #.cuda()\n \n def forward(self, input, target):\n\n y_pred = (torch.softmax(input, 1)[:,1]).unsqueeze(1)\n y_true = target.unsqueeze(1)\n\n if self.p is None:\n self.p = (y_true==1).float().sum()/y_true.shape[0] \n \n y_pred = y_pred.reshape(-1, 1) # be carefull about these shapes\n y_true = y_true.reshape(-1, 1) \n loss = (1-self.p)*torch.mean((y_pred - self.a)**2*(1==y_true).float()) + \\\n self.p*torch.mean((y_pred - self.b)**2*(0==y_true).float()) + \\\n 2*self.alpha*(self.p*(1-self.p)*self.margin + \\\n torch.mean((self.p*y_pred*(0==y_true).float() - (1-self.p)*y_pred*(1==y_true).float())) )- \\\n self.p*(1-self.p)*self.alpha**2\n return loss\n\ndef datestr():\n now = time.gmtime()\n return '{:02}_{:02}___{:02}_{:02}'.format(now.tm_mday, now.tm_mon, now.tm_hour, now.tm_min)\n\ndef make_dirs(path):\n if os.path.exists(path):\n shutil.rmtree(path)\n os.mkdir(path)\n else:\n os.makedirs(path)\n\ndef show_2d_batch(batch, preds=None, scale=4, save_fn=None):\n _images, _labels = batch\n images = _images.cpu().numpy()[:,0,:,:] # reduce rgb dimension to grayscale\n labels = _labels.cpu().numpy()\n \n cmap = matplotlib.cm.rainbow\n norm = matplotlib.colors.Normalize(vmin=np.percentile(images, 2), 
vmax=np.percentile(images, 98))\n \n if preds is not None:\n pred_lbls = list(preds.cpu().numpy())\n else:\n pred_lbls = [-1 for _ in labels]\n \n n_root = int(np.ceil(np.sqrt(len(images))))\n plt.close('all')\n f, axs = plt.subplots(n_root, n_root, figsize=((scale + 1)*n_root, scale*n_root))\n axs = axs.flatten()\n for img, lbl, pred, ax in zip(images, labels, pred_lbls, axs):\n axim = ax.imshow(img, cmap=cmap, norm=norm)\n \n divider = make_axes_locatable(ax)\n cax = divider.append_axes('right', size='5%', pad=0.05)\n f.colorbar(matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical')\n \n ax.set_title(f'GT: {lbl}, Pred: {pred:.3f}', fontsize=16)\n ax.set_xticks([])\n ax.set_yticks([])\n \n # hide empties\n for ax_index in range(len(images), len(axs)):\n axs[ax_index].axis('off')\n \n plt.tight_layout()\n plt.subplots_adjust(left = 0.1, right = 0.9, wspace=0.2, hspace=0.05)\n \n if save_fn is not None:\n plt.savefig(save_fn, transparent=False)\n else:\n plt.show()\n\nclass RocStarLoss(torch.nn.Module):\n \"\"\"Smooth approximation for ROC AUC\n \"\"\"\n def __init__(self, delta = 1.0, sample_size = 100, sample_size_gamma = 100, update_gamma_each=100):\n r\"\"\"\n Args:\n delta: Param from article\n sample_size (int): Number of examples to take for ROC AUC approximation\n sample_size_gamma (int): Number of examples to take for Gamma parameter approximation\n update_gamma_each (int): Number of steps after which to recompute gamma value.\n \"\"\"\n super().__init__()\n self.delta = delta\n self.sample_size = sample_size\n self.sample_size_gamma = sample_size_gamma\n self.update_gamma_each = update_gamma_each\n self.steps = 0\n size = max(sample_size, sample_size_gamma)\n\n # Randomly init labels\n self.y_pred_history = torch.rand((size, 1)).cuda()\n self.y_true_history = torch.randint(2, (size, 1)).cuda()\n \n\n def forward(self, y_pred, target):\n \"\"\"\n Args:\n y_pred: Tensor of model predictions in [0, 1] range. 
Shape (B x 1)\n y_true: Tensor of true labels in {0, 1}. Shape (B x 1)\n \"\"\"\n y_pred_1 = (torch.softmax(y_pred, 1)[:,1]).unsqueeze(1)\n y_true = target.unsqueeze(1)\n \n if self.steps % self.update_gamma_each == 0:\n self.update_gamma()\n self.steps += 1\n \n positive = y_pred_1[y_true > 0]\n negative = y_pred_1[y_true < 1]\n \n # Take last `sample_size` elements from history\n y_pred_history = self.y_pred_history[- self.sample_size:]\n y_true_history = self.y_true_history[- self.sample_size:]\n \n positive_history = y_pred_history[y_true_history > 0]\n negative_history = y_pred_history[y_true_history < 1]\n \n if positive.size(0) > 0:\n diff = negative_history.view(1, -1) + self.gamma - positive.view(-1, 1)\n loss_positive = torch.nn.functional.relu(diff ** 2).mean()\n else:\n loss_positive = 0\n \n if negative.size(0) > 0:\n diff = negative.view(1, -1) + self.gamma - positive_history.view(-1, 1)\n loss_negative = torch.nn.functional.relu(diff ** 2).mean()\n else:\n loss_negative = 0\n \n loss = loss_negative + loss_positive\n \n # Update FIFO queue\n batch_size = y_pred_1.size(0)\n self.y_pred_history = torch.cat((self.y_pred_history[batch_size:], y_pred_1.clone().detach()))\n self.y_true_history = torch.cat((self.y_true_history[batch_size:], y_pred_1.clone().detach()))\n return loss\n\n def update_gamma(self):\n # Take last `sample_size_gamma` elements from history\n y_pred = self.y_pred_history[- self.sample_size_gamma:]\n y_true = self.y_true_history[- self.sample_size_gamma:]\n \n positive = y_pred[y_true > 0]\n negative = y_pred[y_true < 1]\n \n # Create matrix of size sample_size_gamma x sample_size_gamma\n diff = positive.view(-1, 1) - negative.view(1, -1)\n AUC = (diff > 0).type(torch.float).mean()\n num_wrong_ordered = (1 - AUC) * diff.flatten().size(0)\n \n # Adjuct gamma, so that among correct ordered samples `delta * num_wrong_ordered` were considered\n # ordered incorrectly with gamma added\n correct_ordered = diff[diff > 
0].flatten().sort().values\n idx = min(int(num_wrong_ordered * self.delta), len(correct_ordered)-1)\n self.gamma = correct_ordered[idx]\n\n@patch\n@delegates(subplots)\ndef plot_metrics(self: Recorder, nrows=None, ncols=None, figsize=None, **kwargs):\n metrics = np.stack(self.values)\n names = self.metric_names[1:-1]\n n = len(names) - 1\n if nrows is None and ncols is None:\n nrows = int(math.sqrt(n))\n ncols = int(np.ceil(n / nrows))\n elif nrows is None: nrows = int(np.ceil(n / ncols))\n elif ncols is None: ncols = int(np.ceil(n / nrows))\n figsize = figsize or (ncols * 6, nrows * 4)\n fig, axs = subplots(nrows, ncols, figsize=figsize, **kwargs)\n axs = [ax if i < n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]\n for i, (name, ax) in enumerate(zip(names, [axs[0]] + axs)):\n ax.plot(metrics[:, i], color='#1f77b4' if i == 0 else '#ff7f0e', label='valid' if i > 0 else 'train')\n ax.set_title(name if i > 1 else 'losses')\n ax.legend(loc='best')\n save_fn = None\n if 'save_fn' in kwargs:\n save_fn = kwargs['save_fn']\n if save_fn is not None:\n plt.savefig(save_fn, transparent=False)\n else:\n plt.show()\n\n# timm + fastai functions copied from https://walkwithfastai.com/vision.external.timm\ndef create_timm_body(arch:str, pretrained=True, cut=None, n_in=3):\n \"Creates a body from any model in the `timm` library.\"\n if 'vit' in arch:\n model = create_model(arch, pretrained=pretrained, num_classes=0)\n else:\n model = create_model(arch, pretrained=pretrained, num_classes=0, global_pool='')\n _update_first_layer(model, n_in, pretrained)\n if cut is None:\n ll = list(enumerate(model.children()))\n cut = next(i for i,o in reversed(ll) if has_pool_type(o))\n if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])\n elif callable(cut): return cut(model)\n else: raise NamedError(\"cut must be either integer or function\")\n\ndef create_timm_model(arch:str, n_out, cut=None, pretrained=True, n_in=3, init=nn.init.kaiming_normal_, 
custom_head=None,\n concat_pool=True, **kwargs):\n \"Create custom architecture using `arch`, `n_in` and `n_out` from the `timm` library\"\n body = create_timm_body(arch, pretrained, None, n_in)\n if custom_head is None:\n nf = num_features_model(nn.Sequential(*body.children()))\n head = create_head(nf, n_out, concat_pool=concat_pool, **kwargs)\n else: head = custom_head\n model = nn.Sequential(body, head)\n if init is not None: apply_init(model[1], init)\n return model\n\ndef timm_learner(dls, arch:str, loss_func=None, pretrained=True, cut=None, splitter=None,\n y_range=None, config=None, n_out=None, normalize=True, **kwargs):\n \"Build a convnet style learner from `dls` and `arch` using the `timm` library\"\n if config is None: config = {}\n if n_out is None: n_out = get_c(dls)\n assert n_out, \"`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`\"\n if y_range is None and 'y_range' in config: y_range = config.pop('y_range')\n model = create_timm_model(arch, n_out, default_split, pretrained, y_range=y_range, **config)\n kwargs.pop('ps')\n learn = Learner(dls, model, loss_func=loss_func, splitter=default_split, **kwargs)\n if pretrained: learn.freeze()\n return learn\n\ndef main(fold:int, train_df_fn:str, npy_dir:str, bs:int, epochs:int, \n lr:float=1e-4, arch:str='resnet34', ps:float=0.6, \n optim:str='ranger', im_sz:int=256, loss_name:str=\"rocstar\"):\n\n modality = str(os.path.dirname(npy_dir)).split('_')[-1]\n name = f'fold-{fold}'\n group_name = f'{modality}_{arch}_bs{bs}_ep{epochs}_{loss_name}_lr{lr}_ps{ps}_{optim}_sz{im_sz}'\n train_dir = npy_dir\n \n\n out_folder = os.path.join('./output', group_name, name)\n make_dirs(out_folder)\n\n # start logging\n global LOG_WANDB\n wandb_config_fn = None\n if os.path.exists('../wandb_params.json'): \n wandb_config_fn = '../wandb_params.json'\n if os.path.exists('./wandb_params.json'): \n wandb_config_fn = './wandb_params.json'\n if wandb_config_fn is not None:\n with 
open(wandb_config_fn) as f:\n config = json.load(f)\n wandb.init(**config,\n name=name, group=group_name,\n tags=['MGMT-classification', f'fold-{fold}', modality], \n config={\n 'bs':bs, 'epochs':epochs, 'fold':fold,\n 'ep':epochs, 'lr':lr, 'arch':arch, 'ps':ps, \n 'optim':optim, 'sz':im_sz, 'loss_name': loss_name,\n 'modality' : modality\n },\n sync_tensorboard=True)\n LOG_WANDB = True\n\n df = pd.read_csv(train_df_fn)\n train_df = df[df.fold != fold]\n val_df = df[df.fold == fold]\n image_size = (im_sz,im_sz)\n\n if len(val_df) == 0:\n val_df = df[df.fold == 0]\n\n tio_augmentations = tio.Compose([\n tio.RandomAffine(p=0.5),\n tio.RandomBiasField(p=0.3),\n tio.RandomGhosting(p=0.05),\n tio.RandomElasticDeformation(p=0.2),\n tio.RandomSpike(p=0.05),\n tio.RandomNoise(p=0.1),\n tio.RandomAnisotropy(p=0.05),\n tio.RandomBlur(p=0.1),\n tio.RandomGamma(0.1, p=0.15),\n ])\n\n ds_t = MICCAI2DDataset(\n train_df, \n npy_dir=npy_dir,\n image_size=image_size,\n tio_augmentations=tio_augmentations,\n is_train=True\n )\n\n ds_v = MICCAI2DDataset(\n val_df, \n npy_dir=npy_dir,\n image_size=image_size,\n tio_augmentations=None,\n is_train=False\n )\n\n num_workers = 8\n dls = DataLoaders.from_dsets(ds_t, ds_v, bs=bs, device='cuda', num_workers=num_workers)\n\n loss = LabelSmoothingCrossEntropyFlat(eps=0.2)\n create_learner = cnn_learner\n\n if arch == 'densetnet121':\n base = densenet121\n elif arch == 'resnet18':\n base = resnet18\n elif arch == 'resnet34':\n base = resnet34\n elif arch == 'resnet50':\n base = resnet50\n elif arch == 'resnet101':\n base = resnet101\n elif arch == 'densenet169':\n base = densenet169\n else:\n create_learner = timm_learner\n base = arch\n \n if optim == \"ranger\":\n opt_func = fastai.optimizer.ranger\n else:\n opt_func = fastai.optimizer.Adam\n\n if loss_name == 'rocstar':\n second_loss = RocStarLoss()\n elif loss_name == 'bce':\n second_loss = loss\n elif loss_name == 'libauc':\n second_loss = AUCMLoss()\n else:\n raise Exception\n\n learn = 
create_learner(\n dls, \n base,\n pretrained=True,\n n_out=2,\n loss_func=loss,\n opt_func=opt_func,\n metrics=[\n RocAucBinary(),\n accuracy\n ],\n ps=ps\n ).to_fp16()\n\n # train head first with CE\n learn.fit_one_cycle(1, lr)\n learn.unfreeze()\n\n model_path = os.path.join('..', out_folder, 'final')\n cbs = [WandbCallback(log=None, log_preds=False, log_model=False)] if LOG_WANDB else []\n \n #best_path = os.path.join('..', out_folder, 'best')\n #save_cb = SaveModelCallback(monitor='roc_auc_score', fname=best_path, reset_on_fit=True)\n #cbs.append(save_cb)\n\n # continue with main loss\n learn.loss_func = second_loss\n learn.fit_flat_cos(epochs, lr, div_final=2, pct_start=0.99, cbs=cbs)\n \n learn.save(model_path, with_opt=False)\n\n #plot_fn = os.path.join(out_folder, 'plot_metrics.png')\n #plt.close('all')\n #learn.recorder.plot_metrics()\n #plt.savefig(plot_fn)\n\n #if LOG_WANDB:\n # wandb.log({'training': wandb.Image(plot_fn)})\n \n # eval\n if fold >= 0:\n dl_test = DataLoader(ds_v, 32, num_workers=8, shuffle=False)\n test_preds = learn.get_preds(dl=dl_test)\n\n test_p, test_gt = test_preds\n test_p = torch.softmax(test_p, 1)\n test_p = test_p.numpy()[:,1]\n test_gt = test_gt.numpy()\n\n tta_preds = learn.tta(dl=dl_test)\n tta_p, tta_gt = tta_preds\n tta_p = torch.softmax(tta_p, 1)\n tta_p = tta_p.numpy()[:,1]\n tta_gt = tta_gt.numpy()\n\n fpr, tpr, _ = roc_curve(np.array(test_gt), np.array(test_p))\n tta_fpr, tta_tpr, _ = roc_curve(np.array(tta_gt), np.array(tta_p))\n\n roc_auc = auc(fpr, tpr)\n tta_roc_auc = auc(tta_fpr, tta_tpr)\n\n acc = np.sum((np.array(test_gt) > 0.5) == (np.array(test_p) > 0.5)) / len(test_gt)\n tta_acc = np.sum((np.array(test_gt) > 0.5) == (np.array(test_p) > 0.5)) / len(test_gt)\n\n auc_fn = os.path.join(out_folder, 'auc.png')\n plt.close('all')\n plt.figure()\n lw = 2\n plt.plot(fpr, tpr, color='darkorange',\n lw=lw, label=f'ROC curve (area = {roc_auc:.2f}), Acc. 
= {acc*100:.2f}')\n plt.plot(tta_fpr, tta_tpr, color='red',\n lw=lw, label=f'TTA ROC curve (area = {tta_roc_auc:.2f}), Acc. = {tta_acc*100:.2f}')\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.savefig(auc_fn, transparent=False)\n \n if LOG_WANDB:\n wandb.log({'validation': wandb.Image(auc_fn)})\n wandb.log({'auc' : roc_auc})\n wandb.log({'auc-tta' : tta_roc_auc})\n wandb.log({'acc' : acc})\n wandb.log({'acc-tta' : tta_acc})\n\n result_df = val_df.copy()\n result_df['pred_mgmt'] = list(test_p)\n result_df['pred_mgmt_tta'] = list(tta_p)\n result_df.to_csv(os.path.join(out_folder, 'oof.csv'))\n\nif __name__ == '__main__':\n parser = ArgumentParser(parents=[])\n \n parser.add_argument('--fold', type=int)\n parser.add_argument('--bs', type=int, default=32)\n parser.add_argument('--epochs', type=int, default=30)\n parser.add_argument('--lr', type=float, default=1e-4)\n parser.add_argument('--train_df', type=str, default='./input/train_feature_data_v2.csv')\n parser.add_argument('--npy_dir', type=str, default='./input/aligned_and_cropped_t2w/')\n parser.add_argument('--arch', type=str, default='resnet34')\n parser.add_argument('--ps', type=float, default=0.6)\n parser.add_argument('--optim', type=str, default='ranger')\n parser.add_argument('--im_sz', type=int, default=256)\n parser.add_argument('--loss_name', type=str, default='auclib')\n\n\n params = parser.parse_args()\n fold = params.fold\n train_df = params.train_df\n npy_dir = params.npy_dir\n bs = params.bs\n epochs = params.epochs\n lr = params.lr\n arch = params.arch\n ps = params.ps\n optim = params.optim\n im_sz = params.im_sz\n loss_name = params.loss_name\n \n main(fold, train_df, npy_dir, bs, epochs, lr, arch, ps, optim, im_sz, loss_name)"
] | [
[
"numpy.load",
"numpy.expand_dims"
],
[
"numpy.swapaxes",
"numpy.expand_dims",
"numpy.sqrt",
"numpy.clip",
"numpy.stack",
"matplotlib.pyplot.savefig",
"numpy.zeros_like",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots_adjust",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.legend",
"torch.randint",
"torch.zeros",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.plot",
"torch.cuda.is_available",
"torch.softmax",
"matplotlib.pyplot.tight_layout",
"pandas.read_csv",
"numpy.stack",
"numpy.ceil",
"torch.nn.functional.relu",
"torch.rand",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.figure",
"torch.nn.Sequential",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"sklearn.metrics.auc",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplots",
"numpy.percentile",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
levitsky/biosaur2 | [
"5cc8474906408c58ff043af722607c1452aa444f"
] | [
"biosaur2/utils.py"
] | [
"from pyteomics import mzml\nimport numpy as np\nfrom collections import defaultdict, Counter\nfrom os import path\nimport math\nfrom scipy.optimize import curve_fit\nimport logging\nlogger = logging.getLogger(__name__)\nfrom .cutils import get_fast_dict, get_and_calc_apex_intensity_and_scan\n\nclass MS1OnlyMzML(mzml.MzML): \n _default_iter_path = '//spectrum[./*[local-name()=\"cvParam\" and @name=\"ms level\" and @value=\"1\"]]' \n _use_index = False \n _iterative = False\n\ndef noisygaus(x, a, x0, sigma, b):\n return a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2)) + b\n\ndef calibrate_mass(bwidth, mass_left, mass_right, true_md):\n\n bbins = np.arange(-mass_left, mass_right, bwidth)\n H1, b1 = np.histogram(true_md, bins=bbins)\n b1 = b1 + bwidth\n b1 = b1[:-1]\n\n popt, pcov = curve_fit(noisygaus, b1, H1, p0=[1, np.median(true_md), 1, 1])\n mass_shift, mass_sigma = popt[1], abs(popt[2])\n return mass_shift, mass_sigma, pcov[0][0]\n\n\ndef calc_peptide_features(hills_dict, peptide_features, negative_mode, faims_val, RT_dict, data_start_id):\n\n for pep_feature in peptide_features:\n\n pep_feature['mz'] = pep_feature['hill_mz_1']\n pep_feature['isoerror'] = pep_feature['isotopes'][0]['mass_diff_ppm']\n pep_feature['isoerror2'] = pep_feature['isotopes'][1]['mass_diff_ppm'] if len(pep_feature['isotopes']) > 1 else -100\n pep_feature['nScans'] = hills_dict['hills_lengths'][pep_feature['monoisotope idx']]\n\n pep_feature['massCalib'] = pep_feature['mz'] * pep_feature['charge'] - 1.0072765 * pep_feature['charge'] * (-1 if negative_mode else 1)\n\n hills_dict, _, _ = get_and_calc_apex_intensity_and_scan(hills_dict, pep_feature['monoisotope idx'])\n\n pep_feature['scanApex'] = hills_dict['hills_scan_apex'][pep_feature['monoisotope idx']]\n pep_feature['rtApex'] = RT_dict[hills_dict['hills_scan_apex'][pep_feature['monoisotope idx']]+data_start_id]\n pep_feature['intensityApex'] = hills_dict['hills_intensity_apex'][pep_feature['monoisotope idx']]\n pep_feature['rtStart'] = 
RT_dict[hills_dict['hills_scan_lists'][pep_feature['monoisotope idx']][0]+data_start_id]\n pep_feature['rtEnd'] = RT_dict[hills_dict['hills_scan_lists'][pep_feature['monoisotope idx']][-1]+data_start_id]\n pep_feature['mono_hills_scan_lists'] = hills_dict['hills_scan_lists'][pep_feature['monoisotope idx']]\n pep_feature['mono_hills_intensity_list'] = hills_dict['hills_intensity_array'][pep_feature['monoisotope idx']]\n\n return peptide_features\n\n\ndef write_output(peptide_features, args, write_header=True):\n\n input_mzml_path = args['file']\n\n if args['o']:\n output_file = args['o']\n else:\n output_file = path.splitext(input_mzml_path)[0]\\\n + path.extsep + 'features.tsv'\n\n columns_for_output = [\n 'massCalib',\n 'rtApex',\n 'intensityApex',\n 'charge',\n 'nIsotopes',\n 'nScans',\n 'mz',\n 'rtStart',\n 'rtEnd',\n 'FAIMS',\n 'im',\n 'mono_hills_scan_lists',\n 'mono_hills_intensity_list',\n 'scanApex',\n 'isoerror2',\n ]\n\n if write_header:\n\n out_file = open(output_file, 'w')\n out_file.write('\\t'.join(columns_for_output) + '\\n')\n out_file.close()\n\n out_file = open(output_file, 'a')\n for pep_feature in peptide_features:\n out_file.write('\\t'.join([str(pep_feature[col]) for col in columns_for_output]) + '\\n')\n\n out_file.close()\n\n\ndef centroid_pasef_data(data_for_analyse_tmp, args, mz_step):\n\n cnt_ms1_scans = len(data_for_analyse_tmp)\n for spec_idx, z in enumerate(data_for_analyse_tmp):\n\n logger.info('PASEF scans analysis: %d/%d', spec_idx+1, cnt_ms1_scans)\n logger.info('number of m/z peaks in scan: %d', len(z['m/z array']))\n\n if 'ignore_ion_mobility' not in z:\n\n mz_ar_new = []\n intensity_ar_new = []\n ion_mobility_ar_new = []\n\n mz_ar = z['m/z array']\n intensity_ar = z['intensity array']\n ion_mobility_ar = z['mean inverse reduced ion mobility array']\n\n ion_mobility_accuracy = args['paseftol']\n ion_mobility_step = max(ion_mobility_ar) * ion_mobility_accuracy\n\n ion_mobility_ar_fast = 
(ion_mobility_ar/ion_mobility_step).astype(int)\n mz_ar_fast = (mz_ar/mz_step).astype(int)\n\n idx = np.argsort(mz_ar_fast)\n mz_ar_fast = mz_ar_fast[idx]\n ion_mobility_ar_fast = ion_mobility_ar_fast[idx]\n\n mz_ar = mz_ar[idx]\n intensity_ar = intensity_ar[idx]\n ion_mobility_ar = ion_mobility_ar[idx]\n\n max_peak_idx = len(mz_ar)\n\n banned_idx = set()\n\n peak_idx = 0\n while peak_idx < max_peak_idx:\n\n if peak_idx not in banned_idx:\n\n mass_accuracy_cur = mz_ar[peak_idx] * 1e-6 * args['itol']\n\n mz_val_int = mz_ar_fast[peak_idx]\n ion_mob_val_int = ion_mobility_ar_fast[peak_idx]\n\n tmp = [peak_idx, ]\n\n peak_idx_2 = peak_idx + 1\n\n while peak_idx_2 < max_peak_idx:\n\n\n if peak_idx_2 not in banned_idx:\n\n mz_val_int_2 = mz_ar_fast[peak_idx_2]\n if mz_val_int_2 - mz_val_int > 1:\n break\n elif abs(mz_ar[peak_idx]-mz_ar[peak_idx_2]) <= mass_accuracy_cur:\n ion_mob_val_int_2 = ion_mobility_ar_fast[peak_idx_2]\n if abs(ion_mob_val_int - ion_mob_val_int_2) <= 1:\n tmp.append(peak_idx_2)\n peak_idx = peak_idx_2\n peak_idx_2 += 1\n\n all_intensity = [intensity_ar[p_id] for p_id in tmp]\n i_val_new = sum(all_intensity)\n\n if i_val_new >= args['pasefmini'] and len(all_intensity) >= args['pasefminlh']:\n\n all_mz = [mz_ar[p_id] for p_id in tmp]\n all_ion_mob = [ion_mobility_ar[p_id] for p_id in tmp]\n\n mz_val_new = np.average(all_mz, weights=all_intensity)\n ion_mob_new = np.average(all_ion_mob, weights=all_intensity)\n\n intensity_ar_new.append(i_val_new)\n mz_ar_new.append(mz_val_new)\n ion_mobility_ar_new.append(ion_mob_new)\n\n banned_idx.update(tmp)\n\n peak_idx += 1\n\n data_for_analyse_tmp[spec_idx]['m/z array'] = np.array(mz_ar_new)\n data_for_analyse_tmp[spec_idx]['intensity array'] = np.array(intensity_ar_new)\n data_for_analyse_tmp[spec_idx]['mean inverse reduced ion mobility array'] = np.array(ion_mobility_ar_new)\n\n logger.info('number of m/z peaks in scan after centroiding: %d', len(data_for_analyse_tmp[spec_idx]['m/z array']))\n\n 
data_for_analyse_tmp = [z for z in data_for_analyse_tmp if len(z['m/z array'] > 0)]\n logger.info('Number of MS1 scans after combining ion mobility peaks: %d', len(data_for_analyse_tmp))\n\n # fast_dict = defaultdict(set)\n # for peak_idx, (mz_val_int, ion_mob_val_int) in enumerate(zip(mz_ar_fast, ion_mobility_ar_fast)):\n\n # fast_dict[(mz_val_int-1, ion_mob_val_int)].add(peak_idx)\n # fast_dict[(mz_val_int, ion_mob_val_int)].add(peak_idx)\n # fast_dict[(mz_val_int+1, ion_mob_val_int)].add(peak_idx)\n\n # fast_dict[(mz_val_int-1, ion_mob_val_int-1)].add(peak_idx)\n # fast_dict[(mz_val_int, ion_mob_val_int-1)].add(peak_idx)\n # fast_dict[(mz_val_int+1, ion_mob_val_int-1)].add(peak_idx)\n\n # fast_dict[(mz_val_int-1, ion_mob_val_int+1)].add(peak_idx)\n # fast_dict[(mz_val_int, ion_mob_val_int+1)].add(peak_idx)\n # fast_dict[(mz_val_int+1, ion_mob_val_int+1)].add(peak_idx)\n\n\n # print('HERE2')\n\n # hill_length = []\n # peak_idx_array = []\n # for peak_idx, (mz_val_int, ion_mob_val_int) in enumerate(zip(mz_ar_fast, ion_mobility_ar_fast)):\n # hill_length.append(len(fast_dict[(mz_val_int, ion_mob_val_int)]))\n # peak_idx_array.append(peak_idx)\n # peak_idx_array = np.array(peak_idx_array)\n\n\n # print('HERE3')\n\n # added_idx = set()\n # idx_sort = np.argsort(hill_length)[::-1]\n # for peak_idx in peak_idx_array[idx_sort]:\n # if peak_idx not in added_idx:\n # mz_val_int = mz_ar_fast[peak_idx]\n # ion_mob_val_int = ion_mobility_ar_fast[peak_idx]\n # all_idx = set([p_id for p_id in fast_dict[(mz_val_int, ion_mob_val_int)] if p_id not in added_idx])\n # if len(all_idx):\n # added_idx.update(all_idx)\n\n # all_intensity = [intensity_ar[p_id] for p_id in all_idx]\n # i_val_new = sum(all_intensity)\n\n # if i_val_new >= args['pasefmini']:\n\n # all_mz = [mz_ar[p_id] for p_id in all_idx]\n # all_ion_mob = [ion_mobility_ar[p_id] for p_id in all_idx]\n\n # mz_val_new = np.average(all_mz, weights=all_intensity)\n # ion_mob_new = np.average(all_ion_mob, 
weights=all_intensity)\n\n # intensity_ar_new.append(i_val_new)\n # mz_ar_new.append(mz_val_new)\n # ion_mobility_ar_new.append(ion_mob_new)\n\n # data_for_analyse_tmp[spec_idx]['m/z array'] = np.array(mz_ar_new)\n # data_for_analyse_tmp[spec_idx]['intensity array'] = np.array(intensity_ar_new)\n # data_for_analyse_tmp[spec_idx]['mean inverse reduced ion mobility array'] = np.array(ion_mobility_ar_new)\n\n # data_for_analyse_tmp = [z for z in data_for_analyse_tmp if len(z['m/z array'] > 0)]\n # print('Number of MS1 scans after combining ion mobility peaks: ', len(data_for_analyse_tmp))\n\n return data_for_analyse_tmp\n\ndef process_profile(data_for_analyse_tmp):\n\n data_for_analyse_tmp_out = []\n\n for z in data_for_analyse_tmp:\n\n best_mz = 0\n best_int = 0\n best_im = 0\n prev_mz = False\n prev_int = False\n\n threshold = 0.05\n\n ar1 = []\n ar2 = []\n ar3 = []\n for mzv, intv, imv in zip(z['m/z array'], z['intensity array'], z['mean inverse reduced ion mobility array']):\n if prev_mz is False:\n best_mz = mzv\n best_int = intv\n best_im = imv\n elif mzv - prev_mz > threshold:\n ar1.append(best_mz)\n ar2.append(best_int)\n ar3.append(best_im)\n best_mz = mzv\n best_int = intv\n best_im = imv\n elif best_int > prev_int and intv > prev_int:\n ar1.append(best_mz)\n ar2.append(best_int)\n ar3.append(best_im)\n best_mz = mzv\n best_int = intv\n best_im = imv\n elif intv > best_int:\n best_mz = mzv\n best_int = intv\n best_im = imv\n prev_mz = mzv\n prev_int = intv\n\n ar1.append(best_mz)\n ar2.append(best_int)\n ar3.append(best_im)\n\n z['m/z array'] = np.array(ar1)\n z['intensity array'] = np.array(ar2)\n z['mean inverse reduced ion mobility array'] = np.array(ar3)\n\n data_for_analyse_tmp_out.append(z)\n return data_for_analyse_tmp_out\n\n\n\ndef process_tof(data_for_analyse_tmp):\n\n # print(len(z['m/z array']))\n universal_dict = {}\n cnt = 0\n\n for z in data_for_analyse_tmp:\n\n fast_set = z['m/z array'] // 50\n if cnt == 1:\n\n\n for l in set(fast_set):\n 
idxt = fast_set == l\n true_i = np.log10(z['intensity array'])[idxt]\n\n if len(true_i) > 150:\n\n i_left = true_i.min()\n i_right = true_i.max()\n\n i_shift, i_sigma, covvalue = calibrate_mass(0.05, i_left, i_right, true_i)\n # median_val = \n print(i_shift, i_sigma, covvalue)\n universal_dict[l] = 10**(i_shift + 3 * i_sigma)#10**(np.median(true_i[idxt]) * 2)\n \n\n \n thresholds = [universal_dict.get(zz, 150) for zz in list(fast_set)]\n idxt2 = z['intensity array'] <= thresholds\n z['intensity array'][idxt2] = -1\n\n\n idx = z['intensity array'] > 0\n z['intensity array'] = z['intensity array'][idx]\n z['m/z array'] = z['m/z array'][idx]\n z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion mobility array'][idx]\n\n\n\n cnt += 1\n\n data_for_analyse_tmp = [z for z in data_for_analyse_tmp if len(z['m/z array'])]\n\n return data_for_analyse_tmp\n\n\ndef process_mzml(args):\n\n input_mzml_path = args['file']\n min_intensity = args['mini']\n min_mz = args['minmz']\n max_mz = args['maxmz']\n\n skipped = 0\n data_for_analyse = []\n\n cnt = 0\n\n for z in MS1OnlyMzML(source=input_mzml_path):\n if z['ms level'] == 1:\n\n if 'mean inverse reduced ion mobility array' not in z:\n z['ignore_ion_mobility'] = True\n z['mean inverse reduced ion mobility array'] = np.zeros(len(z['m/z array']))\n\n idx = z['intensity array'] >= min_intensity\n z['intensity array'] = z['intensity array'][idx]\n z['m/z array'] = z['m/z array'][idx]\n z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion mobility array'][idx]\n\n idx = z['m/z array'] >= min_mz\n z['m/z array'] = z['m/z array'][idx]\n z['intensity array'] = z['intensity array'][idx]\n z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion mobility array'][idx]\n\n idx = z['m/z array'] <= max_mz\n z['m/z array'] = z['m/z array'][idx]\n z['intensity array'] = z['intensity array'][idx]\n z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion mobility 
array'][idx]\n\n idx = np.argsort(z['m/z array'])\n z['m/z array'] = z['m/z array'][idx]\n z['intensity array'] = z['intensity array'][idx]\n z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion mobility array'][idx]\n\n cnt += 1\n\n # if len(data_for_analyse) > 50:\n # break\n\n if len(z['m/z array']):\n data_for_analyse.append(z)\n else:\n skipped += 1\n\n\n logger.info('Number of MS1 scans: %d', len(data_for_analyse))\n logger.info('Number of skipped MS1 scans: %d', skipped)\n\n if len(data_for_analyse) == 0:\n raise Exception('no MS1 scans in input file')\n\n return data_for_analyse\n\n\n\ndef process_mzml_dia(args):\n\n input_mzml_path = args['file']\n # min_intensity = args['mini']\n # min_mz = args['minmz']\n # max_mz = args['maxmz']\n min_intensity = 0\n min_mz = 1\n max_mz = 1e6\n\n skipped = 0\n data_for_analyse = []\n\n cnt = 0\n\n for z in mzml.read(input_mzml_path):\n if z['ms level'] == 2:\n\n if 'mean inverse reduced ion mobility array' not in z:\n z['ignore_ion_mobility'] = True\n z['mean inverse reduced ion mobility array'] = np.zeros(len(z['m/z array']))\n\n idx = z['intensity array'] >= min_intensity\n z['intensity array'] = z['intensity array'][idx]\n z['m/z array'] = z['m/z array'][idx]\n z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion mobility array'][idx]\n\n idx = z['m/z array'] >= min_mz\n z['m/z array'] = z['m/z array'][idx]\n z['intensity array'] = z['intensity array'][idx]\n z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion mobility array'][idx]\n\n idx = z['m/z array'] <= max_mz\n z['m/z array'] = z['m/z array'][idx]\n z['intensity array'] = z['intensity array'][idx]\n z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion mobility array'][idx]\n\n idx = np.argsort(z['m/z array'])\n z['m/z array'] = z['m/z array'][idx]\n z['intensity array'] = z['intensity array'][idx]\n z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion 
mobility array'][idx]\n\n cnt += 1\n\n # if len(data_for_analyse) > 5000:\n # break\n\n if len(z['m/z array']):\n data_for_analyse.append(z)\n else:\n skipped += 1\n\n\n logger.info('Number of MS2 scans: %d', len(data_for_analyse))\n logger.info('Number of skipped MS2 scans: %d', skipped)\n\n return data_for_analyse\n"
] | [
[
"numpy.arange",
"numpy.median",
"numpy.log10",
"numpy.average",
"numpy.argsort",
"numpy.array",
"numpy.exp",
"numpy.histogram"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kaczmarj/robustness | [
"79d371fd799885ea5fe5553c2b749f41de1a2c4e",
"79d371fd799885ea5fe5553c2b749f41de1a2c4e"
] | [
"robustness/attack_steps.py",
"robustness/datasets.py"
] | [
"\"\"\"\n**For most use cases, this can just be considered an internal class and\nignored.**\n\nThis module contains the abstract class AttackerStep as well as a few subclasses. \n\nAttackerStep is a generic way to implement optimizers specifically for use with\n:class:`robustness.attacker.AttackerModel`. In general, except for when you want\nto :ref:`create a custom optimization method <adding-custom-steps>`, you probably do not need to\nimport or edit this module and can just think of it as internal.\n\"\"\"\n\nimport torch as ch\n\nclass AttackerStep:\n '''\n Generic class for attacker steps, under perturbation constraints\n specified by an \"origin input\" and a perturbation magnitude.\n Must implement project, step, and random_perturb\n '''\n def __init__(self, orig_input, eps, step_size, use_grad=True):\n '''\n Initialize the attacker step with a given perturbation magnitude.\n\n Args:\n eps (float): the perturbation magnitude\n orig_input (ch.tensor): the original input\n '''\n self.orig_input = orig_input\n self.eps = eps\n self.step_size = step_size\n self.use_grad = use_grad\n\n def project(self, x):\n '''\n Given an input x, project it back into the feasible set\n\n Args:\n ch.tensor x : the input to project back into the feasible set.\n\n Returns:\n A `ch.tensor` that is the input projected back into\n the feasible set, that is,\n .. math:: \\min_{x' \\in S} \\|x' - x\\|_2\n '''\n raise NotImplementedError\n\n def step(self, x, g):\n '''\n Given a gradient, make the appropriate step according to the\n perturbation constraint (e.g. 
dual norm maximization for :math:`\\ell_p`\n norms).\n\n Parameters:\n g (ch.tensor): the raw gradient\n\n Returns:\n The new input, a ch.tensor for the next step.\n '''\n raise NotImplementedError\n\n def random_perturb(self, x):\n '''\n Given a starting input, take a random step within the feasible set\n '''\n raise NotImplementedError\n\n def to_image(self, x):\n '''\n Given an input (which may be in an alternative parameterization),\n convert it to a valid image (this is implemented as the identity\n function by default as most of the time we use the pixel\n parameterization, but for alternative parameterizations this functino\n must be overriden).\n '''\n return x\n\n### Instantiations of the AttackerStep class\n\n# L-infinity threat model\nclass LinfStep(AttackerStep):\n \"\"\"\n Attack step for :math:`\\ell_\\infty` threat model. Given :math:`x_0`\n and :math:`\\epsilon`, the constraint set is given by:\n\n .. math:: S = \\{x | \\|x - x_0\\|_\\infty \\leq \\epsilon\\}\n \"\"\"\n def project(self, x):\n \"\"\"\n \"\"\"\n diff = x - self.orig_input\n diff = ch.clamp(diff, -self.eps, self.eps)\n return ch.clamp(diff + self.orig_input, 0, 1)\n\n def step(self, x, g):\n \"\"\"\n \"\"\"\n step = ch.sign(g) * self.step_size\n return x + step\n\n def random_perturb(self, x):\n \"\"\"\n \"\"\"\n new_x = x + 2 * (ch.rand_like(x) - 0.5) * self.eps\n return ch.clamp(new_x, 0, 1)\n\n# L2 threat model\nclass L2Step(AttackerStep):\n \"\"\"\n Attack step for :math:`\\ell_\\infty` threat model. Given :math:`x_0`\n and :math:`\\epsilon`, the constraint set is given by:\n\n .. 
math:: S = \\{x | \\|x - x_0\\|_2 \\leq \\epsilon\\}\n \"\"\"\n def project(self, x):\n \"\"\"\n \"\"\"\n diff = x - self.orig_input\n diff = diff.renorm(p=2, dim=0, maxnorm=self.eps)\n return ch.clamp(self.orig_input + diff, 0, 1)\n\n def step(self, x, g):\n \"\"\"\n \"\"\"\n l = len(x.shape) - 1\n g_norm = ch.norm(g.view(g.shape[0], -1), dim=1).view(-1, *([1]*l))\n scaled_g = g / (g_norm + 1e-10)\n return x + scaled_g * self.step_size\n\n def random_perturb(self, x):\n \"\"\"\n \"\"\"\n l = len(x.shape) - 1\n rp = ch.randn_like(x)\n rp_norm = rp.view(rp.shape[0], -1).norm(dim=1).view(-1, *([1]*l))\n return ch.clamp(x + self.eps * rp / (rp_norm + 1e-10), 0, 1)\n\n# Unconstrained threat model\nclass UnconstrainedStep(AttackerStep):\n \"\"\"\n Unconstrained threat model, :math:`S = [0, 1]^n`.\n \"\"\"\n def project(self, x):\n \"\"\"\n \"\"\"\n return ch.clamp(x, 0, 1)\n\n def step(self, x, g):\n \"\"\"\n \"\"\"\n return x + g * self.step_size\n\n def random_perturb(self, x):\n \"\"\"\n \"\"\"\n new_x = x + (ch.rand_like(x) - 0.5).renorm(p=2, dim=0, maxnorm=step_size)\n return ch.clamp(new_x, 0, 1)\n\nclass FourierStep(AttackerStep):\n \"\"\"\n Step under the Fourier (decorrelated) parameterization of an image.\n\n See https://distill.pub/2017/feature-visualization/#preconditioning for more information.\n \"\"\"\n def project(self, x):\n \"\"\"\n \"\"\"\n return x\n\n def step(self, x, g):\n \"\"\"\n \"\"\"\n return x + g * self.step_size\n\n def random_perturb(self, x):\n \"\"\"\n \"\"\"\n return x\n\n def to_image(self, x):\n \"\"\"\n \"\"\"\n return ch.sigmoid(ch.irfft(x, 2, normalized=True, onesided=False))\n\nclass RandomStep(AttackerStep):\n \"\"\"\n Step for Randomized Smoothing.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.use_grad = False\n\n def project(self, x):\n \"\"\"\n \"\"\"\n return x\n\n def step(self, x, g):\n \"\"\"\n \"\"\"\n return x + self.step_size * ch.randn_like(x)\n\n def random_perturb(self, 
x):\n \"\"\"\n \"\"\"\n return x\n",
"\"\"\"\nModule containing all the supported datasets, which are subclasses of the\nabstract class :class:`robustness.datasets.DataSet`. \n\nCurrently supported datasets:\n\n- ImageNet (:class:`robustness.datasets.ImageNet`)\n- RestrictedImageNet (:class:`robustness.datasets.RestrictedImageNet`)\n- CIFAR-10 (:class:`robustness.datasets.CIFAR`)\n- CINIC-10 (:class:`robustness.datasets.CINIC`)\n- A2B: horse2zebra, summer2winter_yosemite, apple2orange\n (:class:`robustness.datasets.A2B`)\n\n:doc:`../example_usage/training_lib_part_2` shows how to add custom\ndatasets to the library.\n\"\"\"\n\nimport pathlib\n\nimport torch as ch\nimport torch.utils.data\nfrom . import imagenet_models, cifar_models\nfrom torchvision import transforms, datasets\n\nfrom .tools import constants\nfrom .tools import openimgs_helpers\nfrom . import data_augmentation as da\nfrom . import loaders\n\nfrom .tools.helpers import get_label_mapping\n\n###\n# Datasets: (all subclassed from dataset)\n# In order:\n## ImageNet\n## Restricted Imagenet \n## Other Datasets:\n## - CIFAR\n## - CINIC\n## - A2B (orange2apple, horse2zebra, etc)\n###\n\nclass DataSet(object):\n '''\n Base class for representing a dataset. Meant to be subclassed, with\n subclasses implementing the `get_model` function. \n '''\n\n def __init__(self, ds_name, data_path, **kwargs):\n \"\"\"\n Args:\n ds_name (str) : string identifier for the dataset\n data_path (str) : path to the dataset \n num_classes (int) : *required kwarg*, the number of classes in\n the dataset\n mean (ch.tensor) : *required kwarg*, the mean to normalize the\n dataset with (e.g. :samp:`ch.tensor([0.4914, 0.4822,\n 0.4465])` for CIFAR-10)\n std (ch.tensor) : *required kwarg*, the standard deviation to\n normalize the dataset with (e.g. 
:samp:`ch.tensor([0.2023,\n 0.1994, 0.2010])` for CIFAR-10)\n custom_class (type) : *required kwarg*, a\n :samp:`torchvision.models` class corresponding to the\n dataset, if it exists (otherwise :samp:`None`)\n label_mapping (dict[int,str]) : *required kwarg*, a dictionary\n mapping from class numbers to human-interpretable class\n names (can be :samp:`None`)\n transform_train (torchvision.transforms) : *required kwarg*, \n transforms to apply to the training images from the\n dataset\n transform_test (torchvision.transforms) : *required kwarg*,\n transforms to apply to the validation images from the\n dataset\n \"\"\"\n required_args = ['num_classes', 'mean', 'std', \n 'transform_train', 'transform_test']\n optional_args = ['custom_class', 'label_mapping', 'custom_class_args']\n\n missing_args = set(required_args) - set(kwargs.keys())\n if len(missing_args) > 0:\n raise ValueError(\"Missing required args %s\" % missing_args)\n\n extra_args = set(kwargs.keys()) - set(required_args + optional_args)\n if len(extra_args) > 0:\n raise ValueError(\"Got unrecognized args %s\" % extra_args)\n final_kwargs = {k: kwargs.get(k, None) for k in required_args + optional_args} \n\n self.ds_name = ds_name\n self.data_path = data_path\n self.__dict__.update(final_kwargs)\n \n def override_args(self, default_args, kwargs):\n '''\n Convenience method for overriding arguments. (Internal)\n '''\n for k in kwargs:\n if not (k in default_args): continue\n req_type = type(default_args[k])\n no_nones = (default_args[k] is not None) and (kwargs[k] is not None)\n if no_nones and (not isinstance(kwargs[k], req_type)):\n raise ValueError(f\"Argument {k} should have type {req_type}\")\n return {**default_args, **kwargs}\n\n def get_model(self, arch, pretrained):\n '''\n Should be overriden by subclasses. 
Also, you will probably never\n need to call this function, and should instead by using\n `model_utils.make_and_restore_model </source/robustness.model_utils.html>`_.\n\n Args:\n arch (str) : name of architecture \n pretrained (bool): whether to try to load torchvision \n pretrained checkpoint\n\n Returns:\n A model with the given architecture that works for each\n dataset (e.g. with the right input/output dimensions).\n '''\n\n raise NotImplementedError\n\n def make_loaders(self, workers, batch_size, data_aug=True, subset=None, \n subset_start=0, subset_type='rand', val_batch_size=None,\n only_val=False, shuffle_train=True, shuffle_val=True, subset_seed=None):\n '''\n Args:\n workers (int) : number of workers for data fetching (*required*).\n batch_size (int) : batch size for the data loaders (*required*).\n data_aug (bool) : whether or not to do train data augmentation.\n subset (None|int) : if given, the returned training data loader\n will only use a subset of the training data; this should be a\n number specifying the number of training data points to use.\n subset_start (int) : only used if `subset` is not None; this specifies the\n starting index of the subset.\n subset_type (\"rand\"|\"first\"|\"last\") : only used if `subset is\n not `None`; \"rand\" selects the subset randomly, \"first\"\n uses the first `subset` images of the training data, and\n \"last\" uses the last `subset` images of the training data.\n seed (int) : only used if `subset == \"rand\"`; allows one to fix\n the random seed used to generate the subset (defaults to 1).\n val_batch_size (None|int) : if not `None`, specifies a\n different batch size for the validation set loader.\n only_val (bool) : If `True`, returns `None` in place of the\n training data loader\n shuffle_train (bool) : Whether or not to shuffle the training data\n in the returned DataLoader.\n shuffle_val (bool) : Whether or not to shuffle the test data in the\n returned DataLoader.\n\n Returns:\n A training loader and 
validation loader according to the\n parameters given. These are standard PyTorch data loaders, and\n thus can just be used via:\n\n >>> train_loader, val_loader = ds.make_loaders(workers=8, batch_size=128) \n >>> for im, lab in train_loader:\n >>> # Do stuff...\n '''\n transforms = (self.transform_train, self.transform_test)\n return loaders.make_loaders(workers=workers,\n batch_size=batch_size,\n transforms=transforms,\n data_path=self.data_path,\n data_aug=data_aug,\n dataset=self.ds_name,\n label_mapping=self.label_mapping,\n custom_class=self.custom_class,\n val_batch_size=val_batch_size,\n subset=subset,\n subset_start=subset_start,\n subset_type=subset_type,\n only_val=only_val,\n seed=subset_seed,\n shuffle_train=shuffle_train,\n shuffle_val=shuffle_val,\n custom_class_args=self.custom_class_args)\n\nclass ImageNet(DataSet):\n '''\n ImageNet Dataset [DDS+09]_.\n\n Requires ImageNet in ImageFolder-readable format. \n ImageNet can be downloaded from http://www.image-net.org. See\n `here <https://pytorch.org/docs/master/torchvision/datasets.html#torchvision.datasets.ImageFolder>`_\n for more information about the format.\n\n .. [DDS+09] Deng, J., Dong, W., Socher, R., Li, L., Li, K., & Fei-Fei, L. (2009). ImageNet: A large-scale hierarchical image database. 
2009 IEEE Conference on Computer Vision and Pattern Recognition, 248-255.\n\n '''\n def __init__(self, data_path, **kwargs):\n \"\"\"\n \"\"\"\n ds_kwargs = {\n 'num_classes': 1000,\n 'mean': ch.tensor([0.485, 0.456, 0.406]),\n 'std': ch.tensor([0.229, 0.224, 0.225]),\n 'custom_class': None,\n 'label_mapping': None,\n 'transform_train': da.TRAIN_TRANSFORMS_IMAGENET,\n 'transform_test': da.TEST_TRANSFORMS_IMAGENET\n }\n ds_kwargs = self.override_args(ds_kwargs, kwargs)\n super(ImageNet, self).__init__('imagenet', data_path, **ds_kwargs)\n\n def get_model(self, arch, pretrained):\n \"\"\"\n \"\"\"\n return imagenet_models.__dict__[arch](num_classes=self.num_classes, \n pretrained=pretrained)\n\nclass Places365(DataSet):\n '''\n Places365 Dataset [ZLK+17]_, a 365-class scene recognition dataset.\n\n See `the places2 webpage <http://places2.csail.mit.edu>`_\n for information on how to download this dataset.\n\n .. [ZLK+17] Zhou, B., Lapedriza, A., Khosla, A., Oliva, A., & Torralba, A. (2017). Places: A 10 million Image Database for Scene Recognition. 
IEEE Transactions on Pattern Analysis and Machine Intelligence.\n\n '''\n def __init__(self, data_path, **kwargs):\n \"\"\"\n \"\"\"\n ds_kwargs = {\n 'num_classes': 365,\n 'mean': ch.tensor([0.485, 0.456, 0.406]),\n 'std': ch.tensor([0.229, 0.224, 0.225]),\n 'custom_class': None,\n 'label_mapping': None, \n 'transform_train': da.TRAIN_TRANSFORMS_DEFAULT(256),\n 'transform_test': da.TEST_TRANSFORMS_DEFAULT(256)\n }\n ds_kwargs = self.override_args(ds_kwargs, kwargs)\n super(Places365, self).__init__('places365', data_path, **ds_kwargs)\n\n def get_model(self, arch, pretrained):\n \"\"\"\n \"\"\"\n return imagenet_models.__dict__[arch](num_classes=self.num_classes, \n pretrained=pretrained)\n\nclass RestrictedImageNet(DataSet):\n '''\n RestrictedImagenet Dataset [TSE+19]_\n\n A subset of ImageNet with the following labels:\n\n * Dog (classes 151-268)\n * Cat (classes 281-285)\n * Frog (classes 30-32)\n * Turtle (classes 33-37)\n * Bird (classes 80-100)\n * Monkey (classes 365-382)\n * Fish (classes 389-397)\n * Crab (classes 118-121)\n * Insect (classes 300-319)\n\n To initialize, just provide the path to the full ImageNet dataset\n (no special formatting required).\n\n .. [TSE+19] Tsipras, D., Santurkar, S., Engstrom, L., Turner, A., &\n Madry, A. (2019). Robustness May Be at Odds with Accuracy. 
ICLR\n 2019.\n '''\n def __init__(self, data_path, **kwargs):\n \"\"\"\n \"\"\"\n ds_name = 'restricted_imagenet'\n ds_kwargs = {\n 'num_classes': len(constants.RESTRICTED_IMAGNET_RANGES),\n 'mean': ch.tensor([0.4717, 0.4499, 0.3837]), \n 'std': ch.tensor([0.2600, 0.2516, 0.2575]),\n 'custom_class': None,\n 'label_mapping': get_label_mapping(ds_name,\n constants.RESTRICTED_IMAGNET_RANGES),\n 'transform_train': da.TRAIN_TRANSFORMS_IMAGENET,\n 'transform_test': da.TEST_TRANSFORMS_IMAGENET\n }\n ds_kwargs = self.override_args(ds_kwargs, kwargs)\n super(RestrictedImageNet, self).__init__(ds_name,\n data_path, **ds_kwargs)\n\n def get_model(self, arch, pretrained):\n \"\"\"\n \"\"\"\n if pretrained:\n raise ValueError(\"Dataset doesn't support pytorch_pretrained\")\n return imagenet_models.__dict__[arch](num_classes=self.num_classes)\n\nclass CustomImageNet(DataSet):\n '''\n CustomImagenet Dataset \n\n A subset of ImageNet with the user-specified labels\n\n To initialize, just provide the path to the full ImageNet dataset\n along with a list of lists of wnids to be grouped together\n (no special formatting required).\n\n '''\n def __init__(self, data_path, custom_grouping, **kwargs):\n \"\"\"\n \"\"\"\n ds_name = 'custom_imagenet'\n ds_kwargs = {\n 'num_classes': len(custom_grouping),\n 'mean': ch.tensor([0.4717, 0.4499, 0.3837]), \n 'std': ch.tensor([0.2600, 0.2516, 0.2575]),\n 'custom_class': None,\n 'label_mapping': get_label_mapping(ds_name,\n custom_grouping),\n 'transform_train': da.TRAIN_TRANSFORMS_IMAGENET,\n 'transform_test': da.TEST_TRANSFORMS_IMAGENET\n }\n ds_kwargs = self.override_args(ds_kwargs, kwargs)\n super(CustomImageNet, self).__init__(ds_name,\n data_path, **ds_kwargs)\n\n def get_model(self, arch, pretrained):\n \"\"\"\n \"\"\"\n if pretrained:\n raise ValueError(\"Dataset doesn't support pytorch_pretrained\")\n return imagenet_models.__dict__[arch](num_classes=self.num_classes)\n\nclass CIFAR(DataSet):\n \"\"\"\n CIFAR-10 dataset [Kri09]_.\n\n A 
dataset with 50k training images and 10k testing images, with the\n following classes:\n\n * Airplane\n * Automobile\n * Bird\n * Cat\n * Deer\n * Dog\n * Frog\n * Horse\n * Ship\n * Truck\n\n .. [Kri09] Krizhevsky, A (2009). Learning Multiple Layers of Features\n from Tiny Images. Technical Report.\n \"\"\"\n def __init__(self, data_path='/tmp/', **kwargs):\n \"\"\"\n \"\"\"\n ds_kwargs = {\n 'num_classes': 10,\n 'mean': ch.tensor([0.4914, 0.4822, 0.4465]),\n 'std': ch.tensor([0.2023, 0.1994, 0.2010]),\n 'custom_class': datasets.CIFAR10,\n 'label_mapping': None, \n 'transform_train': da.TRAIN_TRANSFORMS_DEFAULT(32),\n 'transform_test': da.TEST_TRANSFORMS_DEFAULT(32)\n }\n ds_kwargs = self.override_args(ds_kwargs, kwargs)\n super(CIFAR, self).__init__('cifar', data_path, **ds_kwargs)\n\n def get_model(self, arch, pretrained):\n \"\"\"\n \"\"\"\n if pretrained:\n raise ValueError('CIFAR does not support pytorch_pretrained=True')\n return cifar_models.__dict__[arch](num_classes=self.num_classes)\n\nclass CINIC(DataSet):\n \"\"\"\n CINIC-10 dataset [DCA+18]_.\n\n A dataset with the same classes as CIFAR-10, but with downscaled images\n from various matching ImageNet classes added in to increase the size of\n the dataset.\n\n .. [DCA+18] Darlow L.N., Crowley E.J., Antoniou A., and A.J. Storkey\n (2018) CINIC-10 is not ImageNet or CIFAR-10. 
Report\n EDI-INF-ANC-1802 (arXiv:1810.03505)\n \"\"\"\n def __init__(self, data_path, **kwargs):\n \"\"\"\n \"\"\"\n ds_kwargs = {\n 'num_classes': 10,\n 'mean': ch.tensor([0.47889522, 0.47227842, 0.43047404]),\n 'std': ch.tensor([0.24205776, 0.23828046, 0.25874835]),\n 'custom_class': None,\n 'label_mapping': None,\n 'transform_train': da.TRAIN_TRANSFORMS_DEFAULT(32),\n 'transform_test': da.TEST_TRANSFORMS_DEFAULT(32)\n }\n ds_kwargs = self.override_args(ds_kwargs, kwargs)\n super(CINIC, self).__init__('cinic', data_path, **ds_kwargs)\n\n def get_model(self, arch, pretrained):\n \"\"\"\n \"\"\"\n if pretrained:\n raise ValueError('CINIC does not support pytorch_pretrained=True')\n return cifar_models.__dict__[arch](num_classes=self.num_classes)\n\nclass A2B(DataSet):\n \"\"\"\n A-to-B datasets [ZPI+17]_\n\n A general class for image-to-image translation dataset. Currently\n supported are:\n \n * Horse <-> Zebra\n * Apple <-> Orange\n * Summer <-> Winter\n\n .. [ZPI+17] Zhu, J., Park, T., Isola, P., & Efros, A.A. (2017).\n Unpaired Image-to-Image Translation Using Cycle-Consistent\n Adversarial Networks. 
2017 IEEE International Conference on\n Computer Vision (ICCV), 2242-2251.\n \"\"\"\n def __init__(self, data_path, **kwargs):\n \"\"\"\n \"\"\"\n ds_name = pathlib.Path(data_path).parts[-1]\n valid_names = ['horse2zebra', 'apple2orange', 'summer2winter_yosemite']\n assert ds_name in valid_names, \\\n f\"path must end in one of {valid_names}, not {ds_name}\"\n ds_kwargs = {\n 'num_classes': 2,\n 'mean': ch.tensor([0.5, 0.5, 0.5]),\n 'custom_class': None,\n 'std': ch.tensor([0.5, 0.5, 0.5]),\n 'transform_train': da.TRAIN_TRANSFORMS_IMAGENET,\n 'label_mapping': None,\n 'transform_test': da.TEST_TRANSFORMS_IMAGENET\n }\n ds_kwargs = self.override_args(ds_kwargs, kwargs)\n super(A2B, self).__init__(ds_name, data_path, **ds_kwargs)\n\n def get_model(self, arch, pretrained=False):\n \"\"\"\n \"\"\"\n if pretrained:\n raise ValueError('A2B does not support pytorch_pretrained=True')\n return imagenet_models.__dict__[arch](num_classes=self.num_classes)\n\nclass OpenImages(DataSet):\n \"\"\"\n OpenImages dataset [KDA+17]_\n\n More info: https://storage.googleapis.com/openimages/web/index.html\n\n 600-way classification with graular labels and bounding boxes.\n\n ..[KDA+17] Krasin I., Duerig T., Alldrin N., Ferrari V., Abu-El-Haija S.,\n Kuznetsova A., Rom H., Uijlings J., Popov S., Kamali S., Malloci M.,\n Pont-Tuset J., Veit A., Belongie S., Gomes V., Gupta A., Sun C., Chechik G.,\n Cai D., Feng Z., Narayanan D., Murphy K. (2017). OpenImages: A public\n dataset for large-scale multi-label and multi-class image classification.\n Available from https://storage.googleapis.com/openimages/web/index.html. 
\n \"\"\"\n def __init__(self, data_path, custom_grouping=None, **kwargs):\n \"\"\"\n \"\"\"\n if custom_grouping is None:\n num_classes = 601\n label_mapping = None \n else:\n num_classes = len(custom_grouping)\n label_mapping = get_label_mapping(\"custom_imagenet\", custom_grouping)\n\n ds_kwargs = {\n 'num_classes': num_classes,\n 'mean': ch.tensor([0.4859, 0.4131, 0.3083]),\n 'std': ch.tensor([0.2919, 0.2507, 0.2273]),\n 'custom_class': openimgs_helpers.OIDatasetFolder,\n 'label_mapping': label_mapping, \n 'transform_train': da.TRAIN_TRANSFORMS_IMAGENET,\n 'transform_test': da.TEST_TRANSFORMS_IMAGENET\n }\n ds_kwargs = self.override_args(ds_kwargs, kwargs)\n super(OpenImages, self).__init__('openimages', data_path, **ds_kwargs)\n\n def get_model(self, arch, pretrained):\n \"\"\"\n \"\"\"\n if pretrained:\n raise ValueError('OpenImages does not support pytorch_pretrained=True')\n return imagenet_models.__dict__[arch](num_classes=self.num_classes)\n\nDATASETS = {\n 'imagenet': ImageNet,\n 'restricted_imagenet': RestrictedImageNet,\n 'custom_imagenet': CustomImageNet,\n 'cifar': CIFAR,\n 'cinic': CINIC,\n 'a2b': A2B,\n 'places365': Places365,\n 'openimages': OpenImages\n}\n'''\nDictionary of datasets. A dataset class can be accessed as:\n\n>>> import robustness.datasets\n>>> ds = datasets.DATASETS['cifar']('/path/to/cifar')\n'''\n"
] | [
[
"torch.randn_like",
"torch.rand_like",
"torch.sign",
"torch.clamp",
"torch.irfft"
],
[
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MourabitElBachir/visual-recognition-server-control-back | [
"859b385480e16de16cc4c4adb57f49e98bfd3ade",
"49ec2a459e8c418a395340d2d3f876af400cb75c"
] | [
"object_detection/builders/optimizer_builder.py",
"object_detection/camera_capture.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Functions to build DetectionModel training optimizers.\"\"\"\n\nimport tensorflow as tf\nfrom object_detection.utils import learning_schedules\n\nslim = tf.contrib.slim\n\n\ndef build(optimizer_config, global_summaries):\n \"\"\"Create optimizer based on config.\n\n Args:\n optimizer_config: A Optimizer proto message.\n global_summaries: A set to attach learning rate summary to.\n\n Returns:\n An optimizer.\n\n Raises:\n ValueError: when using an unsupported input data type.\n \"\"\"\n optimizer_type = optimizer_config.WhichOneof('optimizer')\n optimizer = None\n\n if optimizer_type == 'rms_prop_optimizer':\n config = optimizer_config.rms_prop_optimizer\n optimizer = tf.train.RMSPropOptimizer(\n _create_learning_rate(config.learning_rate, global_summaries),\n decay=config.decay,\n momentum=config.momentum_optimizer_value,\n epsilon=config.epsilon)\n\n if optimizer_type == 'momentum_optimizer':\n config = optimizer_config.momentum_optimizer\n optimizer = tf.train.MomentumOptimizer(\n _create_learning_rate(config.learning_rate, global_summaries),\n momentum=config.momentum_optimizer_value)\n\n if optimizer_type == 'adam_optimizer':\n config = optimizer_config.adam_optimizer\n optimizer = tf.train.AdamOptimizer(\n _create_learning_rate(config.learning_rate, 
global_summaries))\n\n if optimizer is None:\n raise ValueError('Optimizer %s not supported.' % optimizer_type)\n\n if optimizer_config.use_moving_average:\n optimizer = tf.contrib.opt.MovingAverageOptimizer(\n optimizer, average_decay=optimizer_config.moving_average_decay)\n\n return optimizer\n\n\ndef _create_learning_rate(learning_rate_config, global_summaries):\n \"\"\"Create optimizer learning rate based on config.\n\n Args:\n learning_rate_config: A LearningRate proto message.\n global_summaries: A set to attach learning rate summary to.\n\n Returns:\n A learning rate.\n\n Raises:\n ValueError: when using an unsupported input data type.\n \"\"\"\n learning_rate = None\n learning_rate_type = learning_rate_config.WhichOneof('learning_rate')\n if learning_rate_type == 'constant_learning_rate':\n config = learning_rate_config.constant_learning_rate\n learning_rate = config.learning_rate\n\n if learning_rate_type == 'exponential_decay_learning_rate':\n config = learning_rate_config.exponential_decay_learning_rate\n learning_rate = tf.train.exponential_decay(\n config.initial_learning_rate,\n slim.get_or_create_global_step(),\n config.decay_steps,\n config.decay_factor,\n staircase=config.staircase)\n\n if learning_rate_type == 'manual_step_learning_rate':\n config = learning_rate_config.manual_step_learning_rate\n if not config.schedule:\n raise ValueError('Empty learning rate schedule.')\n learning_rate_step_boundaries = [x.step for x in config.schedule]\n learning_rate_sequence = [config.initial_learning_rate]\n learning_rate_sequence += [x.learning_rate for x in config.schedule]\n learning_rate = learning_schedules.manual_stepping(\n slim.get_or_create_global_step(), learning_rate_step_boundaries,\n learning_rate_sequence)\n\n if learning_rate is None:\n raise ValueError('Learning_rate %s not supported.' % learning_rate_type)\n\n global_summaries.add(tf.summary.scalar('Learning Rate', learning_rate))\n return learning_rate\n",
"import cv2\nimport numpy as np\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\nimport json\nimport time\nimport glob\n\nfrom io import StringIO\nfrom PIL import Image\n\nimport matplotlib.pyplot as plt\n\nfrom object_detection.utils import visualization_utils as vis_util\nfrom object_detection.utils import label_map_util\n\nfrom multiprocessing.dummy import Pool as ThreadPool\n\n# Windows dependencies\n# - Python 2.7.6: http://www.python.org/download/\n# - OpenCV: http://opencv.org/\n# - Numpy -- get numpy from here because the official builds don't support x64:\n# http://www.lfd.uci.edu/~gohlke/pythonlibs/#numpy\n\n# Mac Dependencies\n# - brew install python\n# - pip install numpy\n# - brew tap homebrew/science\n# - brew install opencv\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n ret, frame = cap.read()\n rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)\n out = cv2.imwrite(os.path.join('image_upload', 'capture.jpg'), frame)\n\n cv2.imshow('Buttons Detection', rgb)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n\n\ndef get_correct_path(files):\n\n return os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), files)\n\n\ndef object_detection_runner(filename):\n\n UPLOAD_FOLDER = 'image_upload'\n OUTPUT_FOLDER = 'image_output'\n\n MAX_NUMBER_OF_BOXES = 10\n MINIMUM_CONFIDENCE = 0.9\n\n PATH_TO_LABELS = get_correct_path('annotations/label_map.pbtxt')\n PATH_TO_TEST_IMAGES_DIR = UPLOAD_FOLDER\n\n label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=sys.maxsize,\n use_display_name=True)\n CATEGORY_INDEX = label_map_util.create_category_index(categories)\n\n # Path to frozen detection graph. 
This is the actual model that is used for the object detection.\n MODEL_NAME = get_correct_path('graphs')\n PATH_TO_CKPT = MODEL_NAME + '/ssd_mobilenet_v1.pb'\n\n def load_image_into_numpy_array(image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n def detect_objects(image_path):\n import ntpath\n head, tail = ntpath.split(image_path)\n image_name = tail or ntpath.basename(head)\n print(image_name)\n\n image = Image.open(image_path)\n (im_width, im_height) = image.size\n image_np = load_image_into_numpy_array(image)\n image_np_expanded = np.expand_dims(image_np, axis=0)\n\n (boxes, scores, classes, num) = sess.run([detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n\n vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n CATEGORY_INDEX,\n min_score_thresh=MINIMUM_CONFIDENCE,\n use_normalized_coordinates=True,\n line_thickness=8)\n fig = plt.figure()\n dpi = 100\n im_width_inches = im_width / dpi\n im_height_inches = im_height / dpi\n fig.set_size_inches(im_width_inches, im_height_inches)\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n\n plt.imshow(image_np, aspect='auto')\n plt.savefig(os.path.join(OUTPUT_FOLDER, image_name), dpi=62)\n plt.close(fig)\n\n # TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image-{}.jpg'.format(i)) for i in range(1, 4) ]\n TEST_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, filename)\n\n # Load model into memory\n print('Loading model...')\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n print('detecting...')\n with 
detection_graph.as_default():\n with tf.Session(graph=detection_graph) as sess:\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n detect_objects(TEST_IMAGE_PATH)\n"
] | [
[
"tensorflow.contrib.opt.MovingAverageOptimizer",
"tensorflow.summary.scalar"
],
[
"matplotlib.pyplot.imshow",
"tensorflow.Graph",
"numpy.expand_dims",
"tensorflow.import_graph_def",
"tensorflow.gfile.GFile",
"numpy.squeeze",
"matplotlib.pyplot.Axes",
"matplotlib.pyplot.close",
"tensorflow.Session",
"tensorflow.GraphDef",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
while0l1/ranzcr2020 | [
"64edbcbe638e7d52cd831e9edd40c72e96a7e3a0"
] | [
"load_dataset.py"
] | [
"from sklearn.model_selection import GroupKFold\nimport pandas as pd\nimport cv2\nimport os\nimport numpy as np\nimport ast\nimport torch\nimport albumentations\nfrom config import CFG\nfrom torch.utils.data import DataLoader\n\nclass RanzcrDataset(object):\n def __init__(self, root, df, mode='test', transforms=None, train_anno=None):\n self.root = root\n self.transforms = transforms\n self.filenames = (df['StudyInstanceUID']).values\n self.mode = mode\n self.train_anno = train_anno\n\n def __len__(self):\n return len(self.filenames)\n\n def __getitem__(self, idx):\n img_path = os.path.join(self.root, self.filenames[idx] + '.jpg')\n img = cv2.imread(img_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n if self.mode == 'test':\n img = self.transforms(image=img)['image']\n img = img.astype('float32').transpose(2, 0, 1) / 255.\n return img\n else:\n mask = np.zeros((img.shape[0], img.shape[1], 2)).astype('float32')\n mask0 = mask[:, :, 0].copy()\n mask1 = mask[:, :, 1].copy()\n this_anno = self.train_anno.query(f'StudyInstanceUID == \"{self.filenames[idx]}\"')\n for _, anno in this_anno.iterrows():\n data = np.array(ast.literal_eval(anno[\"data\"]))\n mask0 = cv2.polylines(mask0, np.int32([data]), isClosed=False, color=1, thickness=15, lineType=16) # 管道位置画线\n mask1 = cv2.circle(mask1, (data[0][0], data[0][1]), radius=15, color=1, thickness=25) # 管道开始位置画圈\n mask1 = cv2.circle(mask1, (data[-1][0], data[-1][1]), radius=15, color=1, thickness=25) # 管道结束位置画圈\n \n mask[:, :, 0] = mask0\n mask[:, :, 1] = mask1\n res = self.transforms(image=img, mask=mask)\n img = res['image']\n mask = res['mask']\n img = img.astype('float32').transpose(2, 0, 1) / 255.\n mask = mask.astype('float32').transpose(2, 0, 1)\n return torch.tensor(img), torch.tensor(mask)\n \ntransforms_train = albumentations.Compose([\n albumentations.Resize(CFG.image_size, CFG.image_size), \n albumentations.HorizontalFlip(p=0.5),\n albumentations.RandomBrightness(limit=0.1, p=0.75),\n # 
albumentations.OneOf([\n # albumentations.GaussNoise(var_limit=[10, 50]),\n # albumentations.MotionBlur(),\n # albumentations.MedianBlur(),\n # ], p=0.2),\n albumentations.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.2, rotate_limit=30, border_mode=0, p=0.75),\n albumentations.Cutout(max_h_size=int(CFG.image_size * 0.3), max_w_size=int(CFG.image_size * 0.3), num_holes=1, p=0.75),\n# albumentations.Normalize(),\n])\ntransforms_valid = albumentations.Compose([\n albumentations.Resize(CFG.image_size, CFG.image_size),\n# albumentations.Normalize(),\n])\n\n\n'''\nK-fold划分数据集\n'''\ndef get_folds(nfolds=5):\n traindf = pd.read_csv(CFG.train_df_path)\n folds = traindf.copy()\n Fold = GroupKFold(n_splits=nfolds)\n groups = folds['PatientID'].values\n for n, (train_index, val_index) in enumerate(Fold.split(folds, folds[CFG.target_cols], groups)):\n folds.loc[val_index, 'fold'] = int(n) # 添加一个fold列,将val_index对应的行设置为n\n\n folds['fold'] = folds['fold'].astype(int)\n return folds\n\n'''\n得到有标注信息的样本表格\n'''\ndef get_df_with_anno():\n folds = get_folds(5) # k折\n train_anno = pd.read_csv(CFG.train_anno_path)\n unique_anno = train_anno.drop_duplicates(['StudyInstanceUID']).copy() # 去掉重复的样本名\n unique_anno['with_anno'] = True\n\n # 连接两个表\n train_v2 = pd.merge(folds, unique_anno[['StudyInstanceUID', 'with_anno']], left_on='StudyInstanceUID', right_on='StudyInstanceUID', how='left')\n\n # 将没有annotation的样本设置为False\n train_v2['with_anno'] = train_v2['with_anno'].fillna(value=False)\n sample_with_anno_df = train_v2[train_v2['with_anno'] == True].copy()\n return sample_with_anno_df\n\ndef get_seg_loader(fold_id, debug=False):\n sample_with_anno_df = get_df_with_anno()\n train_df = sample_with_anno_df[sample_with_anno_df.fold != fold_id]\n valid_df = sample_with_anno_df[sample_with_anno_df.fold == fold_id]\n\n # 小样本用作测试\n if debug:\n train_df = train_df.iloc[:16]\n valid_df = train_df.iloc[:16]\n\n train_anno = pd.read_csv(CFG.train_anno_path)\n\n train_data = 
RanzcrDataset(CFG.train_img_path, train_df, mode='train', transforms=transforms_train, train_anno=train_anno)\n valid_data = RanzcrDataset(CFG.train_img_path, valid_df, mode='valid', transforms=transforms_valid, train_anno=train_anno)\n\n train_loader = DataLoader(train_data, batch_size=CFG.seg_batch_size, shuffle=True, num_workers=CFG.num_workers)\n valid_loader = DataLoader(valid_data, batch_size=CFG.seg_batch_size, shuffle=False, num_workers=CFG.num_workers)\n return train_loader, valid_loader"
] | [
[
"pandas.merge",
"pandas.read_csv",
"numpy.int32",
"torch.utils.data.DataLoader",
"torch.tensor",
"numpy.zeros",
"sklearn.model_selection.GroupKFold"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
jimthompson5802/ludwig | [
"8a369328a3f839d9cdb3710be315952c7891d7c0",
"8a369328a3f839d9cdb3710be315952c7891d7c0",
"8a369328a3f839d9cdb3710be315952c7891d7c0"
] | [
"tests/ludwig/encoders/test_text_encoders.py",
"tests/ludwig/utils/test_torch_utils.py",
"ludwig/features/numerical_feature.py"
] | [
"import pytest\nimport torch\n\nfrom ludwig.encoders import text_encoders\n\n\[email protected](\"use_pretrained\", [False])\[email protected](\"reduce_output\", [None, \"sum\"])\[email protected](\"max_sequence_length\", [20])\ndef test_albert_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):\n albert_encoder = text_encoders.ALBERTEncoder(\n use_pretrained=use_pretrained,\n reduce_output=reduce_output,\n max_sequence_length=max_sequence_length,\n )\n inputs = torch.rand((2, max_sequence_length)).type(albert_encoder.input_dtype)\n inputs = torch.rand((2, max_sequence_length)).type(albert_encoder.input_dtype)\n outputs = albert_encoder(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == albert_encoder.output_shape\n\n\[email protected](\"use_pretrained\", [False])\[email protected](\"reduce_output\", [None, \"cls_pooled\", \"sum\"])\[email protected](\"max_sequence_length\", [20])\ndef test_bert_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):\n bert = text_encoders.BERTEncoder(\n use_pretrained=use_pretrained,\n reduce_output=reduce_output,\n max_sequence_length=max_sequence_length,\n )\n inputs = torch.rand((2, max_sequence_length)).type(bert.input_dtype)\n outputs = bert(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == bert.output_shape\n\n\[email protected](\"use_pretrained\", [False])\[email protected](\"reduce_output\", [\"last\", \"sum\", \"mean\"])\[email protected](\"max_sequence_length\", [20])\ndef test_xlm_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):\n xlm_encoder = text_encoders.XLMEncoder(\n use_pretrained=use_pretrained,\n reduce_output=reduce_output,\n max_sequence_length=max_sequence_length,\n )\n inputs = torch.rand((2, max_sequence_length)).type(xlm_encoder.input_dtype)\n outputs = xlm_encoder(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == xlm_encoder.output_shape\n\n\[email protected](\"use_pretrained\", [False])\[email 
protected](\"reduce_output\", [None, \"sum\"])\[email protected](\"max_sequence_length\", [20])\ndef test_gpt_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):\n gpt_encoder = text_encoders.GPTEncoder(\n use_pretrained=use_pretrained,\n reduce_output=reduce_output,\n max_sequence_length=max_sequence_length,\n )\n inputs = torch.rand((2, max_sequence_length)).type(gpt_encoder.input_dtype)\n outputs = gpt_encoder(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == gpt_encoder.output_shape\n\n\[email protected](\"use_pretrained\", [False])\[email protected](\"reduce_output\", [\"cls_pooled\", \"sum\"])\[email protected](\"max_sequence_length\", [20])\ndef test_roberta_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):\n roberta_encoder = text_encoders.RoBERTaEncoder(\n use_pretrained=use_pretrained,\n reduce_output=reduce_output,\n max_sequence_length=max_sequence_length,\n )\n inputs = torch.rand((2, max_sequence_length)).type(roberta_encoder.input_dtype)\n outputs = roberta_encoder(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == roberta_encoder.output_shape\n\n\[email protected](\"use_pretrained\", [True, False])\[email protected](\"reduce_output\", [None, \"sum\"])\[email protected](\"max_sequence_length\", [20])\ndef test_gpt2_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):\n gpt_encoder = text_encoders.GPT2Encoder(\n use_pretrained=use_pretrained,\n reduce_output=reduce_output,\n max_sequence_length=max_sequence_length,\n )\n inputs = torch.rand((2, max_sequence_length)).type(gpt_encoder.input_dtype)\n outputs = gpt_encoder(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == gpt_encoder.output_shape\n\n\[email protected](\"use_pretrained\", [False])\[email protected](\"reduce_output\", [None, \"sum\"])\[email protected](\"max_sequence_length\", [20])\ndef test_distil_bert(use_pretrained: bool, reduce_output: str, max_sequence_length: int):\n distil_bert_encoder 
= text_encoders.DistilBERTEncoder(\n use_pretrained=use_pretrained,\n reduce_output=reduce_output,\n max_sequence_length=max_sequence_length,\n )\n inputs = torch.rand((2, max_sequence_length)).type(distil_bert_encoder.input_dtype)\n outputs = distil_bert_encoder(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == distil_bert_encoder.output_shape\n\n\[email protected](\"use_pretrained\", [False])\[email protected](\"reduce_output\", [None, \"sum\"])\[email protected](\"max_sequence_length\", [20])\ndef test_transfoxl_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):\n transfo = text_encoders.TransformerXLEncoder(\n use_pretrained=use_pretrained,\n reduce_output=reduce_output,\n max_sequence_length=max_sequence_length,\n )\n inputs = torch.randint(10, (2, max_sequence_length)).type(transfo.input_dtype)\n outputs = transfo(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == transfo.output_shape\n\n\[email protected](\"use_pretrained\", [False])\[email protected](\"reduce_output\", [None, \"sum\"])\[email protected](\"max_sequence_length\", [20])\ndef test_ctrl_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):\n encoder = text_encoders.CTRLEncoder(\n max_sequence_length,\n use_pretrained=use_pretrained,\n reduce_output=reduce_output,\n )\n inputs = torch.rand((2, max_sequence_length)).type(encoder.input_dtype)\n outputs = encoder(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == encoder.output_shape\n\n\[email protected](\"use_pretrained\", [False])\[email protected](\"reduce_output\", [None, \"cls_pooled\"])\[email protected](\"max_sequence_length\", [20])\ndef test_camembert_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):\n encoder = text_encoders.CamemBERTEncoder(\n use_pretrained=use_pretrained,\n reduce_output=reduce_output,\n max_sequence_length=max_sequence_length,\n )\n inputs = torch.rand((2, max_sequence_length)).type(encoder.input_dtype)\n outputs = 
encoder(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == encoder.output_shape\n\n\[email protected](\"use_pretrained\", [False])\[email protected](\"reduce_output\", [None, \"cls_pooled\"])\[email protected](\"max_sequence_length\", [20])\ndef test_longformer_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):\n encoder = text_encoders.LongformerEncoder(\n use_pretrained=use_pretrained,\n reduce_output=reduce_output,\n max_sequence_length=max_sequence_length,\n )\n inputs = torch.rand((2, max_sequence_length)).type(encoder.input_dtype)\n outputs = encoder(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == encoder.output_shape\n\n\[email protected](\"use_pretrained\", [False])\[email protected](\"reduce_output\", [None, \"sum\"])\[email protected](\"max_sequence_length\", [20])\ndef test_mt5_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):\n mt5_encoder = text_encoders.MT5Encoder(\n use_pretrained=use_pretrained,\n reduce_output=reduce_output,\n max_sequence_length=max_sequence_length,\n )\n inputs = torch.rand((2, max_sequence_length)).type(mt5_encoder.input_dtype)\n outputs = mt5_encoder(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == mt5_encoder.output_shape\n\n\[email protected](\"use_pretrained\", [False])\[email protected](\"reduce_output\", [None, \"sum\"])\[email protected](\"max_sequence_length\", [20])\ndef test_xlmroberta_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):\n xlmroberta_encoder = text_encoders.XLMRoBERTaEncoder(\n use_pretrained=use_pretrained,\n reduce_output=reduce_output,\n max_sequence_length=max_sequence_length,\n )\n inputs = torch.rand((2, max_sequence_length)).type(xlmroberta_encoder.input_dtype)\n outputs = xlmroberta_encoder(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == xlmroberta_encoder.output_shape\n\n\[email protected](\"use_pretrained\", [False])\[email protected](\"reduce_output\", [None, 
\"cls_pooled\"])\[email protected](\"max_sequence_length\", [20])\ndef test_longformer_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):\n encoder = text_encoders.LongformerEncoder(\n use_pretrained=use_pretrained, reduce_output=reduce_output, max_sequence_length=max_sequence_length\n )\n inputs = torch.rand((2, max_sequence_length)).type(encoder.input_dtype)\n outputs = encoder(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == encoder.output_shape\n\n\[email protected](\"use_pretrained\", [False])\[email protected](\"reduce_output\", [None, \"sum\"])\[email protected](\"max_sequence_length\", [20])\ndef test_electra_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):\n encoder = text_encoders.ELECTRAEncoder(\n use_pretrained=use_pretrained, reduce_output=reduce_output, max_sequence_length=max_sequence_length\n )\n inputs = torch.rand((2, max_sequence_length)).type(encoder.input_dtype)\n outputs = encoder(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == encoder.output_shape\n\n\[email protected](\"pretrained_model_name_or_path\", [\"bert-base-uncased\"])\[email protected](\"reduce_output\", [None, \"sum\", \"cls_pooled\"])\[email protected](\"max_sequence_length\", [20])\ndef test_auto_transformer_encoder(pretrained_model_name_or_path: str, reduce_output: str, max_sequence_length: int):\n encoder = text_encoders.AutoTransformerEncoder(\n pretrained_model_name_or_path=pretrained_model_name_or_path,\n reduce_output=reduce_output,\n max_sequence_length=max_sequence_length,\n )\n inputs = torch.rand((2, max_sequence_length)).type(encoder.input_dtype)\n outputs = encoder(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == encoder.output_shape\n\n\[email protected](\"use_pretrained\", [False])\[email protected](\"reduce_output\", [None, \"sum\"])\[email protected](\"max_sequence_length\", [20])\ndef test_flaubert_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):\n 
encoder = text_encoders.FlauBERTEncoder(\n use_pretrained=use_pretrained, reduce_output=reduce_output, max_sequence_length=max_sequence_length\n )\n inputs = torch.rand((2, max_sequence_length)).type(encoder.input_dtype)\n outputs = encoder(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == encoder.output_shape\n\n\[email protected](\"use_pretrained\", [False])\[email protected](\"reduce_output\", [None, \"sum\"])\[email protected](\"max_sequence_length\", [20])\ndef test_t5_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):\n encoder = text_encoders.T5Encoder(\n use_pretrained=use_pretrained, reduce_output=reduce_output, max_sequence_length=max_sequence_length\n )\n inputs = torch.rand((2, max_sequence_length)).type(encoder.input_dtype)\n outputs = encoder(inputs)\n assert outputs[\"encoder_output\"].shape[1:] == encoder.output_shape\n",
"import contextlib\nimport os\nfrom typing import List\nfrom unittest.mock import Mock, patch\n\nimport pytest\nimport torch\n\nfrom ludwig.utils.torch_utils import (\n _get_torch_init_params,\n _set_torch_init_params,\n initialize_pytorch,\n sequence_length_2D,\n sequence_length_3D,\n)\n\n\[email protected](\"input_sequence\", [[[0, 1, 1], [2, 0, 0], [3, 3, 3]]])\[email protected](\"expected_output\", [[2, 1, 3]])\ndef test_sequence_length_2D(input_sequence: List[List[int]], expected_output: List[int]):\n output_seq_length = sequence_length_2D(torch.tensor(input_sequence))\n assert torch.equal(torch.tensor(expected_output), output_seq_length)\n\n\[email protected](\"input_sequence\", [[[[-1, 0, 1], [1, -2, 0]], [[0, 0, 0], [3, 0, -2]]]])\[email protected](\"expected_output\", [[2, 1]])\ndef test_sequence_length_3D(input_sequence: List[List[List[int]]], expected_output: List[int]):\n input_sequence = torch.tensor(input_sequence, dtype=torch.int32)\n expected_output = torch.tensor(expected_output, dtype=torch.int32)\n output_seq_length = sequence_length_3D(input_sequence)\n assert torch.equal(expected_output, output_seq_length)\n\n\[email protected]\ndef clean_params():\n prev = _get_torch_init_params()\n try:\n _set_torch_init_params(None)\n if \"CUDA_VISIBLE_DEVICES\" in os.environ:\n del os.environ[\"CUDA_VISIBLE_DEVICES\"]\n yield\n finally:\n _set_torch_init_params(prev)\n\n\n@patch(\"ludwig.utils.torch_utils.torch\")\ndef test_initialize_pytorch_only_once(mock_torch):\n mock_torch.cuda.is_available.return_value = True\n mock_torch.cuda.device_count.return_value = 4\n with clean_params():\n # During first time initialization, set pytorch parallelism\n initialize_pytorch(allow_parallel_threads=False)\n mock_torch.set_num_threads.assert_called_once()\n mock_torch.set_num_interop_threads.assert_called_once()\n\n # Reset call counts on all threading calls\n mock_torch.reset_mock()\n\n # In the second call to initialization, avoid calling these methods again, as 
pytorch\n # will raise an exception\n initialize_pytorch(allow_parallel_threads=False)\n mock_torch.set_num_threads.assert_not_called()\n mock_torch.set_num_interop_threads.assert_not_called()\n\n # No GPUs were specified, so this should not have been called even once\n mock_torch.cuda.memory.set_per_process_memory_fraction.assert_not_called()\n\n\n@patch(\"ludwig.utils.torch_utils.torch\")\ndef test_initialize_pytorch_with_gpu_list(mock_torch):\n # For test purposes, these devices can be anything, we just need to be able to uniquely\n # identify them.\n mock_torch.cuda.is_available.return_value = True\n mock_torch.cuda.device_count.return_value = 4\n with clean_params():\n initialize_pytorch(gpus=[1, 2])\n assert os.environ[\"CUDA_VISIBLE_DEVICES\"] == \"1,2\"\n\n\n@patch(\"ludwig.utils.torch_utils.torch\")\ndef test_initialize_pytorch_with_gpu_string(mock_torch):\n mock_torch.cuda.is_available.return_value = True\n mock_torch.cuda.device_count.return_value = 4\n with clean_params():\n initialize_pytorch(gpus=\"1,2\")\n assert os.environ[\"CUDA_VISIBLE_DEVICES\"] == \"1,2\"\n\n\n@patch(\"ludwig.utils.torch_utils.torch\")\ndef test_initialize_pytorch_with_gpu_int(mock_torch):\n mock_torch.cuda.is_available.return_value = True\n mock_torch.cuda.device_count.return_value = 4\n with clean_params():\n initialize_pytorch(gpus=1)\n mock_torch.cuda.set_device.assert_called_with(1)\n assert \"CUDA_VISIBLE_DEVICES\" not in os.environ\n\n\n@patch(\"ludwig.utils.torch_utils.torch\")\ndef test_initialize_pytorch_without_gpu(mock_torch):\n mock_torch.cuda.is_available.return_value = True\n mock_torch.cuda.device_count.return_value = 4\n with clean_params():\n initialize_pytorch(gpus=-1)\n assert os.environ[\"CUDA_VISIBLE_DEVICES\"] == \"\"\n\n\n@patch(\"ludwig.utils.torch_utils.torch\")\ndef test_initialize_pytorch_with_horovod(mock_torch):\n mock_torch.cuda.is_available.return_value = True\n mock_torch.cuda.device_count.return_value = 4\n\n mock_hvd = Mock()\n 
mock_hvd.local_rank.return_value = 1\n mock_hvd.local_size.return_value = 4\n\n with clean_params():\n initialize_pytorch(horovod=mock_hvd)\n\n mock_torch.cuda.set_device.assert_called_with(1)\n assert \"CUDA_VISIBLE_DEVICES\" not in os.environ\n\n\n@patch(\"ludwig.utils.torch_utils.warnings\")\n@patch(\"ludwig.utils.torch_utils.torch\")\ndef test_initialize_pytorch_with_horovod_bad_local_rank(mock_torch, mock_warnings):\n \"\"\"In this scenario, the local_size 5 is out of the bounds of the GPU indices.\"\"\"\n mock_torch.cuda.is_available.return_value = True\n mock_torch.cuda.device_count.return_value = 4\n\n mock_hvd = Mock()\n mock_hvd.local_rank.return_value = 1\n mock_hvd.local_size.return_value = 5\n\n with clean_params():\n initialize_pytorch(horovod=mock_hvd)\n\n assert os.environ[\"CUDA_VISIBLE_DEVICES\"] == \"\"\n mock_warnings.warn.assert_called()\n\n\n@patch(\"ludwig.utils.torch_utils.torch\")\ndef test_initialize_pytorch_with_horovod_explicit_gpus(mock_torch):\n mock_torch.cuda.is_available.return_value = True\n mock_torch.cuda.device_count.return_value = 4\n\n mock_hvd = Mock()\n mock_hvd.local_rank.return_value = 1\n mock_hvd.local_size.return_value = 4\n\n with clean_params():\n initialize_pytorch(gpus=\"-1\", horovod=mock_hvd)\n\n assert os.environ[\"CUDA_VISIBLE_DEVICES\"] == \"\"\n",
"#! /usr/bin/env python\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport logging\nimport random\nfrom typing import Dict\n\nimport numpy as np\nimport torch\n\nfrom ludwig.constants import (\n COLUMN,\n FILL_WITH_CONST,\n HIDDEN,\n LOGITS,\n LOSS,\n MEAN_ABSOLUTE_ERROR,\n MEAN_SQUARED_ERROR,\n MISSING_VALUE_STRATEGY_OPTIONS,\n NAME,\n NUMERICAL,\n PREDICTIONS,\n PROC_COLUMN,\n R2,\n ROOT_MEAN_SQUARED_ERROR,\n ROOT_MEAN_SQUARED_PERCENTAGE_ERROR,\n SUM,\n TIED,\n TYPE,\n)\nfrom ludwig.features.base_feature import InputFeature, OutputFeature\nfrom ludwig.utils import output_feature_utils\nfrom ludwig.utils.misc_utils import get_from_registry, set_default_value, set_default_values\n\nlogger = logging.getLogger(__name__)\n\n\nclass ZScoreTransformer:\n def __init__(self, mean: float = None, std: float = None, **kwargs: dict):\n self.mu = mean\n self.sigma = std\n\n def transform(self, x: np.ndarray) -> np.ndarray:\n return (x - self.mu) / self.sigma\n\n def inverse_transform(self, x: np.ndarray) -> np.ndarray:\n return x * self.sigma + self.mu\n\n @staticmethod\n def fit_transform_params(column: np.ndarray, backend: \"Backend\") -> dict: # noqa\n compute = backend.df_engine.compute\n return {\n \"mean\": compute(column.astype(np.float32).mean()),\n \"std\": compute(column.astype(np.float32).std()),\n }\n\n\nclass MinMaxTransformer:\n def 
__init__(self, min: float = None, max: float = None, **kwargs: dict):\n self.min_value = min\n self.max_value = max\n self.range = None if min is None or max is None else max - min\n\n def transform(self, x: np.ndarray) -> np.ndarray:\n return (x - self.min_value) / self.range\n\n def inverse_transform(self, x: np.ndarray) -> np.ndarray:\n if self.range is None:\n raise ValueError(\"Numeric transformer needs to be instantiated with \" \"min and max values.\")\n return x * self.range + self.min_value\n\n @staticmethod\n def fit_transform_params(column: np.ndarray, backend: \"Backend\") -> dict: # noqa\n compute = backend.df_engine.compute\n return {\n \"min\": compute(column.astype(np.float32).min()),\n \"max\": compute(column.astype(np.float32).max()),\n }\n\n\nclass Log1pTransformer:\n def __init__(self, **kwargs: dict):\n pass\n\n def transform(self, x: np.ndarray) -> np.ndarray:\n if np.any(x <= 0):\n raise ValueError(\n \"One or more values are non-positive. \" \"log1p normalization is defined only for positive values.\"\n )\n return np.log1p(x)\n\n def inverse_transform(self, x: np.ndarray) -> np.ndarray:\n return np.expm1(x)\n\n @staticmethod\n def fit_transform_params(column: np.ndarray, backend: \"Backend\") -> dict: # noqa\n return {}\n\n\nclass IdentityTransformer:\n def __init__(self, **kwargs):\n pass\n\n def transform(self, x: np.ndarray) -> np.ndarray:\n return x\n\n def inverse_transform(self, x: np.ndarray) -> np.ndarray:\n return x\n\n @staticmethod\n def fit_transform_params(column: np.ndarray, backend: \"Backend\") -> dict: # noqa\n return {}\n\n\nnumeric_transformation_registry = {\n \"minmax\": MinMaxTransformer,\n \"zscore\": ZScoreTransformer,\n \"log1p\": Log1pTransformer,\n None: IdentityTransformer,\n}\n\n\nclass NumericalFeatureMixin:\n type = NUMERICAL\n preprocessing_defaults = {\n \"missing_value_strategy\": FILL_WITH_CONST,\n \"fill_value\": 0,\n \"normalization\": None,\n }\n\n preprocessing_schema = {\n \"missing_value_strategy\": 
{\n \"type\": \"string\",\n \"enum\": MISSING_VALUE_STRATEGY_OPTIONS,\n },\n \"fill_value\": {\"type\": \"number\"},\n \"computed_fill_value\": {\"type\": \"number\"},\n \"normalization\": {\n \"type\": [\"string\", \"null\"],\n \"enum\": list(numeric_transformation_registry.keys()),\n },\n }\n\n @staticmethod\n def cast_column(column, backend):\n return backend.df_engine.df_lib.to_numeric(column, errors=\"coerce\").astype(np.float32)\n\n @staticmethod\n def get_feature_meta(column, preprocessing_parameters, backend):\n numeric_transformer = get_from_registry(\n preprocessing_parameters.get(\"normalization\", None),\n numeric_transformation_registry,\n )\n\n return numeric_transformer.fit_transform_params(column, backend)\n\n @staticmethod\n def add_feature_data(\n feature,\n input_df,\n proc_df,\n metadata,\n preprocessing_parameters,\n backend,\n skip_save_processed_input,\n ):\n proc_df[feature[PROC_COLUMN]] = input_df[feature[COLUMN]].astype(np.float32).values\n\n # normalize data as required\n numeric_transformer = get_from_registry(\n preprocessing_parameters.get(\"normalization\", None),\n numeric_transformation_registry,\n )(**metadata[feature[NAME]])\n\n proc_df[feature[PROC_COLUMN]] = numeric_transformer.transform(proc_df[feature[PROC_COLUMN]])\n\n return proc_df\n\n\nclass NumericalInputFeature(NumericalFeatureMixin, InputFeature):\n encoder = \"passthrough\"\n\n def __init__(self, feature, encoder_obj=None):\n # Required for certain encoders, maybe pass into initialize_encoder\n super().__init__(feature)\n self.overwrite_defaults(feature)\n feature[\"input_size\"] = self.input_shape[-1]\n if encoder_obj:\n self.encoder_obj = encoder_obj\n else:\n self.encoder_obj = self.initialize_encoder(feature)\n\n def forward(self, inputs):\n assert isinstance(inputs, torch.Tensor)\n assert inputs.dtype == torch.float32 or inputs.dtype == torch.float64\n assert len(inputs.shape) == 1 or (len(inputs.shape) == 2 and inputs.shape[1] == 1)\n\n if len(inputs.shape) == 
1:\n inputs = inputs[:, None]\n inputs_encoded = self.encoder_obj(inputs)\n\n return inputs_encoded\n\n def create_sample_input(self):\n # Used by get_model_inputs(), which is used for tracing-based torchscript generation.\n return torch.Tensor([random.randint(1, 100), random.randint(1, 100)])\n\n @property\n def input_shape(self) -> torch.Size:\n return torch.Size([1])\n\n @property\n def output_shape(self) -> torch.Size:\n return torch.Size(self.encoder_obj.output_shape)\n\n @staticmethod\n def update_config_with_metadata(input_feature, feature_metadata, *args, **kwargs):\n pass\n\n @staticmethod\n def populate_defaults(input_feature):\n set_default_value(input_feature, TIED, None)\n\n\nclass NumericalOutputFeature(NumericalFeatureMixin, OutputFeature):\n decoder = \"regressor\"\n loss = {TYPE: MEAN_SQUARED_ERROR}\n metric_functions = {\n LOSS: None,\n MEAN_SQUARED_ERROR: None,\n MEAN_ABSOLUTE_ERROR: None,\n ROOT_MEAN_SQUARED_ERROR: None,\n ROOT_MEAN_SQUARED_PERCENTAGE_ERROR: None,\n R2: None,\n }\n default_validation_metric = MEAN_SQUARED_ERROR\n clip = None\n\n def __init__(self, feature):\n super().__init__(feature)\n self.overwrite_defaults(feature)\n feature[\"input_size\"] = self.input_shape[-1]\n self.decoder_obj = self.initialize_decoder(feature)\n self._setup_loss()\n self._setup_metrics()\n\n def logits(self, inputs, **kwargs): # hidden\n hidden = inputs[HIDDEN]\n return self.decoder_obj(hidden)\n\n def predictions(self, inputs: Dict[str, torch.Tensor], feature_name: str, **kwargs):\n logits = output_feature_utils.get_output_feature_tensor(inputs, feature_name, LOGITS)\n predictions = logits\n\n if self.clip is not None:\n if isinstance(self.clip, (list, tuple)) and len(self.clip) == 2:\n predictions = torch.clamp(logits, self.clip[0], self.clip[1])\n\n logger.debug(f\" clipped_predictions: {predictions}\")\n else:\n raise ValueError(\n \"The clip parameter of {} is {}. 
\"\n \"It must be a list or a tuple of length 2.\".format(self.feature_name, self.clip)\n )\n\n return {PREDICTIONS: predictions, LOGITS: logits}\n\n def get_prediction_set(self):\n return {PREDICTIONS, LOGITS}\n\n @property\n def input_shape(self) -> torch.Size:\n return torch.Size([self.input_size])\n\n @classmethod\n def get_output_dtype(cls):\n return torch.float32\n\n @property\n def output_shape(self) -> torch.Size:\n return torch.Size([1])\n\n @staticmethod\n def update_config_with_metadata(output_feature, feature_metadata, *args, **kwargs):\n pass\n\n @staticmethod\n def calculate_overall_stats(predictions, targets, metadata):\n # no overall stats, just return empty dictionary\n return {}\n\n def postprocess_predictions(\n self,\n predictions,\n metadata,\n output_directory,\n backend,\n ):\n predictions_col = f\"{self.feature_name}_{PREDICTIONS}\"\n if predictions_col in predictions:\n # as needed convert predictions make to original value space\n numeric_transformer = get_from_registry(\n metadata[\"preprocessing\"].get(\"normalization\", None),\n numeric_transformation_registry,\n )(**metadata)\n predictions[predictions_col] = backend.df_engine.map_objects(\n predictions[predictions_col],\n lambda pred: numeric_transformer.inverse_transform(pred),\n )\n\n return predictions\n\n @staticmethod\n def populate_defaults(output_feature):\n set_default_value(output_feature, LOSS, {TYPE: \"mean_squared_error\", \"weight\": 1})\n set_default_value(output_feature[LOSS], TYPE, \"mean_squared_error\")\n set_default_value(output_feature[LOSS], \"weight\", 1)\n\n set_default_values(\n output_feature,\n {\n \"clip\": None,\n \"dependencies\": [],\n \"reduce_input\": SUM,\n \"reduce_dependencies\": SUM,\n },\n )\n"
] | [
[
"torch.randint",
"torch.rand"
],
[
"torch.equal",
"torch.tensor"
],
[
"torch.Size",
"numpy.expm1",
"numpy.any",
"numpy.log1p",
"torch.clamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ZitongYu/Flex-Modal-FAS | [
"b5aad6aae1737ea5746ee7ae7330eb9893043095"
] | [
"Load_FAS_MultiModal.py"
] | [
"from __future__ import print_function, division\nimport os\nimport torch\nimport pandas as pd\n#from skimage import io, transform\nimport cv2\nimport numpy as np\nimport random\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\nimport pdb\nimport math\nimport os \nimport imgaug.augmenters as iaa\n\n\n \n\n\n#face_scale = 0.9 #default for test, for training , can be set from [0.8 to 1.0]\n\n# data augment from 'imgaug' --> Add (value=(-40,40), per_channel=True), GammaContrast (gamma=(0.5,1.5))\nseq = iaa.Sequential([\n iaa.Add(value=(-40,40), per_channel=True), # Add color \n iaa.GammaContrast(gamma=(0.5,1.5)) # GammaContrast with a gamma of 0.5 to 1.5\n])\n\n\n\n# Tensor\nclass Cutout(object):\n def __init__(self, length=30):\n self.length = length\n\n def __call__(self, sample):\n img, image_x_depth, image_x_ir, spoofing_label, map_x1 = sample['image_x'],sample['image_x_depth'],sample['image_x_ir'],sample['spoofing_label'],sample['map_x1']\n h, w = img.shape[1], img.shape[2] # Tensor [1][2], nparray [0][1]\n mask = np.ones((h, w), np.float32)\n y = np.random.randint(h)\n x = np.random.randint(w)\n length_new = np.random.randint(1, self.length)\n \n y1 = np.clip(y - length_new // 2, 0, h)\n y2 = np.clip(y + length_new // 2, 0, h)\n x1 = np.clip(x - length_new // 2, 0, w)\n x2 = np.clip(x + length_new // 2, 0, w)\n\n mask[y1: y2, x1: x2] = 0.\n mask = torch.from_numpy(mask)\n mask = mask.expand_as(img)\n img *= mask\n image_x_depth *= mask\n image_x_ir *= mask\n \n return {'image_x': img, 'image_x_depth': image_x_depth, 'image_x_ir': image_x_ir, 'spoofing_label': spoofing_label, 'map_x1': map_x1}\n\n\nclass Normaliztion(object):\n \"\"\"\n same as mxnet, normalize into [-1, 1]\n image = (image - 127.5)/128\n \"\"\"\n def __call__(self, sample):\n image_x, image_x_depth, image_x_ir, spoofing_label, map_x1 = sample['image_x'],sample['image_x_depth'],sample['image_x_ir'],sample['spoofing_label'],sample['map_x1']\n 
new_image_x = (image_x - 127.5)/128 # [-1,1]\n new_image_x_depth = (image_x_depth - 127.5)/128 # [-1,1]\n new_image_x_ir = (image_x_ir - 127.5)/128 # [-1,1]\n return {'image_x': new_image_x, 'image_x_depth': new_image_x_depth, 'image_x_ir': new_image_x_ir, 'spoofing_label': spoofing_label, 'map_x1': map_x1}\n\n\n\nclass RandomHorizontalFlip(object):\n \"\"\"Horizontally flip the given Image randomly with a probability of 0.5.\"\"\"\n def __call__(self, sample):\n image_x, image_x_depth, image_x_ir, spoofing_label, map_x1 = sample['image_x'],sample['image_x_depth'],sample['image_x_ir'],sample['spoofing_label'],sample['map_x1']\n \n new_image_x = np.zeros((224, 224, 3))\n new_image_x_depth = np.zeros((224, 224, 3))\n new_image_x_ir = np.zeros((224, 224, 3))\n\n p = random.random()\n if p < 0.5:\n #print('Flip')\n\n new_image_x = cv2.flip(image_x, 1)\n new_image_x_depth = cv2.flip(image_x_depth, 1)\n new_image_x_ir = cv2.flip(image_x_ir, 1)\n\n \n return {'image_x': new_image_x, 'image_x_depth': new_image_x_depth, 'image_x_ir': new_image_x_ir, 'spoofing_label': spoofing_label, 'map_x1': map_x1}\n else:\n #print('no Flip')\n return {'image_x': image_x, 'image_x_depth': image_x_depth, 'image_x_ir': image_x_ir, 'spoofing_label': spoofing_label, 'map_x1': map_x1}\n\n\n\nclass ToTensor(object):\n \"\"\"\n Convert ndarrays in sample to Tensors.\n process only one batch every time\n \"\"\"\n\n def __call__(self, sample):\n image_x, image_x_depth, image_x_ir, spoofing_label, map_x1 = sample['image_x'],sample['image_x_depth'],sample['image_x_ir'],sample['spoofing_label'],sample['map_x1']\n \n # swap color axis because\n # numpy image: (batch_size) x H x W x C\n # torch image: (batch_size) x C X H X W\n image_x = image_x[:,:,::-1].transpose((2, 0, 1))\n image_x = np.array(image_x)\n \n image_x_depth = image_x_depth[:,:,::-1].transpose((2, 0, 1))\n image_x_depth = np.array(image_x_depth)\n \n image_x_ir = image_x_ir[:,:,::-1].transpose((2, 0, 1))\n image_x_ir = 
np.array(image_x_ir)\n \n map_x1 = np.array(map_x1)\n \n spoofing_label_np = np.array([0],dtype=np.long)\n spoofing_label_np[0] = spoofing_label\n \n \n return {'image_x': torch.from_numpy(image_x.astype(np.float)).float(), 'image_x_depth': torch.from_numpy(image_x_depth.astype(np.float)).float(), 'image_x_ir': torch.from_numpy(image_x_ir.astype(np.float)).float(), 'spoofing_label': torch.from_numpy(spoofing_label_np.astype(np.long)).long(), 'map_x1': torch.from_numpy(map_x1.astype(np.float)).float()}\n\n\n# /home/ztyu/FAS_dataset/OULU/Train_images/ 6_3_20_5_121_scene.jpg 6_3_20_5_121_scene.dat\n# /home/ztyu/FAS_dataset/OULU/IJCB_re/OULUtrain_images/ 6_3_20_5_121_depth1D.jpg\nclass Spoofing_train(Dataset):\n\n def __init__(self, info_list, root_dir, transform=None):\n\n self.landmarks_frame = pd.read_csv(info_list, delimiter=' ', header=None)\n self.root_dir = root_dir\n self.transform = transform\n\n def __len__(self):\n return len(self.landmarks_frame)\n\n \n def __getitem__(self, idx):\n #print(self.landmarks_frame.iloc[idx, 0])\n videoname = str(self.landmarks_frame.iloc[idx, 0])\n image_path = os.path.join(self.root_dir, videoname)\n \n videoname_depth = str(self.landmarks_frame.iloc[idx, 1])\n image_path_depth = os.path.join(self.root_dir, videoname_depth) \n \n videoname_ir = str(self.landmarks_frame.iloc[idx, 2])\n image_path_ir = os.path.join(self.root_dir, videoname_ir) \n \n \n #log_file2 = open('temp.txt', 'w')\n #log_file2.write('%s \\n' % (image_path))\n #log_file2.write('%s \\n' % (image_path_depth))\n #log_file2.write('%s \\n' % (image_path_ir))\n #log_file2.flush()\n \n image_x, map_x1 = self.get_single_image_x_RGB(image_path)\n image_x_depth = self.get_single_image_x(image_path_depth)\n image_x_ir = self.get_single_image_x(image_path_ir)\n\t\t \n spoofing_label = self.landmarks_frame.iloc[idx, 3]\n \n if spoofing_label == 1: # real\n spoofing_label = 1 # real\n #map_x1 = np.zeros((28, 28)) # real\n #map_x1 = np.ones((28, 28))\n else: # fake\n 
spoofing_label = 0\n #map_x1 = np.ones((28, 28)) # fake\n map_x1 = np.zeros((28, 28))\n \n\n sample = {'image_x': image_x, 'image_x_depth': image_x_depth, 'image_x_ir': image_x_ir, 'spoofing_label': spoofing_label, 'map_x1': map_x1}\n\n if self.transform:\n sample = self.transform(sample)\n return sample\n\n def get_single_image_x_RGB(self, image_path):\n \n image_x = np.zeros((224, 224, 3))\n binary_mask = np.zeros((28, 28))\n\n # RGB\n image_x_temp = cv2.imread(image_path)\n \n #cv2.imwrite('temp.jpg', image_x_temp)\n \n image_x = cv2.resize(image_x_temp, (224, 224))\n \n # data augment from 'imgaug' --> Add (value=(-40,40), per_channel=True), GammaContrast (gamma=(0.5,1.5))\n image_x_aug = seq.augment_image(image_x) \n \n image_x_temp_gray = cv2.imread(image_path, 0)\n image_x_temp_gray = cv2.resize(image_x_temp_gray, (28, 28))\n for i in range(28):\n for j in range(28):\n if image_x_temp_gray[i,j]>0:\n binary_mask[i,j]=1\n else:\n binary_mask[i,j]=0\n \n return image_x_aug, binary_mask\n \n def get_single_image_x(self, image_path):\n \n image_x = np.zeros((224, 224, 3))\n\n # RGB\n image_x_temp = cv2.imread(image_path)\n \n #cv2.imwrite('temp.jpg', image_x_temp)\n \n image_x = cv2.resize(image_x_temp, (224, 224))\n \n # data augment from 'imgaug' --> Add (value=(-40,40), per_channel=True), GammaContrast (gamma=(0.5,1.5))\n image_x_aug = seq.augment_image(image_x) \n \n \n return image_x_aug\n\n\n\n\n"
] | [
[
"pandas.read_csv",
"numpy.clip",
"torch.from_numpy",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
AtreyeeS/gammapy | [
"a3b47c3da08900a833f0360e0374203e054cadfc",
"a3b47c3da08900a833f0360e0374203e054cadfc"
] | [
"gammapy/datasets/flux_points.py",
"gammapy/data/hdu_index_table.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.table import Table\nfrom astropy.visualization import quantity_support\nfrom gammapy.modeling.models import DatasetModels\nfrom gammapy.utils.scripts import make_name, make_path\nfrom .core import Dataset\nfrom .utils import get_axes\n\nlog = logging.getLogger(__name__)\n\n__all__ = [\"FluxPointsDataset\"]\n\n\nclass FluxPointsDataset(Dataset):\n \"\"\"\n Fit a set of flux points with a parametric model.\n\n Parameters\n ----------\n models : `~gammapy.modeling.models.Models`\n Models (only spectral part needs to be set)\n data : `~gammapy.estimators.FluxPoints`\n Flux points.\n mask_fit : `numpy.ndarray`\n Mask to apply for fitting\n mask_safe : `numpy.ndarray`\n Mask defining the safe data range. By default upper limit values are excluded.\n meta_table : `~astropy.table.Table`\n Table listing informations on observations used to create the dataset.\n One line per observation for stacked datasets.\n\n Examples\n --------\n Load flux points from file and fit with a power-law model::\n\n from gammapy.modeling import Fit\n from gammapy.modeling.models import PowerLawSpectralModel, SkyModel\n from gammapy.estimators import FluxPoints\n from gammapy.datasets import FluxPointsDataset\n\n filename = \"$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits\"\n flux_points = FluxPoints.read(filename)\n\n model = SkyModel(spectral_model=PowerLawSpectralModel())\n\n dataset = FluxPointsDataset(model, flux_points)\n fit = Fit()\n result = fit.run([dataset])\n print(result)\n print(result.parameters.to_table())\n\n Note: In order to reproduce the example you need the tests datasets folder.\n You may download it with the command\n ``gammapy download datasets --tests --out $GAMMAPY_DATA``\n \"\"\"\n\n stat_type = \"chi2\"\n tag = \"FluxPointsDataset\"\n\n def __init__(\n self,\n models=None,\n data=None,\n mask_fit=None,\n 
mask_safe=None,\n name=None,\n meta_table=None,\n ):\n self.data = data\n self.mask_fit = mask_fit\n self._name = make_name(name)\n self.models = models\n self.meta_table = meta_table\n\n if mask_safe is None:\n mask_safe = (~data.is_ul).data[:, 0, 0]\n\n self.mask_safe = mask_safe\n\n @property\n def name(self):\n return self._name\n\n @property\n def gti(self):\n \"\"\"Good time interval info (`GTI`)\"\"\"\n return self.data.gti\n\n @property\n def models(self):\n return self._models\n\n @models.setter\n def models(self, models):\n if models is None:\n self._models = None\n else:\n models = DatasetModels(models)\n self._models = models.select(datasets_names=self.name)\n\n def write(self, filename, overwrite=True, **kwargs):\n \"\"\"Write flux point dataset to file.\n\n Parameters\n ----------\n filename : str\n Filename to write to.\n overwrite : bool\n Overwrite existing file.\n **kwargs : dict\n Keyword arguments passed to `~astropy.table.Table.write`.\n \"\"\"\n table = self.data.to_table()\n\n if self.mask_fit is None:\n mask_fit = self.mask_safe\n else:\n mask_fit = self.mask_fit\n\n table[\"mask_fit\"] = mask_fit\n table[\"mask_safe\"] = self.mask_safe\n table.write(make_path(filename), overwrite=overwrite, **kwargs)\n\n @classmethod\n def from_dict(cls, data, **kwargs):\n \"\"\"Create flux point dataset from dict.\n\n Parameters\n ----------\n data : dict\n Dict containing data to create dataset from.\n\n Returns\n -------\n dataset : `FluxPointsDataset`\n Flux point datasets.\n \"\"\"\n from gammapy.estimators import FluxPoints\n\n filename = make_path(data[\"filename\"])\n table = Table.read(filename)\n mask_fit = table[\"mask_fit\"].data.astype(\"bool\")\n mask_safe = table[\"mask_safe\"].data.astype(\"bool\")\n table.remove_columns([\"mask_fit\", \"mask_safe\"])\n return cls(\n name=data[\"name\"],\n data=FluxPoints.from_table(table, format=\"gadf-sed\"),\n mask_fit=mask_fit,\n mask_safe=mask_safe,\n )\n\n def __str__(self):\n str_ = 
f\"{self.__class__.__name__}\\n\"\n str_ += \"-\" * len(self.__class__.__name__) + \"\\n\"\n str_ += \"\\n\"\n\n str_ += \"\\t{:32}: {} \\n\\n\".format(\"Name\", self.name)\n\n # data section\n n_bins = 0\n if self.data is not None:\n n_bins = self.data.energy_axis.nbin\n str_ += \"\\t{:32}: {} \\n\".format(\"Number of total flux points\", n_bins)\n\n n_fit_bins = 0\n if self.mask is not None:\n n_fit_bins = np.sum(self.mask.data)\n str_ += \"\\t{:32}: {} \\n\\n\".format(\"Number of fit bins\", n_fit_bins)\n\n # likelihood section\n str_ += \"\\t{:32}: {}\\n\".format(\"Fit statistic type\", self.stat_type)\n\n stat = np.nan\n if self.data is not None and self.models is not None:\n stat = self.stat_sum()\n str_ += \"\\t{:32}: {:.2f}\\n\\n\".format(\"Fit statistic value (-2 log(L))\", stat)\n\n # model section\n n_models = 0\n if self.models is not None:\n n_models = len(self.models)\n\n str_ += \"\\t{:32}: {} \\n\".format(\"Number of models\", n_models)\n\n str_ += \"\\t{:32}: {}\\n\".format(\n \"Number of parameters\", len(self.models.parameters)\n )\n str_ += \"\\t{:32}: {}\\n\\n\".format(\n \"Number of free parameters\", len(self.models.parameters.free_parameters)\n )\n\n if self.models is not None:\n str_ += \"\\t\" + \"\\n\\t\".join(str(self.models).split(\"\\n\")[2:])\n\n return str_.expandtabs(tabsize=2)\n\n def data_shape(self):\n \"\"\"Shape of the flux points data (tuple).\"\"\"\n return self.data.energy_ref.shape\n\n def flux_pred(self):\n \"\"\"Compute predicted flux.\"\"\"\n flux = 0.0\n for model in self.models:\n flux += model.spectral_model(self.data.energy_ref)\n return flux\n\n def stat_array(self):\n \"\"\"Fit statistic array.\"\"\"\n model = self.flux_pred()\n data = self.data.dnde.quantity[:, 0, 0]\n try:\n sigma = self.data.dnde_err\n except AttributeError:\n sigma = (self.data.dnde_errn + self.data.dnde_errp) / 2\n return ((data - model) / sigma.quantity[:, 0, 0]).to_value(\"\") ** 2\n\n def residuals(self, method=\"diff\"):\n \"\"\"Compute 
the flux point residuals ().\n\n Parameters\n ----------\n method: {\"diff\", \"diff/model\"}\n Method used to compute the residuals. Available options are:\n - `diff` (default): data - model\n - `diff/model`: (data - model) / model\n\n Returns\n -------\n residuals : `~numpy.ndarray`\n Residuals array.\n \"\"\"\n fp = self.data\n\n model = self.flux_pred()\n\n residuals = self._compute_residuals(fp.dnde.quantity[:, 0, 0], model, method)\n # Remove residuals for upper_limits\n residuals[fp.is_ul.data[:, 0, 0]] = np.nan\n return residuals\n\n def plot_fit(\n self,\n ax_spectrum=None,\n ax_residuals=None,\n kwargs_spectrum=None,\n kwargs_residuals=None,\n ):\n \"\"\"Plot flux points, best fit model and residuals in two panels.\n\n Calls `~FluxPointsDataset.plot_spectrum` and `~FluxPointsDataset.plot_residuals`.\n\n Parameters\n ----------\n ax_spectrum : `~matplotlib.axes.Axes`\n Axes to plot flux points and best fit model on.\n ax_residuals : `~matplotlib.axes.Axes`\n Axes to plot residuals on.\n kwargs_spectrum : dict\n Keyword arguments passed to `~FluxPointsDataset.plot_spectrum`.\n kwargs_residuals : dict\n Keyword arguments passed to `~FluxPointsDataset.plot_residuals`.\n\n Returns\n -------\n ax_spectrum, ax_residuals : `~matplotlib.axes.Axes`\n Flux points, best fit model and residuals plots.\n \"\"\"\n import matplotlib.pyplot as plt\n from matplotlib.gridspec import GridSpec\n\n fig = plt.figure(figsize=(9, 7))\n\n gs = GridSpec(7, 1)\n if ax_spectrum is None:\n ax_spectrum = fig.add_subplot(gs[:5, :])\n\n if ax_residuals is None:\n ax_residuals = fig.add_subplot(gs[5:, :], sharex=ax_spectrum)\n\n kwargs_spectrum = kwargs_spectrum or {}\n kwargs_residuals = kwargs_residuals or {}\n kwargs_residuals.setdefault(\"method\", \"diff/model\")\n\n self.plot_spectrum(ax=ax_spectrum, **kwargs_spectrum)\n self.plot_residuals(ax=ax_residuals, **kwargs_residuals)\n return ax_spectrum, ax_residuals\n\n @property\n def _energy_bounds(self):\n try:\n return 
u.Quantity([self.data.energy_min.min(), self.data.energy_max.max()])\n except KeyError:\n return u.Quantity([self.data.energy_ref.min(), self.data.energy_ref.max()])\n\n @property\n def _energy_unit(self):\n return self.data.energy_ref.unit\n\n def plot_residuals(self, ax=None, method=\"diff\", **kwargs):\n \"\"\"Plot flux point residuals.\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`\n Axes to plot on.\n method : {\"diff\", \"diff/model\"}\n Normalization used to compute the residuals, see `FluxPointsDataset.residuals`.\n **kwargs : dict\n Keyword arguments passed to `~matplotlib.axes.Axes.errorbar`.\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`\n Axes object.\n \"\"\"\n import matplotlib.pyplot as plt\n\n ax = ax or plt.gca()\n\n fp = self.data\n residuals = self.residuals(method)\n\n xerr = self.data.energy_axis.as_plot_xerr\n\n yerr = fp._plot_get_flux_err(sed_type=\"dnde\")\n\n if method == \"diff/model\":\n model = self.flux_pred()\n yerr = (yerr[0].quantity[:, 0, 0] / model), (yerr[1].quantity[:, 0, 0] / model)\n elif method == \"diff\":\n yerr = yerr[0].quantity[:, 0, 0], yerr[1].quantity[:, 0, 0]\n else:\n raise ValueError('Invalid method, choose between \"diff\" and \"diff/model\"')\n\n kwargs.setdefault(\"color\", kwargs.pop(\"c\", \"black\"))\n kwargs.setdefault(\"marker\", \"+\")\n kwargs.setdefault(\"linestyle\", kwargs.pop(\"ls\", \"none\"))\n\n with quantity_support():\n ax.errorbar(\n fp.energy_ref, residuals, xerr=xerr, yerr=yerr, **kwargs\n )\n\n ax.axhline(0, color=kwargs[\"color\"], lw=0.5)\n\n # format axes\n ax.set_xlabel(f\"Energy [{self._energy_unit}]\")\n ax.set_xscale(\"log\")\n label = self._residuals_labels[method]\n ax.set_ylabel(f\"Residuals\\n {label}\")\n ymin = np.nanmin(residuals - yerr[0])\n ymax = np.nanmax(residuals + yerr[1])\n ymax = max(abs(ymin), ymax)\n ax.set_ylim(-1.05 * ymax, 1.05 * ymax)\n return ax\n\n def plot_spectrum(self, ax=None, kwargs_fp=None, kwargs_model=None):\n \"\"\"Plot spectrum 
including flux points and model.\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`\n Axes to plot on.\n kwargs_fp : dict\n Keyword arguments passed to `gammapy.estimators.FluxPoints.plot`.\n kwargs_model : dict\n Keyword arguments passed to `gammapy.modeling.models.SpectralModel.plot` and\n `gammapy.modeling.models.SpectralModel.plot_error`.\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`\n Axes object.\n \"\"\"\n kwargs_fp = (kwargs_fp or {}).copy()\n kwargs_model = (kwargs_model or {}).copy()\n\n # plot flux points\n kwargs_fp.setdefault(\"label\", \"Flux points\")\n kwargs_fp.setdefault(\"sed_type\", \"e2dnde\")\n ax = self.data.plot(ax, **kwargs_fp)\n\n kwargs_model.setdefault(\"energy_bounds\", self._energy_bounds)\n kwargs_model.setdefault(\"label\", \"Best fit model\")\n kwargs_model.setdefault(\"sed_type\", \"e2dnde\")\n kwargs_model.setdefault(\"zorder\", 10)\n\n for model in self.models:\n if model.datasets_names is None or self.name in model.datasets_names:\n model.spectral_model.plot(ax=ax, **kwargs_model)\n\n kwargs_model[\"color\"] = ax.lines[-1].get_color()\n kwargs_model.pop(\"label\")\n\n for model in self.models:\n if model.datasets_names is None or self.name in model.datasets_names:\n model.spectral_model.plot_error(ax=ax, **kwargs_model)\n\n return ax\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nimport numpy as np\nfrom astropy.table import Table\nfrom astropy.utils import lazyproperty\nfrom gammapy.utils.fits import HDULocation\nfrom gammapy.utils.scripts import make_path\n\n__all__ = [\"HDUIndexTable\"]\n\nlog = logging.getLogger(__name__)\n\n\nclass HDUIndexTable(Table):\n \"\"\"HDU index table.\n\n See :ref:`gadf:hdu-index`.\n \"\"\"\n\n VALID_HDU_TYPE = [\"events\", \"gti\", \"aeff\", \"edisp\", \"psf\", \"bkg\", \"rad_max\"]\n \"\"\"Valid values for `HDU_TYPE`.\"\"\"\n\n VALID_HDU_CLASS = [\n \"events\",\n \"gti\",\n \"aeff_2d\",\n \"edisp_2d\",\n \"psf_table\",\n \"psf_3gauss\",\n \"psf_king\",\n \"bkg_2d\",\n \"bkg_3d\",\n \"rad_max_2d\"\n ]\n \"\"\"Valid values for `HDU_CLASS`.\"\"\"\n\n @classmethod\n def read(cls, filename, **kwargs):\n \"\"\"Read :ref:`gadf:hdu-index`.\n\n Parameters\n ----------\n filename : `pathlib.Path`, str\n Filename\n \"\"\"\n filename = make_path(filename)\n table = super().read(filename, **kwargs)\n table.meta[\"BASE_DIR\"] = filename.parent.as_posix()\n\n return table\n\n @property\n def base_dir(self):\n \"\"\"Base directory.\"\"\"\n return make_path(self.meta.get(\"BASE_DIR\", \"\"))\n\n def hdu_location(self, obs_id, hdu_type=None, hdu_class=None):\n \"\"\"Create `HDULocation` for a given selection.\n\n Parameters\n ----------\n obs_id : int\n Observation ID\n hdu_type : str\n HDU type (see `~gammapy.data.HDUIndexTable.VALID_HDU_TYPE`)\n hdu_class : str\n HDU class (see `~gammapy.data.HDUIndexTable.VALID_HDU_CLASS`)\n\n Returns\n -------\n location : `~gammapy.data.HDULocation`\n HDU location\n \"\"\"\n self._validate_selection(obs_id=obs_id, hdu_type=hdu_type, hdu_class=hdu_class)\n\n idx = self.row_idx(obs_id=obs_id, hdu_type=hdu_type, hdu_class=hdu_class)\n\n if len(idx) == 1:\n idx = idx[0]\n elif len(idx) == 0:\n log.warning(\n f\"No HDU found matching: OBS_ID = {obs_id}, HDU_TYPE = {hdu_type}, HDU_CLASS = {hdu_class}\"\n )\n 
return None\n else:\n idx = idx[0]\n log.warning(\n f\"Found multiple HDU matching: OBS_ID = {obs_id}, HDU_TYPE = {hdu_type}, HDU_CLASS = {hdu_class}.\"\n f\" Returning the first entry, which has \"\n f\"HDU_TYPE = {self[idx]['HDU_TYPE']} and HDU_CLASS = {self[idx]['HDU_CLASS']}\"\n )\n\n return self.location_info(idx)\n\n def _validate_selection(self, obs_id, hdu_type, hdu_class):\n \"\"\"Validate HDU selection.\n\n The goal is to give helpful error messages to the user.\n \"\"\"\n if hdu_type is None and hdu_class is None:\n raise ValueError(\"You have to specify `hdu_type` or `hdu_class`.\")\n\n if hdu_type and hdu_type not in self.VALID_HDU_TYPE:\n valid = [str(_) for _ in self.VALID_HDU_TYPE]\n raise ValueError(f\"Invalid hdu_type: {hdu_type}. Valid values are: {valid}\")\n\n if hdu_class and hdu_class not in self.VALID_HDU_CLASS:\n valid = [str(_) for _ in self.VALID_HDU_CLASS]\n raise ValueError(\n f\"Invalid hdu_class: {hdu_class}. Valid values are: {valid}\"\n )\n\n if obs_id not in self[\"OBS_ID\"]:\n raise IndexError(f\"No entry available with OBS_ID = {obs_id}\")\n\n def row_idx(self, obs_id, hdu_type=None, hdu_class=None):\n \"\"\"Table row indices for a given selection.\n\n Parameters\n ----------\n obs_id : int\n Observation ID\n hdu_type : str\n HDU type (see `~gammapy.data.HDUIndexTable.VALID_HDU_TYPE`)\n hdu_class : str\n HDU class (see `~gammapy.data.HDUIndexTable.VALID_HDU_CLASS`)\n\n Returns\n -------\n idx : list of int\n List of row indices matching the selection.\n \"\"\"\n selection = self[\"OBS_ID\"] == obs_id\n\n if hdu_class:\n is_hdu_class = self._hdu_class_stripped == hdu_class\n selection &= is_hdu_class\n\n if hdu_type:\n is_hdu_type = self._hdu_type_stripped == hdu_type\n selection &= is_hdu_type\n\n idx = np.where(selection)[0]\n return list(idx)\n\n def location_info(self, idx):\n \"\"\"Create `HDULocation` for a given row index.\"\"\"\n row = self[idx]\n return HDULocation(\n hdu_class=row[\"HDU_CLASS\"].strip(),\n 
base_dir=self.base_dir.as_posix(),\n file_dir=row[\"FILE_DIR\"].strip(),\n file_name=row[\"FILE_NAME\"].strip(),\n hdu_name=row[\"HDU_NAME\"].strip(),\n )\n\n @lazyproperty\n def _hdu_class_stripped(self):\n return np.array([_.strip() for _ in self[\"HDU_CLASS\"]])\n\n @lazyproperty\n def _hdu_type_stripped(self):\n return np.array([_.strip() for _ in self[\"HDU_TYPE\"]])\n\n @lazyproperty\n def obs_id_unique(self):\n \"\"\"Observation IDs (unique).\"\"\"\n return np.unique(np.sort(self[\"OBS_ID\"]))\n\n @lazyproperty\n def hdu_type_unique(self):\n \"\"\"HDU types (unique).\"\"\"\n return list(np.unique(np.sort([_.strip() for _ in self[\"HDU_TYPE\"]])))\n\n @lazyproperty\n def hdu_class_unique(self):\n \"\"\"HDU classes (unique).\"\"\"\n return list(np.unique(np.sort([_.strip() for _ in self[\"HDU_CLASS\"]])))\n\n def summary(self):\n \"\"\"Summary report (str).\"\"\"\n obs_id = self.obs_id_unique\n return (\n \"HDU index table:\\n\"\n f\"BASE_DIR: {self.base_dir}\\n\"\n f\"Rows: {len(self)}\\n\"\n f\"OBS_ID: {obs_id[0]} -- {obs_id[-1]}\\n\"\n f\"HDU_TYPE: {self.hdu_type_unique}\\n\"\n f\"HDU_CLASS: {self.hdu_class_unique}\\n\"\n )\n"
] | [
[
"numpy.nanmax",
"matplotlib.pyplot.gca",
"numpy.nanmin",
"matplotlib.gridspec.GridSpec",
"numpy.sum",
"matplotlib.pyplot.figure"
],
[
"numpy.where",
"numpy.sort"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NeolithEra/mbin | [
"27e9aea4ed67a48cc2d993a0fbd142a651e9ee7b"
] | [
"mbin/controls.py"
] | [
"import os,sys\nimport optparse\nimport logging\nfrom pbcore.io.align.CmpH5IO import CmpH5Reader\nfrom pbcore.io import openIndexedAlignmentFile\nfrom pbcore.io.BasH5IO import BasH5Reader\nimport glob\nimport numpy as np\nimport logging\nimport shutil\nimport pickle\nimport math\nimport mbin\nimport motif_tools\n\ndef launch():\n\topts,control_aln_fn = __parseArgs()\n\t__initLog(opts)\n\n\textract_controls(opts, control_aln_fn)\n\n\tprint >> sys.stderr, \"mBin control extraction has finished running. See log for details.\"\n\ndef extract_controls(opts, control_aln_fn):\n\t\"\"\"\n\n\t\"\"\"\n\tcontrols = ControlRunner(control_aln_fn, opts)\n\tmbinRunner = mbin.mbinRunner(opts)\n\n\t# Pulling the IPD data for each motif from the WGA cmp.h5 file\n\tmotifs,bi_motifs = motif_tools.build_motif_sets(opts)\n\topts.motifs = motifs\n\topts.bi_motifs = bi_motifs\n\n\tlogging.info(\"\")\n\tlogging.info(\"Preparing to create new control data in %s\" % opts.control_tmp)\n\tcontrols.goto_control_output_dir()\n\t\n\topts = controls.scan_WGA_aligns()\n\tfilter_N_reads = opts.N_reads\n\n\tmbinRunner.launch_data_loader( control_aln_fn, filter_N_reads, 1, opts )\n\t\n\tcontrols.analyze_WGA_reads()\n\tlogging.info(\"Done.\")\n\tlogging.info(\"\")\n\n\t# Building dictionary of mean control IPD values for each motif\n\tlogging.info(\"Building dictionary of control values for all motifs...\")\n\tlogging.info(\" * Initial build requires significant time and memory.\")\n\tcontrols.combine_control_data_from_contigs()\n\n\t\n\tcontrol_means = controls.build_control_IPD_dict(motifs, bi_motifs)\n\tcontrols.return_to_orig_dir()\n\n\tlogging.info(\"\")\n\tlogging.info(\"Cleaning up temp files from control data processing...\")\n\tshutil.rmtree(opts.control_tmp)\n\n\t# Controls are loaded into control_means, now pickle them for easy\n\t# passing between parallel processes\n\tpickle.dump(control_means, open(opts.control_pkl_name, \"wb\"))\n\ndef chunks( l, n ):\n\t\"\"\"\n\tYield successive 
n-sized chunks from l.\n\t\"\"\"\n\tfor i in xrange(0, len(l), n):\n\t\tyield l[i:i+n]\n\ndef __parseArgs():\n\t\"\"\"Handle command line argument parsing\"\"\"\n\n\tusage = \"\"\"%prog [--help] [options] [input]\n\n\tbuildcontrols takes a set of whole-genome amplified (WGA) sequencing \n\talignments, either in the legacy *.cmp.h5 PacBio format or the newer\n\taligned BAM format. buildcontrols then reads the polymerase kinetics\n\tvalues from these alignments and constructs a pickled dictionary of\n\tcontrol IPD values for the specified set of motifs. This output is \n\twritten to ./control_ipds.pkl by default.\n\n\tExamples:\n\n\t### Get control IPD values from BAM file of aligned WGA reads ###\n\n\tbuildcontrols -i --ref=reference.fasta wga_aligned_reads.bam\n\n\t\n\t### Use 4 cores to process legacy cmp.h5 file of aligned WGA reads ###\n\n\tbuildcontrols -i --procs=4 wga_aligned_reads.cmp.h5\n\n\t\n\t### Only include data from the first 500 alignments (e.g. for testing) ###\n\n\tbuildcontrols -i --N_reads=500 --ref=reference.fasta wga_aligned_reads.bam\n\n\n\t### Exclude all bipartite motifs from the control dictionary ###\n\n\tbuildcontrols -i --no_bipartite --ref=reference.fasta wga_aligned_reads.bam\n\t\"\"\"\n\n\tparser = optparse.OptionParser( usage=usage, description=__doc__ )\n\n\tparser.add_option( \"-d\", \"--debug\", action=\"store_true\", help=\"Increase verbosity of logging\" )\n\n\tparser.add_option( \"-i\", \"--info\", action=\"store_true\", help=\"Add basic logging\" )\n\n\tparser.add_option( \"--logFile\", type=\"str\", help=\"Write logging to file [log.controls]\" )\n\n\tparser.add_option( \"--ref\", type=\"str\", help=\"Path to reference fasta file used in the alignment. Must be accompanied by an index file (use samtools faidx). Required if input is BAM file of aligned reads (not needed for cmp.h5). 
[None]\" )\n\t\n\tparser.add_option( \"--subreadlength_min\", type=\"int\", help=\"Minimum subread length to include for analysis [100]\" )\n\n\tparser.add_option( \"--readlength_min\", type=\"int\", help=\"Minimum read length to include for analysis [100]\" )\n\n\tparser.add_option( \"--min_kmer\", type=\"int\", help=\"Minimum motif size to scan (contiguous motifs) [4]\" )\n\n\tparser.add_option( \"--max_kmer\", type=\"int\", help=\"Maximum motif size to scan (contiguous motifs) [6]\" )\n\n\tparser.add_option( \"--no_bipartite\", action=\"store_true\", help=\"Omit bipartite motifs [False]\" )\n\t\n\tparser.add_option( \"--bipart_first\", type=\"str\", help=\"Bipartite motif configuration: acceptable length of first determinate component (comma-separated string of integers) [3,4]\" )\n\t\n\tparser.add_option( \"--bipart_Ns\", type=\"str\", help=\"Bipartite motif configuration: acceptable length of middle indeterminate component (comma-separated string of integers) [5,6]\" )\n\t\n\tparser.add_option( \"--bipart_second\", type=\"str\", help=\"Bipartite motif configuration: acceptable length of second determinate component (comma-separated string of integers) [3,4]\" )\n\n\tparser.add_option( \"--mod_bases\", type=\"str\", help=\"String containing bases to query for mods. 
Changing this is not recommended ['A']\" )\n\n\tparser.add_option( \"--minAcc\", type=\"float\", help=\"Min subread accuracy of read [0.8]\" )\n\n\tparser.add_option( \"--minMapQV\", type=\"float\", help=\"Min mapping QV of aligned read [240]\" )\n\n\tparser.add_option( \"--procs\", type=\"int\", help=\"Number of cores to use [4]\" )\n\n\tparser.add_option( \"--N_reads\", type=\"int\", help=\"Number of qualifying reads to include in analysis [1000000000]\" )\n\t\n\tparser.add_option( \"--min_motif_count\", type=\"int\", help=\"Number of motif sites required in WGA data to be included in controls dictionary [10]\" )\n\n\tparser.add_option( \"--control_pkl_name\", type=\"str\", help=\"Filename to save control IPD data from WGA sequencing [control_ipds.pkl]\" )\n\t\n\tparser.set_defaults( logFile=\"log.buildcontrols\", \\\n\t\t\t\t\t\t info=False, \\\n\t\t\t\t\t\t debug=False, \\\n\t\t\t\t\t\t ref=None, \\\n\t\t\t\t\t\t subreadlength_min=100, \\\n\t\t\t\t\t\t readlength_min=100, \\\n\t\t\t\t\t\t min_kmer=4, \\\n\t\t\t\t\t\t max_kmer=6, \\\n\t\t\t\t\t\t no_bipartite=False, \\\n\t\t\t\t\t\t bipart_first=\"3,4\", \\\n\t\t\t\t\t\t bipart_Ns=\"5,6\", \\\n\t\t\t\t\t\t bipart_second=\"3,4\", \\\n\t\t\t\t\t\t mod_bases=\"A\", \\\n\t\t\t\t\t\t minAcc=0.8, \\\n\t\t\t\t\t\t minMapQV=240, \\\n\t\t\t\t\t\t procs=4, \\\n\t\t\t\t\t\t N_reads=1000000000, \\\n\t\t\t\t\t\t min_motif_count=10, \\\n\t\t\t\t\t\t control_pkl_name=\"control_ipds.pkl\")\n\n\topts, args = parser.parse_args( )\n\tcontrol_aln_fn = __check_input( opts, args, parser )\n\n\tif opts.no_bipartite:\n\t\topts.bipartite = False\n\telse:\n\t\topts.bipartite = True\n\n\t############################################\n\t# Define the types of bipartite motifs to \n\t# include in the analysis. This describes the\n\t# acceptable sizes of the three components of\n\t# bipartite motifs. 
For example, the motif \n\t# ACCT/NNNNN/CTT (first/Ns/last) would be \n\t# described by 4/5/3.\n\t\n\tfirst = map(lambda x: int(x), opts.bipart_first.split(\",\"))\n\tmiddle = map(lambda x: int(x), opts.bipart_Ns.split(\",\"))\n\tsecond = map(lambda x: int(x), opts.bipart_second.split(\",\"))\n\topts.bipart_config = [(first), (middle), (second)]\n\t\n\t# As set, acceptible bipartite motifs would have\n\t# the following component lengths.\n\t# First: 3 or 4 ACGT bases\n\t# Ns: 5 or 6 unspecified N bases\n\t# Last: 3 or 4 ACGT bases\n\t############################################\n\n\topts.control_tmp = \"ctrl_tmp\"\n\topts.tmp = \"tmp\"\n\topts.minContigLength = 0\n\topts.comp_kmer = 5\n\t# opts.h5_type = \"cmp\"\n\topts.cross_cov_bins = None\n\topts.sam = None\n\topts.motifs_file = None\n\topts.skip_motifs = None\n\t\n\topts.control_pkl_name = os.path.abspath(opts.control_pkl_name)\n\tif opts.ref!=None:\n\t\topts.ref = os.path.abspath(opts.ref)\n\n\treturn opts,control_aln_fn\n\ndef __initLog( opts ):\n\t\"\"\"Sets up logging based on command line arguments. 
Allows for three levels of logging:\n\tlogging.error( ): always emitted\n\tlogging.info( ) : emitted with --info or --debug\n\tlogging.debug( ): only with --debug\"\"\"\n\n\tif os.path.exists(opts.logFile):\n\t\tos.remove(opts.logFile)\n\n\tlogLevel = logging.DEBUG if opts.debug \\\n\t\t\t\telse logging.INFO if opts.info \\\n\t\t\t\telse logging.ERROR\n\n\tlogger = logging.getLogger(\"\")\n\tlogger.setLevel(logLevel)\n\t\n\t# create file handler which logs even debug messages\n\tfh = logging.FileHandler(opts.logFile)\n\tfh.setLevel(logLevel)\n\t\n\t# create console handler with a higher log level\n\tch = logging.StreamHandler()\n\tch.setLevel(logLevel)\n\t\n\t# create formatter and add it to the handlers\n\tlogFormat = \"%(asctime)s [%(levelname)s] %(message)s\"\n\tformatter = logging.Formatter(logFormat, \"%Y-%m-%d %H:%M:%S\")\n\tch.setFormatter(formatter)\n\tfh.setFormatter(formatter)\n\t\n\t# add the handlers to logger\n\tlogger.addHandler(ch)\n\tlogger.addHandler(fh)\n\ndef __check_input( opts, args, parser ):\n\tcontrol_aln_fn = os.path.abspath(args[0])\n\n\tif control_aln_fn[-7:]==\".cmp.h5\":\n\t\topts.aln_ftype = \"cmp\"\n\t\topts.aligned = True\n\telif control_aln_fn[-4:]==\".bam\":\n\t\topts.aln_ftype = \"bam\"\n\t\topts.aligned = True\n\telse:\n\t\tparser.error(\"Could not recognize valid input (BAM or cmp.h5 file of aligned reads): %s\" % control_aln_fn)\n\n\tif opts.aln_ftype==\"bam\" and opts.ref==None:\n\t\tparser.error(\"With BAM input, must specify reference fasta using --ref. 
Fasta must be indexed (use samtools faidx).\")\n\n\t# if opts.contigs==None:\n\t# \tparser.error(\"Please specify the fasta file used for the alignments in %s!\" % control_aln_fn)\n\n\tif len(args) != 1:\n\t\tparser.error( \"Expected 1 argument.\" )\n\n\treturn control_aln_fn\n\ndef process_contig_chunk( args ):\n\tchunk_id = args[0]\n\tcut_CMDs = args[1]\n\tkmers = args[2]\n\tcols_chunk = args[3]\n\tn_chunks = args[4]\n\tmin_motif_count = args[5]\n\tlogging.info(\" - Control data: chunk %s/%s\" % ((chunk_id+1), (n_chunks+1)))\n\tcontrol_means = {}\n\t\n\tfor cut_CMD in cut_CMDs:\n\t\tsts,stdOutErr = mbin.run_OS_command( cut_CMD )\n\t\n\tfns = map(lambda x: x.split(\"> \")[-1], cut_CMDs)\n\tcontrol_ipds_sub = np.loadtxt(fns[0], dtype=\"float\")\n\tcontrol_ipds_N_sub = np.loadtxt(fns[1], dtype=\"int\")\n\t# If there is only one row (read) for this contig, still treat as\n\t# a 2d matrix of many reads\n\tcontrol_ipds_sub = np.atleast_2d(control_ipds_sub)\n\tcontrol_ipds_N_sub = np.atleast_2d(control_ipds_N_sub)\n\t\n\tnot_found = 0\n\tfor j in range(len(cols_chunk)):\n\t\tmotif = kmers[cols_chunk[j]]\n\t\tif np.sum(control_ipds_N_sub[:,j])>=min_motif_count:\n\t\t\tif np.sum(control_ipds_N_sub[:,j])>0:\n\t\t\t\tcontrol_mean = np.dot(control_ipds_sub[:,j], control_ipds_N_sub[:,j]) / np.sum(control_ipds_N_sub[:,j])\n\t\t\telse:\n\t\t\t\tcontrol_mean = 0\n\t\t\tcontrol_means[motif] = control_mean\n\t\telse:\n\t\t\tnot_found += 1\n\n\treturn control_means,not_found\n\nclass ControlRunner:\n\tdef __init__( self, wga_h5, opts ):\n\t\t\"\"\"\n\t\tPoint to the appropriate WGA sequencing data files\n\t\tto generate the control IPD values for the motifs. 
\n\t\t\"\"\"\n\t\tself.control_aln_fn = wga_h5\n\t\tself.opts = opts\n\t\tself.orig_dir = os.getcwd()\n\n\tdef build_insilico_controls( self ):\n\t\t\"\"\"\n\t\tTo be added...\n\t\t\"\"\"\n\t\tpass\n\n\tdef goto_control_output_dir( self ):\n\t\t\"\"\"\n\t\tCreate directory where data from control reads \n\t\twill be gathered and analyzed.\n\t\t\"\"\"\n\t\t# Make control directory\n\t\tif os.path.exists(self.opts.control_tmp):\n\t\t\tshutil.rmtree(self.opts.control_tmp)\n\t\tos.mkdir(self.opts.control_tmp)\n\t\tos.chdir(self.opts.control_tmp)\n\t\t\n\t\t# Make tmp directory inside control directory\n\t\tif os.path.exists(self.opts.tmp):\n\t\t\tshutil.rmtree(self.opts.tmp)\n\t\tos.mkdir(self.opts.tmp)\n\n\tdef return_to_orig_dir( self ):\n\t\t\"\"\"\n\t\tBack out of the control directory.\n\t\t\"\"\"\n\t\tos.chdir(self.orig_dir)\n\n\tdef scan_WGA_aligns( self ):\n\t\t\"\"\"\n\t\tGet some necessary information about the WGA cmp.h5 \n\t\tbeing used to generate the control IPD data.\n\t\t\"\"\"\n\t\tself.opts.aln_fn_labels = {}\n\t\tself.opts.aln_fn_contig_lens = {}\n\t\tself.opts.aln_fn_labels[self.control_aln_fn] = \"control\"\n\t\tself.opts.aln_fn_contig_lens[self.control_aln_fn] = {}\n\t\t\n\t\t# reader = CmpH5Reader(self.control_aln_fn)\n\t\treader = openIndexedAlignmentFile(self.control_aln_fn)\n\t\tfor entry in reader.referenceInfoTable:\n\t\t\tname = entry[3]\n\t\t\tlength = entry[4]\n\t\t\tslug_name = mbin.slugify(name)\n\t\t\tself.opts.aln_fn_contig_lens[self.control_aln_fn][slug_name] = length\n\t\treader.close()\n\n\t\treturn self.opts\n\n\tdef analyze_WGA_reads( self ):\n\t\t\"\"\"\n\t\tLaunch read scanning pipeline for building up\n\t\tcontrol IPD values for motifs.\n\t\t\"\"\"\n\t\tcontrol_fns = glob.glob( os.path.join(self.opts.tmp, \"*.tmp\"))\n\t\tftypes = set( map(lambda x: \"_\".join(os.path.basename(x).split(\"_\")[2:]), control_fns) )\n\t\tfor ftype in ftypes:\n\t\t\tif ftype in [\"compkmers.tmp\", \"ipdskmers.tmp\"]:\n\t\t\t\t# first_fn = 
glob.glob( os.path.join(self.opts.tmp, \"unitig_*_%s\" % ftype) )[0]\n\t\t\t\tfirst_fn = glob.glob( os.path.join(self.opts.tmp, \"*_%s\" % ftype) )[0]\n\t\t\t\tshutil.copy( first_fn, os.path.join(self.opts.tmp, \"control_%s\" % ftype) )\n\t\t\t\t\n\t\t\t\tftype_fns = glob.glob( os.path.join(self.opts.tmp, \"*_%s\" % ftype))\n\t\t\t\tto_rm = [fn for fn in ftype_fns if not os.path.basename(fn).startswith(\"control_\")]\n\t\t\t\tfor fn in to_rm:\n\t\t\t\t\tos.remove(fn)\n\t\t\telse:\n\t\t\t\tftype_fns = glob.glob( os.path.join(self.opts.tmp, \"*_%s\" % ftype) )\n\t\t\t\tto_cat = [fn for fn in ftype_fns if not os.path.basename(fn).startswith(\"control_\")]\n\t\t\t\tto_cat.sort()\n\t\t\t\toutname = os.path.join(self.opts.tmp, \"control_%s\" % ftype )\n\t\t\t\tmbin.cat_list_of_files(to_cat, outname)\n\t\t\n\tdef chunk_control_matrices( self, control_ipds_fn, control_ipds_N_fn, control_kmers_fn ):\n\t\t\"\"\"\n\n\t\t\"\"\"\n\t\tkmers = np.atleast_1d(np.loadtxt(control_kmers_fn, dtype=\"str\"))\n\t\tfns = [control_ipds_fn, control_ipds_N_fn]\n\t\tn_chunks = 99\n\t\tchunksize = int(math.ceil(float( len(kmers)/n_chunks )))\n\t\tcols_chunks = list(chunks( range(len(kmers)), chunksize ))\n\t\targs = []\n\t\tfor i,cols_chunk in enumerate(cols_chunks):\n\t\t\tcut_CMDs = []\n\t\t\tfor fn in fns:\n\t\t\t\tcut_cols = \"%s-%s\" % ((cols_chunk[0]+1), (cols_chunk[-1]+1))\n\t\t\t\tin_fn = fn\n\t\t\t\tout_fn = fn+\".sub.%s\" % i\n\t\t\t\tcut_CMD = \"cut -d$\\'\\\\t\\' -f%s %s > %s\" % (cut_cols, in_fn, out_fn)\n\t\t\t\tcut_CMDs.append(cut_CMD)\n\t\t\targs.append( (i, cut_CMDs, kmers, cols_chunk, n_chunks, self.opts.min_motif_count) )\n\t\t\n\t\tresults = mbin.launch_pool(self.opts.procs, process_contig_chunk, args)\n\t\t\n\t\tlogging.info(\"Combining motifs from all chunks of control data...\")\n\t\tnot_found = 0\n\t\tcontrol_means = {}\n\t\tfor i,result in enumerate(results):\n\t\t\tnot_found += result[1]\n\t\t\tfor motif in result[0].keys():\n\t\t\t\tcontrol_means[motif] = 
result[0][motif]\n\t\tlogging.info(\"Done.\")\n\n\t\treturn control_means,not_found\n\n\tdef combine_control_data_from_contigs( self ):\n\t\t\"\"\"\n\t\tIf control WGA data contains multiple contigs, this \n\t\twill combine them into one file for each data type\n\t\tso that the control IPD dictionary will be generated\n\t\tusing data from all contigs.\n\t\t\"\"\"\n\t\tcontigs_fns = glob.glob( os.path.join(self.opts.tmp, \"control_*.tmp\") )\n\t\tlabels_fns = []\n\t\tstrands_fns = []\n\t\tlengths_fns = []\n\t\treadnames_fns = []\n\t\tipds_fns = []\n\t\tipds_N_fns = []\n\t\tcomp_N_fns = []\n\t\tcomp_kmers_fns = []\n\t\tipds_kmers_fns = []\n\t\tfor fn in contigs_fns:\n\t\t\tif fn.find(\"_labels.\")>-1:\n\t\t\t\tlabels_fns.append(fn)\n\t\t\telif fn.find(\"_strand.\")>-1:\n\t\t\t\tstrands_fns.append(fn)\n\t\t\telif fn.find(\"_lengths.\")>-1:\n\t\t\t\tlengths_fns.append(fn)\n\t\t\telif fn.find(\"_readnames.\")>-1:\n\t\t\t\treadnames_fns.append(fn)\n\t\t\telif fn.find(\"_ipds.\")>-1:\n\t\t\t\tipds_fns.append(fn)\n\t\t\telif fn.find(\"_ipdsN.\")>-1:\n\t\t\t\tipds_N_fns.append(fn)\n\t\t\telif fn.find(\"_compN.\")>-1:\n\t\t\t\tcomp_N_fns.append(fn)\n\t\t\telif fn.find(\"_compkmers.\")>-1:\n\t\t\t\tcomp_kmers_fns.append(fn)\n\t\t\telif fn.find(\"_ipdskmers.\")>-1:\n\t\t\t\tipds_kmers_fns.append(fn)\n\n\t\tlabels_fns.sort()\n\t\tstrands_fns.sort()\n\t\tlengths_fns.sort()\n\t\treadnames_fns.sort()\n\t\tipds_fns.sort()\n\t\tipds_N_fns.sort()\n\t\tcomp_N_fns.sort()\n\n\t\tmbin.cat_list_of_files(labels_fns, \"control_labels.tmp\")\n\t\tmbin.cat_list_of_files(strands_fns, \"control_strands.tmp\")\n\t\tmbin.cat_list_of_files(lengths_fns, \"control_lengths.tmp\")\n\t\tmbin.cat_list_of_files(readnames_fns, \"control_names.tmp\")\n\t\tmbin.cat_list_of_files(ipds_fns, \"control_ipds.tmp\")\n\t\tmbin.cat_list_of_files(ipds_N_fns, \"control_ipdsN.tmp\")\n\t\tmbin.cat_list_of_files(comp_N_fns, \"control_compN.tmp\")\n\t\t\n\t\tshutil.copy(comp_kmers_fns[0], 
\"control_compkmers.tmp\")\n\t\tshutil.copy(ipds_kmers_fns[0], \"control_ipdskmers.tmp\")\n\t\tx = [os.remove(fn) for fn in comp_kmers_fns]\n\t\tx = [os.remove(fn) for fn in ipds_kmers_fns]\n\n\tdef build_control_IPD_dict( self, motifs, bi_motifs ):\n\t\t\"\"\"\n\n\t\t\"\"\"\n\t\tcontrol_ipds_fn = glob.glob( \"control_ipds.tmp\" )\n\t\tcontrol_ipds_N_fn = glob.glob( \"control_ipdsN.tmp\")\n\t\tcontrol_kmers_fn = glob.glob( \"control_ipdskmers.tmp\")\n\n\t\tif (len(control_ipds_fn)>1 or len(control_ipds_N_fn)>1 or len(control_kmers_fn)>1):\n\t\t\traise Exception(\"*** Double check the control files. There should not be multiples for a file type.\")\n\n\t\tcontrol_means,not_found = self.chunk_control_matrices(control_ipds_fn[0], control_ipds_N_fn[0], control_kmers_fn[0])\n\n\t\tif not_found > 0:\n\t\t\tlogging.info(\"\")\n\t\t\tlogging.warning(\"WARNING: could not find sufficient instances (>=%s) for %s motifs (out of %s total) in control data!\" % (self.opts.min_motif_count, not_found, (len(motifs)+len(bi_motifs))))\n\t\t\tlogging.warning(\" * If this is alarming, try reducing --min_motif_count or increasing --N_reads, although you just might not have those motifs in your reference sequence.\")\n\t\t\n\t\tlogging.info(\"\")\n\t\tlogging.info(\"Writing control data to a pickled file: %s\" % self.opts.control_pkl_name)\n\t\tpickle.dump( control_means, open( self.opts.control_pkl_name, \"wb\" ) )\n\n\t\treturn control_means\n\nif __name__ == \"__main__\":\n\tmain()"
] | [
[
"numpy.atleast_2d",
"numpy.dot",
"numpy.sum",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kishorecbe/Tensorflow-Solutions | [
"a158766633bc20ca4289096e4a5454a9e0d04e51"
] | [
"research/MCNN/model/mcnn_model.py"
] | [
"import multiprocessing\n\nimport tensorflow as tf\nfrom tensorflow.contrib import estimator\nfrom tensorflow.contrib import lookup\nfrom model import commons\n\n__author__ = 'KKishore'\n\nhead = estimator.binary_classification_head()\n\n\ndef parse_csv_row(row):\n columns = tf.decode_csv(row, record_defaults=commons.HEADER_DEFAULTS, field_delim='\\t')\n features = dict(zip(commons.HEADERS, columns))\n target = features.pop(commons.LABEL_COL)\n return features, tf.string_to_number(target, out_type=tf.int32)\n\n\ndef input_fn(file_name, batch_size=32, shuffle=False, repeat_count=1):\n num_threads = multiprocessing.cpu_count()\n\n data_set = tf.data.TextLineDataset(filenames=file_name).skip(1)\n\n if shuffle:\n data_set = data_set.shuffle(buffer_size=1000)\n\n data_set = data_set.map(lambda row: parse_csv_row(row), num_parallel_calls=num_threads).batch(batch_size) \\\n .repeat(repeat_count).prefetch(1000)\n\n iterator = data_set.make_one_shot_iterator()\n features, target = iterator.get_next()\n return features, target\n\n\ndef model_fn(features, labels, mode, params):\n if mode == tf.estimator.ModeKeys.TRAIN:\n tf.keras.backend.set_learning_phase(True)\n else:\n tf.keras.backend.set_learning_phase(False)\n\n vocab_table = lookup.index_table_from_file(vocabulary_file='data/vocab.csv', num_oov_buckets=1, default_value=-1)\n text = features[commons.FEATURE_COL]\n words = tf.string_split(text)\n dense_words = tf.sparse_tensor_to_dense(words, default_value=commons.PAD_WORD)\n word_ids = vocab_table.lookup(dense_words)\n\n padding = tf.constant([[0, 0], [0, commons.MAX_DOCUMENT_LENGTH]])\n # Pad all the word_ids entries to the maximum document length\n word_ids_padded = tf.pad(word_ids, padding)\n word_id_vector = tf.slice(word_ids_padded, [0, 0], [-1, commons.MAX_DOCUMENT_LENGTH])\n\n f1 = tf.keras.layers.Embedding(params.N_WORDS, 100, input_length=commons.MAX_DOCUMENT_LENGTH)(word_id_vector)\n f2 = tf.keras.layers.Embedding(params.N_WORDS, 200, 
input_length=commons.MAX_DOCUMENT_LENGTH)(word_id_vector)\n f3 = tf.keras.layers.Embedding(params.N_WORDS, 300, input_length=commons.MAX_DOCUMENT_LENGTH)(word_id_vector)\n\n filter_sizes = [3, 5]\n\n conv_pools = []\n for text_embedding in [f1, f2, f3]:\n for filter_size in filter_sizes:\n l_zero = tf.keras.layers.ZeroPadding1D((filter_size - 1, filter_size - 1))(text_embedding)\n l_conv = tf.keras.layers.Conv1D(filters=32, kernel_size=filter_size, padding='same', activation='tanh')(l_zero)\n l_pool = tf.keras.layers.GlobalMaxPool1D()(l_conv)\n conv_pools.append(l_pool)\n merged = tf.keras.layers.Concatenate(axis=1)(conv_pools)\n dense1 = tf.keras.layers.Dense(128, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.01))(merged)\n dense2 = tf.keras.layers.Dense(64, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.01))(dense1)\n\n logits = tf.keras.layers.Dense(1, activation=None)(dense2)\n\n if labels is not None:\n labels = tf.reshape(labels, [-1, 1])\n\n optimizer = tf.train.AdamOptimizer()\n\n def _train_op_fn(loss):\n return optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())\n\n return head.create_estimator_spec(features=features, labels=labels, mode=mode, logits=logits,\n train_op_fn=_train_op_fn)\n\n\ndef serving_fn():\n receiver_tensor = {\n commons.FEATURE_COL: tf.placeholder(dtype=tf.string, shape=None)\n }\n\n features = {\n key: tensor\n for key, tensor in receiver_tensor.items()\n }\n\n return tf.estimator.export.ServingInputReceiver(features, receiver_tensor)\n"
] | [
[
"tensorflow.pad",
"tensorflow.train.AdamOptimizer",
"tensorflow.data.TextLineDataset",
"tensorflow.string_split",
"tensorflow.contrib.lookup.index_table_from_file",
"tensorflow.string_to_number",
"tensorflow.keras.layers.Concatenate",
"tensorflow.decode_csv",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.regularizers.l2",
"tensorflow.train.get_global_step",
"tensorflow.estimator.export.ServingInputReceiver",
"tensorflow.keras.backend.set_learning_phase",
"tensorflow.keras.layers.Dense",
"tensorflow.sparse_tensor_to_dense",
"tensorflow.placeholder",
"tensorflow.contrib.estimator.binary_classification_head",
"tensorflow.keras.layers.ZeroPadding1D",
"tensorflow.constant",
"tensorflow.slice",
"tensorflow.keras.layers.Conv1D",
"tensorflow.reshape",
"tensorflow.keras.layers.GlobalMaxPool1D"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
serig/Jupyter-notebook-with-Flask | [
"f6cc37517e10fcd5de5229fe5264192c578faf5f"
] | [
"app.py"
] | [
"from flask import Flask, render_template\r\n# import os\r\n\r\napp = Flask(__name__)\r\n\r\n\r\[email protected]('/plot/')\r\ndef plot():\r\n from IPython.core.display import display, HTML\r\n from string import Template\r\n import pandas as pd\r\n import json\r\n\r\n # d3js = HTML('<script src=\"d3_jupyter/lib/d3/d3.min.js\"></script>')\r\n\r\n worldmap_data = json.loads(open('data/worldmap.json', 'r').read())\r\n sites_data_stations = pd.read_csv('data/stations.csv')\r\n sites_data_temps = pd.read_csv('data/monthly_temps.csv')\r\n sites_data_temps = sites_data_temps.sort_values(by='ID')\r\n\r\n temps_by_ID = []\r\n previous_ID = -1\r\n collected_temps = {}\r\n for i,row in sites_data_temps.iterrows():\r\n if (row['ID'] != previous_ID) and (previous_ID != -1):\r\n temps_by_ID.append(collected_temps)\r\n collected_temps = {}\r\n collected_temps[row['month']] = {'ave': row['ave'], \r\n 'max': row['max'], \r\n 'min': row['min']}\r\n previous_ID = row['ID']\r\n temps_by_ID.append(collected_temps)\r\n site_data_temps_2 = pd.DataFrame({'ID': sites_data_temps['ID'].unique(), \r\n 'temps': temps_by_ID})\r\n # site_data_temps_2.head()\r\n \r\n sites_data = pd.merge(sites_data_stations, site_data_temps_2, on='ID')\r\n sites_data_dict = sites_data.to_dict(orient='records')\r\n\r\n # html_template = Template('''\r\n # <style> $css_text </style>\r\n # <div><svg width=\"700\" height=\"500px\" id=\"graph-svg\"></svg></div>\r\n # <script> $js_text </script>\r\n # ''')\r\n\r\n css_text = open('static/temperature_histories.css','r').read()\r\n js_text_template = Template(open('static/temperature_histories.js','r').read())\r\n js_text = js_text_template.safe_substitute({'worldmapdata': json.dumps(worldmap_data), \r\n 'sitesdata': json.dumps(sites_data_dict) })\r\n # display(HTML(html_template.substitute({'css_text': css_text, 'js_text': js_text})))\r\n return render_template(\"plot.html\", \r\n css_text=css_text,\r\n js_text=js_text)\r\n\r\nif __name__ == '__main__':\r\n 
app.run(debug=True)\r\n"
] | [
[
"pandas.merge",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
dblyon/cmapPy | [
"abd4349f28af6d035f69fe8c399fde7bef8dd635",
"d310d092dbf0a0596448c9bd1f75ffff0bb92f09"
] | [
"cmapPy/pandasGEXpress/tests/python2_tests/test_parse_gctx.py",
"cmapPy/pandasGEXpress/GCToo.py"
] | [
"import logging\nimport unittest\nimport os\nimport pandas as pd\nimport numpy as np\nimport h5py\n\nimport pandas.util.testing as pandas_testing\nimport cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger\nimport cmapPy.pandasGEXpress.GCToo as GCToo\nimport cmapPy.pandasGEXpress.parse_gctx as parse_gctx\nimport cmapPy.pandasGEXpress.mini_gctoo_for_testing as mini_gctoo_for_testing\nimport cmapPy.pandasGEXpress.subset_gctoo as subset_gctoo\nimport cmapPy.pandasGEXpress.write_gctx as write_gctx\n\n\n__author__ = \"Oana Enache\"\n__email__ = \"[email protected]\"\n\nFUNCTIONAL_TESTS_PATH = \"cmapPy/pandasGEXpress/tests/functional_tests/\"\n\nlogger = logging.getLogger(setup_logger.LOGGER_NAME)\n\nversion_node = \"version\"\nrid_node = \"/0/META/ROW/id\"\ncid_node = \"/0/META/COL/id\"\ndata_node = \"/0/DATA/0/matrix\"\nrow_meta_group_node = \"/0/META/ROW\"\ncol_meta_group_node = \"/0/META/COL\"\n\n\nclass MockHdf5Dset(object):\n def __init__(self, data_list, dtype):\n self.data_list = data_list\n self.shape = (len(data_list),)\n self.dtype = dtype\n\n def read_direct(self, dest):\n for i in range(len(dest)):\n dest[i] = self.data_list[i]\n\n\nclass TestParseGctx(unittest.TestCase):\n def test_parse(self):\n # parse whole thing\n mg1 = mini_gctoo_for_testing.make()\n mg2 = parse_gctx.parse(\"cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx\")\n\n pandas_testing.assert_frame_equal(mg1.data_df, mg2.data_df)\n pandas_testing.assert_frame_equal(mg1.row_metadata_df, mg2.row_metadata_df)\n pandas_testing.assert_frame_equal(mg1.col_metadata_df, mg2.col_metadata_df)\n\n # test with string rid/cid\n test_rids = ['LJP007_MCF10A_24H:TRT_CP:BRD-K93918653:3.33', 'LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666']\n test_cids = ['LJP007_MCF7_24H:TRT_POSCON:BRD-A61304759:10']\n mg3 = subset_gctoo.subset_gctoo(mg1, rid=test_rids, cid=test_cids)\n mg4 = parse_gctx.parse(\"cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx\",\n rid=test_rids, 
cid=test_cids)\n pandas_testing.assert_frame_equal(mg3.data_df, mg4.data_df)\n pandas_testing.assert_frame_equal(mg3.row_metadata_df, mg4.row_metadata_df)\n pandas_testing.assert_frame_equal(mg3.col_metadata_df, mg4.col_metadata_df)\n\n # first, make & write out temp version of mini_gctoo with int rids/cids\n new_mg = mini_gctoo_for_testing.make(convert_neg_666=False)\n int_indexed_data_df = new_mg.data_df.copy()\n int_indexed_data_df.index = [str(i) for i in range(0, 6)]\n int_indexed_data_df.columns = [str(i) for i in range(10, 16)]\n\n int_indexed_row_meta = new_mg.row_metadata_df.copy()\n int_indexed_row_meta.index = int_indexed_data_df.index\n\n int_indexed_col_meta = new_mg.col_metadata_df.copy()\n int_indexed_col_meta.index = int_indexed_data_df.columns\n\n int_indexed_gctoo = GCToo.GCToo(data_df=int_indexed_data_df, row_metadata_df=int_indexed_row_meta,\n col_metadata_df=int_indexed_col_meta)\n\n write_gctx.write(int_indexed_gctoo, \"int_indexed_mini_gctoo.gctx\")\n\n # test with numeric (repr as string) rid/cid\n mg5 = GCToo.GCToo(data_df=int_indexed_data_df, row_metadata_df=int_indexed_row_meta,\n col_metadata_df=int_indexed_col_meta)\n mg5 = subset_gctoo.subset_gctoo(mg5, row_bool=[True, False, True, False, True, False],\n col_bool=[True, False, False, True, True, True])\n\n mg5.data_df.index.name = \"rid\"\n mg5.data_df.columns.name = \"cid\"\n\n mg5.row_metadata_df.index.name = \"rid\"\n mg5.row_metadata_df.columns.name = \"rhd\"\n\n mg5.col_metadata_df.index.name = \"cid\"\n mg5.col_metadata_df.columns.name = \"chd\"\n\n mg6 = parse_gctx.parse(\"int_indexed_mini_gctoo.gctx\", rid=[\"0\", \"2\", \"4\"],\n cid=[\"10\", \"13\", \"14\", \"15\"], convert_neg_666=False)\n\n os.remove(\"int_indexed_mini_gctoo.gctx\")\n\n pandas_testing.assert_frame_equal(mg5.data_df, mg6.data_df)\n pandas_testing.assert_frame_equal(mg5.row_metadata_df, mg6.row_metadata_df)\n pandas_testing.assert_frame_equal(mg5.col_metadata_df, mg6.col_metadata_df)\n\n # test with 
ridx/cidx\n mg7 = subset_gctoo.subset_gctoo(mg1, rid=['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'],\n cid=['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'])\n mg8 = parse_gctx.parse(\"cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx\", ridx=[4], cidx=[4])\n\n pandas_testing.assert_frame_equal(mg7.data_df, mg8.data_df)\n pandas_testing.assert_frame_equal(mg7.row_metadata_df, mg8.row_metadata_df)\n pandas_testing.assert_frame_equal(mg7.col_metadata_df, mg8.col_metadata_df)\n\n # test with rid/cidx\n mg9 = parse_gctx.parse(\"cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx\",\n rid=['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'],\n cidx=[4])\n\n pandas_testing.assert_frame_equal(mg7.data_df, mg9.data_df)\n pandas_testing.assert_frame_equal(mg7.row_metadata_df, mg9.row_metadata_df)\n pandas_testing.assert_frame_equal(mg7.col_metadata_df, mg9.col_metadata_df)\n\n # test with ridx/cid\n mg10 = parse_gctx.parse(\"cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx\", ridx=[4],\n cid=['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'])\n\n pandas_testing.assert_frame_equal(mg7.data_df, mg10.data_df)\n pandas_testing.assert_frame_equal(mg7.row_metadata_df, mg10.row_metadata_df)\n pandas_testing.assert_frame_equal(mg7.col_metadata_df, mg10.col_metadata_df)\n\n # test with row_meta_only\n mg11 = parse_gctx.parse(\"cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx\", row_meta_only=True)\n pandas_testing.assert_frame_equal(mg11, mg1.row_metadata_df)\n\n # test with col_meta_only\n mg12 = parse_gctx.parse(\"cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx\", col_meta_only=True)\n pandas_testing.assert_frame_equal(mg12, mg1.col_metadata_df)\n\n # test with sort_col_meta False and cidx\n mg13 = parse_gctx.parse(\"cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx\", \n cidx = [4,1,3], sort_col_meta= False)\n\n pandas_testing.assert_frame_equal(mg13.data_df, 
mg1.data_df.iloc[:, [4,1,3]])\n pandas_testing.assert_frame_equal(mg13.col_metadata_df, mg1.col_metadata_df.iloc[[4,1,3],:])\n pandas_testing.assert_frame_equal(mg13.row_metadata_df, mg1.row_metadata_df)\n\n\n # test with sort_row_meta False and ridx\n mg14 = parse_gctx.parse(\"cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx\", \n ridx = [3,0,1], sort_row_meta= False)\n\n pandas_testing.assert_frame_equal(mg14.data_df, mg1.data_df.iloc[[3,0,1],:])\n pandas_testing.assert_frame_equal(mg14.col_metadata_df, mg1.col_metadata_df)\n pandas_testing.assert_frame_equal(mg14.row_metadata_df, mg1.row_metadata_df.iloc[[3,0,1],:])\n\n # test with sort_col_meta False and cidx and col_meta_only\n mg15 = parse_gctx.parse(\"cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx\", \n cidx = [4,1,3], sort_col_meta= False, col_meta_only=True)\n pandas_testing.assert_frame_equal(mg15, mg1.col_metadata_df.iloc[[4,1,3],:])\n\n # test with sort_row_meta False and ridx and row_meta_only\n mg16 = parse_gctx.parse(\"cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx\", \n ridx = [3,0,1], sort_row_meta= False, row_meta_only=True)\n pandas_testing.assert_frame_equal(mg16, mg1.row_metadata_df.iloc[[3,0,1],:])\n\n # test with sort_col_meta False and cid \n cid_unsorted = ['LJP007_MCF7_24H:TRT_POSCON:BRD-K81418486:10','LJP007_MCF10A_24H:TRT_CP:BRD-K93918653:3.33']\n mg17 = parse_gctx.parse(\"cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx\", \n cid = cid_unsorted, sort_col_meta= False)\n pandas_testing.assert_frame_equal(mg17.data_df, mg1.data_df.iloc[:, [2,0]])\n pandas_testing.assert_frame_equal(mg17.col_metadata_df, mg1.col_metadata_df.iloc[[2,0],:])\n pandas_testing.assert_frame_equal(mg17.row_metadata_df, mg1.row_metadata_df)\n\n # test with sort_row_meta False and rid\n rid_unsorted = ['LJP007_MCF7_24H:TRT_CP:BRD-K64857848:10', 'MISC003_A375_24H:TRT_CP:BRD-K93918653:3.33']\n mg18 = 
parse_gctx.parse(\"cmapPy/pandasGEXpress/tests/functional_tests/mini_gctoo_for_testing.gctx\",\n rid = rid_unsorted, sort_row_meta=False)\n pandas_testing.assert_frame_equal(mg18.data_df, mg1.data_df.iloc[[5,1], :])\n pandas_testing.assert_frame_equal(mg18.col_metadata_df, mg1.col_metadata_df)\n pandas_testing.assert_frame_equal(mg18.row_metadata_df, mg1.row_metadata_df.iloc[[5,1],:])\n\n def test_parse_rid_as_entrez_id(self):\n input_file = \"cmapPy/pandasGEXpress/tests/functional_tests//test_parse_gctx_rid_entrez_id.gctx\"\n g = parse_gctx.parse(input_file)\n self.assertEqual((5, 5), g.data_df.shape)\n logger.debug(\"g.data_df.index: {}\".format(g.data_df.index))\n\n my_rids = [\"5720\", \"55847\", \"7416\"]\n g = parse_gctx.parse(input_file, rid=my_rids)\n self.assertEqual((3, 5), g.data_df.shape)\n logger.debug(\"g.data_df.index: {}\".format(g.data_df.index))\n\n my_rids = [str(x) for x in my_rids]\n logger.debug(\"using rid as str (mismatched type) - my_rids: {}\".format(my_rids))\n g = parse_gctx.parse(input_file, rid=my_rids)\n self.assertEqual((3, 5), g.data_df.shape)\n logger.debug(\"g.data_df.index: {}\".format(g.data_df.index))\n\n def test_check_and_order_id_inputs(self):\n ridx = [0, 1]\n cidx = [2, 1]\n rid = [\"a\", \"b\", \"c\"]\n cid = [\"l\", \"m\", \"n\", \"o\"]\n row_meta = pd.DataFrame(index=[\"b\", \"c\", \"a\", \"d\"])\n col_meta = pd.DataFrame(index=[\"l\", \"m\", \"n\", \"o\", \"p\", \"q\"])\n\n # case 1: row and col lists are populated and same type\n self.assertEqual((sorted(ridx), sorted(cidx)),\n parse_gctx.check_and_order_id_inputs(None, ridx, None, cidx, row_meta, col_meta, sort_row_meta = True, sort_col_meta = True))\n\n # case 2: row & col lists are populated, but of different types\n self.assertEqual((sorted(ridx), [0, 1, 2, 3]),\n parse_gctx.check_and_order_id_inputs(None, ridx, cid, None, row_meta, col_meta, sort_row_meta = True, sort_col_meta = True))\n\n # case 3: row list and col lists are both None\n self.assertEqual(([0, 1, 
2, 3], [0, 1, 2, 3, 4, 5]),\n parse_gctx.check_and_order_id_inputs(None, None, None, None, row_meta, col_meta, sort_row_meta = True, sort_col_meta = True))\n\n # case 4: row list is populated, col list is None\n self.assertEqual(([0, 1, 2], [0, 1, 2, 3, 4, 5]),\n parse_gctx.check_and_order_id_inputs(rid, None, None, None, row_meta, col_meta, sort_row_meta = True, sort_col_meta = True))\n\n def test_check_id_idx_exclusivity(self):\n ids = [\"a\", \"b\", \"c\"]\n idx = [0, 1, 2]\n\n # case 1: id != None and idx != None\n with self.assertRaises(Exception) as context:\n parse_gctx.check_id_idx_exclusivity(ids, idx)\n self.assertTrue(\"'id' and 'idx' fields can't both not be None\" in str(context.exception))\n\n # case 2: id != None\n self.assertEqual((\"id\", ids), parse_gctx.check_id_idx_exclusivity(ids, None))\n\n # case 3: idx != None\n self.assertEqual((\"idx\", idx), parse_gctx.check_id_idx_exclusivity(None, idx))\n\n # case 4: id == None & idx == None\n self.assertEqual((None, []), parse_gctx.check_id_idx_exclusivity(None, None))\n\n def test_parse_metadata_df(self):\n mini_gctoo = mini_gctoo_for_testing.make()\n # convert row_metadata to np.nan\n mini_row_meta = mini_gctoo.row_metadata_df.replace([-666, \"-666\", -666.0], [np.nan, np.nan, np.nan])\n logger.debug(\"mini_row_meta.shape: {}\".format(mini_row_meta.shape))\n logger.debug(\"mini_row_meta.index: {}\".format(mini_row_meta.index))\n logger.debug(\"mini_row_meta.columns: {}\".format(mini_row_meta.columns))\n logger.debug(\"mini_row_meta.dtypes: {}\".format(mini_row_meta.dtypes))\n\n gctx_file = h5py.File(\"cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx\", \"r\")\n row_dset = gctx_file[row_meta_group_node]\n col_dset = gctx_file[col_meta_group_node]\n\n # with convert_neg_666\n row_df = parse_gctx.parse_metadata_df(\"row\", row_dset, True)\n logger.debug(\"row_df.dtypes: {}\".format(row_df.dtypes))\n pandas_testing.assert_frame_equal(mini_row_meta, row_df)\n\n # no 
convert_neg_666\n mini_gctoo_with_neg_666 = mini_gctoo_for_testing.make(convert_neg_666=False)\n col_df = parse_gctx.parse_metadata_df(\"col\", col_dset, False)\n pandas_testing.assert_frame_equal(mini_gctoo_with_neg_666.col_metadata_df, col_df)\n\n # test that ID's are not converted to numeric\n expected_rids = [str(i) for i in range(3)]\n row_dset = {\"id\": MockHdf5Dset(expected_rids, str),\n \"other_meta\": MockHdf5Dset(range(3, 6), str)}\n r = parse_gctx.parse_metadata_df(\"row\", row_dset, True)\n logger.debug(\"test that ID's are not converted to numeric - r: {}\".format(r))\n logger.debug(\"r.index: {}\".format(r.index))\n self.assertEqual(set(expected_rids), set(r.index))\n\n def test_replace_666(self):\n # convert_neg_666 is True\n row_df = pd.DataFrame([[3, \"a\"], [-666, \"c\"], [\"-666\", -666.0]],\n index=[\"r1\", \"r2\", \"r3\"], columns=[\"rhd1\", \"rhd2\"])\n e_df = pd.DataFrame([[3, \"a\"], [np.nan, \"c\"], [np.nan, np.nan]],\n index=[\"r1\", \"r2\", \"r3\"], columns=[\"rhd1\", \"rhd2\"])\n out_df = parse_gctx.replace_666(row_df, convert_neg_666=True)\n self.assertTrue(e_df.equals(out_df))\n\n # convert_neg_666 is False\n e_df2 = pd.DataFrame([[3, \"a\"], [\"-666\", \"c\"], [\"-666\", \"-666\"]],\n index=[\"r1\", \"r2\", \"r3\"], columns=[\"rhd1\", \"rhd2\"])\n out_df2 = parse_gctx.replace_666(row_df, convert_neg_666=False)\n self.assertTrue(e_df2.equals(out_df2))\n\n # edge case: if row meta is 1 column of floats\n row_df3 = pd.DataFrame([[3], [-666], [-666.0]],\n index=[\"r1\", \"r2\", \"r3\"], columns=[\"rhd3\"])\n e_df3 = pd.DataFrame([[3], [np.nan], [np.nan]],\n index=[\"r1\", \"r2\", \"r3\"], columns=[\"rhd3\"])\n out_df3 = parse_gctx.replace_666(row_df3, convert_neg_666=True)\n self.assertTrue(e_df3.equals(out_df3))\n\n def test_set_metadata_index_and_column_names(self):\n mini_gctoo = mini_gctoo_for_testing.make()\n mini_gctoo.row_metadata_df.index.name = None\n mini_gctoo.row_metadata_df.columns.name = None\n 
mini_gctoo.col_metadata_df.index.name = None\n mini_gctoo.col_metadata_df.columns.name = None\n\n # case 1: dim == \"row\"\n parse_gctx.set_metadata_index_and_column_names(\"row\", mini_gctoo.row_metadata_df)\n self.assertEqual(mini_gctoo.row_metadata_df.index.name, \"rid\")\n self.assertEqual(mini_gctoo.row_metadata_df.columns.name, \"rhd\")\n\n # case 2: dim == \"col\"\n parse_gctx.set_metadata_index_and_column_names(\"col\", mini_gctoo.col_metadata_df)\n self.assertEqual(mini_gctoo.col_metadata_df.index.name, \"cid\")\n self.assertEqual(mini_gctoo.col_metadata_df.columns.name, \"chd\")\n\n def test_get_ordered_idx(self):\n mg = mini_gctoo_for_testing.make()\n\n # case 1: id_type == None\n case1 = parse_gctx.get_ordered_idx(None, [], mg.row_metadata_df, sort_idx = True)\n self.assertEqual(case1, list(range(0, 6)),\n \"Expected ordered idx to be {} but got {}\".format(list(range(0, 6)), case1))\n\n # case 2: id_type == \"id\"\n case2 = parse_gctx.get_ordered_idx(\"id\",\n ['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'], mg.col_metadata_df, sort_idx = True)\n self.assertEqual(case2, [4],\n \"Expected ordered idx to be {} but got {}\".format([4], case2))\n\n # case 3: id_type == ridx\n case3 = parse_gctx.get_ordered_idx(\"idx\",\n [5, 1, 3], mg.col_metadata_df, sort_idx = True)\n self.assertEqual(case3, [1, 3, 5],\n \"Expected ordered idx to be {} but got {}\".format([1, 3, 5], case3))\n\n def test_parse_data_df(self):\n mini_data_df = pd.DataFrame([[-0.283359, 0.011270], [0.304119, 1.921061], [0.398655, -0.144652]],\n index=[\"200814_at\", \"218597_s_at\", \"217140_s_at\"],\n columns=[\"LJP005_A375_24H:DMSO:-666\", \"LJP005_A375_24H:BRD-K76908866:10\"])\n mini_data_df = mini_data_df.astype(np.float32)\n mini_data_df.index.name = \"rid\"\n mini_data_df.columns.name = \"cid\"\n\n # create h5py File instance\n mini_gctx = h5py.File(\"cmapPy/pandasGEXpress/tests/functional_tests//mini_gctx_with_metadata_n2x3.gctx\", \"r\")\n data_dset = mini_gctx[data_node]\n\n # get 
relevant metadata fields\n col_meta = parse_gctx.get_column_metadata(\"cmapPy/pandasGEXpress/tests/functional_tests//mini_gctx_with_metadata_n2x3.gctx\")\n row_meta = parse_gctx.get_row_metadata(\"cmapPy/pandasGEXpress/tests/functional_tests//mini_gctx_with_metadata_n2x3.gctx\")\n\n # case 1: no subsetting\n data_df1 = parse_gctx.parse_data_df(data_dset, [0, 1, 2], [0, 1], row_meta, col_meta)\n # note: checks to 3 decimal places\n pandas_testing.assert_frame_equal(mini_data_df, data_df1,\n check_exact=False, check_less_precise=True)\n\n # case 2: subset; ridx < cidx\n data_df2 = parse_gctx.parse_data_df(data_dset, [0], [0, 1], row_meta, col_meta)\n pandas_testing.assert_frame_equal(mini_data_df.iloc[[0], [0, 1]], data_df2,\n check_exact=False, check_less_precise=True)\n\n # case 3: subset; ridx == cidx\n data_df3 = parse_gctx.parse_data_df(data_dset, [0], [0], row_meta, col_meta)\n pandas_testing.assert_frame_equal(mini_data_df.iloc[[0], [0]], data_df3,\n check_exact=False, check_less_precise=True)\n\n # case 4: subset; ridx > cidx\n data_df4 = parse_gctx.parse_data_df(data_dset, [0, 1, 2], [0], row_meta, col_meta)\n pandas_testing.assert_frame_equal(mini_data_df.iloc[[0, 1, 2], [0]], data_df4,\n check_exact=False, check_less_precise=True)\n\n mini_gctx.close()\n\n def test_convert_ids_to_meta_type(self):\n # happy path\n id_list = [0, 1, 2]\n self.assertEqual(int, type(id_list[0]))\n df = pd.DataFrame({}, index=pd.Series(range(1, 4)).astype(np.int64))\n r = parse_gctx.convert_ids_to_meta_type(id_list, df)\n logger.debug(\"conversion from regular int to numpy int64 - type(r[0]): {}\".format(type(r[0])))\n self.assertEqual(np.int64, type(r[0]))\n\n id_list = [str(i) for i in range(3)]\n r = parse_gctx.convert_ids_to_meta_type(id_list, df)\n logger.debug(\"conversion from str to numpy int64 - type(r[0]): {}\".format(type(r[0])))\n self.assertEqual(np.int64, type(r[0]))\n\n # unhappy path\n id_list[0] = \"a\"\n with self.assertRaises(Exception) as context:\n 
parse_gctx.convert_ids_to_meta_type(id_list, df)\n logger.debug(\"context.exception: {}\".format(context.exception))\n self.assertIn(\n \"The type of the id_list (rid or cid) being used to subset the data is not compatible with the metadata id's in the file\",\n str(context.exception))\n\n def test_check_idx_validity(self):\n id_list = [0,1,2]\n df = pd.DataFrame({}, index=range(5))\n logger.debug(\"df.shape: {}\".format(df.shape))\n parse_gctx.check_idx_validity(id_list, df, sort_id = True)\n\n id_list[0] = -1\n with self.assertRaises(Exception) as context:\n parse_gctx.check_idx_validity(id_list, df, sort_id = True)\n logger.debug(\"context.exception: {}\".format(context.exception))\n self.assertIn(\"some of indexes being used to subset the data are not valid\", str(context.exception))\n self.assertIn(\"[-1]\", str(context.exception))\n\n invalid_high = df.shape[0] + 1\n id_list[0] = invalid_high\n with self.assertRaises(Exception) as context:\n parse_gctx.check_idx_validity(id_list, df, sort_id = True)\n logger.debug(\"context.exception: {}\".format(context.exception))\n self.assertIn(\"some of indexes being used to subset the data are not valid\", str(context.exception))\n self.assertIn(\"[{}]\".format(invalid_high), str(context.exception))\n\n def test_check_id_validity(self):\n id_list = [\"a\", \"b\", \"c\"]\n df = pd.DataFrame({}, index=[\"a\", \"b\", \"c\", \"d\"])\n parse_gctx.check_id_validity(id_list, df)\n\n id_list[0] = \"z\"\n with self.assertRaises(Exception) as context:\n parse_gctx.check_id_validity(id_list, df)\n logger.debug(\"context.exception: {}\".format(context.exception))\n self.assertIn(\n \"some of the ids being used to subset the data are not present in the metadata for the file being parsed\",\n str(context.exception))\n\n\nif __name__ == \"__main__\":\n setup_logger.setup(verbose=True)\n\n unittest.main()\n",
"\"\"\"\nDATA:\n-----------------------------\n| | cid |\n-----------------------------\n| | |\n|r | |\n|i | data |\n|d | |\n| | |\n-----------------------------\nROW METADATA:\n--------------------------\n|id| rhd |\n--------------------------\n| | |\n|r | |\n|i | row_metadata |\n|d | |\n| | |\n--------------------------\nCOLUMN METADATA:\nN.B. The df is transposed from how it looks in a gct file.\n---------------------\n|id| chd |\n---------------------\n| | |\n| | |\n| | |\n|c | |\n|i | col_metadata |\n|d | |\n| | |\n| | |\n| | |\n---------------------\n\nN.B. rids, cids, rhds, and chds must be:\n- unique\n- matching in both content & order everywhere they're found\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport logging\nimport cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger\n\n\n__authors__ = 'Oana Enache, Lev Litichevskiy, Dave Lahr'\n__email__ = '[email protected]'\n\n\nclass GCToo(object):\n \"\"\"Class representing parsed gct(x) objects as pandas dataframes.\n Contains 3 component dataframes (row_metadata_df, column_metadata_df,\n and data_df) as well as an assembly of these 3 into a multi index df\n that provides an alternate way of selecting data.\n \"\"\"\n def __init__(self, data_df, row_metadata_df=None, col_metadata_df=None,\n src=None, version=None, make_multiindex=False, logger_name=setup_logger.LOGGER_NAME):\n\n self.logger = logging.getLogger(logger_name)\n\n self.src = src\n self.version = version\n\n # Check data_df before setting\n self.check_df(data_df)\n self.data_df = data_df\n\n if row_metadata_df is None:\n self.row_metadata_df = pd.DataFrame(index=data_df.index)\n else:\n # Lots of checks will occur when this attribute is set (see __setattr__ below)\n self.row_metadata_df = row_metadata_df\n\n if col_metadata_df is None:\n self.col_metadata_df = pd.DataFrame(index=data_df.columns)\n else:\n # Lots of checks will occur when this attribute is set (see __setattr__ below)\n self.col_metadata_df = col_metadata_df\n\n # Create 
multi_index_df if explicitly requested\n if make_multiindex:\n self.assemble_multi_index_df()\n else:\n self.multi_index_df = None\n\n # This GCToo object is now initialized\n self._initialized = True\n\n def __setattr__(self, name, value):\n # Make sure row/col metadata agree with data_df before setting\n if name in [\"row_metadata_df\", \"col_metadata_df\"]:\n self.check_df(value)\n if name == \"row_metadata_df\":\n self.id_match_check(self.data_df, value, \"row\")\n value = value.reindex(self.data_df.index)\n super(GCToo, self).__setattr__(name, value)\n else:\n self.id_match_check(self.data_df, value, \"col\")\n value = value.reindex(self.data_df.columns)\n super(GCToo, self).__setattr__(name, value)\n\n # When reassigning data_df after initialization, reindex row/col metadata if necessary\n # N.B. Need to check if _initialized is present before checking if it's true, or code will break\n elif name == \"data_df\" and \"_initialized\" in self.__dict__ and self._initialized:\n self.id_match_check(value, self.row_metadata_df, \"row\")\n self.id_match_check(value, self.col_metadata_df, \"col\")\n super(GCToo, self).__setattr__(\"row_metadata_df\", self.row_metadata_df.reindex(value.index))\n super(GCToo, self).__setattr__(\"col_metadata_df\", self.col_metadata_df.reindex(value.columns))\n super(GCToo, self).__setattr__(name, value)\n\n # Can't reassign multi_index_df after initialization\n elif name == \"multi_index_df\" and \"_initialized\" in self.__dict__ and self._initialized:\n msg = (\"Cannot reassign value of multi_index_df attribute; \" +\n \"if you'd like a new multiindex df, please create a new GCToo instance\" +\n \"with appropriate data_df, row_metadata_df, and col_metadata_df fields.\")\n self.logger.error(msg)\n raise Exception(\"GCToo.__setattr__: \" + msg)\n\n # Otherwise, use the normal __setattr__ method\n else:\n super(GCToo, self).__setattr__(name, value)\n\n def check_df(self, df):\n \"\"\"\n Verifies that df is a pandas DataFrame instance 
and\n that its index and column values are unique.\n \"\"\"\n if isinstance(df, pd.DataFrame):\n if not df.index.is_unique:\n repeats = df.index[df.index.duplicated()].values\n msg = \"Index values must be unique but aren't. The following entries appear more than once: {}\".format(repeats)\n self.logger.error(msg)\n raise Exception(\"GCToo GCToo.check_df \" + msg)\n if not df.columns.is_unique:\n repeats = df.columns[df.columns.duplicated()].values\n msg = \"Columns values must be unique but aren't. The following entries appear more than once: {}\".format(repeats)\n raise Exception(\"GCToo GCToo.check_df \" + msg)\n else:\n return True\n else:\n msg = \"expected Pandas DataFrame, got something else: {} of type: {}\".format(df, type(df))\n self.logger.error(msg)\n raise Exception(\"GCToo GCToo.check_df \" + msg)\n\n def id_match_check(self, data_df, meta_df, dim):\n \"\"\"\n Verifies that id values match between:\n - row case: index of data_df & index of row metadata\n - col case: columns of data_df & index of column metadata\n \"\"\"\n if dim == \"row\":\n if len(data_df.index) == len(meta_df.index) and set(data_df.index) == set(meta_df.index):\n return True\n else:\n msg = (\"The rids are inconsistent between data_df and row_metadata_df.\\n\" +\n \"data_df.index.values:\\n{}\\nrow_metadata_df.index.values:\\n{}\").format(data_df.index.values, meta_df.index.values)\n self.logger.error(msg)\n raise Exception(\"GCToo GCToo.id_match_check \" + msg)\n elif dim == \"col\":\n if len(data_df.columns) == len(meta_df.index) and set(data_df.columns) == set(meta_df.index):\n return True\n else:\n msg = (\"The cids are inconsistent between data_df and col_metadata_df.\\n\" +\n \"data_df.columns.values:\\n{}\\ncol_metadata_df.index.values:\\n{}\").format(data_df.columns.values, meta_df.index.values)\n self.logger.error(msg)\n raise Exception(\"GCToo GCToo.id_match_check \" + msg)\n\n def __str__(self):\n \"\"\"Prints a string representation of a GCToo object.\"\"\"\n version = 
\"{}\\n\".format(self.version)\n source = \"src: {}\\n\".format(self.src)\n\n\n data = \"data_df: [{} rows x {} columns]\\n\".format(\n self.data_df.shape[0], self.data_df.shape[1])\n\n row_meta = \"row_metadata_df: [{} rows x {} columns]\\n\".format(\n self.row_metadata_df.shape[0], self.row_metadata_df.shape[1])\n\n col_meta = \"col_metadata_df: [{} rows x {} columns]\".format(\n self.col_metadata_df.shape[0], self.col_metadata_df.shape[1])\n\n full_string = (version + source + data + row_meta + col_meta)\n return full_string\n\n def assemble_multi_index_df(self):\n \"\"\"Assembles three component dataframes into a multiindex dataframe.\n Sets the result to self.multi_index_df.\n IMPORTANT: Cross-section (\"xs\") is the best command for selecting\n data. Be sure to use the flag \"drop_level=False\" with this command,\n or else the dataframe that is returned will not have the same\n metadata as the input.\n N.B. \"level\" means metadata header.\n N.B. \"axis=1\" indicates column annotations.\n Examples:\n 1) Select the probe with pr_lua_id=\"LUA-3404\":\n lua3404_df = multi_index_df.xs(\"LUA-3404\", level=\"pr_lua_id\", drop_level=False)\n 2) Select all DMSO samples:\n DMSO_df = multi_index_df.xs(\"DMSO\", level=\"pert_iname\", axis=1, drop_level=False)\n \"\"\"\n #prepare row index\n self.logger.debug(\"Row metadata shape: {}\".format(self.row_metadata_df.shape))\n self.logger.debug(\"Is empty? 
{}\".format(self.row_metadata_df.empty))\n row_copy = pd.DataFrame(self.row_metadata_df.index) if self.row_metadata_df.empty else self.row_metadata_df.copy()\n row_copy[\"rid\"] = row_copy.index\n row_index = pd.MultiIndex.from_arrays(row_copy.T.values, names=row_copy.columns)\n\n #prepare column index\n self.logger.debug(\"Col metadata shape: {}\".format(self.col_metadata_df.shape))\n col_copy = pd.DataFrame(self.col_metadata_df.index) if self.col_metadata_df.empty else self.col_metadata_df.copy()\n col_copy[\"cid\"] = col_copy.index\n transposed_col_metadata = col_copy.T\n col_index = pd.MultiIndex.from_arrays(transposed_col_metadata.values, names=transposed_col_metadata.index)\n\n # Create multi index dataframe using the values of data_df and the indexes created above\n self.logger.debug(\"Data df shape: {}\".format(self.data_df.shape))\n self.multi_index_df = pd.DataFrame(data=self.data_df.values, index=row_index, columns=col_index)\n\n\ndef multi_index_df_to_component_dfs(multi_index_df, rid=\"rid\", cid=\"cid\"):\n \"\"\" Convert a multi-index df into 3 component dfs. 
\"\"\"\n\n # Id level of the multiindex will become the index\n rids = list(multi_index_df.index.get_level_values(rid))\n cids = list(multi_index_df.columns.get_level_values(cid))\n\n # It's possible that the index and/or columns of multi_index_df are not\n # actually multi-index; need to check for this and there are more than one level in index(python3)\n if isinstance(multi_index_df.index, pd.MultiIndex):\n\n # check if there are more than one levels in index (python3)\n if len(multi_index_df.index.names) > 1:\n\n # If so, drop rid because it won't go into the body of the metadata\n mi_df_index = multi_index_df.index.droplevel(rid)\n\n # Names of the multiindex levels become the headers\n rhds = list(mi_df_index.names)\n\n # Assemble metadata values\n row_metadata = np.array([mi_df_index.get_level_values(level).values for level in list(rhds)]).T\n\n # if there is one level in index (python3), then rhds and row metadata should be empty\n else:\n rhds = []\n row_metadata = []\n\n # If the index is not multi-index, then rhds and row metadata should be empty\n else:\n rhds = []\n row_metadata = []\n\n # Check if columns of multi_index_df are in fact multi-index\n if isinstance(multi_index_df.columns, pd.MultiIndex):\n\n # Check if there are more than one levels in columns(python3)\n if len(multi_index_df.columns.names) > 1:\n\n # If so, drop cid because it won't go into the body of the metadata\n mi_df_columns = multi_index_df.columns.droplevel(cid)\n\n # Names of the multiindex levels become the headers\n chds = list(mi_df_columns.names)\n\n # Assemble metadata values\n col_metadata = np.array([mi_df_columns.get_level_values(level).values for level in list(chds)]).T\n\n # If there is one level in columns (python3), then rhds and row metadata should be empty\n else:\n chds = []\n col_metadata = []\n # If the columns are not multi-index, then rhds and row metadata should be empty\n else:\n chds = []\n col_metadata = []\n\n # Create component dfs\n row_metadata_df = 
pd.DataFrame.from_records(row_metadata, index=pd.Index(rids, name=\"rid\"), columns=pd.Index(rhds, name=\"rhd\"))\n col_metadata_df = pd.DataFrame.from_records(col_metadata, index=pd.Index(cids, name=\"cid\"), columns=pd.Index(chds, name=\"chd\"))\n data_df = pd.DataFrame(multi_index_df.values, index=pd.Index(rids, name=\"rid\"), columns=pd.Index(cids, name=\"cid\"))\n\n return data_df, row_metadata_df, col_metadata_df\n"
] | [
[
"pandas.util.testing.assert_frame_equal",
"pandas.DataFrame"
],
[
"pandas.Index",
"pandas.MultiIndex.from_arrays",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
eifuentes/swae-pytorch | [
"763f771c1d4860f71819af48d4f21a8a29a689d5"
] | [
"examples/mnist.py"
] | [
"import argparse\nimport os\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.optim as optim\nimport torchvision.utils as vutils\nfrom swae.distributions import rand_cirlce2d, rand_ring2d, rand_uniform2d\nfrom swae.models.mnist import MNISTAutoencoder\nfrom swae.trainer import SWAEBatchTrainer\nfrom torchvision import datasets, transforms\n\n\ndef main():\n # train args\n parser = argparse.ArgumentParser(description='Sliced Wasserstein Autoencoder PyTorch MNIST Example')\n parser.add_argument('--datadir', default='/input/', help='path to dataset')\n parser.add_argument('--outdir', default='/output/', help='directory to output images and model checkpoints')\n parser.add_argument('--batch-size', type=int, default=500, metavar='N',\n help='input batch size for training (default: 500)')\n parser.add_argument('--epochs', type=int, default=30, metavar='N',\n help='number of epochs to train (default: 30)')\n parser.add_argument('--lr', type=float, default=0.001, metavar='LR',\n help='learning rate (default: 0.001)')\n parser.add_argument('--alpha', type=float, default=0.9, metavar='A',\n help='RMSprop alpha/rho (default: 0.9)')\n parser.add_argument('--distribution', type=str, default='circle', metavar='DIST',\n help='Latent Distribution (default: circle)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--num-workers', type=int, default=8, metavar='N',\n help='number of dataloader workers if device is CPU (default: 8)')\n parser.add_argument('--seed', type=int, default=7, metavar='S',\n help='random seed (default: 7)')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='number of batches to log training status (default: 10)')\n args = parser.parse_args()\n # create output directory\n imagesdir = os.path.join(args.outdir, 'images')\n chkptdir = os.path.join(args.outdir, 'models')\n os.makedirs(args.datadir, 
exist_ok=True)\n os.makedirs(imagesdir, exist_ok=True)\n os.makedirs(chkptdir, exist_ok=True)\n # determine device and device dep. args\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n dataloader_kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {'num_workers': args.num_workers, 'pin_memory': False}\n # set random seed\n torch.manual_seed(args.seed)\n if use_cuda:\n torch.cuda.manual_seed(args.seed)\n # log args\n print('batch size {}\\nepochs {}\\nRMSprop lr {} alpha {}\\ndistribution {}\\nusing device {}\\nseed set to {}'.format(\n args.batch_size, args.epochs, args.lr, args.alpha, args.distribution, device.type, args.seed\n ))\n # build train and test set data loaders\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(args.datadir, train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **dataloader_kwargs)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(args.datadir, train=False, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=64, shuffle=False, **dataloader_kwargs)\n # create encoder and decoder\n model = MNISTAutoencoder().to(device)\n print(model)\n # create optimizer\n # matching default Keras args for RMSprop\n optimizer = optim.RMSprop(model.parameters(), lr=args.lr, alpha=args.alpha)\n # determine latent distribution\n if args.distribution == 'circle':\n distribution_fn = rand_cirlce2d\n elif args.distribution == 'ring':\n distribution_fn = rand_ring2d\n else:\n distribution_fn = rand_uniform2d\n # create batch sliced_wasserstein autoencoder trainer\n trainer = SWAEBatchTrainer(model, optimizer, distribution_fn, device=device)\n # put networks in training mode\n model.train()\n # train networks for n epochs\n print('training...')\n 
for epoch in range(args.epochs):\n if epoch > 10:\n trainer.weight *= 1.1\n # train autoencoder on train dataset\n for batch_idx, (x, y) in enumerate(train_loader, start=0):\n batch = trainer.train_on_batch(x)\n if (batch_idx + 1) % args.log_interval == 0:\n print('Train Epoch: {} ({:.2f}%) [{}/{}]\\tLoss: {:.6f}'.format(\n epoch + 1, float(epoch + 1) / (args.epochs) * 100.,\n (batch_idx + 1), len(train_loader),\n batch['loss'].item()))\n # evaluate autoencoder on test dataset\n test_encode, test_targets, test_loss = list(), list(), 0.0\n with torch.no_grad():\n for test_batch_idx, (x_test, y_test) in enumerate(test_loader, start=0):\n test_evals = trainer.test_on_batch(x_test)\n test_encode.append(test_evals['encode'].detach())\n test_loss += test_evals['loss'].item()\n test_targets.append(y_test)\n test_encode, test_targets = torch.cat(test_encode).cpu().numpy(), torch.cat(test_targets).cpu().numpy()\n test_loss /= len(test_loader)\n print('Test Epoch: {} ({:.2f}%)\\tLoss: {:.6f}'.format(\n epoch + 1, float(epoch + 1) / (args.epochs) * 100.,\n test_loss))\n print('{{\"metric\": \"loss\", \"value\": {}}}'.format(test_loss))\n # save model\n torch.save(model.state_dict(), '{}/mnist_epoch_{}.pth'.format(chkptdir, epoch + 1))\n # save encoded samples plot\n plt.figure(figsize=(10, 10))\n plt.scatter(test_encode[:, 0], -test_encode[:, 1], c=(10 * test_targets), cmap=plt.cm.Spectral)\n plt.xlim([-1.5, 1.5])\n plt.ylim([-1.5, 1.5])\n plt.title('Test Latent Space\\nLoss: {:.5f}'.format(test_loss))\n plt.savefig('{}/test_latent_epoch_{}.png'.format(imagesdir, epoch + 1))\n plt.close()\n # save sample input and reconstruction\n vutils.save_image(x, '{}/test_samples_epoch_{}.png'.format(imagesdir, epoch + 1))\n vutils.save_image(batch['decode'].detach(),\n '{}/test_reconstructions_epoch_{}.png'.format(imagesdir, epoch + 1),\n normalize=True)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.cuda.manual_seed",
"matplotlib.pyplot.scatter",
"torch.cat",
"matplotlib.use",
"torch.manual_seed",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"torch.no_grad",
"torch.cuda.is_available",
"matplotlib.pyplot.close",
"torch.device",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aviadlazar/FLS | [
"03f1ec28adf3b15810ef2c2ac5e024697c3d0bff"
] | [
"tool/tv_reference/coco_eval.py"
] | [
"import json\r\nimport tempfile\r\n\r\nimport numpy as np\r\nimport copy\r\nimport time\r\nimport torch\r\nimport torch._six\r\n\r\nfrom pycocotools.cocoeval import COCOeval\r\nfrom pycocotools.coco import COCO\r\nimport pycocotools.mask as mask_util\r\n\r\nfrom collections import defaultdict\r\n\r\nfrom . import utils\r\n\r\n\r\nclass CocoEvaluator(object):\r\n def __init__(self, coco_gt, iou_types, bbox_fmt='coco'):\r\n assert isinstance(iou_types, (list, tuple))\r\n coco_gt = copy.deepcopy(coco_gt)\r\n self.coco_gt = coco_gt\r\n self.bbox_fmt = bbox_fmt.lower()\r\n assert self.bbox_fmt in ['voc', 'coco', 'yolo']\r\n\r\n self.iou_types = iou_types\r\n self.coco_eval = {}\r\n for iou_type in iou_types:\r\n self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)\r\n\r\n self.img_ids = []\r\n self.eval_imgs = {k: [] for k in iou_types}\r\n\r\n def update(self, predictions):\r\n img_ids = list(np.unique(list(predictions.keys())))\r\n self.img_ids.extend(img_ids)\r\n\r\n for iou_type in self.iou_types:\r\n results = self.prepare(predictions, iou_type)\r\n coco_dt = loadRes(self.coco_gt, results) if results else COCO()\r\n coco_eval = self.coco_eval[iou_type]\r\n\r\n coco_eval.cocoDt = coco_dt\r\n coco_eval.params.imgIds = list(img_ids)\r\n img_ids, eval_imgs = evaluate(coco_eval)\r\n\r\n self.eval_imgs[iou_type].append(eval_imgs)\r\n\r\n def synchronize_between_processes(self):\r\n for iou_type in self.iou_types:\r\n self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)\r\n create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])\r\n\r\n def accumulate(self):\r\n for coco_eval in self.coco_eval.values():\r\n coco_eval.accumulate()\r\n\r\n def summarize(self):\r\n for iou_type, coco_eval in self.coco_eval.items():\r\n print(\"IoU metric: {}\".format(iou_type))\r\n coco_eval.summarize()\r\n\r\n def prepare(self, predictions, iou_type):\r\n if iou_type == \"bbox\":\r\n return 
self.prepare_for_coco_detection(predictions)\r\n elif iou_type == \"segm\":\r\n return self.prepare_for_coco_segmentation(predictions)\r\n elif iou_type == \"keypoints\":\r\n return self.prepare_for_coco_keypoint(predictions)\r\n else:\r\n raise ValueError(\"Unknown iou type {}\".format(iou_type))\r\n\r\n def prepare_for_coco_detection(self, predictions):\r\n coco_results = []\r\n for original_id, prediction in predictions.items():\r\n if len(prediction) == 0:\r\n continue\r\n \r\n if self.bbox_fmt == 'coco':\r\n boxes = prediction[\"boxes\"].tolist()\r\n else:\r\n boxes = prediction[\"boxes\"]\r\n boxes = convert_to_xywh(boxes, fmt=self.bbox_fmt).tolist()\r\n scores = prediction[\"scores\"].tolist()\r\n labels = prediction[\"labels\"].tolist()\r\n\r\n coco_results.extend(\r\n [\r\n {\r\n \"image_id\": original_id,\r\n \"category_id\": labels[k],\r\n \"bbox\": box,\r\n \"score\": scores[k],\r\n }\r\n for k, box in enumerate(boxes)\r\n ]\r\n )\r\n return coco_results\r\n\r\n def prepare_for_coco_segmentation(self, predictions):\r\n coco_results = []\r\n for original_id, prediction in predictions.items():\r\n if len(prediction) == 0:\r\n continue\r\n\r\n scores = prediction[\"scores\"]\r\n labels = prediction[\"labels\"]\r\n masks = prediction[\"masks\"]\r\n\r\n masks = masks > 0.5\r\n\r\n scores = prediction[\"scores\"].tolist()\r\n labels = prediction[\"labels\"].tolist()\r\n\r\n rles = [\r\n mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order=\"F\"))[0]\r\n for mask in masks\r\n ]\r\n for rle in rles:\r\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\")\r\n\r\n coco_results.extend(\r\n [\r\n {\r\n \"image_id\": original_id,\r\n \"category_id\": labels[k],\r\n \"segmentation\": rle,\r\n \"score\": scores[k],\r\n }\r\n for k, rle in enumerate(rles)\r\n ]\r\n )\r\n return coco_results\r\n\r\n def prepare_for_coco_keypoint(self, predictions):\r\n coco_results = []\r\n for original_id, prediction in predictions.items():\r\n if len(prediction) 
== 0:\r\n continue\r\n\r\n # boxes = prediction[\"boxes\"]\r\n # boxes = convert_to_xywh(boxes).tolist()\r\n scores = prediction[\"scores\"].tolist()\r\n labels = prediction[\"labels\"].tolist()\r\n keypoints = prediction[\"keypoints\"]\r\n keypoints = keypoints.flatten(start_dim=1).tolist()\r\n\r\n coco_results.extend(\r\n [\r\n {\r\n \"image_id\": original_id,\r\n \"category_id\": labels[k],\r\n 'keypoints': keypoint,\r\n \"score\": scores[k],\r\n }\r\n for k, keypoint in enumerate(keypoints)\r\n ]\r\n )\r\n return coco_results\r\n\r\n\r\ndef convert_to_xywh(boxes, fmt='voc'):\r\n if fmt.lower() == 'voc':\r\n xmin, ymin, xmax, ymax = boxes.unbind(1)\r\n return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)\r\n elif fmt.lower() == 'yolo':\r\n xcen, ycen, w, h = boxes.unbind(1)\r\n return torch.stack((xcen-w/2, ycen-h/2, w, h), dim=1)\r\n\r\n\r\ndef merge(img_ids, eval_imgs):\r\n all_img_ids = utils.all_gather(img_ids)\r\n all_eval_imgs = utils.all_gather(eval_imgs)\r\n\r\n merged_img_ids = []\r\n for p in all_img_ids:\r\n merged_img_ids.extend(p)\r\n\r\n merged_eval_imgs = []\r\n for p in all_eval_imgs:\r\n merged_eval_imgs.append(p)\r\n\r\n merged_img_ids = np.array(merged_img_ids)\r\n merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)\r\n\r\n # keep only unique (and in sorted order) images\r\n merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)\r\n merged_eval_imgs = merged_eval_imgs[..., idx]\r\n\r\n return merged_img_ids, merged_eval_imgs\r\n\r\n\r\ndef create_common_coco_eval(coco_eval, img_ids, eval_imgs):\r\n img_ids, eval_imgs = merge(img_ids, eval_imgs)\r\n img_ids = list(img_ids)\r\n eval_imgs = list(eval_imgs.flatten())\r\n\r\n coco_eval.evalImgs = eval_imgs\r\n coco_eval.params.imgIds = img_ids\r\n coco_eval._paramsEval = copy.deepcopy(coco_eval.params)\r\n\r\n\r\n#################################################################\r\n# From pycocotools, just removed the prints and fixed\r\n# a Python3 bug about unicode not 
defined\r\n#################################################################\r\n\r\n# Ideally, pycocotools wouldn't have hard-coded prints\r\n# so that we could avoid copy-pasting those two functions\r\n\r\ndef createIndex(self):\r\n # create index\r\n # print('creating index...')\r\n anns, cats, imgs = {}, {}, {}\r\n imgToAnns, catToImgs = defaultdict(list), defaultdict(list)\r\n if 'annotations' in self.dataset:\r\n for ann in self.dataset['annotations']:\r\n imgToAnns[ann['image_id']].append(ann)\r\n anns[ann['id']] = ann\r\n\r\n if 'images' in self.dataset:\r\n for img in self.dataset['images']:\r\n imgs[img['id']] = img\r\n\r\n if 'categories' in self.dataset:\r\n for cat in self.dataset['categories']:\r\n cats[cat['id']] = cat\r\n\r\n if 'annotations' in self.dataset and 'categories' in self.dataset:\r\n for ann in self.dataset['annotations']:\r\n catToImgs[ann['category_id']].append(ann['image_id'])\r\n\r\n # print('index created!')\r\n\r\n # create class members\r\n self.anns = anns\r\n self.imgToAnns = imgToAnns\r\n self.catToImgs = catToImgs\r\n self.imgs = imgs\r\n self.cats = cats\r\n\r\n\r\nmaskUtils = mask_util\r\n\r\n\r\ndef loadRes(self, resFile):\r\n \"\"\"\r\n Load result file and return a result api object.\r\n :param resFile (str) : file name of result file\r\n :return: res (obj) : result api object\r\n \"\"\"\r\n res = COCO()\r\n res.dataset['images'] = [img for img in self.dataset['images']]\r\n\r\n # print('Loading and preparing results...')\r\n # tic = time.time()\r\n if isinstance(resFile, torch._six.string_classes):\r\n anns = json.load(open(resFile))\r\n elif type(resFile) == np.ndarray:\r\n anns = self.loadNumpyAnnotations(resFile)\r\n else:\r\n anns = resFile\r\n assert type(anns) == list, 'results in not an array of objects'\r\n annsImgIds = [ann['image_id'] for ann in anns]\r\n assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \\\r\n 'Results do not correspond to current coco set'\r\n if 'caption' in anns[0]:\r\n 
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])\r\n res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]\r\n for id, ann in enumerate(anns):\r\n ann['id'] = id + 1\r\n elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:\r\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\r\n for id, ann in enumerate(anns):\r\n ann['bbox'] = ann['bbox'][0]\r\n bb = ann['bbox']\r\n x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]\r\n if 'segmentation' not in ann:\r\n ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]\r\n ann['area'] = bb[2] * bb[3]\r\n ann['id'] = id + 1\r\n ann['iscrowd'] = 0\r\n elif 'segmentation' in anns[0]:\r\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\r\n for id, ann in enumerate(anns):\r\n # now only support compressed RLE format as segmentation results\r\n ann['area'] = maskUtils.area(ann['segmentation'])\r\n if 'bbox' not in ann:\r\n ann['bbox'] = maskUtils.toBbox(ann['segmentation'])\r\n ann['id'] = id + 1\r\n ann['iscrowd'] = 0\r\n elif 'keypoints' in anns[0]:\r\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\r\n for id, ann in enumerate(anns):\r\n s = ann['keypoints']\r\n x = s[0::3]\r\n y = s[1::3]\r\n x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y)\r\n ann['area'] = (x2 - x1) * (y2 - y1)\r\n ann['id'] = id + 1\r\n ann['bbox'] = [x1, y1, x2 - x1, y2 - y1]\r\n # print('DONE (t={:0.2f}s)'.format(time.time()- tic))\r\n\r\n res.dataset['annotations'] = anns\r\n createIndex(res)\r\n return res\r\n\r\n\r\ndef evaluate(self):\r\n '''\r\n Run per image evaluation on given images and store results (a list of dict) in self.evalImgs\r\n :return: None\r\n '''\r\n # tic = time.time()\r\n # print('Running per image evaluation...')\r\n p = self.params\r\n # add backward compatibility if useSegm is specified in params\r\n if p.useSegm is not None:\r\n p.iouType = 'segm' if 
p.useSegm == 1 else 'bbox'\r\n print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))\r\n # print('Evaluate annotation type *{}*'.format(p.iouType))\r\n p.imgIds = list(np.unique(p.imgIds))\r\n if p.useCats:\r\n p.catIds = list(np.unique(p.catIds))\r\n p.maxDets = sorted(p.maxDets)\r\n self.params = p\r\n\r\n self._prepare()\r\n # loop through images, area range, max detection number\r\n catIds = p.catIds if p.useCats else [-1]\r\n\r\n if p.iouType == 'segm' or p.iouType == 'bbox':\r\n computeIoU = self.computeIoU\r\n elif p.iouType == 'keypoints':\r\n computeIoU = self.computeOks\r\n self.ious = {\r\n (imgId, catId): computeIoU(imgId, catId)\r\n for imgId in p.imgIds\r\n for catId in catIds}\r\n\r\n evaluateImg = self.evaluateImg\r\n maxDet = p.maxDets[-1]\r\n evalImgs = [\r\n evaluateImg(imgId, catId, areaRng, maxDet)\r\n for catId in catIds\r\n for areaRng in p.areaRng\r\n for imgId in p.imgIds\r\n ]\r\n # this is NOT in the pycocotools code, but could be done outside\r\n evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))\r\n self._paramsEval = copy.deepcopy(self.params)\r\n # toc = time.time()\r\n # print('DONE (t={:0.2f}s).'.format(toc-tic))\r\n return p.imgIds, evalImgs\r\n\r\n#################################################################\r\n# end of straight copy from pycocotools, just removing the prints\r\n#################################################################\r\n"
] | [
[
"numpy.unique",
"numpy.asarray",
"numpy.min",
"numpy.concatenate",
"numpy.max",
"torch.stack",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bbw7561135/PlasmaEDU | [
"aba2a00c04413bdf26c74f9e5c515644a517b728",
"aba2a00c04413bdf26c74f9e5c515644a517b728"
] | [
"bfield/python/ex03_plot_loopxyz_magnitude.py",
"ode/python/ex01_ode.py"
] | [
"################################################################################\n#\n# BFIELD\n#\n# Simple example of plot of the magnitude of the magnetic field\n# produced by a current loop, using its Cartesian components\n#\n#\n################################################################################\n\nimport numpy as np\nimport bfield\nimport matplotlib.pyplot as plt\n\n# Current Loop\nRa = 0.05\nI0 = 100.\nNturns = 1\nCenter = np.array([0,0,0])\nAngles = np.array([90,0,0]) * np.pi/180.0\n\n# X,Y Grid\nX = np.linspace(-0.1, 0.1, 50 )\nY = np.linspace(-0.1, 0.1, 50 )\n\n# B-field magnitude\nBnorm = np.zeros((X.size,Y.size))\nfor i in range(0,X.size):\n for j in range(0,Y.size):\n Point = np.array([ X[i], Y[j], 0.0 ])\n Bx,By,Bz = bfield.loopxyz(Ra,I0,Nturns,Center,Angles,Point)\n Bnorm[i][j] = np.sqrt(Bx*Bx + By*By + Bz*Bz)\n\nplt.figure(1)\nXX,YY = np.meshgrid(X,Y)\nplt.contourf(np.transpose(XX),np.transpose(YY),Bnorm,30)\nplt.colorbar()\nplt.xlabel('X [m]')\nplt.ylabel('Y [m]')\nplt.title('B-field magnitude [T] of a Current Loop')\nplt.savefig('ex03_plot_loopxyz_magnitude.png',dpi=150)\nplt.show()\n",
"import ode\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef fun(t,y):\n ydot = y - t**2 + 1\n return ydot\n\ndef main():\n tn = np.linspace( 0.0, 2.0, 5 ) # Grid\n y0 = np.array( [ 0.5 ] ) # Initial condition\n y_ef = ode.euler( fun, tn, y0 ) # Forward Euler\n y_mp = ode.midpoint( fun, tn, y0 ) # Explicit Midpoint\n y_rk = ode.rk4( fun, tn, y0 ) # Runge-Kutta 4\n y_an = tn**2 + 2.0*tn + 1.0 - 0.5*np.exp(tn) # Analytical\n\n plt.figure(1)\n plt.plot( tn, y_ef, 'ro-', label='Forward Euler (1st)' )\n plt.plot( tn, y_mp, 'go-', label='Explicit Mid-Point (2nd)' )\n plt.plot( tn, y_rk, 'bx-', label='Runge-Kutta (4th)' )\n plt.plot( tn, y_an, 'k-', label='Analytical Solution' )\n plt.xlabel('t')\n plt.ylabel('y')\n plt.legend(loc=2)\n plt.savefig('ex01_ode_solution.png')\n plt.show()\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.sqrt",
"numpy.linspace",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.ylabel",
"numpy.transpose",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.meshgrid",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.legend",
"numpy.linspace",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"numpy.exp",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
djoker07/facial_keipoint_detection | [
"112564b12330b0b18be8665a70c92c09e3434ce8"
] | [
"models.py"
] | [
"## TODO: define the convolutional neural network architecture\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n# can use the below import should you choose to initialize the weights of your Net\nimport torch.nn.init as I\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n \n ## TODO: Define all the layers of this CNN, the only requirements are:\n ## 1. This network takes in a square (same width and height), grayscale image as input\n ## 2. It ends with a linear layer that represents the keypoints\n ## it's suggested that you make this last layer output 136 values, 2 for each of the 68 keypoint (x, y) pairs\n \n # As an example, you've been given a convolutional layer, which you may (but don't have to) change:\n # 1 input image channel (grayscale), 32 output channels/feature maps, 5x5 square convolution kernel\n self.conv1 = nn.Conv2d(1, 32, 5, padding=2)\n self.pool1 = nn.MaxPool2d(4, 4)\n \n self.conv2 = nn.Conv2d(32, 64, 3, padding=1)\n self.pool2 = nn.MaxPool2d(2, 2)\n \n self.conv3 = nn.Conv2d(64, 128, 1)\n self.pool3 = nn.MaxPool2d(2, 2)\n \n# self.conv4 = nn.Conv2d(128, 256, 1)\n# self.pool4 = nn.MaxPool2d(2, 2)\n \n #calculate input size 32 * 52 * 52\n fc1_input_size = 128 * 14 * 14\n self.lin1 = nn.Linear(fc1_input_size, 1000)\n self.lin2 = nn.Linear(1000, (68 * 2))\n\n \n ## Note that among the layers to add, consider including:\n # maxpooling layers, multiple conv layers, fully-connected layers, and other layers (such as dropout or batch normalization) to avoid overfitting\n \n\n \n def forward(self, x):\n ## TODO: Define the feedforward behavior of this model\n ## x is the input image and, as an example, here you may choose to include a pool/conv step:\n# x = self.pool1(F.relu(self.conv1(x)))\n\n drop1 = nn.Dropout(0.1)\n drop2 = nn.Dropout(0.2)\n drop3 = nn.Dropout(0.3)\n drop4 = nn.Dropout(0.4)\n \n x = drop1(self.pool1(F.relu(self.conv1(x))))\n x = drop2(self.pool2(F.relu(self.conv2(x))))\n x = 
drop3(self.pool3(F.relu(self.conv3(x))))\n \n x = x.view(x.size(0), -1)\n x = drop4(F.relu(self.lin1(x)))\n x = self.lin2(x)\n \n # a modified x, having gone through all the layers of your model, should be returned\n return x\n"
] | [
[
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.Dropout",
"torch.nn.Conv2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BranYang/pandas | [
"1033e8b1195d4071253889ada60523832285354c"
] | [
"pandas/tests/extension/decimal/test_decimal.py"
] | [
"import decimal\n\nimport numpy as np\nimport pandas as pd\nimport pandas.util.testing as tm\nimport pytest\n\nfrom pandas.tests.extension import base\n\nfrom .array import DecimalDtype, DecimalArray, make_data\n\n\[email protected]\ndef dtype():\n return DecimalDtype()\n\n\[email protected]\ndef data():\n return DecimalArray(make_data())\n\n\[email protected]\ndef data_missing():\n return DecimalArray([decimal.Decimal('NaN'), decimal.Decimal(1)])\n\n\[email protected]\ndef data_repeated():\n def gen(count):\n for _ in range(count):\n yield DecimalArray(make_data())\n yield gen\n\n\[email protected]\ndef data_for_sorting():\n return DecimalArray([decimal.Decimal('1'),\n decimal.Decimal('2'),\n decimal.Decimal('0')])\n\n\[email protected]\ndef data_missing_for_sorting():\n return DecimalArray([decimal.Decimal('1'),\n decimal.Decimal('NaN'),\n decimal.Decimal('0')])\n\n\[email protected]\ndef na_cmp():\n return lambda x, y: x.is_nan() and y.is_nan()\n\n\[email protected]\ndef na_value():\n return decimal.Decimal(\"NaN\")\n\n\[email protected]\ndef data_for_grouping():\n b = decimal.Decimal('1.0')\n a = decimal.Decimal('0.0')\n c = decimal.Decimal('2.0')\n na = decimal.Decimal('NaN')\n return DecimalArray([b, b, na, na, a, a, b, c])\n\n\nclass BaseDecimal(object):\n\n def assert_series_equal(self, left, right, *args, **kwargs):\n\n left_na = left.isna()\n right_na = right.isna()\n\n tm.assert_series_equal(left_na, right_na)\n return tm.assert_series_equal(left[~left_na],\n right[~right_na],\n *args, **kwargs)\n\n def assert_frame_equal(self, left, right, *args, **kwargs):\n # TODO(EA): select_dtypes\n tm.assert_index_equal(\n left.columns, right.columns,\n exact=kwargs.get('check_column_type', 'equiv'),\n check_names=kwargs.get('check_names', True),\n check_exact=kwargs.get('check_exact', False),\n check_categorical=kwargs.get('check_categorical', True),\n obj='{obj}.columns'.format(obj=kwargs.get('obj', 'DataFrame')))\n\n decimals = (left.dtypes == 
'decimal').index\n\n for col in decimals:\n self.assert_series_equal(left[col], right[col],\n *args, **kwargs)\n\n left = left.drop(columns=decimals)\n right = right.drop(columns=decimals)\n tm.assert_frame_equal(left, right, *args, **kwargs)\n\n\nclass TestDtype(BaseDecimal, base.BaseDtypeTests):\n pass\n\n\nclass TestInterface(BaseDecimal, base.BaseInterfaceTests):\n pass\n\n\nclass TestConstructors(BaseDecimal, base.BaseConstructorsTests):\n pass\n\n\nclass TestReshaping(BaseDecimal, base.BaseReshapingTests):\n pass\n\n\nclass TestGetitem(BaseDecimal, base.BaseGetitemTests):\n\n def test_take_na_value_other_decimal(self):\n arr = DecimalArray([decimal.Decimal('1.0'),\n decimal.Decimal('2.0')])\n result = arr.take([0, -1], allow_fill=True,\n fill_value=decimal.Decimal('-1.0'))\n expected = DecimalArray([decimal.Decimal('1.0'),\n decimal.Decimal('-1.0')])\n self.assert_extension_array_equal(result, expected)\n\n\nclass TestMissing(BaseDecimal, base.BaseMissingTests):\n pass\n\n\nclass TestMethods(BaseDecimal, base.BaseMethodsTests):\n @pytest.mark.parametrize('dropna', [True, False])\n @pytest.mark.xfail(reason=\"value_counts not implemented yet.\")\n def test_value_counts(self, all_data, dropna):\n all_data = all_data[:10]\n if dropna:\n other = np.array(all_data[~all_data.isna()])\n else:\n other = all_data\n\n result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()\n expected = pd.Series(other).value_counts(dropna=dropna).sort_index()\n\n tm.assert_series_equal(result, expected)\n\n\nclass TestCasting(BaseDecimal, base.BaseCastingTests):\n pass\n\n\nclass TestGroupby(BaseDecimal, base.BaseGroupbyTests):\n pass\n\n\ndef test_series_constructor_coerce_data_to_extension_dtype_raises():\n xpr = (\"Cannot cast data to extension dtype 'decimal'. 
Pass the \"\n \"extension array directly.\")\n with tm.assert_raises_regex(ValueError, xpr):\n pd.Series([0, 1, 2], dtype=DecimalDtype())\n\n\ndef test_series_constructor_with_same_dtype_ok():\n arr = DecimalArray([decimal.Decimal('10.0')])\n result = pd.Series(arr, dtype=DecimalDtype())\n expected = pd.Series(arr)\n tm.assert_series_equal(result, expected)\n\n\ndef test_series_constructor_coerce_extension_array_to_dtype_raises():\n arr = DecimalArray([decimal.Decimal('10.0')])\n xpr = r\"Cannot specify a dtype 'int64' .* \\('decimal'\\).\"\n\n with tm.assert_raises_regex(ValueError, xpr):\n pd.Series(arr, dtype='int64')\n\n\ndef test_dataframe_constructor_with_same_dtype_ok():\n arr = DecimalArray([decimal.Decimal('10.0')])\n\n result = pd.DataFrame({\"A\": arr}, dtype=DecimalDtype())\n expected = pd.DataFrame({\"A\": arr})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_dataframe_constructor_with_different_dtype_raises():\n arr = DecimalArray([decimal.Decimal('10.0')])\n\n xpr = \"Cannot coerce extension array to dtype 'int64'. \"\n with tm.assert_raises_regex(ValueError, xpr):\n pd.DataFrame({\"A\": arr}, dtype='int64')\n"
] | [
[
"pandas.Series",
"pandas.util.testing.assert_raises_regex",
"pandas.util.testing.assert_series_equal",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.24",
"0.20",
"0.25"
],
"scipy": [],
"tensorflow": []
}
] |
sethuiyer/mlhub | [
"6be271c0070a0c0bb90dd92aceb344e7415bb1db"
] | [
"Pokemon Identifier/poketype.py"
] | [
"import pickle\nimport pandas as pd \nfrom sklearn.utils import resample\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.svm import LinearSVC\nimport os \n\nclass PokemonTypeIdentifier():\n \"\"\"\n This class identifies the pokemon type of a user given pokemon name.\n \"\"\"\n def __init__(self):\n self.isModelLoaded = False\n self.isFileFound = False\n if os.path.isfile(\"models/tfidf.pickle\") and os.path.isfile(\"models/model.pickle\"):\n self.tfidf = pickle.load(open(\"models/tfidf.pickle\",\"rb\"))\n self.model = pickle.load(open(\"models/model.pickle\",\"rb\"))\n self.isModelLoaded = True\n if os.path.isfile('updated_pokemon.csv'):\n df = pd.read_csv('updated_pokemon.csv')\n category = list(dict(df['Type 1'].value_counts()).keys())\n df_majority = df[df['Type 1'] == 'Water']\n for i in range(1,len(category)):\n df_minority = df[df['Type 1'] == category[i]]\n df_minority_upsampled = resample(df_minority, \n replace=True, # sample with replacement\n n_samples=103, # to match majority class\n random_state=123) # reproducible results\n df_majority = pd.concat([df_majority, df_minority_upsampled])\n encoded_labels,decoded_labels = pd.factorize(df_majority['Type 1'])\n self.decoded_labels = decoded_labels\n self.isFileFound = True\n if not self.isModelLoaded and self.isFileFound:\n \n\n self.tfidf = TfidfVectorizer(min_df=2, max_features = None, strip_accents = 'unicode', norm='l2',\n analyzer = 'char', token_pattern = r'\\w{1,}',ngram_range=(1,5),\n use_idf = 1, smooth_idf = 1, sublinear_tf = 1, stop_words = 'english')\n\n features = self.tfidf.fit_transform(df_majority['Name']).toarray()\n encoded_labels,decoded_labels = pd.factorize(df_majority['Type 1'])\n self.model = LinearSVC().fit(features,encoded_labels)\n self.decoded_labels = decoded_labels\n if not self.isModelLoaded or not self.isFileFound:\n raise AttributeError(\"Required File Doesn't Exist.\")\n def predict_type(self,poke_str):\n \"\"\"\n Finds the probable Pokemon type 
given the user string.\n Input: A string, of which type is to be identified.\n Output: The Probable pokemon type \n \"\"\"\n return self.decoded_labels[self.model.predict(self.tfidf.transform([poke_str]))[0]]\n\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.factorize",
"sklearn.svm.LinearSVC",
"sklearn.utils.resample",
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
feynmanliang/beanmachine | [
"5dea2b9f6387f2f7fd1e53b0915a1b8405f2b46b",
"225114d9964b90c3a49adddc4387b4a47d1b4262",
"5dea2b9f6387f2f7fd1e53b0915a1b8405f2b46b"
] | [
"src/beanmachine/ppl/legacy/inference/abstract_infer.py",
"src/beanmachine/ppl/world/utils.py",
"src/beanmachine/ppl/inference/proposer/single_site_random_walk_proposer.py"
] | [
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nimport platform\nimport random\nfrom abc import ABCMeta, abstractmethod\nfrom typing import ClassVar, Dict, List\n\nimport torch\nimport torch.multiprocessing as mp\nfrom beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples\nfrom beanmachine.ppl.inference.utils import (\n _verify_queries_and_observations,\n VerboseLevel,\n)\nfrom beanmachine.ppl.legacy.world import World\nfrom beanmachine.ppl.model.rv_identifier import RVIdentifier\nfrom beanmachine.ppl.model.utils import LogLevel\nfrom torch import Tensor\nfrom torch.multiprocessing import Queue\n\n\nLOGGER = logging.getLogger(\"beanmachine\")\n\n\nclass AbstractInference(object, metaclass=ABCMeta):\n \"\"\"\n Abstract inference object that all inference algorithms inherit from.\n \"\"\"\n\n world_: World\n _rand_int_max: ClassVar[int] = 2**62\n\n def __init__(self):\n self.initial_world_ = World()\n self.world_ = self.initial_world_\n self.queries_ = []\n self.observations_ = {}\n\n @staticmethod\n def set_seed(seed: int):\n torch.manual_seed(seed)\n random.seed(seed)\n\n def initialize_world(\n self,\n initialize_from_prior: bool = False,\n ):\n \"\"\"\n Initializes the world variables with queries and observation calls.\n\n :param initialize_from_prior: boolean to initialize samples from prior\n approximation.\n \"\"\"\n self.world_ = self.initial_world_.copy()\n self.world_.set_observations(self.observations_)\n self.world_.set_initialize_from_prior(initialize_from_prior)\n\n for node in self.observations_:\n # makes the call for the observation node, which will run sample(node())\n # that results in adding its corresponding Variable and its dependent\n # Variable to the world\n self.world_.call(node)\n for node in self.queries_:\n # makes the call for the query node, which will run 
sample(node())\n # that results in adding its corresponding Variable and its dependent\n # Variable to the world.\n self.world_.call(node)\n self.world_.accept_diff()\n\n def reset(self):\n \"\"\"\n Resets world, mode and observation\n \"\"\"\n self.world_ = self.initial_world_.copy()\n self.queries_ = []\n self.observations_ = {}\n\n\nclass AbstractMCInference(AbstractInference, metaclass=ABCMeta):\n \"\"\"\n Abstract inference object for Monte Carlo inference.\n \"\"\"\n\n _observations_must_be_rv: bool = True\n\n @staticmethod\n def set_seed_for_chain(random_seed: int, chain: int):\n AbstractInference.set_seed(random_seed + chain * 31)\n\n @abstractmethod\n def _infer(\n self,\n num_samples: int,\n num_adaptive_samples: int = 0,\n verbose: VerboseLevel = VerboseLevel.LOAD_BAR,\n initialize_from_prior: bool = False,\n ) -> Dict[RVIdentifier, Tensor]:\n \"\"\"\n Abstract method to be implemented by classes that inherit from\n AbstractInference.\n \"\"\"\n raise NotImplementedError(\"Inference algorithm must implement _infer.\")\n\n def _parallel_infer(\n self,\n queue: Queue,\n chain: int,\n num_samples: int,\n random_seed: int,\n num_adaptive_samples: int,\n verbose: VerboseLevel,\n ):\n try:\n AbstractMCInference.set_seed_for_chain(random_seed, chain)\n rv_dict = self._infer(num_samples, num_adaptive_samples, verbose)\n string_dict = {str(rv): tensor.detach() for rv, tensor in rv_dict.items()}\n queue.put((None, chain, string_dict))\n except BaseException as x:\n LOGGER.log(\n LogLevel.ERROR.value, \"Error: Parallel infererence chain failed.\"\n )\n queue.put((x, chain, {}))\n\n def infer(\n self,\n queries: List[RVIdentifier],\n observations: Dict[RVIdentifier, Tensor],\n num_samples: int,\n num_chains: int = 4,\n run_in_parallel: bool = False,\n num_adaptive_samples: int = 0,\n verbose: VerboseLevel = VerboseLevel.LOAD_BAR,\n initialize_from_prior: bool = False,\n ) -> MonteCarloSamples:\n \"\"\"\n Run inference algorithms and reset the world/mode at the 
end.\n\n All tensors in `queries` and `observations` must be allocated on the\n same `torch.device`. Inference algorithms will attempt to allocate\n intermediate tensors on the same device.\n\n :param queries: random variables to query\n :param observations: observed random variables with their values\n :param num_samples: number of samples excluding adaptation to collect.\n :param num_chains: number of chains to run\n :param num_adaptive_samples: number of steps to allow proposer adaptation.\n :param verbose: Integer indicating how much output to print to stdio\n :param initialize_from_prior: boolean to initialize samples from prior\n :returns: view of data for chains and samples for query\n \"\"\"\n\n _verify_queries_and_observations(\n queries, observations, self._observations_must_be_rv\n )\n random_seed = torch.randint(AbstractInference._rand_int_max, (1,)).int().item()\n self.queries_ = queries\n self.observations_ = observations\n if num_chains > 1 and run_in_parallel:\n if platform.system() == \"Windows\":\n raise RuntimeError(\n \"Running inference in parallel is not currently support on Windows\"\n )\n\n ctx = mp.get_context(\"fork\")\n manager = ctx.Manager()\n q = manager.Queue()\n for chain in range(num_chains):\n p = ctx.Process(\n target=self._parallel_infer,\n args=(\n q,\n chain,\n num_samples,\n random_seed,\n num_adaptive_samples,\n verbose,\n ),\n )\n p.start()\n\n chain_queries = [{}] * num_chains\n for _ in range(num_chains):\n (error, chain, string_dict) = q.get()\n if error is not None:\n raise error\n rv_dict = {rv: string_dict[str(rv)] for rv in queries}\n chain_queries[chain] = rv_dict\n else:\n chain_queries = []\n for chain in range(num_chains):\n AbstractMCInference.set_seed_for_chain(random_seed, chain)\n rv_dicts = self._infer(\n num_samples,\n num_adaptive_samples,\n verbose,\n initialize_from_prior,\n )\n chain_queries.append(rv_dicts)\n monte_carlo_samples = MonteCarloSamples(chain_queries, num_adaptive_samples)\n self.reset()\n 
return monte_carlo_samples\n",
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom collections.abc import Iterable\nfrom typing import Iterable as IterableType, overload, Type, Union\n\nimport torch\nimport torch.distributions as dist\nimport torch.distributions.constraints as constraints\nfrom torch.distributions import Distribution\nfrom torch.distributions.transforms import Transform\n\n\nConstraintType = Union[constraints.Constraint, Type]\n\n\nclass BetaDimensionTransform(Transform):\n \"\"\"\n Volume preserving transformation to the Beta distribution support.\n \"\"\"\n\n bijective = True\n domain = constraints.real\n codomain = constraints.real_vector\n\n def __eq__(self, other):\n return isinstance(other, BetaDimensionTransform)\n\n def _call(self, x):\n return torch.cat((x.unsqueeze(-1), (1 - x).unsqueeze(-1)), -1)\n\n def _inverse(self, y):\n return y[..., 0] / y.sum(dim=-1)\n\n def forward_shape(self, shape):\n return shape + (2,)\n\n def inverse_shape(self, shape):\n return shape[:-1]\n\n def log_abs_det_jacobian(self, x, y):\n return torch.zeros_like(x)\n\n\ndef _unwrap(constraint: ConstraintType):\n if isinstance(constraint, constraints.independent):\n return _unwrap(constraint.base_constraint)\n return constraint if isinstance(constraint, type) else constraint.__class__\n\n\ndef _is_constraint_eq(constraint1: ConstraintType, constraint2: ConstraintType):\n return _unwrap(constraint1) == _unwrap(constraint2)\n\n\n@overload\ndef is_constraint_eq(\n constraint: ConstraintType, check_constraints: ConstraintType\n) -> bool:\n ...\n\n\n@overload\ndef is_constraint_eq(\n constraint: ConstraintType, check_constraints: IterableType[ConstraintType]\n) -> IterableType[bool]:\n ...\n\n\ndef is_constraint_eq(\n constraint: ConstraintType,\n check_constraints: Union[ConstraintType, IterableType[ConstraintType]],\n) -> Union[bool, IterableType[bool]]:\n 
\"\"\"\n This provides an equality check that works for different constraints\n specified in :mod:`torch.distributions.constraints`. If `constraint` is\n `constraints.Independent`, then the `base_constraint` is checked. If\n `check_constraints` is a single `Constraint` type or instance this\n returns a `True` if the given `constraint` matches `check_constraints`.\n Otherwise, if `check_constraints` is an iterable, this returns a `bool`\n list that represents an element-wise check.\n\n :param constraint: A constraint class or instance.\n :param check_constraints: A constraint class or instance or an iterable\n containing constraint classes or instances to check against.\n :returns: bool (or a list of bool) values indicating if the given constraint\n equals the constraint in `check_constraints`.\n \"\"\"\n if isinstance(check_constraints, Iterable):\n return [_is_constraint_eq(constraint, c) for c in check_constraints]\n return _is_constraint_eq(constraint, check_constraints)\n\n\ndef get_default_transforms(distribution: Distribution) -> dist.Transform:\n \"\"\"\n Get transforms of a distribution to transform it from constrained space\n into unconstrained space.\n\n :param distribution: the distribution to check\n :returns: a Transform that need to be applied to the distribution\n to transform it from constrained space into unconstrained space\n \"\"\"\n if distribution.support.is_discrete:\n return dist.transforms.identity_transform\n else:\n return dist.biject_to(distribution.support).inv\n\n\ndef initialize_value(distribution: Distribution, initialize_from_prior: bool = False):\n \"\"\"\n Initialized the Variable value\n\n :param initialize_from_prior: if true, returns sample from prior\n :returns: the value to the set the Variable value to\n \"\"\"\n sample_val = distribution.sample()\n if initialize_from_prior:\n return sample_val\n support = distribution.support\n if isinstance(support, dist.constraints.independent):\n support = support.base_constraint\n if 
initialize_from_prior:\n return sample_val\n elif is_constraint_eq(support, dist.constraints.real):\n return torch.zeros_like(sample_val)\n elif is_constraint_eq(support, dist.constraints.simplex):\n value = torch.ones_like(sample_val)\n return value / sample_val.shape[-1]\n elif is_constraint_eq(support, dist.constraints.greater_than):\n return (\n torch.ones(\n sample_val.shape, dtype=sample_val.dtype, device=sample_val.device\n )\n + support.lower_bound\n )\n elif is_constraint_eq(support, dist.constraints.boolean):\n return dist.Bernoulli(torch.ones_like(sample_val) / 2).sample()\n elif is_constraint_eq(support, dist.constraints.interval):\n lower_bound = torch.ones_like(sample_val) * support.lower_bound\n upper_bound = torch.ones_like(sample_val) * support.upper_bound\n return dist.Uniform(lower_bound, upper_bound).sample()\n elif is_constraint_eq(support, dist.constraints.integer_interval):\n integer_interval = support.upper_bound - support.lower_bound\n return dist.Categorical(\n (torch.ones(integer_interval, device=sample_val.device)).expand(\n sample_val.shape + (integer_interval,)\n )\n ).sample()\n elif is_constraint_eq(support, dist.constraints.nonnegative_integer):\n return (\n torch.ones(\n sample_val.shape, dtype=sample_val.dtype, device=sample_val.device\n )\n + support.lower_bound\n )\n return sample_val\n",
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nimport torch.distributions as dist\nfrom beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import (\n SingleSiteAncestralProposer,\n)\nfrom beanmachine.ppl.world import World\nfrom beanmachine.ppl.world.utils import is_constraint_eq\n\n\nclass SingleSiteRandomWalkProposer(SingleSiteAncestralProposer):\n def __init__(\n self,\n node,\n step_size: float,\n ):\n self.step_size = step_size\n self.target_acc_rate = {False: torch.tensor(0.44), True: torch.tensor(0.234)}\n self._iter = 0\n super().__init__(node)\n\n def do_adaptation(self, world, accept_log_prob, *args, **kwargs) -> None:\n if torch.isnan(accept_log_prob):\n return\n accept_prob = accept_log_prob.exp()\n val_shape = world[self.node].shape\n if len(val_shape) == 0 or val_shape[0] == 1:\n target_acc_rate = self.target_acc_rate[False]\n c = torch.reciprocal(target_acc_rate)\n else:\n target_acc_rate = self.target_acc_rate[True]\n c = torch.reciprocal(1.0 - target_acc_rate)\n\n new_step_size = self.step_size * torch.exp(\n (accept_prob - target_acc_rate) * c / (self._iter + 1.0)\n )\n self._iter += 1\n\n self.step_size = new_step_size.item()\n\n def get_proposal_distribution(self, world: World) -> dist.Distribution:\n \"\"\"Propose a new value for self.node using the prior distribution.\"\"\"\n node = world.get_variable(self.node)\n node_support = node.distribution.support\n\n if is_constraint_eq(node_support, dist.constraints.real):\n return dist.Normal(node.value, self.step_size)\n elif any(\n is_constraint_eq(\n node_support,\n (dist.constraints.greater_than, dist.constraints.greater_than_eq),\n )\n ):\n lower_bound = node_support.lower_bound\n proposal_distribution = self.gamma_dist_from_moments(\n node.value - lower_bound, self.step_size**2\n )\n transform = 
dist.AffineTransform(loc=lower_bound, scale=1.0)\n transformed_proposal = dist.TransformedDistribution(\n proposal_distribution, transform\n )\n return transformed_proposal\n elif is_constraint_eq(node_support, dist.constraints.interval):\n lower_bound = node_support.lower_bound\n width = node_support.upper_bound - lower_bound\n mu = (node.value - lower_bound) / width\n sigma = (\n torch.ones(node.value.shape, device=node.value.device)\n * self.step_size\n / width\n )\n proposal_distribution = self.beta_dist_from_moments(mu, sigma)\n transform = dist.AffineTransform(loc=lower_bound, scale=width)\n transformed_proposal = dist.TransformedDistribution(\n proposal_distribution, transform\n )\n return transformed_proposal\n elif is_constraint_eq(node_support, dist.constraints.simplex):\n proposal_distribution = self.dirichlet_dist_from_moments(\n node.value, self.step_size\n )\n return proposal_distribution\n else:\n # default to ancestral\n return super().get_proposal_distribution(world)\n\n def gamma_dist_from_moments(self, expectation, sigma):\n \"\"\"\n Returns a Gamma distribution.\n\n :param expectation: expectation value\n :param sigma: sigma value\n :returns: returns the Beta distribution given mu and sigma.\n \"\"\"\n beta = expectation / (sigma**2)\n beta = torch.clamp(beta, min=1e-3)\n alpha = expectation * beta\n alpha = torch.clamp(alpha, min=1e-3)\n distribution = dist.Gamma(concentration=alpha, rate=beta)\n return distribution\n\n def beta_dist_from_moments(self, mu, sigma):\n \"\"\"\n Returns a Beta distribution.\n\n :param mu: mu value\n :param sigma: sigma value\n :returns: returns the Beta distribution given mu and sigma.\n \"\"\"\n mu = torch.clamp(mu, 1e-3, 1 - 1e-3)\n sigma = torch.clamp(sigma, 1e-3, (mu * (1 - mu)).min().item())\n \"\"\"\n https://stats.stackexchange.com/questions/12232/calculating-the-\n parameters-of-a-beta-distribution-using-the-mean-and-variance\n \"\"\"\n alpha = ((1.0 - mu) / (sigma**2) - (1.0 / mu)) * (mu**2)\n beta = alpha 
* (1.0 / mu - 1.0)\n distribution = dist.Beta(concentration1=alpha, concentration0=beta)\n return distribution\n\n def dirichlet_dist_from_moments(self, mu, sigma):\n \"\"\"\n Returns a Dirichlet distribution. The variances of a Dirichlet\n distribution are inversely proportional to the norm of the concentration\n vector. However, variance is only set as a scalar, not as a vector.\n So the individual variances of the Dirichlet are not tuned, only the\n magnitude of the entire vector.\n\n :param mu: mu value\n :param sigma: sigma value\n :returns: returns the Dirichlet distribution given mu and sigma.\n \"\"\"\n alpha = mu / (torch.norm(mu) * sigma**2)\n return dist.Dirichlet(concentration=alpha)\n"
] | [
[
"torch.multiprocessing.get_context",
"torch.manual_seed",
"torch.randint"
],
[
"torch.ones",
"torch.zeros_like",
"torch.distributions.Uniform",
"torch.ones_like",
"torch.distributions.biject_to"
],
[
"torch.distributions.TransformedDistribution",
"torch.norm",
"torch.ones",
"torch.distributions.Gamma",
"torch.isnan",
"torch.distributions.Beta",
"torch.tensor",
"torch.exp",
"torch.distributions.AffineTransform",
"torch.reciprocal",
"torch.distributions.Dirichlet",
"torch.distributions.Normal",
"torch.clamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
UWaterloo-ASL/LAS_Gym | [
"916043d59aeca3deb8875abb280dcecbdcd50f0f",
"916043d59aeca3deb8875abb280dcecbdcd50f0f"
] | [
"ArchiveTestCode/Red Light Excited Visitor Simulator/LivingArchitectureEnv.py",
"interaction_Single_Agent_and_Pendulum_Env.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 8 16:12:32 2018\n\n@author: jack.lingheng.meng\n\"\"\"\ntry:\n import vrep\nexcept:\n print ('--------------------------------------------------------------')\n print ('\"vrep.py\" could not be imported. This means very probably that')\n print ('either \"vrep.py\" or the remoteApi library could not be found.')\n print ('Make sure both are in the same folder as this file,')\n print ('or appropriately adjust the file \"vrep.py\"')\n print ('--------------------------------------------------------------')\n print ('')\n\nimport gym\nfrom gym import spaces\nimport time\nimport numpy as np\nimport warnings\n\nclass LivingArchitectureEnv(gym.Env):\n def __init__(self):\n print ('Program started')\n # connect to V-REP server\n vrep.simxFinish(-1) # just in case, close all opened connections\n self.clientID = vrep.simxStart('127.0.0.1',19997,True,True,5000,5) # Connect to V-REP\n if self.clientID!=-1:\n print ('Connected to remote API server')\n else:\n print ('Failed connecting to remote API server')\n # start simulate\n self._def_op_mode = vrep.simx_opmode_blocking\n self._set_joint_op_mode = vrep.simx_opmode_oneshot\n self._set_light_op_mode = vrep.simx_opmode_oneshot\n self._set_visitor_op_mode = vrep.simx_opmode_oneshot\n \n # To get sensor data\n # vrep.simx_opmode_buffer: does not work, don't know why?\n # vrep.simx_opmode_blocking: too slow\n # vrep.simx_opmode_oneshot: works pretty good\n self._get_prox_op_mode = vrep.simx_opmode_oneshot \n self._get_light_op_mode = vrep.simx_opmode_oneshot\n \n \n \n vrep.simxStartSimulation(self.clientID, self._def_op_mode)\n \n # get object names and handles\n self._get_object_name_and_handle()\n \n # initialize action and observation space\n print(\"Initialize LAS action and observation space...\")\n self.prox_sensor_num = len(self.proxSensorHandles)\n self.smas_num = len(self.jointHandles)\n self.lights_num = len(self.lightHandles)\n self.sensors_dim 
= self.prox_sensor_num + self.lights_num * (1+3)\n self.actuators_dim = self.smas_num + self.lights_num * (1+3) # light state & color\n \n self.act_max = np.array([np.inf]*self.actuators_dim)\n self.act_min = - np.array([np.inf]*self.actuators_dim)\n self.obs_max = np.array([1.]*self.sensors_dim)\n self.obs_min = - np.array([1.]*self.sensors_dim)\n \n self.observation_space = spaces.Box(self.obs_min, self.obs_max)\n self.action_space = spaces.Box(self.act_min, self.act_max)\n print(\"Initialization of LAS done!\")\n \n # initialize Visitor action and observation space\n print(\"Initialize Visitor action and observation space...\")\n self.visitor_num = len(self.visitorHandles)\n self.visitor_action_dim = self.visitor_num * 2 # visitor's position (x,y,0)\n self.visitor_action_max = np.array([7,9]*self.visitor_num) # later we should find a way to automatic get this limit\n self.visitor_action_min = np.array([-7,-9]*self.visitor_num)\n self.visitor_action_space = spaces.Box(self.visitor_action_min, self.visitor_action_max)\n \n # initialize Single Visitor action and observation space\n print(\"Initialize Visitor action and observation space...\")\n self.single_visitor_action_dim = self.visitor_num * 2 # visitor's position (x,y,0)\n self.single_visitor_action_max = np.array([7,9]) # later we should find a way to automatic get this limit\n self.single_visitor_action_min = np.array([-7,-9])\n self.single_visitor_action_space = spaces.Box(self.single_visitor_action_min, self.single_visitor_action_max)\n \n print(\"Initialization of visitor done!\")\n \n self.reward = 0\n \n \n def _get_object_name_and_handle(self):\n \"\"\"\n # When call vrep.simxGetObjectGroupData to abstract object name and handle\n # choose appropriate objectType parameter:\n # joint: vrep.sim_object_joint_type\n # proximity sensor: vrep.sim_object_proximitysensor_type\n # light: vrep.sim_object_light_type\n # visitor target position: vrep.sim_object_dummy_type\n # visitor body: 
vrep.sim_object_shape_type\n \"\"\"\n dataType = 0 # 0: retrieves the object names (in stringData.)\n print(\"Get objects' names and handles ...\")\n\n # proximity sensor\n proxSensorIndex = []\n rc = vrep.simx_return_initialize_error_flag\n while rc != vrep.simx_return_ok:\n rc, proxSensorHandles, intData, floatData, proxSensorNames = vrep.simxGetObjectGroupData(self.clientID,vrep.sim_object_proximitysensor_type, dataType, self._def_op_mode)\n if rc==vrep.simx_return_ok:\n print ('Get Prox Sensor Success!!!!!') # display the reply from V-REP (in this case, just a string)\n for i, name in enumerate(proxSensorNames):\n if \"_node#\" in name:\n print(\"Proximity Sensor: {}, and handle: {}\".format(name, proxSensorHandles[i]))\n proxSensorIndex.append(i)\n break\n else:\n print ('Fail to get proximity sensors!!!')\n # light\n lightIndex = []\n rc = vrep.simx_return_initialize_error_flag\n while rc != vrep.simx_return_ok:\n rc, lightHandles, intData, floatData, lightNames = vrep.simxGetObjectGroupData(self.clientID,vrep.sim_object_light_type, dataType, self._def_op_mode)\n if rc==vrep.simx_return_ok:\n print ('Get Lihgt Success!!!!!') # display the reply from V-REP (in this case, just a string)\n for i, name in enumerate(lightNames):\n if \"_node#\" in name:\n print(\"Light: {}, and handle: {}\".format(name, lightHandles[i]))\n lightIndex.append(i)\n break\n else:\n print ('Fail to get lights!!!')\n # joint\n jointIndex = []\n rc = vrep.simx_return_initialize_error_flag\n while rc != vrep.simx_return_ok:\n rc, jointHandles, intData, floatData, jointNames = vrep.simxGetObjectGroupData(self.clientID,vrep.sim_object_joint_type, dataType, self._def_op_mode)\n if rc==vrep.simx_return_ok:\n print ('Get Joint Success!!!!!') # display the reply from V-REP (in this case, just a string)\n for i, name in enumerate(jointNames):\n if \"_node#\" in name:\n print(\"Joint: {}, and handle: {}\".format(name, jointHandles[i]))\n jointIndex.append(i)\n break\n else:\n print ('Fail to get 
joints!!!')\n \n # visitor targetPosition\n visitorIndex = []\n rc = vrep.simx_return_initialize_error_flag\n while rc != vrep.simx_return_ok:\n rc, visitorHandles, intData, floatData, visitorNames = vrep.simxGetObjectGroupData(self.clientID,vrep.sim_object_dummy_type, dataType, self._def_op_mode)\n if rc==vrep.simx_return_ok:\n print ('Get Visitor Success!!!!!') # display the reply from V-REP (in this case, just a string)\n for i, name in enumerate(visitorNames):\n if \"TargetPosition_Visitor#\" in name:\n print(\"Visitor: {}, and handle: {}\".format(name, visitorHandles[i]))\n visitorIndex.append(i)\n break\n else:\n print ('Fail to get visitors!!!')\n # visitor body\n visitorBodyIndex = []\n rc = vrep.simx_return_initialize_error_flag\n while rc != vrep.simx_return_ok:\n rc, visitorBodyHandles, intData, floatData, visitorBodyNames = vrep.simxGetObjectGroupData(self.clientID,vrep.sim_object_shape_type, dataType, self._def_op_mode)\n if rc==vrep.simx_return_ok:\n print ('Get Visitor Body Success!!!!!') # display the reply from V-REP (in this case, just a string)\n for i, name in enumerate(visitorBodyNames):\n if \"Body_Visitor#\" in name:\n print(\"Visitor body: {}, and handle: {}\".format(name, visitorBodyHandles[i]))\n visitorBodyIndex.append(i)\n break\n else:\n print ('Fail to get visitors body!!!')\n \n proxSensorHandles = np.array(proxSensorHandles)\n proxSensorNames = np.array(proxSensorNames)\n lightHandles = np.array(lightHandles)\n lightNames = np.array(lightNames)\n jointHandles = np.array(jointHandles)\n jointNames = np.array(jointNames)\n visitorHandles = np.array(visitorHandles)\n visitorNames = np.array(visitorNames)\n visitorBodyHandles = np.array(visitorBodyHandles)\n visitorBodyNames = np.array(visitorBodyNames)\n # All objects handels and names\n self.proxSensorHandles = proxSensorHandles[proxSensorIndex]\n self.proxSensorNames = proxSensorNames[proxSensorIndex]\n self.lightHandles = lightHandles[lightIndex]\n self.lightNames = 
lightNames[lightIndex]\n self.jointHandles = jointHandles[jointIndex]\n self.jointNames = jointNames[jointIndex]\n self.visitorNames = visitorNames[visitorIndex]\n self.visitorHandles = visitorHandles[visitorIndex]\n self.visitorBodyNames = visitorBodyNames[visitorBodyIndex]\n self.visitorBodyHandles = visitorBodyHandles[visitorBodyIndex]\n\n def step_LAS(self, action):\n \"\"\"\n Take one step of action\n Input: action\n Output: observation, reward, done, info\n \"\"\"\n #\n action = np.clip(action, self.act_min, self.act_max)\n # split action for light and sma\n action_smas = action[:self.smas_num]\n action_lights_state = action[self.smas_num:self.smas_num+self.lights_num]\n action_lights_state = action_lights_state.astype(int)\n action_lights_color = action[self.smas_num+self.lights_num:]\n # taking action\n #start = time.time()\n vrep.simxPauseCommunication(self.clientID,True) #temporarily halting the communication thread \n self._set_all_joint_position(action_smas)\n self._set_all_light_state(action_lights_state,action_lights_color)\n vrep.simxPauseCommunication(self.clientID,False) #and evaluated at the same time\n #print(\"Action running time: {}\".format(time.time()-start))\n \n # observe\n #start = time.time()\n self._self_observe()\n #print(\"Observation running time: {}\".format(time.time()-start))\n # caculate reward\n self._reward()\n \n done = False\n \n return self.observation, self.reward, done, []\n \n def step_visitor(self, position):\n \"\"\"\n This interface is for change visitor's position.\n Input: position\n Output: observation, reward, done, info\n \"\"\"\n #\n position = np.clip(position,self.visitor_action_min, self.visitor_action_max)\n vrep.simxPauseCommunication(self.clientID,True)\n self._set_all_visitor_position(position)\n vrep.simxPauseCommunication(self.clientID,False)\n \n self._self_observe()\n self._reward_visitor()\n done = False\n return self.observation, self.reward_visitor, done, [] \n\n def step_single_visitor(self, name, 
position):\n \"\"\"\n This interface is for change visitor's position.\n Input: position\n Output: observation, reward, done, info\n \"\"\"\n #\n position = np.clip(position,self.single_visitor_action_min, self.single_visitor_action_max)\n #vrep.simxPauseCommunication(self.clientID,True)\n self._set_single_visitor_position(name, position)\n #vrep.simxPauseCommunication(self.clientID,False)\n \n self._self_observe()\n self._reward_visitor()\n done = False\n return self.observation, self.reward_visitor, done, [] \n \n def step_red_light_excited_visitor(self, targetPositionName, bodyName, action):\n \"\"\"\n A specific interface for red excited visitor:\n return observation:\n light state: observation[:lightNum]\n light color: observation[lightNum:lightNum * 4]\n light position: observation[lightNum * 4:lightNum * 5]\n visitor position: observation[lightNum*5:]\n \"\"\"\n move = action[0]\n position = action[1:3] # we can leave z coordinate\n #print(\"Set position:{}\".format(position))\n position = np.clip(position,self.single_visitor_action_min, self.single_visitor_action_max)\n # if move == 1, move; otherwise don't move.\n if move == 1:\n #vrep.simxPauseCommunication(self.clientID,True)\n #print(\"Set Position in Vrep: {}\".format(position))\n self._set_single_visitor_position(targetPositionName, position)\n #vrep.simxPauseCommunication(self.clientID,False)\n \n observation = self._self_observe_for_red_excited_visitor(bodyName)\n #print(\"len(observation):{}\".format(len(observation)))\n reward = 0\n done = False\n return observation, reward, done, []\n \n def _set_single_visitor_position(self, targetPositionName, position):\n visitorIndex = np.where(self.visitorNames == targetPositionName)\n if len(visitorIndex[0]) == 0:\n print(\"Not found visitor: {}\".format(targetPositionName))\n else:\n vrep.simxSetObjectPosition(self.clientID, self.visitorHandles[visitorIndex], -1, [position[0],position[1],0], self._set_visitor_op_mode)\n def 
_get_single_visitor_body_position(self, bodyName):\n \"\"\"\n Give bodyName, return bodyPosition\n \"\"\"\n bodyPosition = np.zeros(3)\n visitorBodyIndex = np.where(self.visitorBodyNames == bodyName)\n if len(visitorBodyIndex[0]) == 0:\n print(\"Not found visitor: {}\".format(bodyName))\n else:\n res, bodyPosition = vrep.simxGetObjectPosition(self.clientID, self.visitorBodyHandles[visitorBodyIndex], -1, self._get_light_op_mode)\n #print(\"Visitor position: {}\".format(position))\n return np.array(bodyPosition)\n \n def _set_all_visitor_position(self, position):\n visitorNum = len(self.visitorHandles)\n for i in range(visitorNum):\n vrep.simxSetObjectPosition(self.clientID, self.visitorHandles[i], -1, [position[i*2],position[i*2+1],0], self._set_visitor_op_mode)\n \n def _set_all_joint_position(self, targetPosition):\n jointNum = len(self.jointHandles)\n for i in range(jointNum):\n vrep.simxSetJointTargetPosition(self.clientID, self.jointHandles[i], targetPosition[i], self._set_joint_op_mode)\n \n def _set_all_light_state(self, targetState, targetColor):\n lightNum = len(self.lightHandles)\n if len(targetState) != lightNum:\n print(\"len(targetState) != lightNum\")\n \n # inner function: remote function call to set light state\n def _set_light_state(clientID, name, handle, targetState, targetColor, opMode):\n\n emptyBuff = bytearray()\n res,retInts,retFloats,retStrings,retBuffer = vrep.simxCallScriptFunction(clientID,\n name,\n vrep.sim_scripttype_childscript,\n 'setLightStateAndColor',\n [handle, targetState],targetColor,[],emptyBuff,\n opMode)\n if res != vrep.simx_return_ok:\n warnings.warn(\"Remote function call: setLightStateAndColor fail in Class AnyLight.\")\n # inner function end\n for i in range(lightNum):\n _set_light_state(self.clientID, str(self.lightNames[i]), self.lightHandles[i], targetState[i], targetColor[i*3:(i+1)*3], self._set_light_op_mode)\n\n def _reward(self):\n \"\"\" calculate reward based on observation of prximity sensor\"\"\"\n 
self.reward = np.mean(self.observation[:self.prox_sensor_num])\n return self.reward\n \n def _reward_visitor(self):\n \"\"\"\n Calculate reward for visitor\n \"\"\"\n self.reward_visitor = 0\n return self.reward_visitor\n \n def _self_observe(self):\n \"\"\"\n This observe function is for LAS:\n proximity sensors\n light state\n light color\n \"\"\"\n proxStates, proxPosition = self._get_all_prox_data()\n lightStates, lightDiffsePart, lightSpecularPart = self._get_all_light_data()\n self.observation = np.concatenate((proxStates, lightStates, lightDiffsePart.flatten()))\n return self.observation\n \n def _self_observe_for_red_excited_visitor(self,bodyName):\n \"\"\"\n This obervave function is for visitors:\n light state: observation[:lightNum]\n light color: observation[lightNum:lightNum * 4]\n light position: observation[lightNum * 4:lightNum * 5]\n visitor position: observation[lightNum*5:]\n \"\"\"\n lightStates, lightDiffsePart, lightSpecularPart = self._get_all_light_data()\n lightPositions = self._get_all_light_position()\n visitorBodyPosition = self._get_single_visitor_body_position(bodyName)\n self.obser_for_red_light_excited_visitor = np.concatenate((lightStates,\n lightDiffsePart.flatten(),\n lightPositions.flatten(),\n visitorBodyPosition.flatten()))\n #print(\"length self.obser_for_red_light_excited_visitor:{}\".format(len(self.obser_for_red_light_excited_visitor)))\n return self.obser_for_red_light_excited_visitor\n \n def _get_all_prox_data(self):\n \"\"\"\n Get all proximity sensory data\n \"\"\"\n proxSensorNum = len(self.proxSensorHandles)\n proxStates = np.zeros(proxSensorNum)\n proxPosition = np.zeros([proxSensorNum, 3])\n for i in range(proxSensorNum):\n code, proxStates[i], proxPosition[i,:], handle, snv = vrep.simxReadProximitySensor(self.clientID, self.proxSensorHandles[i], self._get_prox_op_mode)\n return proxStates, proxPosition\n \n def _get_all_light_data(self):\n \"\"\"\n Get all light data:\n return:\n lightStates, lightDiffsePart, 
lightSpecularPart\n \"\"\"\n lightNum = len(self.lightHandles)\n #print(\"lightNum:{}\".format(lightNum))\n lightStates = np.zeros(lightNum)\n lightDiffsePart = np.zeros([lightNum,3])\n lightSpecularPart = np.zeros([lightNum,3])\n \n # inner function to get light state and color\n def _get_light_state_and_color(clientID, name , handle, op_mode):\n emptyBuff = bytearray()\n res,retInts,retFloats,retStrings,retBuffer=vrep.simxCallScriptFunction(clientID,\n name,\n vrep.sim_scripttype_childscript,\n 'getLightStateAndColor',\n [handle],[],[],emptyBuff,\n op_mode)\n if res==vrep.simx_return_ok:\n #print ('getLightStateAndColor works! ',retStrings[0]) # display the reply from V-REP (in this case, just a string)\n lightState = retInts[0]\n diffusePart = [retFloats[0],retFloats[1],retFloats[2]]\n specularPart = retFloats[3],retFloats[4],retFloats[5]\n return lightState, diffusePart, specularPart\n else:\n warnings.warn(\"Remote function call: getLightStateAndColor fail in Class AnyLight.\")\n return -1, [0,0,0], [0,0,0]\n # inner function end\n \n for i in range(lightNum):\n lightStates[i], lightDiffsePart[i,:], lightSpecularPart[i,:] = _get_light_state_and_color(self.clientID, str(self.lightNames[i]), self.lightHandles[i], self._get_light_op_mode)\n \n return lightStates, lightDiffsePart, lightSpecularPart\n \n def _get_all_light_position(self):\n \"\"\"\n Get all lights position:\n return:\n lightPositions\n \"\"\"\n lightNum = self.lights_num\n #print(\"_get_all_light_position lightNum:{}\".format(lightNum))\n lightPositions = np.zeros([lightNum, 3]) # 3: (x, y, z)\n for i in range(lightNum):\n res, lightPositions[i,:] = vrep.simxGetObjectPosition(self.clientID, self.lightHandles[i], -1, self._get_light_op_mode)\n return lightPositions\n \n def reset_env_for_LAS_red_light_excited_visitor(self, bodyName):\n vrep.simxStartSimulation(self.clientID, self._def_op_mode)\n observationForLAS = self._self_observe()\n observationForRedLightExcitedVisitor = 
self._self_observe_for_red_excited_visitor(bodyName)\n \n done = False\n rewardLAS = 0\n rewardVisitor = 0\n info = []\n return observationForLAS, observationForRedLightExcitedVisitor, rewardLAS, rewardVisitor, done, info\n \n def reset(self):\n #vrep.simxStopSimulation(self.clientID, self._def_op_mode)\n vrep.simxStartSimulation(self.clientID, self._def_op_mode)\n \n self._self_observe()\n self._reward()\n self._reward_visitor()\n done = False\n return self.observation, self.reward, self.reward_visitor, done\n \n def destroy(self):\n \"\"\"\n Finish simulation and release connection to server.\n \"\"\"\n vrep.simxStopSimulation(self.clientID, self._def_op_mode)\n vrep.simxFinish(self.clientID)",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 26 20:03:57 2018\n\n@author: jack.lingheng.meng\n\"\"\"\nimport logging\nimport tensorflow as tf\n#tf.logging.set_verbosity(tf.logging.ERROR)\nfrom datetime import datetime\nimport os\nimport gym\nimport matplotlib.pyplot as plt\n\nfrom Environment.LASEnv import LASEnv\nfrom LASAgent.InternalEnvOfAgent import InternalEnvOfAgent\n\n# Logging\nexperiment_results_dir = os.path.join(os.path.abspath('..'), 'ROM_Experiment_results')\nif not os.path.exists(experiment_results_dir):\n os.makedirs(experiment_results_dir)\nlogging.basicConfig(filename = os.path.join(experiment_results_dir,'ROM_experiment_'+datetime.now().strftime(\"%Y%m%d_%H%M%S\")+'.log'), \n level = logging.DEBUG,\n format='%(asctime)s:%(levelname)s: %(message)s')\n\nif __name__ == '__main__':\n sess = tf.Session()\n # Instantiate LAS environment object\n# envLAS = LASEnv('127.0.0.1', 19997, reward_function_type = 'occupancy')\n# observation = envLAS.reset()\n env = gym.make('Pendulum-v0')\n observation = env.reset()\n #######################################################################\n # Instatiate LAS-Agent #\n #######################################################################\n # Note: 1. Set load_pretrained_agent_flag to \"True\" only when you have \n # and want to load pretrained agent.\n # 2. 
Keep observation unchanged if using pretrained agent.\n agent_name = 'CartPole_v0'\n observation_space = env.observation_space\n action_space = env.action_space\n observation_space_name = [], \n action_space_name = []\n x_order_MDP = 1\n x_order_MDP_observation_type = 'concatenate_observation'\n occupancy_reward_type = 'IR_distance'\n interaction_mode = 'virtual_interaction'\n load_pretrained_agent_flag = False\n \n agent = InternalEnvOfAgent(agent_name, \n observation_space, \n action_space,\n observation_space_name, \n action_space_name,\n x_order_MDP,\n x_order_MDP_observation_type,\n occupancy_reward_type,\n interaction_mode,\n load_pretrained_agent_flag)\n #######################################################################\n max_episode_num = 1000\n try:\n for episode in range(max_episode_num):\n observation = env.reset()\n done = False\n reward = 0\n i = 1\n cumulative_reward = 0\n while not done:\n env.render()\n take_action_flag, action = agent.feed_observation(observation, reward, done)\n if take_action_flag == True:\n observation, reward, done, info = env.step(action)\n cumulative_reward += reward\n print('Episode: {}, Step: {}, Reward: {}'.format(episode, i, reward))\n i += 1\n plt.scatter(episode, cumulative_reward, c=\"r\")\n if episode % 5 == 0:\n plt.show()\n plt.pause(0.05)\n except KeyboardInterrupt:\n agent.stop()\n\n \n "
] | [
[
"numpy.clip",
"numpy.mean",
"numpy.array",
"numpy.where",
"numpy.zeros"
],
[
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.show",
"matplotlib.pyplot.pause",
"tensorflow.Session"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
Keck-DataReductionPipelines/KCWI_DRP | [
"5073a137a8edddf9ff894aef445f877d7186a355"
] | [
"kcwidrp/primitives/FluxCalibrate.py"
] | [
"from keckdrpframework.primitives.base_primitive import BasePrimitive\nfrom kcwidrp.primitives.kcwi_file_primitives import kcwi_fits_writer, \\\n kcwi_fits_reader, get_master_name, strip_fname\nfrom kcwidrp.core.kcwi_correct_extin import kcwi_correct_extin\n\nimport os\nimport numpy as np\nfrom scipy.interpolate import interp1d\n\nfrom astropy.io import fits as pf\nfrom astropy import units as u\nfrom astropy.nddata import CCDData\n\n\nclass FluxCalibrate(BasePrimitive):\n\n def __init__(self, action, context):\n BasePrimitive.__init__(self, action, context)\n self.logger = context.pipeline_logger\n\n def _pre_condition(self):\n \"\"\"\n Checks if we can calibrate flux based on the processing table\n :return:\n \"\"\"\n self.logger.info(\"Checking precondition for FluxCalibrate\")\n target_type = 'INVSENS'\n tab = self.context.proctab.search_proctab(frame=self.action.args.ccddata,\n target_type=target_type,\n nearest=True)\n self.logger.info(\"pre condition got %d invsens files, expected >= 1\"\n % len(tab))\n if len(tab) <= 0:\n self.action.args.invsname = None\n return False\n else:\n self.action.args.invsname = get_master_name(tab, target_type)\n return True\n\n def _perform(self):\n # Header keyword to update\n key = 'STDCOR'\n keycom = 'std corrected?'\n target_type = 'INVSENS'\n obj = None\n sky = None\n\n self.logger.info(\"Calibrating object flux\")\n tab = self.context.proctab.search_proctab(frame=self.action.args.ccddata,\n target_type=target_type,\n nearest=True)\n self.logger.info(\"%d invsens files found\" % len(tab))\n\n if self.action.args.invsname is not None:\n\n # read in master calibration (inverse sensitivity)\n invsname = self.action.args.invsname\n self.logger.info(\"Reading invsens: %s\" % invsname)\n hdul = pf.open(os.path.join(self.config.instrument.cwd,\n 'redux', invsname))\n mcal = hdul[0].data[1, :]\n mchdr = hdul[0].header\n hdul.close()\n # get dimensions\n mcsz = mcal.shape\n # get master std waves\n mcw0 = mchdr['CRVAL1']\n mcdw = 
mchdr['CDELT1']\n mcwav = mcw0 + np.arange(mcsz[0]) * mcdw\n # get master std image number\n msimgno = mchdr['FRAMENO']\n # get input object image dimensions\n sz = self.action.args.ccddata.data.shape\n # get object waves\n w0 = self.action.args.ccddata.header['CRVAL3']\n dw = self.action.args.ccddata.header['CD3_3']\n wav = w0 + np.arange(sz[0]) * dw\n # get exposure time\n expt = self.action.args.ccddata.header['XPOSURE']\n # resample onto object waves, if needed\n if w0 != mcw0 or dw != mcdw or wav[-1] != mcwav[-1] or \\\n sz[0] != mcsz[0]:\n self.logger.warning(\"wavelength scales not identical, \"\n \"resampling standard\")\n print(w0, mcw0, dw, mcdw, wav[-1], mcwav[-1], sz[0], mcsz[0])\n mcint = interp1d(mcwav, mcal, kind='cubic',\n fill_value='extrapolate')\n mscal = mcint(wav) * 1.e16 / expt\n else:\n mscal = mcal * 1.e16 / expt\n\n # extinction correct calibration\n kcwi_correct_extin(mscal, self.action.args.ccddata.header,\n logger=self.logger)\n # do calibration\n for isl in range(sz[2]):\n for ix in range(sz[1]):\n self.action.args.ccddata.data[:, ix, isl] *= mscal\n self.action.args.ccddata.uncertainty.array[:, ix, isl] *= \\\n mscal ** 2\n\n # check for obj, sky cubes\n if self.action.args.nasmask and self.action.args.numopen > 1:\n ofn = self.action.args.name\n # obj cube\n objfn = strip_fname(ofn) + '_ocubed.fits'\n full_path = os.path.join(\n self.config.instrument.cwd,\n self.config.instrument.output_directory, objfn)\n if os.path.exists(full_path):\n obj = kcwi_fits_reader(full_path)[0]\n # do calibration\n for isl in range(sz[2]):\n for ix in range(sz[1]):\n obj.data[:, ix, isl] *= mscal\n # sky cube\n skyfn = strip_fname(ofn) + '_scubed.fits'\n full_path = os.path.join(\n self.config.instrument.cwd,\n self.config.instrument.output_directory, skyfn)\n if os.path.exists(full_path):\n sky = kcwi_fits_reader(full_path)[0]\n # do calibration\n for isl in range(sz[2]):\n for ix in range(sz[1]):\n sky.data[:, ix, isl] *= mscal\n\n # units\n flam16_u = 
1.e16 * u.erg / (u.angstrom * u.cm ** 2 * u.s)\n self.action.args.ccddata.unit = flam16_u\n self.action.args.ccddata.uncertainty.unit = flam16_u\n if obj is not None:\n obj.unit = flam16_u\n if sky is not None:\n sky.unit = flam16_u\n # update header keywords\n self.action.args.ccddata.header[key] = (True, keycom)\n self.action.args.ccddata.header['MSFILE'] = (invsname,\n \"Master std filename\")\n self.action.args.ccddata.header['MSIMNO'] = (\n msimgno, 'master std image number')\n else:\n\n self.action.args.ccddata.header[key] = (False, keycom)\n\n log_string = FluxCalibrate.__module__\n self.action.args.ccddata.header['HISTORY'] = log_string\n\n # write out icubes image\n kcwi_fits_writer(self.action.args.ccddata,\n table=self.action.args.table,\n output_file=self.action.args.name,\n output_dir=self.config.instrument.output_directory,\n suffix=\"icubes\")\n self.context.proctab.update_proctab(frame=self.action.args.ccddata,\n suffix=\"icubes\",\n filename=self.action.args.name)\n self.context.proctab.write_proctab()\n\n # check for sky, obj cube\n if obj is not None:\n out_obj = CCDData(obj, meta=self.action.args.ccddata.header,\n unit=self.action.args.ccddata.unit)\n kcwi_fits_writer(\n out_obj, output_file=self.action.args.name,\n output_dir=self.config.instrument.output_directory,\n suffix=\"ocubes\")\n\n if sky is not None:\n out_sky = CCDData(sky, meta=self.action.args.ccddata.header,\n unit=self.action.args.ccddata.unit)\n kcwi_fits_writer(\n out_sky, output_file=self.action.args.name,\n output_dir=self.config.instrument.output_directory,\n suffix=\"scubes\")\n\n self.logger.info(log_string)\n\n return self.action.args\n # END: class FluxCalibrate()\n"
] | [
[
"numpy.arange",
"scipy.interpolate.interp1d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
shmouses/EELSpecNet | [
"a9aa085782a08e903e718012d5ce5979c18f7d23"
] | [
"main.py"
] | [
"import EELSpecNet\nimport GenerateData as gene\nimport tensorflow as tf\nimport numpy as np\nimport tensorflow.experimental.numpy as tnp\ntnp.experimental_enable_numpy_behavior()\n\ndef main():\n\n model = EELSpecNet.EELSpecNetModel_CNN_10D(2048)\n op = tf.keras.optimizers.Adam(learning_rate=5e-5)\n model.compile(optimizer=op, loss='BinaryCrossentropy', metrics=['mape', 'mse'])\n\n train_target, train_initial = gene.training_signal_set(6000, -2, 0.005, 0.015, 2048, 0.05)\n print(\"---------------- Training signal generation done !!! ----------------------\")\n\n tnp_convolved_loaded = tnp.asarray(train_initial)\n tnp_original_loaded = tnp.asarray(train_target)\n x_dim, e_dim = np.shape(train_initial)\n tnp_original_loaded += 0.001\n tnp_convolved_loaded += 0.001\n tnp_data_original = tnp_original_loaded.reshape((x_dim, 1, e_dim, 1))\n tnp_data_convolved = tnp_convolved_loaded.reshape((x_dim, 1, e_dim, 1))\n tnp_train_original = tnp_data_original[:, :, :, :]\n tnp_train_convolved = tnp_data_convolved[:, :, :, :]\n\n model.fit(tnp_train_convolved, tnp_train_original, validation_split=0.16, batch_size=16, epochs=1000)\n print(\"------------------------ Training done !!! ------------------------\")\n\n eval_target, eval_initial, eval_peaks, eval_psf, eval_metadata = gene.eval_signal_set(2000, -2, 0.005, 0.015, 2048,\n 0.05)\n print(\"---------------- Evaluation signal generation done !!! ----------------------\")\n\n eval_target += 0.001\n eval_initial += 0.001\n eval_target = eval_target.reshape((2000, 1, 2048, 1))\n eval_initial = eval_initial.reshape((2000, 1, 2048, 1))\n\n model.evaluate(eval_initial, eval_target)\n prediction = model.predict(eval_initial)\n prediction = prediction.reshape((2000, 2048))\n np.save(\"deconv_evaluation_signal.npy\", prediction)\n\nif __name__ == \"__main__\":\n main()"
] | [
[
"numpy.save",
"tensorflow.keras.optimizers.Adam",
"numpy.shape",
"tensorflow.experimental.numpy.experimental_enable_numpy_behavior",
"tensorflow.experimental.numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MarcoGorelli/pymc | [
"75ea2a80cb27773e93b7b207043077953940e6ff",
"140dab0199dfb751951ba99175295c07feb00264",
"140dab0199dfb751951ba99175295c07feb00264",
"140dab0199dfb751951ba99175295c07feb00264",
"140dab0199dfb751951ba99175295c07feb00264"
] | [
"pymc/distributions/shape_utils.py",
"pymc/sampling_jax.py",
"pymc/plots/posteriorplot.py",
"pymc/sampling.py",
"pymc/tests/test_aesaraf.py"
] | [
"# Copyright 2021 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# -*- coding: utf-8 -*-\n\"\"\"\nA collection of common shape operations needed for broadcasting\nsamples from probability distributions for stochastic nodes in PyMC.\n\"\"\"\n\nimport warnings\n\nfrom typing import Optional, Sequence, Tuple, Union\n\nimport numpy as np\n\nfrom aesara.graph.basic import Constant, Variable\nfrom aesara.tensor.var import TensorVariable\n\nfrom pymc.aesaraf import change_rv_size, pandas_to_array\nfrom pymc.exceptions import ShapeError, ShapeWarning\n\n__all__ = [\n \"to_tuple\",\n \"shapes_broadcasting\",\n \"broadcast_dist_samples_shape\",\n \"get_broadcastable_dist_samples\",\n \"broadcast_distribution_samples\",\n \"broadcast_dist_samples_to\",\n \"rv_size_is_none\",\n]\n\n\ndef to_tuple(shape):\n \"\"\"Convert ints, arrays, and Nones to tuples\n\n Parameters\n ----------\n shape: None, int or array-like\n Represents the shape to convert to tuple.\n\n Returns\n -------\n If `shape` is None, returns an empty tuple. If it's an int, (shape,) is\n returned. 
If it is array-like, tuple(shape) is returned.\n \"\"\"\n if shape is None:\n return tuple()\n temp = np.atleast_1d(shape)\n if temp.size == 0:\n return tuple()\n else:\n return tuple(temp)\n\n\ndef _check_shape_type(shape):\n out = []\n try:\n shape = np.atleast_1d(shape)\n for s in shape:\n if isinstance(s, np.ndarray) and s.ndim > 0:\n raise TypeError(f\"Value {s} is not a valid integer\")\n o = int(s)\n if o != s:\n raise TypeError(f\"Value {s} is not a valid integer\")\n out.append(o)\n except Exception:\n raise TypeError(f\"Supplied value {shape} does not represent a valid shape\")\n return tuple(out)\n\n\ndef shapes_broadcasting(*args, raise_exception=False):\n \"\"\"Return the shape resulting from broadcasting multiple shapes.\n Represents numpy's broadcasting rules.\n\n Parameters\n ----------\n *args: array-like of int\n Tuples or arrays or lists representing the shapes of arrays to be\n broadcast.\n raise_exception: bool (optional)\n Controls whether to raise an exception or simply return `None` if\n the broadcasting fails.\n\n Returns\n -------\n Resulting shape. If broadcasting is not possible and `raise_exception` is\n False, then `None` is returned. If `raise_exception` is `True`, a\n `ValueError` is raised.\n \"\"\"\n x = list(_check_shape_type(args[0])) if args else ()\n for arg in args[1:]:\n y = list(_check_shape_type(arg))\n if len(x) < len(y):\n x, y = y, x\n if len(y) > 0:\n x[-len(y) :] = [\n j if i == 1 else i if j == 1 else i if i == j else 0\n for i, j in zip(x[-len(y) :], y)\n ]\n if not all(x):\n if raise_exception:\n raise ValueError(\n \"Supplied shapes {} do not broadcast together\".format(\n \", \".join([f\"{a}\" for a in args])\n )\n )\n else:\n return None\n return tuple(x)\n\n\ndef broadcast_dist_samples_shape(shapes, size=None):\n \"\"\"Apply shape broadcasting to shape tuples but assuming that the shapes\n correspond to draws from random variables, with the `size` tuple possibly\n prepended to it. 
The `size` prepend is ignored to consider if the supplied\n `shapes` can broadcast or not. It is prepended to the resulting broadcasted\n `shapes`, if any of the shape tuples had the `size` prepend.\n\n Parameters\n ----------\n shapes: Iterable of tuples holding the distribution samples shapes\n size: None, int or tuple (optional)\n size of the sample set requested.\n\n Returns\n -------\n tuple of the resulting shape\n\n Examples\n --------\n .. code-block:: python\n\n size = 100\n shape0 = (size,)\n shape1 = (size, 5)\n shape2 = (size, 4, 5)\n out = broadcast_dist_samples_shape([shape0, shape1, shape2],\n size=size)\n assert out == (size, 4, 5)\n\n .. code-block:: python\n\n size = 100\n shape0 = (size,)\n shape1 = (5,)\n shape2 = (4, 5)\n out = broadcast_dist_samples_shape([shape0, shape1, shape2],\n size=size)\n assert out == (size, 4, 5)\n\n .. code-block:: python\n\n size = 100\n shape0 = (1,)\n shape1 = (5,)\n shape2 = (4, 5)\n out = broadcast_dist_samples_shape([shape0, shape1, shape2],\n size=size)\n assert out == (4, 5)\n \"\"\"\n if size is None:\n broadcasted_shape = shapes_broadcasting(*shapes)\n if broadcasted_shape is None:\n raise ValueError(\n \"Cannot broadcast provided shapes {} given size: {}\".format(\n \", \".join([f\"{s}\" for s in shapes]), size\n )\n )\n return broadcasted_shape\n shapes = [_check_shape_type(s) for s in shapes]\n _size = to_tuple(size)\n # samples shapes without the size prepend\n sp_shapes = [s[len(_size) :] if _size == s[: min([len(_size), len(s)])] else s for s in shapes]\n try:\n broadcast_shape = shapes_broadcasting(*sp_shapes, raise_exception=True)\n except ValueError:\n raise ValueError(\n \"Cannot broadcast provided shapes {} given size: {}\".format(\n \", \".join([f\"{s}\" for s in shapes]), size\n )\n )\n broadcastable_shapes = []\n for shape, sp_shape in zip(shapes, sp_shapes):\n if _size == shape[: len(_size)]:\n # If size prepends the shape, then we have to add broadcasting axis\n # in the middle\n p_shape = 
(\n shape[: len(_size)]\n + (1,) * (len(broadcast_shape) - len(sp_shape))\n + shape[len(_size) :]\n )\n else:\n p_shape = shape\n broadcastable_shapes.append(p_shape)\n return shapes_broadcasting(*broadcastable_shapes, raise_exception=True)\n\n\ndef get_broadcastable_dist_samples(\n samples, size=None, must_bcast_with=None, return_out_shape=False\n):\n \"\"\"Get a view of the samples drawn from distributions which adds new axises\n in between the `size` prepend and the distribution's `shape`. These views\n should be able to broadcast the samples from the distrubtions taking into\n account the `size` (i.e. the number of samples) of the draw, which is\n prepended to the sample's `shape`. Optionally, one can supply an extra\n `must_bcast_with` to try to force samples to be able to broadcast with a\n given shape. A `ValueError` is raised if it is not possible to broadcast\n the provided samples.\n\n Parameters\n ----------\n samples: Iterable of ndarrays holding the sampled values\n size: None, int or tuple (optional)\n size of the sample set requested.\n must_bcast_with: None, int or tuple (optional)\n Tuple shape to which the samples must be able to broadcast\n return_out_shape: bool (optional)\n If `True`, this function also returns the output's shape and not only\n samples views.\n\n Returns\n -------\n broadcastable_samples: List of the broadcasted sample arrays\n broadcast_shape: If `return_out_shape` is `True`, the resulting broadcast\n shape is returned.\n\n Examples\n --------\n .. 
code-block:: python\n\n must_bcast_with = (3, 1, 5)\n size = 100\n sample0 = np.random.randn(size)\n sample1 = np.random.randn(size, 5)\n sample2 = np.random.randn(size, 4, 5)\n out = broadcast_dist_samples_to(\n [sample0, sample1, sample2],\n size=size,\n must_bcast_with=must_bcast_with,\n )\n assert out[0].shape == (size, 1, 1, 1)\n assert out[1].shape == (size, 1, 1, 5)\n assert out[2].shape == (size, 1, 4, 5)\n assert np.all(sample0[:, None, None, None] == out[0])\n assert np.all(sample1[:, None, None] == out[1])\n assert np.all(sample2[:, None] == out[2])\n\n .. code-block:: python\n\n size = 100\n must_bcast_with = (3, 1, 5)\n sample0 = np.random.randn(size)\n sample1 = np.random.randn(5)\n sample2 = np.random.randn(4, 5)\n out = broadcast_dist_samples_to(\n [sample0, sample1, sample2],\n size=size,\n must_bcast_with=must_bcast_with,\n )\n assert out[0].shape == (size, 1, 1, 1)\n assert out[1].shape == (5,)\n assert out[2].shape == (4, 5)\n assert np.all(sample0[:, None, None, None] == out[0])\n assert np.all(sample1 == out[1])\n assert np.all(sample2 == out[2])\n \"\"\"\n samples = [np.asarray(p) for p in samples]\n _size = to_tuple(size)\n must_bcast_with = to_tuple(must_bcast_with)\n # Raw samples shapes\n p_shapes = [p.shape for p in samples] + [_check_shape_type(must_bcast_with)]\n out_shape = broadcast_dist_samples_shape(p_shapes, size=size)\n # samples shapes without the size prepend\n sp_shapes = [\n s[len(_size) :] if _size == s[: min([len(_size), len(s)])] else s for s in p_shapes\n ]\n broadcast_shape = shapes_broadcasting(*sp_shapes, raise_exception=True)\n broadcastable_samples = []\n for param, p_shape, sp_shape in zip(samples, p_shapes, sp_shapes):\n if _size == p_shape[: min([len(_size), len(p_shape)])]:\n # If size prepends the shape, then we have to add broadcasting axis\n # in the middle\n slicer_head = [slice(None)] * len(_size)\n slicer_tail = [np.newaxis] * (len(broadcast_shape) - len(sp_shape)) + [\n slice(None)\n ] * len(sp_shape)\n 
else:\n # If size does not prepend the shape, then we have leave the\n # parameter as is\n slicer_head = []\n slicer_tail = [slice(None)] * len(sp_shape)\n broadcastable_samples.append(param[tuple(slicer_head + slicer_tail)])\n if return_out_shape:\n return broadcastable_samples, out_shape\n else:\n return broadcastable_samples\n\n\ndef broadcast_distribution_samples(samples, size=None):\n \"\"\"Broadcast samples drawn from distributions taking into account the\n size (i.e. the number of samples) of the draw, which is prepended to\n the sample's shape.\n\n Parameters\n ----------\n samples: Iterable of ndarrays holding the sampled values\n size: None, int or tuple (optional)\n size of the sample set requested.\n\n Returns\n -------\n List of broadcasted sample arrays\n\n Examples\n --------\n .. code-block:: python\n\n size = 100\n sample0 = np.random.randn(size)\n sample1 = np.random.randn(size, 5)\n sample2 = np.random.randn(size, 4, 5)\n out = broadcast_distribution_samples([sample0, sample1, sample2],\n size=size)\n assert all((o.shape == (size, 4, 5) for o in out))\n assert np.all(sample0[:, None, None] == out[0])\n assert np.all(sample1[:, None, :] == out[1])\n assert np.all(sample2 == out[2])\n\n .. code-block:: python\n\n size = 100\n sample0 = np.random.randn(size)\n sample1 = np.random.randn(5)\n sample2 = np.random.randn(4, 5)\n out = broadcast_distribution_samples([sample0, sample1, sample2],\n size=size)\n assert all((o.shape == (size, 4, 5) for o in out))\n assert np.all(sample0[:, None, None] == out[0])\n assert np.all(sample1 == out[1])\n assert np.all(sample2 == out[2])\n \"\"\"\n return np.broadcast_arrays(*get_broadcastable_dist_samples(samples, size=size))\n\n\ndef broadcast_dist_samples_to(to_shape, samples, size=None):\n \"\"\"Broadcast samples drawn from distributions to a given shape, taking into\n account the size (i.e. 
the number of samples) of the draw, which is\n prepended to the sample's shape.\n\n Parameters\n ----------\n to_shape: Tuple shape onto which the samples must be able to broadcast\n samples: Iterable of ndarrays holding the sampled values\n size: None, int or tuple (optional)\n size of the sample set requested.\n\n Returns\n -------\n List of the broadcasted sample arrays\n\n Examples\n --------\n .. code-block:: python\n\n to_shape = (3, 1, 5)\n size = 100\n sample0 = np.random.randn(size)\n sample1 = np.random.randn(size, 5)\n sample2 = np.random.randn(size, 4, 5)\n out = broadcast_dist_samples_to(\n to_shape,\n [sample0, sample1, sample2],\n size=size\n )\n assert np.all((o.shape == (size, 3, 4, 5) for o in out))\n assert np.all(sample0[:, None, None, None] == out[0])\n assert np.all(sample1[:, None, None] == out[1])\n assert np.all(sample2[:, None] == out[2])\n\n .. code-block:: python\n\n size = 100\n to_shape = (3, 1, 5)\n sample0 = np.random.randn(size)\n sample1 = np.random.randn(5)\n sample2 = np.random.randn(4, 5)\n out = broadcast_dist_samples_to(\n to_shape,\n [sample0, sample1, sample2],\n size=size\n )\n assert np.all((o.shape == (size, 3, 4, 5) for o in out))\n assert np.all(sample0[:, None, None, None] == out[0])\n assert np.all(sample1 == out[1])\n assert np.all(sample2 == out[2])\n \"\"\"\n samples, to_shape = get_broadcastable_dist_samples(\n samples, size=size, must_bcast_with=to_shape, return_out_shape=True\n )\n return [np.broadcast_to(o, to_shape) for o in samples]\n\n\n# User-provided can be lazily specified as scalars\nShape = Union[int, TensorVariable, Sequence[Union[int, TensorVariable, type(Ellipsis)]]]\nDims = Union[str, Sequence[Union[str, None, type(Ellipsis)]]]\nSize = Union[int, TensorVariable, Sequence[Union[int, TensorVariable]]]\n\n# After conversion to vectors\nWeakShape = Union[TensorVariable, Tuple[Union[int, TensorVariable, type(Ellipsis)], ...]]\nWeakDims = Tuple[Union[str, None, type(Ellipsis)], ...]\n\n# After Ellipsis 
were substituted\nStrongShape = Union[TensorVariable, Tuple[Union[int, TensorVariable], ...]]\nStrongDims = Sequence[Union[str, None]]\nStrongSize = Union[TensorVariable, Tuple[Union[int, TensorVariable], ...]]\n\n\ndef convert_dims(dims: Dims) -> Optional[WeakDims]:\n \"\"\"Process a user-provided dims variable into None or a valid dims tuple.\"\"\"\n if dims is None:\n return None\n\n if isinstance(dims, str):\n dims = (dims,)\n elif isinstance(dims, (list, tuple)):\n dims = tuple(dims)\n else:\n raise ValueError(f\"The `dims` parameter must be a tuple, str or list. Actual: {type(dims)}\")\n\n if any(d == Ellipsis for d in dims[:-1]):\n raise ValueError(f\"Ellipsis in `dims` may only appear in the last position. Actual: {dims}\")\n\n return dims\n\n\ndef convert_shape(shape: Shape) -> Optional[WeakShape]:\n \"\"\"Process a user-provided shape variable into None or a valid shape object.\"\"\"\n if shape is None:\n return None\n\n if isinstance(shape, int) or (isinstance(shape, TensorVariable) and shape.ndim == 0):\n shape = (shape,)\n elif isinstance(shape, (list, tuple)):\n shape = tuple(shape)\n else:\n raise ValueError(\n f\"The `shape` parameter must be a tuple, TensorVariable, int or list. Actual: {type(shape)}\"\n )\n\n if isinstance(shape, tuple) and any(s == Ellipsis for s in shape[:-1]):\n raise ValueError(\n f\"Ellipsis in `shape` may only appear in the last position. Actual: {shape}\"\n )\n\n return shape\n\n\ndef convert_size(size: Size) -> Optional[StrongSize]:\n \"\"\"Process a user-provided size variable into None or a valid size object.\"\"\"\n if size is None:\n return None\n\n if isinstance(size, int) or (isinstance(size, TensorVariable) and size.ndim == 0):\n size = (size,)\n elif isinstance(size, (list, tuple)):\n size = tuple(size)\n else:\n raise ValueError(\n f\"The `size` parameter must be a tuple, TensorVariable, int or list. 
Actual: {type(size)}\"\n )\n\n if isinstance(size, tuple) and Ellipsis in size:\n raise ValueError(f\"The `size` parameter cannot contain an Ellipsis. Actual: {size}\")\n\n return size\n\n\ndef resize_from_dims(\n dims: WeakDims, ndim_implied: int, model\n) -> Tuple[int, StrongSize, StrongDims]:\n \"\"\"Determines a potential resize shape from a `dims` tuple.\n\n Parameters\n ----------\n dims : array-like\n A vector of dimension names, None or Ellipsis.\n ndim_implied : int\n Number of RV dimensions that were implied from its inputs alone.\n model : pm.Model\n The current model on stack.\n\n Returns\n -------\n ndim_resize : int\n Number of dimensions that should be added through resizing.\n resize_shape : array-like\n The shape of the new dimensions.\n \"\"\"\n if Ellipsis in dims:\n # Auto-complete the dims tuple to the full length.\n # We don't have a way to know the names of implied\n # dimensions, so they will be `None`.\n dims = (*dims[:-1], *[None] * ndim_implied)\n\n ndim_resize = len(dims) - ndim_implied\n\n # All resize dims must be known already (numerically or symbolically).\n unknowndim_resize_dims = set(dims[:ndim_resize]) - set(model.dim_lengths)\n if unknowndim_resize_dims:\n raise KeyError(\n f\"Dimensions {unknowndim_resize_dims} are unknown to the model and cannot be used to specify a `size`.\"\n )\n\n # The numeric/symbolic resize tuple can be created using model.RV_dim_lengths\n resize_shape = tuple(model.dim_lengths[dname] for dname in dims[:ndim_resize])\n return ndim_resize, resize_shape, dims\n\n\ndef resize_from_observed(\n observed, ndim_implied: int\n) -> Tuple[int, StrongSize, Union[np.ndarray, Variable]]:\n \"\"\"Determines a potential resize shape from observations.\n\n Parameters\n ----------\n observed : scalar, array-like\n The value of the `observed` kwarg to the RV creation.\n ndim_implied : int\n Number of RV dimensions that were implied from its inputs alone.\n\n Returns\n -------\n ndim_resize : int\n Number of dimensions 
that should be added through resizing.\n resize_shape : array-like\n The shape of the new dimensions.\n observed : scalar, array-like\n Observations as numpy array or `Variable`.\n \"\"\"\n if not hasattr(observed, \"shape\"):\n observed = pandas_to_array(observed)\n ndim_resize = observed.ndim - ndim_implied\n resize_shape = tuple(observed.shape[d] for d in range(ndim_resize))\n return ndim_resize, resize_shape, observed\n\n\ndef find_size(shape=None, size=None, ndim_supp=None):\n \"\"\"Determines the size keyword argument for creating a Distribution.\n\n Parameters\n ----------\n shape : tuple\n A tuple specifying the final shape of a distribution\n size : tuple\n A tuple specifying the size of a distribution\n ndim_supp : int\n The support dimension of the distribution.\n 0 if a univariate distribution, 1 if a multivariate distribution.\n\n Returns\n -------\n create_size : int\n The size argument to be passed to the distribution\n ndim_expected : int\n Number of dimensions expected after distribution was created\n ndim_batch : int\n Number of batch dimensions\n ndim_supp : int\n Number of support dimensions\n \"\"\"\n\n ndim_expected = None\n ndim_batch = None\n create_size = None\n\n if shape is not None:\n if Ellipsis in shape:\n # Ellipsis short-hands all implied dimensions. 
Therefore\n # we don't know how many dimensions to expect.\n ndim_expected = ndim_batch = None\n # Create the RV with its implied shape and resize later\n create_size = None\n else:\n ndim_expected = len(tuple(shape))\n ndim_batch = ndim_expected - ndim_supp\n create_size = tuple(shape)[:ndim_batch]\n elif size is not None:\n ndim_expected = ndim_supp + len(tuple(size))\n ndim_batch = ndim_expected - ndim_supp\n create_size = size\n\n return create_size, ndim_expected, ndim_batch, ndim_supp\n\n\ndef maybe_resize(\n rv_out,\n rv_op,\n dist_params,\n ndim_expected,\n ndim_batch,\n ndim_supp,\n shape,\n size,\n **kwargs,\n):\n \"\"\"Resize a distribution if necessary.\n\n Parameters\n ----------\n rv_out : RandomVariable\n The RandomVariable to be resized if necessary\n rv_op : RandomVariable.__class__\n The RandomVariable class to recreate it\n dist_params : dict\n Input parameters to recreate the RandomVariable\n ndim_expected : int\n Number of dimensions expected after distribution was created\n ndim_batch : int\n Number of batch dimensions\n ndim_supp : int\n The support dimension of the distribution.\n 0 if a univariate distribution, 1 if a multivariate distribution.\n shape : tuple\n A tuple specifying the final shape of a distribution\n size : tuple\n A tuple specifying the size of a distribution\n\n Returns\n -------\n rv_out : int\n The size argument to be passed to the distribution\n \"\"\"\n ndim_actual = rv_out.ndim\n ndims_unexpected = ndim_actual != ndim_expected\n\n if shape is not None and ndims_unexpected:\n if Ellipsis in shape:\n # Resize and we're done!\n rv_out = change_rv_size(rv_var=rv_out, new_size=shape[:-1], expand=True)\n else:\n # This is rare, but happens, for example, with MvNormal(np.ones((2, 3)), np.eye(3), shape=(2, 3)).\n # Recreate the RV without passing `size` to created it with just the implied dimensions.\n rv_out = rv_op(*dist_params, size=None, **kwargs)\n\n # Now resize by any remaining \"extra\" dimensions that were not 
implied from support and parameters\n if rv_out.ndim < ndim_expected:\n expand_shape = shape[: ndim_expected - rv_out.ndim]\n rv_out = change_rv_size(rv_var=rv_out, new_size=expand_shape, expand=True)\n if not rv_out.ndim == ndim_expected:\n raise ShapeError(\n f\"Failed to create the RV with the expected dimensionality. \"\n f\"This indicates a severe problem. Please open an issue.\",\n actual=ndim_actual,\n expected=ndim_batch + ndim_supp,\n )\n\n # Warn about the edge cases where the RV Op creates more dimensions than\n # it should based on `size` and `RVOp.ndim_supp`.\n if size is not None and ndims_unexpected:\n warnings.warn(\n f\"You may have expected a ({len(tuple(size))}+{ndim_supp})-dimensional RV, but the resulting RV will be {ndim_actual}-dimensional.\"\n ' To silence this warning use `warnings.simplefilter(\"ignore\", pm.ShapeWarning)`.',\n ShapeWarning,\n )\n\n return rv_out\n\n\ndef rv_size_is_none(size: Variable) -> bool:\n \"\"\"Check wether an rv size is None (ie., at.Constant([]))\"\"\"\n return isinstance(size, Constant) and size.data.size == 0\n",
"# pylint: skip-file\nimport os\nimport re\nimport sys\nimport warnings\n\nfrom typing import Callable, List\n\nfrom aesara.graph import optimize_graph\nfrom aesara.tensor import TensorVariable\n\nxla_flags = os.getenv(\"XLA_FLAGS\", \"\")\nxla_flags = re.sub(r\"--xla_force_host_platform_device_count=.+\\s\", \"\", xla_flags).split()\nos.environ[\"XLA_FLAGS\"] = \" \".join([f\"--xla_force_host_platform_device_count={100}\"] + xla_flags)\n\nimport aesara.tensor as at\nimport arviz as az\nimport jax\nimport numpy as np\nimport pandas as pd\n\nfrom aesara.assert_op import Assert\nfrom aesara.compile import SharedVariable\nfrom aesara.graph.basic import clone_replace, graph_inputs\nfrom aesara.graph.fg import FunctionGraph\nfrom aesara.link.jax.dispatch import jax_funcify\n\nfrom pymc import Model, modelcontext\nfrom pymc.aesaraf import compile_rv_inplace\n\nwarnings.warn(\"This module is experimental.\")\n\n\n@jax_funcify.register(Assert)\ndef jax_funcify_Assert(op, **kwargs):\n # Jax does not allow assert whose values aren't known during JIT compilation\n # within it's JIT-ed code. 
Hence we need to make a simple pass through\n # version of the Assert Op.\n # https://github.com/google/jax/issues/2273#issuecomment-589098722\n def assert_fn(value, *inps):\n return value\n\n return assert_fn\n\n\ndef replace_shared_variables(graph: List[TensorVariable]) -> List[TensorVariable]:\n \"\"\"Replace shared variables in graph by their constant values\n\n Raises\n ------\n ValueError\n If any shared variable contains default_updates\n \"\"\"\n\n shared_variables = [var for var in graph_inputs(graph) if isinstance(var, SharedVariable)]\n\n if any(hasattr(var, \"default_update\") for var in shared_variables):\n raise ValueError(\n \"Graph contains shared variables with default_update which cannot \"\n \"be safely replaced.\"\n )\n\n replacements = {var: at.constant(var.get_value(borrow=True)) for var in shared_variables}\n\n new_graph = clone_replace(graph, replace=replacements)\n return new_graph\n\n\ndef get_jaxified_logp(model: Model) -> Callable:\n \"\"\"Compile model.logpt into an optimized jax function\"\"\"\n\n logpt = replace_shared_variables([model.logpt])[0]\n\n logpt_fgraph = FunctionGraph(outputs=[logpt], clone=False)\n optimize_graph(logpt_fgraph, include=[\"fast_run\"], exclude=[\"cxx_only\", \"BlasOpt\"])\n\n # We now jaxify the optimized fgraph\n logp_fn = jax_funcify(logpt_fgraph)\n\n if isinstance(logp_fn, (list, tuple)):\n # This handles the new JAX backend, which always returns a tuple\n logp_fn = logp_fn[0]\n\n def logp_fn_wrap(x):\n res = logp_fn(*x)\n\n if isinstance(res, (list, tuple)):\n # This handles the new JAX backend, which always returns a tuple\n res = res[0]\n\n # Jax expects a potential with the opposite sign of model.logpt\n return -res\n\n return logp_fn_wrap\n\n\ndef sample_numpyro_nuts(\n draws=1000,\n tune=1000,\n chains=4,\n target_accept=0.8,\n random_seed=10,\n model=None,\n progress_bar=True,\n keep_untransformed=False,\n):\n from numpyro.infer import MCMC, NUTS\n\n model = modelcontext(model)\n\n tic1 = 
pd.Timestamp.now()\n print(\"Compiling...\", file=sys.stdout)\n\n rv_names = [rv.name for rv in model.value_vars]\n init_state = [model.initial_point[rv_name] for rv_name in rv_names]\n init_state_batched = jax.tree_map(lambda x: np.repeat(x[None, ...], chains, axis=0), init_state)\n\n logp_fn = get_jaxified_logp(model)\n\n nuts_kernel = NUTS(\n potential_fn=logp_fn,\n target_accept_prob=target_accept,\n adapt_step_size=True,\n adapt_mass_matrix=True,\n dense_mass=False,\n )\n\n pmap_numpyro = MCMC(\n nuts_kernel,\n num_warmup=tune,\n num_samples=draws,\n num_chains=chains,\n postprocess_fn=None,\n chain_method=\"parallel\",\n progress_bar=progress_bar,\n )\n\n tic2 = pd.Timestamp.now()\n print(\"Compilation time = \", tic2 - tic1, file=sys.stdout)\n\n print(\"Sampling...\", file=sys.stdout)\n\n seed = jax.random.PRNGKey(random_seed)\n map_seed = jax.random.split(seed, chains)\n\n pmap_numpyro.run(map_seed, init_params=init_state_batched, extra_fields=(\"num_steps\",))\n raw_mcmc_samples = pmap_numpyro.get_samples(group_by_chain=True)\n\n tic3 = pd.Timestamp.now()\n print(\"Sampling time = \", tic3 - tic2, file=sys.stdout)\n\n print(\"Transforming variables...\", file=sys.stdout)\n mcmc_samples = []\n for i, (value_var, raw_samples) in enumerate(zip(model.value_vars, raw_mcmc_samples)):\n raw_samples = at.constant(np.asarray(raw_samples))\n\n rv = model.values_to_rvs[value_var]\n transform = getattr(value_var.tag, \"transform\", None)\n\n if transform is not None:\n # TODO: This will fail when the transformation depends on another variable\n # such as in interval transform with RVs as edges\n trans_samples = transform.backward(raw_samples, *rv.owner.inputs)\n trans_samples.name = rv.name\n mcmc_samples.append(trans_samples)\n\n if keep_untransformed:\n raw_samples.name = value_var.name\n mcmc_samples.append(raw_samples)\n else:\n raw_samples.name = rv.name\n mcmc_samples.append(raw_samples)\n\n mcmc_varnames = [var.name for var in mcmc_samples]\n mcmc_samples = 
compile_rv_inplace(\n [],\n mcmc_samples,\n mode=\"JAX\",\n )()\n\n tic4 = pd.Timestamp.now()\n print(\"Transformation time = \", tic4 - tic3, file=sys.stdout)\n\n posterior = {k: v for k, v in zip(mcmc_varnames, mcmc_samples)}\n az_trace = az.from_dict(posterior=posterior)\n\n return az_trace\n",
"# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nimport warnings\n\nfrom typing import TYPE_CHECKING, Any, Callable\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom pymc.backends.base import MultiTrace\n\nif TYPE_CHECKING:\n from arviz.data.inference_data import InferenceData\n\n\ndef plot_posterior_predictive_glm(\n trace: InferenceData | MultiTrace,\n eval: np.ndarray | None = None,\n lm: Callable | None = None,\n samples: int = 30,\n **kwargs: Any\n) -> None:\n \"\"\"Plot posterior predictive of a linear model.\n\n Parameters\n ----------\n trace: InferenceData or MultiTrace\n Output of pm.sample()\n eval: <array>\n Array over which to evaluate lm\n lm: function <default: linear function>\n Function mapping parameters at different points\n to their respective outputs.\n input: point, sample\n output: estimated value\n samples: int <default=30>\n How many posterior samples to draw.\n kwargs : mapping, optional\n Additional keyword arguments are passed to ``matplotlib.pyplot.plot()``.\n\n Warnings\n --------\n The `plot_posterior_predictive_glm` function will be removed in a future PyMC release.\n \"\"\"\n warnings.warn(\n \"The `plot_posterior_predictive_glm` function will migrate to Arviz in a future release. 
\"\n \"\\nKeep up to date with `ArviZ <https://arviz-devs.github.io/arviz/>`_ for future updates.\",\n FutureWarning,\n )\n\n if lm is None:\n lm = lambda x, sample: sample[\"Intercept\"] + sample[\"x\"] * x\n\n if eval is None:\n eval = np.linspace(0, 1, 100)\n\n # Set default plotting arguments\n if \"lw\" not in kwargs and \"linewidth\" not in kwargs:\n kwargs[\"lw\"] = 0.2\n if \"c\" not in kwargs and \"color\" not in kwargs:\n kwargs[\"c\"] = \"k\"\n\n if not isinstance(trace, MultiTrace):\n trace = trace.posterior.to_dataframe().to_dict(orient=\"records\")\n\n for rand_loc in np.random.randint(0, len(trace), samples):\n rand_sample = trace[rand_loc]\n plt.plot(eval, lm(eval, rand_sample), **kwargs)\n # Make sure to not plot label multiple times\n kwargs.pop(\"label\", None)\n\n plt.title(\"Posterior predictive\")\n",
"# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Functions for MCMC sampling.\"\"\"\n\nimport collections.abc as abc\nimport logging\nimport pickle\nimport sys\nimport time\nimport warnings\n\nfrom collections import defaultdict\nfrom copy import copy\nfrom typing import Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union, cast\n\nimport aesara.gradient as tg\nimport cloudpickle\nimport numpy as np\nimport xarray\n\nfrom aesara.compile.mode import Mode\nfrom aesara.tensor.sharedvar import SharedVariable\nfrom arviz import InferenceData\nfrom fastprogress.fastprogress import progress_bar\n\nimport pymc as pm\n\nfrom pymc.aesaraf import change_rv_size, compile_rv_inplace, inputvars, walk_model\nfrom pymc.backends.arviz import _DefaultTrace\nfrom pymc.backends.base import BaseTrace, MultiTrace\nfrom pymc.backends.ndarray import NDArray\nfrom pymc.bart.pgbart import PGBART\nfrom pymc.blocking import DictToArrayBijection\nfrom pymc.distributions import NoDistribution\nfrom pymc.exceptions import IncorrectArgumentsError, SamplingError\nfrom pymc.initial_point import (\n PointType,\n StartDict,\n filter_rvs_to_jitter,\n make_initial_point_fns_per_chain,\n)\nfrom pymc.model import Model, modelcontext\nfrom pymc.parallel_sampling import Draw, _cpu_count\nfrom pymc.step_methods import (\n NUTS,\n BinaryGibbsMetropolis,\n BinaryMetropolis,\n CategoricalGibbsMetropolis,\n CompoundStep,\n DEMetropolis,\n HamiltonianMC,\n 
Metropolis,\n Slice,\n)\nfrom pymc.step_methods.arraystep import BlockedStep, PopulationArrayStepShared\nfrom pymc.step_methods.hmc import quadpotential\nfrom pymc.util import (\n chains_and_samples,\n dataset_to_point_list,\n get_default_varnames,\n get_untransformed_name,\n is_transformed_name,\n)\nfrom pymc.vartypes import discrete_types\n\nsys.setrecursionlimit(10000)\n\n__all__ = [\n \"sample\",\n \"iter_sample\",\n \"sample_posterior_predictive\",\n \"sample_posterior_predictive_w\",\n \"init_nuts\",\n \"sample_prior_predictive\",\n]\n\nSTEP_METHODS = (\n NUTS,\n HamiltonianMC,\n Metropolis,\n BinaryMetropolis,\n BinaryGibbsMetropolis,\n Slice,\n CategoricalGibbsMetropolis,\n PGBART,\n)\nStep = Union[BlockedStep, CompoundStep]\n\nArrayLike = Union[np.ndarray, List[float]]\nPointList = List[PointType]\nBackend = Union[BaseTrace, MultiTrace, NDArray]\n\n_log = logging.getLogger(\"pymc\")\n\n\ndef instantiate_steppers(\n model, steps: List[Step], selected_steps, step_kwargs=None\n) -> Union[Step, List[Step]]:\n \"\"\"Instantiate steppers assigned to the model variables.\n\n This function is intended to be called automatically from ``sample()``, but\n may be called manually.\n\n Parameters\n ----------\n model : Model object\n A fully-specified model object\n steps : list\n A list of zero or more step function instances that have been assigned to some subset of\n the model's parameters.\n selected_steps : dict\n A dictionary that maps a step method class to a list of zero or more model variables.\n step_kwargs : dict\n Parameters for the samplers. Keys are the lower case names of\n the step method, values a dict of arguments. 
Defaults to None.\n\n Returns\n -------\n methods : list or step\n List of step methods associated with the model's variables, or step method\n if there is only one.\n \"\"\"\n if step_kwargs is None:\n step_kwargs = {}\n\n used_keys = set()\n for step_class, vars in selected_steps.items():\n if vars:\n args = step_kwargs.get(step_class.name, {})\n used_keys.add(step_class.name)\n step = step_class(vars=vars, model=model, **args)\n steps.append(step)\n\n unused_args = set(step_kwargs).difference(used_keys)\n if unused_args:\n raise ValueError(\"Unused step method arguments: %s\" % unused_args)\n\n if len(steps) == 1:\n return steps[0]\n\n return steps\n\n\ndef assign_step_methods(model, step=None, methods=STEP_METHODS, step_kwargs=None):\n \"\"\"Assign model variables to appropriate step methods.\n\n Passing a specified model will auto-assign its constituent stochastic\n variables to step methods based on the characteristics of the variables.\n This function is intended to be called automatically from ``sample()``, but\n may be called manually. Each step method passed should have a\n ``competence()`` method that returns an ordinal competence value\n corresponding to the variable passed to it. This value quantifies the\n appropriateness of the step method for sampling the variable.\n\n Parameters\n ----------\n model : Model object\n A fully-specified model object\n step : step function or vector of step functions\n One or more step functions that have been assigned to some subset of\n the model's parameters. Defaults to ``None`` (no assigned variables).\n methods : vector of step method classes\n The set of step methods from which the function may choose. Defaults\n to the main step methods provided by PyMC.\n step_kwargs : dict\n Parameters for the samplers. 
Keys are the lower case names of\n the step method, values a dict of arguments.\n\n Returns\n -------\n methods : list\n List of step methods associated with the model's variables.\n \"\"\"\n steps = []\n assigned_vars = set()\n\n if step is not None:\n try:\n steps += list(step)\n except TypeError:\n steps.append(step)\n for step in steps:\n assigned_vars = assigned_vars.union(set(step.vars))\n\n # Use competence classmethods to select step methods for remaining\n # variables\n selected_steps = defaultdict(list)\n for var in model.value_vars:\n if var not in assigned_vars:\n # determine if a gradient can be computed\n has_gradient = var.dtype not in discrete_types\n if has_gradient:\n try:\n tg.grad(model.logpt, var)\n except (NotImplementedError, tg.NullTypeGradError):\n has_gradient = False\n # select the best method\n rv_var = model.values_to_rvs[var]\n selected = max(\n methods,\n key=lambda method, var=rv_var, has_gradient=has_gradient: method._competence(\n var, has_gradient\n ),\n )\n selected_steps[selected].append(var)\n\n return instantiate_steppers(model, steps, selected_steps, step_kwargs)\n\n\ndef _print_step_hierarchy(s: Step, level=0) -> None:\n if isinstance(s, CompoundStep):\n _log.info(\">\" * level + \"CompoundStep\")\n for i in s.methods:\n _print_step_hierarchy(i, level + 1)\n else:\n varnames = \", \".join(\n [\n get_untransformed_name(v.name) if is_transformed_name(v.name) else v.name\n for v in s.vars\n ]\n )\n _log.info(\">\" * level + f\"{s.__class__.__name__}: [{varnames}]\")\n\n\ndef all_continuous(vars, model):\n \"\"\"Check that vars not include discrete variables or BART variables, excepting observed RVs.\"\"\"\n\n vars_ = [var for var in vars if not (var.owner and hasattr(var.tag, \"observations\"))]\n\n if any(\n [\n (\n var.dtype in discrete_types\n or isinstance(model.values_to_rvs[var].owner.op, NoDistribution)\n )\n for var in vars_\n ]\n ):\n return False\n else:\n return True\n\n\ndef sample(\n draws=1000,\n step=None,\n 
init=\"auto\",\n n_init=200_000,\n initvals: Optional[Union[StartDict, Sequence[Optional[StartDict]]]] = None,\n trace: Optional[Union[BaseTrace, List[str]]] = None,\n chain_idx=0,\n chains=None,\n cores=None,\n tune=1000,\n progressbar=True,\n model=None,\n random_seed=None,\n discard_tuned_samples=True,\n compute_convergence_checks=True,\n callback=None,\n jitter_max_retries=10,\n *,\n return_inferencedata=True,\n idata_kwargs: dict = None,\n mp_ctx=None,\n **kwargs,\n) -> Union[InferenceData, MultiTrace]:\n r\"\"\"Draw samples from the posterior using the given step methods.\n\n Multiple step methods are supported via compound step methods.\n\n Parameters\n ----------\n draws : int\n The number of samples to draw. Defaults to 1000. The number of tuned samples are discarded\n by default. See ``discard_tuned_samples``.\n init : str\n Initialization method to use for auto-assigned NUTS samplers.\n See `pm.init_nuts` for a list of all options.\n step : function or iterable of functions\n A step function or collection of functions. If there are variables without step methods,\n step methods for those variables will be assigned automatically. By default the NUTS step\n method will be used, if appropriate to the model; this is a good default for beginning\n users.\n n_init : int\n Number of iterations of initializer. Only works for 'ADVI' init methods.\n initvals : optional, dict, array of dict\n Dict or list of dicts with initial value strategies to use instead of the defaults from `Model.initial_values`.\n The keys should be names of transformed random variables.\n Initialization methods for NUTS (see ``init`` keyword) can overwrite the default.\n trace : backend or list\n This should be a backend instance, or a list of variables to track.\n If None or a list of variables, the NDArray backend is used.\n chain_idx : int\n Chain number used to store sample in backend. 
If ``chains`` is greater than one, chain\n numbers will start here.\n chains : int\n The number of chains to sample. Running independent chains is important for some\n convergence statistics and can also reveal multiple modes in the posterior. If ``None``,\n then set to either ``cores`` or 2, whichever is larger.\n cores : int\n The number of chains to run in parallel. If ``None``, set to the number of CPUs in the\n system, but at most 4.\n tune : int\n Number of iterations to tune, defaults to 1000. Samplers adjust the step sizes, scalings or\n similar during tuning. Tuning samples will be drawn in addition to the number specified in\n the ``draws`` argument, and will be discarded unless ``discard_tuned_samples`` is set to\n False.\n progressbar : bool, optional default=True\n Whether or not to display a progress bar in the command line. The bar shows the percentage\n of completion, the sampling speed in samples per second (SPS), and the estimated remaining\n time until completion (\"expected time of arrival\"; ETA).\n model : Model (optional if in ``with`` context)\n Model to sample from. The model needs to have free random variables.\n random_seed : int or list of ints\n Random seed(s) used by the sampling steps. A list is accepted if\n ``cores`` is greater than one.\n discard_tuned_samples : bool\n Whether to discard posterior samples of the tune interval.\n compute_convergence_checks : bool, default=True\n Whether to compute sampler statistics like Gelman-Rubin and ``effective_n``.\n callback : function, default=None\n A function which gets called for every sample from the trace of a chain. 
The function is\n called with the trace and the current draw and will contain all samples for a single trace.\n the ``draw.chain`` argument can be used to determine which of the active chains the sample\n is drawn from.\n Sampling can be interrupted by throwing a ``KeyboardInterrupt`` in the callback.\n jitter_max_retries : int\n Maximum number of repeated attempts (per chain) at creating an initial matrix with uniform jitter\n that yields a finite probability. This applies to ``jitter+adapt_diag`` and ``jitter+adapt_full``\n init methods.\n return_inferencedata : bool\n Whether to return the trace as an :class:`arviz:arviz.InferenceData` (True) object or a `MultiTrace` (False)\n Defaults to `True`.\n idata_kwargs : dict, optional\n Keyword arguments for :func:`pymc.to_inference_data`\n mp_ctx : multiprocessing.context.BaseContent\n A multiprocessing context for parallel sampling. See multiprocessing\n documentation for details.\n\n Returns\n -------\n trace : pymc.backends.base.MultiTrace or arviz.InferenceData\n A ``MultiTrace`` or ArviZ ``InferenceData`` object that contains the samples.\n\n Notes\n -----\n Optional keyword arguments can be passed to ``sample`` to be delivered to the\n ``step_method``\\ s used during sampling.\n\n If your model uses only one step method, you can address step method kwargs\n directly. In particular, the NUTS step method has several options including:\n\n * target_accept : float in [0, 1]. The step size is tuned such that we\n approximate this acceptance rate. Higher values like 0.9 or 0.95 often\n work better for problematic posteriors\n * max_treedepth : The maximum depth of the trajectory tree\n * step_scale : float, default 0.25\n The initial guess for the step size scaled down by :math:`1/n**(1/4)`,\n where n is the dimensionality of the parameter space\n\n If your model uses multiple step methods, aka a Compound Step, then you have\n two ways to address arguments to each step method:\n\n A. 
If you let ``sample()`` automatically assign the ``step_method``\\ s,\n and you can correctly anticipate what they will be, then you can wrap\n step method kwargs in a dict and pass that to sample() with a kwarg set\n to the name of the step method.\n e.g. for a CompoundStep comprising NUTS and BinaryGibbsMetropolis,\n you could send:\n\n 1. ``target_accept`` to NUTS: nuts={'target_accept':0.9}\n 2. ``transit_p`` to BinaryGibbsMetropolis: binary_gibbs_metropolis={'transit_p':.7}\n\n Note that available names are:\n\n ``nuts``, ``hmc``, ``metropolis``, ``binary_metropolis``,\n ``binary_gibbs_metropolis``, ``categorical_gibbs_metropolis``,\n ``DEMetropolis``, ``DEMetropolisZ``, ``slice``\n\n B. If you manually declare the ``step_method``\\ s, within the ``step``\n kwarg, then you can address the ``step_method`` kwargs directly.\n e.g. for a CompoundStep comprising NUTS and BinaryGibbsMetropolis,\n you could send ::\n\n step=[pm.NUTS([freeRV1, freeRV2], target_accept=0.9),\n pm.BinaryGibbsMetropolis([freeRV3], transit_p=.7)]\n\n You can find a full list of arguments in the docstring of the step methods.\n\n Examples\n --------\n .. code:: ipython\n\n In [1]: import pymc as pm\n ...: n = 100\n ...: h = 61\n ...: alpha = 2\n ...: beta = 2\n\n In [2]: with pm.Model() as model: # context management\n ...: p = pm.Beta(\"p\", alpha=alpha, beta=beta)\n ...: y = pm.Binomial(\"y\", n=n, p=p, observed=h)\n ...: idata = pm.sample()\n\n In [3]: az.summary(idata, kind=\"stats\")\n\n Out[3]:\n mean sd hdi_3% hdi_97%\n p 0.609 0.047 0.528 0.699\n \"\"\"\n if \"start\" in kwargs:\n if initvals is not None:\n raise ValueError(\"Passing both `start` and `initvals` is not supported.\")\n warnings.warn(\n \"The `start` kwarg was renamed to `initvals` and can now do more. 
Please check the docstring.\",\n FutureWarning,\n stacklevel=2,\n )\n initvals = kwargs.pop(\"start\")\n\n model = modelcontext(model)\n if not model.free_RVs:\n raise SamplingError(\n \"Cannot sample from the model, since the model does not contain any free variables.\"\n )\n\n if cores is None:\n cores = min(4, _cpu_count())\n\n if chains is None:\n chains = max(2, cores)\n if random_seed == -1:\n random_seed = None\n if chains == 1 and isinstance(random_seed, int):\n random_seed = [random_seed]\n\n if random_seed is None or isinstance(random_seed, int):\n if random_seed is not None:\n np.random.seed(random_seed)\n random_seed = [np.random.randint(2 ** 30) for _ in range(chains)]\n\n if not isinstance(random_seed, abc.Iterable):\n raise TypeError(\"Invalid value for `random_seed`. Must be tuple, list or int\")\n\n if not discard_tuned_samples and not return_inferencedata:\n warnings.warn(\n \"Tuning samples will be included in the returned `MultiTrace` object, which can lead to\"\n \" complications in your downstream analysis. Please consider to switch to `InferenceData`:\\n\"\n \"`pm.sample(..., return_inferencedata=True)`\",\n UserWarning,\n stacklevel=2,\n )\n\n # small trace warning\n if draws == 0:\n msg = \"Tuning was enabled throughout the whole trace.\"\n _log.warning(msg)\n elif draws < 500:\n msg = \"Only %s samples in chain.\" % draws\n _log.warning(msg)\n\n draws += tune\n\n initial_points = None\n if step is None and init is not None and all_continuous(model.value_vars, model):\n try:\n # By default, try to use NUTS\n _log.info(\"Auto-assigning NUTS sampler...\")\n initial_points, step = init_nuts(\n init=init,\n chains=chains,\n n_init=n_init,\n model=model,\n seeds=random_seed,\n progressbar=progressbar,\n jitter_max_retries=jitter_max_retries,\n tune=tune,\n initvals=initvals,\n **kwargs,\n )\n except (AttributeError, NotImplementedError, tg.NullTypeGradError):\n # gradient computation failed\n _log.info(\"Initializing NUTS failed. 
Falling back to elementwise auto-assignment.\")\n _log.debug(\"Exception in init nuts\", exec_info=True)\n step = assign_step_methods(model, step, step_kwargs=kwargs)\n else:\n step = assign_step_methods(model, step, step_kwargs=kwargs)\n\n if isinstance(step, list):\n step = CompoundStep(step)\n\n if initial_points is None:\n # Time to draw/evaluate numeric start points for each chain.\n ipfns = make_initial_point_fns_per_chain(\n model=model,\n overrides=initvals,\n jitter_rvs=filter_rvs_to_jitter(step),\n chains=chains,\n )\n initial_points = [ipfn(seed) for ipfn, seed in zip(ipfns, random_seed)]\n\n # One final check that shapes and logps at the starting points are okay.\n for ip in initial_points:\n model.check_start_vals(ip)\n _check_start_shape(model, ip)\n\n sample_args = {\n \"draws\": draws,\n \"step\": step,\n \"start\": initial_points,\n \"trace\": trace,\n \"chain\": chain_idx,\n \"chains\": chains,\n \"tune\": tune,\n \"progressbar\": progressbar,\n \"model\": model,\n \"random_seed\": random_seed,\n \"cores\": cores,\n \"callback\": callback,\n \"discard_tuned_samples\": discard_tuned_samples,\n }\n parallel_args = {\n \"mp_ctx\": mp_ctx,\n }\n\n sample_args.update(kwargs)\n\n has_population_samplers = np.any(\n [\n isinstance(m, PopulationArrayStepShared)\n for m in (step.methods if isinstance(step, CompoundStep) else [step])\n ]\n )\n\n parallel = cores > 1 and chains > 1 and not has_population_samplers\n t_start = time.time()\n if parallel:\n _log.info(f\"Multiprocess sampling ({chains} chains in {cores} jobs)\")\n _print_step_hierarchy(step)\n try:\n trace = _mp_sample(**sample_args, **parallel_args)\n except pickle.PickleError:\n _log.warning(\"Could not pickle model, sampling singlethreaded.\")\n _log.debug(\"Pickling error:\", exec_info=True)\n parallel = False\n except AttributeError as e:\n if not str(e).startswith(\"AttributeError: Can't pickle\"):\n raise\n _log.warning(\"Could not pickle model, sampling singlethreaded.\")\n 
_log.debug(\"Pickling error:\", exec_info=True)\n parallel = False\n if not parallel:\n if has_population_samplers:\n has_demcmc = np.any(\n [\n isinstance(m, DEMetropolis)\n for m in (step.methods if isinstance(step, CompoundStep) else [step])\n ]\n )\n _log.info(f\"Population sampling ({chains} chains)\")\n\n initial_point_model_size = sum(initial_points[0][n.name].size for n in model.value_vars)\n\n if has_demcmc and chains < 3:\n raise ValueError(\n \"DEMetropolis requires at least 3 chains. \"\n \"For this {}-dimensional model you should use ≥{} chains\".format(\n initial_point_model_size, initial_point_model_size + 1\n )\n )\n if has_demcmc and chains <= initial_point_model_size:\n warnings.warn(\n \"DEMetropolis should be used with more chains than dimensions! \"\n \"(The model has {} dimensions.)\".format(initial_point_model_size),\n UserWarning,\n stacklevel=2,\n )\n _print_step_hierarchy(step)\n trace = _sample_population(parallelize=cores > 1, **sample_args)\n else:\n _log.info(f\"Sequential sampling ({chains} chains in 1 job)\")\n _print_step_hierarchy(step)\n trace = _sample_many(**sample_args)\n\n t_sampling = time.time() - t_start\n # count the number of tune/draw iterations that happened\n # ideally via the \"tune\" statistic, but not all samplers record it!\n if \"tune\" in trace.stat_names:\n stat = trace.get_sampler_stats(\"tune\", chains=chain_idx)\n # when CompoundStep is used, the stat is 2 dimensional!\n if len(stat.shape) == 2:\n stat = stat[:, 0]\n stat = tuple(stat)\n n_tune = stat.count(True)\n n_draws = stat.count(False)\n else:\n # these may be wrong when KeyboardInterrupt happened, but they're better than nothing\n n_tune = min(tune, len(trace))\n n_draws = max(0, len(trace) - n_tune)\n\n if discard_tuned_samples:\n trace = trace[n_tune:]\n\n # save metadata in SamplerReport\n trace.report._n_tune = n_tune\n trace.report._n_draws = n_draws\n trace.report._t_sampling = t_sampling\n\n if \"variable_inclusion\" in trace.stat_names:\n for 
strace in trace._straces.values():\n for stat in strace._stats:\n if \"variable_inclusion\" in stat:\n if trace.nchains > 1:\n stat[\"variable_inclusion\"] = np.vstack(stat[\"variable_inclusion\"])\n else:\n stat[\"variable_inclusion\"] = [np.vstack(stat[\"variable_inclusion\"])]\n\n if \"bart_trees\" in trace.stat_names:\n for strace in trace._straces.values():\n for stat in strace._stats:\n if \"bart_trees\" in stat:\n if trace.nchains > 1:\n stat[\"bart_trees\"] = np.vstack(stat[\"bart_trees\"])\n else:\n stat[\"bart_trees\"] = [np.vstack(stat[\"bart_trees\"])]\n\n n_chains = len(trace.chains)\n _log.info(\n f'Sampling {n_chains} chain{\"s\" if n_chains > 1 else \"\"} for {n_tune:_d} tune and {n_draws:_d} draw iterations '\n f\"({n_tune*n_chains:_d} + {n_draws*n_chains:_d} draws total) \"\n f\"took {trace.report.t_sampling:.0f} seconds.\"\n )\n\n idata = None\n if compute_convergence_checks or return_inferencedata:\n ikwargs = dict(model=model, save_warmup=not discard_tuned_samples)\n if idata_kwargs:\n ikwargs.update(idata_kwargs)\n idata = pm.to_inference_data(trace, **ikwargs)\n\n if compute_convergence_checks:\n if draws - tune < 100:\n warnings.warn(\n \"The number of samples is too small to check convergence reliably.\", stacklevel=2\n )\n else:\n trace.report._run_convergence_checks(idata, model)\n trace.report._log_summary()\n\n if return_inferencedata:\n return idata\n else:\n return trace\n\n\ndef _check_start_shape(model, start: PointType):\n \"\"\"Checks that the prior evaluations and initial points have identical shapes.\n\n Parameters\n ----------\n model : pm.Model\n The current model on context.\n start : dict\n The complete dictionary mapping (transformed) variable names to numeric initial values.\n \"\"\"\n e = \"\"\n try:\n actual_shapes = model.eval_rv_shapes()\n except NotImplementedError as ex:\n warnings.warn(f\"Unable to validate shapes: {ex.args[0]}\", UserWarning)\n return\n for name, sval in start.items():\n ashape = 
actual_shapes.get(name)\n sshape = np.shape(sval)\n if ashape != tuple(sshape):\n e += f\"\\nExpected shape {ashape} for var '{name}', got: {sshape}\"\n if e != \"\":\n raise ValueError(f\"Bad shape in start point:{e}\")\n\n\ndef _sample_many(\n draws,\n chain: int,\n chains: int,\n start: Sequence[PointType],\n random_seed: list,\n step,\n callback=None,\n **kwargs,\n):\n \"\"\"Samples all chains sequentially.\n\n Parameters\n ----------\n draws: int\n The number of samples to draw\n chain: int\n Number of the first chain in the sequence.\n chains: int\n Total number of chains to sample.\n start: list\n Starting points for each chain\n random_seed: list\n A list of seeds, one for each chain\n step: function\n Step function\n\n Returns\n -------\n trace: MultiTrace\n Contains samples of all chains\n \"\"\"\n traces: List[Backend] = []\n for i in range(chains):\n trace = _sample(\n draws=draws,\n chain=chain + i,\n start=start[i],\n step=step,\n random_seed=random_seed[i],\n callback=callback,\n **kwargs,\n )\n if trace is None:\n if len(traces) == 0:\n raise ValueError(\"Sampling stopped before a sample was created.\")\n else:\n break\n elif len(trace) < draws:\n if len(traces) == 0:\n traces.append(trace)\n break\n else:\n traces.append(trace)\n return MultiTrace(traces)\n\n\ndef _sample_population(\n draws: int,\n chain: int,\n chains: int,\n start: Sequence[PointType],\n random_seed,\n step,\n tune,\n model,\n progressbar: bool = True,\n parallelize=False,\n **kwargs,\n):\n \"\"\"Performs sampling of a population of chains using the ``PopulationStepper``.\n\n Parameters\n ----------\n draws : int\n The number of samples to draw\n chain : int\n The number of the first chain in the population\n chains : int\n The total number of chains in the population\n start : list\n Start points for each chain\n random_seed : int or list of ints, optional\n A list is accepted if more if ``cores`` is greater than one.\n step : function\n Step function (should be or contain a 
population step method)\n tune : int, optional\n Number of iterations to tune, if applicable (defaults to None)\n model : Model (optional if in ``with`` context)\n progressbar : bool\n Show progress bars? (defaults to True)\n parallelize : bool\n Setting for multiprocess parallelization\n\n Returns\n -------\n trace : MultiTrace\n Contains samples of all chains\n \"\"\"\n sampling = _prepare_iter_population(\n draws,\n [chain + c for c in range(chains)],\n step,\n start,\n parallelize,\n tune=tune,\n model=model,\n random_seed=random_seed,\n progressbar=progressbar,\n )\n\n if progressbar:\n sampling = progress_bar(sampling, total=draws, display=progressbar)\n\n latest_traces = None\n for it, traces in enumerate(sampling):\n latest_traces = traces\n return MultiTrace(latest_traces)\n\n\ndef _sample(\n chain: int,\n progressbar: bool,\n random_seed,\n start: PointType,\n draws: int,\n step=None,\n trace: Optional[Union[BaseTrace, List[str]]] = None,\n tune=None,\n model: Optional[Model] = None,\n callback=None,\n **kwargs,\n):\n \"\"\"Main iteration for singleprocess sampling.\n\n Multiple step methods are supported via compound step methods.\n\n Parameters\n ----------\n chain : int\n Number of the chain that the samples will belong to.\n progressbar : bool\n Whether or not to display a progress bar in the command line. 
The bar shows the percentage\n of completion, the sampling speed in samples per second (SPS), and the estimated remaining\n time until completion (\"expected time of arrival\"; ETA).\n random_seed : int or list of ints\n A list is accepted if ``cores`` is greater than one.\n start : dict\n Starting point in parameter space (or partial point)\n draws : int\n The number of samples to draw\n step : function\n Step function\n trace : backend or list\n This should be a backend instance, or a list of variables to track.\n If None or a list of variables, the NDArray backend is used.\n tune : int, optional\n Number of iterations to tune, if applicable (defaults to None)\n model : Model (optional if in ``with`` context)\n\n Returns\n -------\n strace : pymc.backends.base.BaseTrace\n A ``BaseTrace`` object that contains the samples for this chain.\n \"\"\"\n skip_first = kwargs.get(\"skip_first\", 0)\n\n trace = copy(trace)\n\n sampling = _iter_sample(draws, step, start, trace, chain, tune, model, random_seed, callback)\n _pbar_data = {\"chain\": chain, \"divergences\": 0}\n _desc = \"Sampling chain {chain:d}, {divergences:,d} divergences\"\n if progressbar:\n sampling = progress_bar(sampling, total=draws, display=progressbar)\n sampling.comment = _desc.format(**_pbar_data)\n try:\n strace = None\n for it, (strace, diverging) in enumerate(sampling):\n if it >= skip_first and diverging:\n _pbar_data[\"divergences\"] += 1\n if progressbar:\n sampling.comment = _desc.format(**_pbar_data)\n except KeyboardInterrupt:\n pass\n return strace\n\n\ndef iter_sample(\n draws: int,\n step,\n start: PointType,\n trace=None,\n chain=0,\n tune: Optional[int] = None,\n model: Optional[Model] = None,\n random_seed: Optional[Union[int, List[int]]] = None,\n callback=None,\n):\n \"\"\"Generate a trace on each iteration using the given step method.\n\n Multiple step methods ared supported via compound step methods. 
Returns the\n amount of time taken.\n\n Parameters\n ----------\n draws : int\n The number of samples to draw\n step : function\n Step function\n start : dict\n Starting point in parameter space (or partial point).\n trace : backend or list\n This should be a backend instance, or a list of variables to track.\n If None or a list of variables, the NDArray backend is used.\n chain : int, optional\n Chain number used to store sample in backend. If ``cores`` is greater than one, chain numbers\n will start here.\n tune : int, optional\n Number of iterations to tune, if applicable (defaults to None)\n model : Model (optional if in ``with`` context)\n random_seed : int or list of ints, optional\n A list is accepted if more if ``cores`` is greater than one.\n callback :\n A function which gets called for every sample from the trace of a chain. The function is\n called with the trace and the current draw and will contain all samples for a single trace.\n the ``draw.chain`` argument can be used to determine which of the active chains the sample\n is drawn from.\n Sampling can be interrupted by throwing a ``KeyboardInterrupt`` in the callback.\n\n Yields\n ------\n trace : MultiTrace\n Contains all samples up to the current iteration\n\n Examples\n --------\n ::\n\n for trace in iter_sample(500, step):\n ...\n \"\"\"\n sampling = _iter_sample(draws, step, start, trace, chain, tune, model, random_seed, callback)\n for i, (strace, _) in enumerate(sampling):\n yield MultiTrace([strace[: i + 1]])\n\n\ndef _iter_sample(\n draws,\n step,\n start: PointType,\n trace: Optional[Union[BaseTrace, List[str]]] = None,\n chain=0,\n tune=None,\n model=None,\n random_seed=None,\n callback=None,\n):\n \"\"\"Generator for sampling one chain. 
(Used in singleprocess sampling.)\n\n Parameters\n ----------\n draws : int\n The number of samples to draw\n step : function\n Step function\n start : dict\n Starting point in parameter space (or partial point).\n Must contain numeric (transformed) initial values for all (transformed) free variables.\n trace : backend or list\n This should be a backend instance, or a list of variables to track.\n If None or a list of variables, the NDArray backend is used.\n chain : int, optional\n Chain number used to store sample in backend. If ``cores`` is greater than one, chain numbers\n will start here.\n tune : int, optional\n Number of iterations to tune, if applicable (defaults to None)\n model : Model (optional if in ``with`` context)\n random_seed : int or list of ints, optional\n A list is accepted if more if ``cores`` is greater than one.\n\n Yields\n ------\n strace : BaseTrace\n The trace object containing the samples for this chain\n diverging : bool\n Indicates if the draw is divergent. 
Only available with some samplers.\n \"\"\"\n model = modelcontext(model)\n draws = int(draws)\n\n if draws < 1:\n raise ValueError(\"Argument `draws` must be greater than 0.\")\n\n strace = _choose_backend(trace, model=model)\n\n try:\n step = CompoundStep(step)\n except TypeError:\n pass\n\n point = start\n\n if step.generates_stats and strace.supports_sampler_stats:\n strace.setup(draws, chain, step.stats_dtypes)\n else:\n strace.setup(draws, chain)\n\n try:\n step.tune = bool(tune)\n if hasattr(step, \"reset_tuning\"):\n step.reset_tuning()\n for i in range(draws):\n stats = None\n diverging = False\n\n if i == 0 and hasattr(step, \"iter_count\"):\n step.iter_count = 0\n if i == tune:\n step = stop_tuning(step)\n if step.generates_stats:\n point, stats = step.step(point)\n if strace.supports_sampler_stats:\n strace.record(point, stats)\n diverging = i > tune and stats and stats[0].get(\"diverging\")\n else:\n strace.record(point)\n else:\n point = step.step(point)\n strace.record(point)\n if callback is not None:\n warns = getattr(step, \"warnings\", None)\n callback(\n trace=strace,\n draw=Draw(chain, i == draws, i, i < tune, stats, point, warns),\n )\n\n yield strace, diverging\n except KeyboardInterrupt:\n strace.close()\n if hasattr(step, \"warnings\"):\n warns = step.warnings()\n strace._add_warnings(warns)\n raise\n except BaseException:\n strace.close()\n raise\n else:\n strace.close()\n if hasattr(step, \"warnings\"):\n warns = step.warnings()\n strace._add_warnings(warns)\n\n\nclass PopulationStepper:\n \"\"\"Wraps population of step methods to step them in parallel with single or multiprocessing.\"\"\"\n\n def __init__(self, steppers, parallelize, progressbar=True):\n \"\"\"Use multiprocessing to parallelize chains.\n\n Falls back to sequential evaluation if multiprocessing fails.\n\n In the multiprocessing mode of operation, a new process is started for each\n chain/stepper and Pipes are used to communicate with the main process.\n\n Parameters\n 
----------\n steppers : list\n A collection of independent step methods, one for each chain.\n parallelize : bool\n Indicates if parallelization via multiprocessing is desired.\n progressbar : bool\n Should we display a progress bar showing relative progress?\n \"\"\"\n self.nchains = len(steppers)\n self.is_parallelized = False\n self._primary_ends = []\n self._processes = []\n self._steppers = steppers\n if parallelize:\n try:\n # configure a child process for each stepper\n _log.info(\n \"Attempting to parallelize chains to all cores. You can turn this off with `pm.sample(cores=1)`.\"\n )\n import multiprocessing\n\n for c, stepper in (\n enumerate(progress_bar(steppers)) if progressbar else enumerate(steppers)\n ):\n secondary_end, primary_end = multiprocessing.Pipe()\n stepper_dumps = cloudpickle.dumps(stepper, protocol=4)\n process = multiprocessing.Process(\n target=self.__class__._run_secondary,\n args=(c, stepper_dumps, secondary_end),\n name=f\"ChainWalker{c}\",\n )\n # we want the child process to exit if the parent is terminated\n process.daemon = True\n # Starting the process might fail and takes time.\n # By doing it in the constructor, the sampling progress bar\n # will not be confused by the process start.\n process.start()\n self._primary_ends.append(primary_end)\n self._processes.append(process)\n self.is_parallelized = True\n except Exception:\n _log.info(\n \"Population parallelization failed. \"\n \"Falling back to sequential stepping of chains.\"\n )\n _log.debug(\"Error was: \", exec_info=True)\n else:\n _log.info(\n \"Chains are not parallelized. 
You can enable this by passing \"\n \"`pm.sample(cores=n)`, where n > 1.\"\n )\n return super().__init__()\n\n def __enter__(self):\n \"\"\"Do nothing: processes are already started in ``__init__``.\"\"\"\n return\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if len(self._processes) > 0:\n try:\n for primary_end in self._primary_ends:\n primary_end.send(None)\n for process in self._processes:\n process.join(timeout=3)\n except Exception:\n _log.warning(\"Termination failed.\")\n return\n\n @staticmethod\n def _run_secondary(c, stepper_dumps, secondary_end):\n \"\"\"This method is started on a separate process to perform stepping of a chain.\n\n Parameters\n ----------\n c : int\n number of this chain\n stepper : BlockedStep\n a step method such as CompoundStep\n secondary_end : multiprocessing.connection.PipeConnection\n This is our connection to the main process\n \"\"\"\n # re-seed each child process to make them unique\n np.random.seed(None)\n try:\n stepper = cloudpickle.loads(stepper_dumps)\n # the stepper is not necessarily a PopulationArraySharedStep itself,\n # but rather a CompoundStep. 
PopulationArrayStepShared.population\n # has to be updated, therefore we identify the substeppers first.\n population_steppers = []\n for sm in stepper.methods if isinstance(stepper, CompoundStep) else [stepper]:\n if isinstance(sm, PopulationArrayStepShared):\n population_steppers.append(sm)\n while True:\n incoming = secondary_end.recv()\n # receiving a None is the signal to exit\n if incoming is None:\n break\n tune_stop, population = incoming\n if tune_stop:\n stop_tuning(stepper)\n # forward the population to the PopulationArrayStepShared objects\n # This is necessary because due to the process fork, the population\n # object is no longer shared between the steppers.\n for popstep in population_steppers:\n popstep.population = population\n update = stepper.step(population[c])\n secondary_end.send(update)\n except Exception:\n _log.exception(f\"ChainWalker{c}\")\n return\n\n def step(self, tune_stop, population):\n \"\"\"Step the entire population of chains.\n\n Parameters\n ----------\n tune_stop : bool\n Indicates if the condition (i == tune) is fulfilled\n population : list\n Current Points of all chains\n\n Returns\n -------\n update : list\n List of (Point, stats) tuples for all chains\n \"\"\"\n updates = [None] * self.nchains\n if self.is_parallelized:\n for c in range(self.nchains):\n self._primary_ends[c].send((tune_stop, population))\n # Blockingly get the step outcomes\n for c in range(self.nchains):\n updates[c] = self._primary_ends[c].recv()\n else:\n for c in range(self.nchains):\n if tune_stop:\n self._steppers[c] = stop_tuning(self._steppers[c])\n updates[c] = self._steppers[c].step(population[c])\n return updates\n\n\ndef _prepare_iter_population(\n draws: int,\n chains: list,\n step,\n start: Sequence[PointType],\n parallelize: bool,\n tune=None,\n model=None,\n random_seed=None,\n progressbar=True,\n):\n \"\"\"Prepare a PopulationStepper and traces for population sampling.\n\n Parameters\n ----------\n draws : int\n The number of samples to 
def _prepare_iter_population(
    draws: int,
    chains: list,
    step,
    start: Sequence[PointType],
    parallelize: bool,
    tune=None,
    model=None,
    random_seed=None,
    progressbar=True,
):
    """Prepare a PopulationStepper and traces for population sampling.

    Parameters
    ----------
    draws : int
        The number of samples to draw
    chains : list
        The chain numbers in the population
    step : function
        Step function (should be or contain a population step method)
    start : list
        Start points for each chain
    parallelize : bool
        Setting for multiprocess parallelization
    tune : int, optional
        Number of iterations to tune, if applicable (defaults to None)
    model : Model (optional if in ``with`` context)
    random_seed : int or list of ints, optional
        A list is accepted if ``cores`` is greater than one.
    progressbar : bool
        ``progressbar`` argument for the ``PopulationStepper``, (defaults to True)

    Returns
    -------
    _iter_population : generator
        Yields traces of all chains at the same time
    """
    # NOTE(review): ``random_seed`` is accepted but never used in this body —
    # confirm whether seeding was meant to happen here.
    # chains contains the chain numbers, but for indexing we need indices...
    nchains = len(chains)
    model = modelcontext(model)
    draws = int(draws)

    if draws < 1:
        raise ValueError("Argument `draws` should be above 0.")

    # The initialization of traces, samplers and points must happen in the right order:
    # 1. traces are initialized
    # 2. population of points is created
    # 3. steppers are initialized and linked to the points object
    # 4. traces are configured to track the sampler stats
    # 5. a PopulationStepper is configured for parallelized stepping

    # 1. prepare a BaseTrace for each chain
    traces = [_choose_backend(None, model=model) for chain in chains]

    # 2. create a population (points) that tracks each chain
    # it is updated as the chains are advanced
    population = [start[c] for c in range(nchains)]

    # 3. Set up the steppers
    steppers: List[Step] = []
    for c in range(nchains):
        # need independent samplers for each chain
        # it is important to copy the actual steppers (but not the delta_logp)
        if isinstance(step, CompoundStep):
            chainstep = CompoundStep([copy(m) for m in step.methods])
        else:
            chainstep = copy(step)
        # link population samplers to the shared population state
        for sm in chainstep.methods if isinstance(step, CompoundStep) else [chainstep]:
            if isinstance(sm, PopulationArrayStepShared):
                sm.link_population(population, c)
        steppers.append(chainstep)

    # 4. configure tracking of sampler stats
    for c in range(nchains):
        if steppers[c].generates_stats and traces[c].supports_sampler_stats:
            traces[c].setup(draws, c, steppers[c].stats_dtypes)
        else:
            traces[c].setup(draws, c)

    # 5. configure the PopulationStepper (expensive call)
    popstep = PopulationStepper(steppers, parallelize, progressbar=progressbar)

    # Because the preparations above are expensive, the actual iterator is
    # in another method. This way the progbar will not be disturbed.
    return _iter_population(draws, tune, popstep, steppers, traces, population)


def _iter_population(draws, tune, popstep, steppers, traces, points):
    """Iterate a ``PopulationStepper``.

    Parameters
    ----------
    draws : int
        number of draws per chain
    tune : int
        number of tuning steps
    popstep : PopulationStepper
        the helper object for (parallelized) stepping of chains
    steppers : list
        The step methods for each chain
    traces : list
        Traces for each chain
    points : list
        population of chain states

    Yields
    ------
    traces : list
        List of trace objects of the individual chains
    """
    try:
        with popstep:
            # iterate draws of all chains
            for i in range(draws):
                # this call steps all chains and returns a list of (point, stats)
                # the `popstep` may interact with subprocesses internally
                updates = popstep.step(i == tune, points)

                # apply the update to the points and record to the traces
                for c, strace in enumerate(traces):
                    if steppers[c].generates_stats:
                        points[c], stats = updates[c]
                        if strace.supports_sampler_stats:
                            strace.record(points[c], stats)
                        else:
                            strace.record(points[c])
                    else:
                        points[c] = updates[c]
                        strace.record(points[c])
                # yield the state of all chains in parallel
                yield traces
    except KeyboardInterrupt:
        # user interrupt: close traces, finalize sampler reports, re-raise
        for c, strace in enumerate(traces):
            strace.close()
            if hasattr(steppers[c], "report"):
                steppers[c].report._finalize(strace)
        raise
    except BaseException:
        # any other failure: close traces without finalizing reports
        for c, strace in enumerate(traces):
            strace.close()
        raise
    else:
        # normal completion: close traces and finalize sampler reports
        for c, strace in enumerate(traces):
            strace.close()
            if hasattr(steppers[c], "report"):
                steppers[c].report._finalize(strace)
def _choose_backend(trace: Optional[Union[BaseTrace, List[str]]], **kwds) -> Backend:
    """Selects or creates a NDArray trace backend for a particular chain.

    Parameters
    ----------
    trace : BaseTrace, list, or None
        This should be a BaseTrace, or list of variables to track.
        If None or a list of variables, the NDArray backend is used.
    **kwds :
        keyword arguments to forward to the backend creation

    Returns
    -------
    trace : BaseTrace
        The incoming, or a brand new trace object.
    """
    # Non-empty pre-existing traces cannot be continued anymore.
    if isinstance(trace, BaseTrace) and len(trace) > 0:
        raise ValueError("Continuation of traces is no longer supported.")
    if isinstance(trace, MultiTrace):
        raise ValueError("Starting from existing MultiTrace objects is no longer supported.")

    if isinstance(trace, BaseTrace):
        return trace
    if trace is None:
        return NDArray(**kwds)

    # a list of variable names: track only those
    return NDArray(vars=trace, **kwds)


def _mp_sample(
    draws: int,
    tune: int,
    step,
    chains: int,
    cores: int,
    chain: int,
    random_seed: list,
    start: Sequence[PointType],
    progressbar=True,
    trace: Optional[Union[BaseTrace, List[str]]] = None,
    model=None,
    callback=None,
    discard_tuned_samples=True,
    mp_ctx=None,
    **kwargs,
):
    """Main iteration for multiprocess sampling.

    Parameters
    ----------
    draws : int
        The number of samples to draw
    tune : int, optional
        Number of iterations to tune, if applicable (defaults to None)
    step : function
        Step function
    chains : int
        The number of chains to sample.
    cores : int
        The number of chains to run in parallel.
    chain : int
        Number of the first chain.
    random_seed : list of ints
        Random seeds for each chain.
    start : list
        Starting points for each chain.
        Dicts must contain numeric (transformed) initial values for all (transformed) free variables.
    progressbar : bool
        Whether or not to display a progress bar in the command line.
    trace : BaseTrace, list, or None
        This should be a backend instance, or a list of variables to track
        If None or a list of variables, the NDArray backend is used.
    model : Model (optional if in ``with`` context)
    callback : Callable
        A function which gets called for every sample from the trace of a chain. The function is
        called with the trace and the current draw and will contain all samples for a single trace.
        the ``draw.chain`` argument can be used to determine which of the active chains the sample
        is drawn from.
        Sampling can be interrupted by throwing a ``KeyboardInterrupt`` in the callback.

    Returns
    -------
    trace : pymc.backends.base.MultiTrace
        A ``MultiTrace`` object that contains the samples for all chains.
    """
    # NOTE(review): ``**kwargs`` is accepted but never used in this body.
    import pymc.parallel_sampling as ps

    # We did draws += tune in pm.sample
    draws -= tune

    # one backend trace per chain; chain numbers start at ``chain``
    traces = []
    for idx in range(chain, chain + chains):
        if trace is not None:
            strace = _choose_backend(copy(trace), model=model)
        else:
            strace = _choose_backend(None, model=model)

        if step.generates_stats and strace.supports_sampler_stats:
            strace.setup(draws + tune, idx, step.stats_dtypes)
        else:
            strace.setup(draws + tune, idx)
        traces.append(strace)

    sampler = ps.ParallelSampler(
        draws,
        tune,
        chains,
        cores,
        random_seed,
        start,
        step,
        chain,
        progressbar,
        mp_ctx=mp_ctx,
    )
    try:
        try:
            with sampler:
                for draw in sampler:
                    # NOTE: the name ``trace`` is rebound here from the input
                    # argument to the per-chain backend trace.
                    trace = traces[draw.chain - chain]
                    if trace.supports_sampler_stats and draw.stats is not None:
                        trace.record(draw.point, draw.stats)
                    else:
                        trace.record(draw.point)
                    if draw.is_last:
                        trace.close()
                        if draw.warnings is not None:
                            trace._add_warnings(draw.warnings)

                    if callback is not None:
                        callback(trace=trace, draw=draw)

        except ps.ParallelSamplingError as error:
            # attach the failing chain's warnings, close everything, log a
            # summary, then propagate the error
            trace = traces[error._chain - chain]
            trace._add_warnings(error._warnings)
            for trace in traces:
                trace.close()

            multitrace = MultiTrace(traces)
            multitrace._report._log_summary()
            raise
        return MultiTrace(traces)
    except KeyboardInterrupt:
        # keep the subset of chains maximizing the usable sample count
        if discard_tuned_samples:
            traces, length = _choose_chains(traces, tune)
        else:
            traces, length = _choose_chains(traces, 0)
        return MultiTrace(traces)[:length]
    finally:
        for trace in traces:
            trace.close()
tune):\n \"\"\"\n Filter and slice traces such that (n_traces * len(shortest_trace)) is maximized.\n\n We get here after a ``KeyboardInterrupt``, and so the different\n traces have different lengths. We therefore pick the number of\n traces such that (number of traces) * (length of shortest trace)\n is maximised.\n \"\"\"\n if tune is None:\n tune = 0\n\n if not traces:\n return []\n\n lengths = [max(0, len(trace) - tune) for trace in traces]\n if not sum(lengths):\n raise ValueError(\"Not enough samples to build a trace.\")\n\n idxs = np.argsort(lengths)\n l_sort = np.array(lengths)[idxs]\n\n use_until = np.argmax(l_sort * np.arange(1, l_sort.shape[0] + 1)[::-1])\n final_length = l_sort[use_until]\n\n return [traces[idx] for idx in idxs[use_until:]], final_length + tune\n\n\ndef stop_tuning(step):\n \"\"\"Stop tuning the current step method.\"\"\"\n step.stop_tuning()\n return step\n\n\ndef sample_posterior_predictive(\n trace,\n samples: Optional[int] = None,\n model: Optional[Model] = None,\n var_names: Optional[List[str]] = None,\n size: Optional[int] = None,\n keep_size: Optional[bool] = False,\n random_seed=None,\n progressbar: bool = True,\n mode: Optional[Union[str, Mode]] = None,\n return_inferencedata=True,\n idata_kwargs: dict = None,\n) -> Union[InferenceData, Dict[str, np.ndarray]]:\n \"\"\"Generate posterior predictive samples from a model given a trace.\n\n Parameters\n ----------\n trace : backend, list, xarray.Dataset, arviz.InferenceData, or MultiTrace\n Trace generated from MCMC sampling, or a list of dicts (eg. points or from find_MAP()),\n or xarray.Dataset (eg. InferenceData.posterior or InferenceData.prior)\n samples : int\n Number of posterior predictive samples to generate. Defaults to one posterior predictive\n sample per posterior sample, that is, the number of draws times the number of chains. 
def sample_posterior_predictive(
    trace,
    samples: Optional[int] = None,
    model: Optional[Model] = None,
    var_names: Optional[List[str]] = None,
    size: Optional[int] = None,
    keep_size: Optional[bool] = False,
    random_seed=None,
    progressbar: bool = True,
    mode: Optional[Union[str, Mode]] = None,
    return_inferencedata=True,
    idata_kwargs: dict = None,
) -> Union[InferenceData, Dict[str, np.ndarray]]:
    """Generate posterior predictive samples from a model given a trace.

    Parameters
    ----------
    trace : backend, list, xarray.Dataset, arviz.InferenceData, or MultiTrace
        Trace generated from MCMC sampling, or a list of dicts (eg. points or from find_MAP()),
        or xarray.Dataset (eg. InferenceData.posterior or InferenceData.prior)
    samples : int
        Number of posterior predictive samples to generate. Defaults to one posterior predictive
        sample per posterior sample, that is, the number of draws times the number of chains. It
        is not recommended to modify this value; when modified, some chains may not be represented
        in the posterior predictive sample.
    model : Model (optional if in ``with`` context)
        Model used to generate ``trace``
    var_names : Iterable[str]
        Names of variables for which to compute the posterior predictive samples.
    size : int
        The number of random draws from the distribution specified by the parameters in each
        sample of the trace. Not recommended unless more than ndraws times nchains posterior
        predictive samples are needed.
    keep_size : bool, optional
        Force posterior predictive sample to have the same shape as posterior and sample stats
        data: ``(nchains, ndraws, ...)``. Overrides samples and size parameters.
    random_seed : int
        Seed for the random number generator.
    progressbar : bool
        Whether or not to display a progress bar in the command line. The bar shows the percentage
        of completion, the sampling speed in samples per second (SPS), and the estimated remaining
        time until completion ("expected time of arrival"; ETA).
    mode:
        The mode used by ``aesara.function`` to compile the graph.
    return_inferencedata : bool
        Whether to return an :class:`arviz:arviz.InferenceData` (True) object or a dictionary (False).
        Defaults to True.
    idata_kwargs : dict, optional
        Keyword arguments for :func:`pymc.to_inference_data`

    Returns
    -------
    arviz.InferenceData or Dict
        An ArviZ ``InferenceData`` object containing the posterior predictive samples (default), or
        a dictionary with variable names as keys, and samples as numpy arrays.
    """

    # Normalize InferenceData / xarray inputs to a list of points.
    _trace: Union[MultiTrace, PointList]
    if isinstance(trace, InferenceData):
        _trace = dataset_to_point_list(trace.posterior)
    elif isinstance(trace, xarray.Dataset):
        _trace = dataset_to_point_list(trace)
    else:
        _trace = trace

    nchain: int
    len_trace: int
    if isinstance(trace, (InferenceData, xarray.Dataset)):
        nchain, len_trace = chains_and_samples(trace)
    else:
        len_trace = len(_trace)
        try:
            nchain = _trace.nchains
        except AttributeError:
            nchain = 1

    if keep_size and samples is not None:
        raise IncorrectArgumentsError("Should not specify both keep_size and samples arguments")
    if keep_size and size is not None:
        raise IncorrectArgumentsError("Should not specify both keep_size and size arguments")

    if samples is None:
        if isinstance(_trace, MultiTrace):
            samples = sum(len(v) for v in _trace._straces.values())
        elif isinstance(_trace, list) and all(isinstance(x, dict) for x in _trace):
            # this is a list of points
            samples = len(_trace)
        else:
            raise TypeError(
                "Do not know how to compute number of samples for trace argument of type %s"
                % type(_trace)
            )

    assert samples is not None
    if samples < len_trace * nchain:
        warnings.warn(
            "samples parameter is smaller than nchains times ndraws, some draws "
            "and/or chains may not be represented in the returned posterior "
            "predictive sample",
            stacklevel=2,
        )

    model = modelcontext(model)

    if model.potentials:
        warnings.warn(
            "The effect of Potentials on other parameters is ignored during posterior predictive sampling. "
            "This is likely to lead to invalid or biased predictive samples.",
            UserWarning,
            stacklevel=2,
        )

    if var_names is not None:
        vars_ = [model[x] for x in var_names]
    else:
        vars_ = model.observed_RVs + model.auto_deterministics

    if random_seed is not None:
        warnings.warn(
            "In this version, RNG seeding is managed by the Model objects. "
            "See the `rng_seeder` argument in Model's constructor.",
            FutureWarning,
            stacklevel=2,
        )

    indices = np.arange(samples)

    if progressbar:
        indices = progress_bar(indices, total=samples, display=progressbar)

    vars_to_sample = list(get_default_varnames(vars_, include_transformed=False))

    if not vars_to_sample:
        return {}

    # Determine the graph inputs that must be fed from the trace points.
    if not hasattr(_trace, "varnames"):
        inputs_and_names = [
            (rv, rv.name)
            for rv in walk_model(vars_to_sample, walk_past_rvs=True)
            if rv not in vars_to_sample
            and rv in model.named_vars.values()
            and not isinstance(rv, SharedVariable)
        ]
        if inputs_and_names:
            inputs, input_names = zip(*inputs_and_names)
        else:
            inputs, input_names = [], []
    else:
        output_names = [v.name for v in vars_to_sample if v.name is not None]
        input_names = [
            n
            for n in _trace.varnames
            if n not in output_names and not isinstance(model[n], SharedVariable)
        ]
        inputs = [model[n] for n in input_names]

    if size is not None:
        vars_to_sample = [change_rv_size(v, size, expand=True) for v in vars_to_sample]

    sampler_fn = compile_rv_inplace(
        inputs,
        vars_to_sample,
        allow_input_downcast=True,
        accept_inplace=True,
        on_unused_input="ignore",
        mode=mode,
    )

    ppc_trace_t = _DefaultTrace(samples)
    try:
        if hasattr(_trace, "_straces"):
            # trace dict is unordered, but we want to return ppc samples in
            # a predictable ordering, so sort the chain indices
            chain_idx_mapping = sorted(_trace._straces.keys())
        for idx in indices:
            if nchain > 1:
                # the trace object will either be a MultiTrace (and have _straces)...
                if hasattr(_trace, "_straces"):
                    chain_idx, point_idx = np.divmod(idx, len_trace)
                    chain_idx = chain_idx % nchain
                    # chain indices might not always start at 0, convert to proper index
                    chain_idx = chain_idx_mapping[chain_idx]
                    param = cast(MultiTrace, _trace)._straces[chain_idx].point(point_idx)
                # ... or a PointList
                else:
                    param = cast(PointList, _trace)[idx % (len_trace * nchain)]
            # there's only a single chain, but the index might hit it multiple times if
            # the number of indices is greater than the length of the trace.
            else:
                param = _trace[idx % len_trace]

            values = sampler_fn(*(param[n] for n in input_names))

            for k, v in zip(vars_, values):
                ppc_trace_t.insert(k.name, v, idx)
    except KeyboardInterrupt:
        # allow interrupting; return whatever was sampled so far
        pass

    ppc_trace = ppc_trace_t.trace_dict
    if keep_size:
        for k, ary in ppc_trace.items():
            ppc_trace[k] = ary.reshape((nchain, len_trace, *ary.shape[1:]))

    if not return_inferencedata:
        return ppc_trace
    ikwargs = dict(model=model)
    if idata_kwargs:
        ikwargs.update(idata_kwargs)
    return pm.to_inference_data(posterior_predictive=ppc_trace, **ikwargs)
def sample_posterior_predictive_w(
    traces,
    samples: Optional[int] = None,
    models: Optional[List[Model]] = None,
    weights: Optional[ArrayLike] = None,
    random_seed: Optional[int] = None,
    progressbar: bool = True,
    return_inferencedata=True,
    idata_kwargs: dict = None,
):
    """Generate weighted posterior predictive samples from a list of models and
    a list of traces according to a set of weights.

    Parameters
    ----------
    traces : list or list of lists
        List of traces generated from MCMC sampling (xarray.Dataset, arviz.InferenceData, or
        MultiTrace), or a list of list containing dicts from find_MAP() or points. The number of
        traces should be equal to the number of weights.
    samples : int, optional
        Number of posterior predictive samples to generate. Defaults to the
        length of the shorter trace in traces.
    models : list of Model
        List of models used to generate the list of traces. The number of models should be equal to
        the number of weights and the number of observed RVs should be the same for all models.
        By default a single model will be inferred from ``with`` context, in this case results will
        only be meaningful if all models share the same distributions for the observed RVs.
    weights : array-like, optional
        Individual weights for each trace. Default, same weight for each model.
    random_seed : int, optional
        Seed for the random number generator.
    progressbar : bool, optional default True
        Whether or not to display a progress bar in the command line. The bar shows the percentage
        of completion, the sampling speed in samples per second (SPS), and the estimated remaining
        time until completion ("expected time of arrival"; ETA).
    return_inferencedata : bool
        Whether to return an :class:`arviz:arviz.InferenceData` (True) object or a dictionary (False).
        Defaults to True.
    idata_kwargs : dict, optional
        Keyword arguments for :func:`pymc.to_inference_data`

    Returns
    -------
    arviz.InferenceData or Dict
        An ArviZ ``InferenceData`` object containing the posterior predictive samples from the
        weighted models (default), or a dictionary with variable names as keys, and samples as
        numpy arrays.
    """
    # NOTE(review): the actual draw below raises NotImplementedError — this
    # function is currently non-functional pending a refactor (see XXX below).

    # Normalize all trace flavors to point lists and count samples per trace.
    if isinstance(traces[0], InferenceData):
        n_samples = [
            trace.posterior.sizes["chain"] * trace.posterior.sizes["draw"] for trace in traces
        ]
        traces = [dataset_to_point_list(trace.posterior) for trace in traces]
    elif isinstance(traces[0], xarray.Dataset):
        n_samples = [trace.sizes["chain"] * trace.sizes["draw"] for trace in traces]
        traces = [dataset_to_point_list(trace) for trace in traces]
    else:
        n_samples = [len(i) * i.nchains for i in traces]

    if models is None:
        models = [modelcontext(models)] * len(traces)

    if random_seed:
        warnings.warn(
            "In this version, RNG seeding is managed by the Model objects. "
            "See the `rng_seeder` argument in Model's constructor.",
            FutureWarning,
            stacklevel=2,
        )

    # warn once if any model has Potentials
    for model in models:
        if model.potentials:
            warnings.warn(
                "The effect of Potentials on other parameters is ignored during posterior predictive sampling. "
                "This is likely to lead to invalid or biased predictive samples.",
                UserWarning,
                stacklevel=2,
            )
            break

    if weights is None:
        weights = [1] * len(traces)

    if len(traces) != len(weights):
        raise ValueError("The number of traces and weights should be the same")

    if len(models) != len(weights):
        raise ValueError("The number of models and weights should be the same")

    length_morv = len(models[0].observed_RVs)
    if any(len(i.observed_RVs) != length_morv for i in models):
        raise ValueError("The number of observed RVs should be the same for all models")

    weights = np.asarray(weights)
    p = weights / np.sum(weights)

    min_tr = min(n_samples)

    # allocate draws per model proportionally to its weight
    n = (min_tr * p).astype("int")
    # ensure n sums up to min_tr
    idx = np.argmax(n)
    n[idx] = n[idx] + min_tr - np.sum(n)
    trace = []
    for i, j in enumerate(n):
        tr = traces[i]
        len_trace = len(tr)
        try:
            nchain = tr.nchains
        except AttributeError:
            nchain = 1

        indices = np.random.randint(0, nchain * len_trace, j)
        if nchain > 1:
            chain_idx, point_idx = np.divmod(indices, len_trace)
            for idx in zip(chain_idx, point_idx):
                trace.append(tr._straces[idx[0]].point(idx[1]))
        else:
            for idx in indices:
                trace.append(tr[idx])

    obs = [x for m in models for x in m.observed_RVs]
    variables = np.repeat(obs, n)

    lengths = list({np.atleast_1d(observed).shape for observed in obs})

    if len(lengths) == 1:
        size = [None for i in variables]
    elif len(lengths) > 2:
        raise ValueError("Observed variables could not be broadcast together")
    else:
        size = []
        x = np.zeros(shape=lengths[0])
        y = np.zeros(shape=lengths[1])
        b = np.broadcast(x, y)
        for var in variables:
            # XXX: This needs to be refactored
            shape = None  # np.shape(np.atleast_1d(var.distribution.default()))
            if shape != b.shape:
                size.append(b.shape)
            else:
                size.append(None)
    len_trace = len(trace)

    if samples is None:
        samples = len_trace

    indices = np.random.randint(0, len_trace, samples)

    if progressbar:
        indices = progress_bar(indices, total=samples, display=progressbar)

    try:
        ppc = defaultdict(list)
        for idx in indices:
            param = trace[idx]
            var = variables[idx]
            # TODO sample_posterior_predictive_w currently only works for models
            # with one observed.
            # XXX: This needs to be refactored
            # ppc[var.name].append(draw_values([var], point=param, size=size[idx])[0])
            raise NotImplementedError()

    except KeyboardInterrupt:
        pass
    else:
        ppc = {k: np.asarray(v) for k, v in ppc.items()}
    if not return_inferencedata:
        return ppc
    ikwargs = dict(model=models)
    if idata_kwargs:
        ikwargs.update(idata_kwargs)
    return pm.to_inference_data(posterior_predictive=ppc, **ikwargs)
def sample_prior_predictive(
    samples=500,
    model: Optional[Model] = None,
    var_names: Optional[Iterable[str]] = None,
    random_seed=None,
    mode: Optional[Union[str, Mode]] = None,
    return_inferencedata=True,
    idata_kwargs: dict = None,
) -> Union[InferenceData, Dict[str, np.ndarray]]:
    """Generate samples from the prior predictive distribution.

    Parameters
    ----------
    samples : int
        Number of samples from the prior predictive to generate. Defaults to 500.
    model : Model (optional if in ``with`` context)
    var_names : Iterable[str]
        A list of names of variables for which to compute the posterior predictive
        samples. Defaults to both observed and unobserved RVs. Transformed values
        are not included unless explicitly defined in var_names.
    random_seed : int
        Seed for the random number generator.
    mode:
        The mode used by ``aesara.function`` to compile the graph.
    return_inferencedata : bool
        Whether to return an :class:`arviz:arviz.InferenceData` (True) object or a dictionary (False).
        Defaults to True.
    idata_kwargs : dict, optional
        Keyword arguments for :func:`pymc.to_inference_data`

    Returns
    -------
    arviz.InferenceData or Dict
        An ArviZ ``InferenceData`` object containing the prior and prior predictive samples (default),
        or a dictionary with variable names as keys and samples as numpy arrays.
    """
    model = modelcontext(model)

    if model.potentials:
        warnings.warn(
            "The effect of Potentials on other parameters is ignored during prior predictive sampling. "
            "This is likely to lead to invalid or biased predictive samples.",
            UserWarning,
            stacklevel=2,
        )

    if var_names is None:
        prior_pred_vars = model.observed_RVs + model.auto_deterministics
        prior_vars = (
            get_default_varnames(model.unobserved_RVs, include_transformed=True) + model.potentials
        )
        vars_: Set[str] = {var.name for var in prior_vars + prior_pred_vars}
    else:
        vars_ = set(var_names)

    if random_seed is not None:
        warnings.warn(
            "In this version, RNG seeding is managed by the Model objects. "
            "See the `rng_seeder` argument in Model's constructor.",
            FutureWarning,
            stacklevel=2,
        )

    names = get_default_varnames(vars_, include_transformed=False)
    vars_to_sample = [model[name] for name in names]

    # Any variables from var_names that are missing must be transformed variables.
    # Misspelled variables would have raised a KeyError above.
    missing_names = vars_.difference(names)
    for name in missing_names:
        transformed_value_var = model[name]
        rv_var = model.values_to_rvs[transformed_value_var]
        transform = transformed_value_var.tag.transform
        transformed_rv_var = transform.forward(rv_var, *rv_var.owner.inputs)

        names.append(name)
        vars_to_sample.append(transformed_rv_var)

        # If the user asked for the transformed variable in var_names, but not the
        # original RV, we add it manually here
        if rv_var.name not in names:
            names.append(rv_var.name)
            vars_to_sample.append(rv_var)

    inputs = [i for i in inputvars(vars_to_sample) if not isinstance(i, SharedVariable)]

    sampler_fn = compile_rv_inplace(
        inputs, vars_to_sample, allow_input_downcast=True, accept_inplace=True, mode=mode
    )

    # one forward draw per requested sample; transpose to per-variable stacks
    values = zip(*(sampler_fn() for i in range(samples)))

    data = {k: np.stack(v) for k, v in zip(names, values)}
    # NOTE(review): ``data`` is a dict literal and can never be None — this
    # guard is dead code; confirm whether an emptiness check was intended.
    if data is None:
        raise AssertionError("No variables sampled: attempting to sample %s" % names)

    prior: Dict[str, np.ndarray] = {}
    for var_name in vars_:
        if var_name in data:
            prior[var_name] = data[var_name]

    if not return_inferencedata:
        return prior
    ikwargs = dict(model=model)
    if idata_kwargs:
        ikwargs.update(idata_kwargs)
    return pm.to_inference_data(prior=prior, **ikwargs)
whether the jittered starting\n values produce a finite log probability. Invalid values are resampled\n unless `jitter_max_retries` is achieved, in which case the last sampled\n values are returned.\n\n Parameters\n ----------\n jitter: bool\n Whether to apply jitter or not.\n jitter_max_retries : int\n Maximum number of repeated attempts at initializing values (per chain).\n\n Returns\n -------\n start : ``pymc.model.Point``\n Starting point for sampler\n \"\"\"\n\n ipfns = make_initial_point_fns_per_chain(\n model=model,\n overrides=initvals,\n jitter_rvs=set(model.free_RVs) if jitter else {},\n chains=len(seeds),\n )\n\n if not jitter:\n return [ipfn(seed) for ipfn, seed in zip(ipfns, seeds)]\n\n initial_points = []\n for ipfn, seed in zip(ipfns, seeds):\n rng = np.random.RandomState(seed)\n for i in range(jitter_max_retries + 1):\n point = ipfn(seed)\n if i < jitter_max_retries:\n try:\n model.check_start_vals(point)\n except SamplingError:\n # Retry with a new seed\n seed = rng.randint(2 ** 30, dtype=np.int64)\n else:\n break\n initial_points.append(point)\n return initial_points\n\n\ndef init_nuts(\n *,\n init=\"auto\",\n chains=1,\n n_init=500_000,\n model=None,\n seeds: Sequence[int] = None,\n progressbar=True,\n jitter_max_retries=10,\n tune=None,\n initvals: Optional[Union[StartDict, Sequence[Optional[StartDict]]]] = None,\n **kwargs,\n) -> Tuple[Sequence[PointType], NUTS]:\n \"\"\"Set up the mass matrix initialization for NUTS.\n\n NUTS convergence and sampling speed is extremely dependent on the\n choice of mass/scaling matrix. This function implements different\n methods for choosing or adapting the mass matrix.\n\n Parameters\n ----------\n init : str\n Initialization method to use.\n\n * auto: Choose a default initialization method automatically.\n Currently, this is ``jitter+adapt_diag``, but this can change in the future. 
If you\n depend on the exact behaviour, choose an initialization method explicitly.\n * adapt_diag: Start with a identity mass matrix and then adapt a diagonal based on the\n variance of the tuning samples. All chains use the test value (usually the prior mean)\n as starting point.\n * jitter+adapt_diag: Same as ``adapt_diag``, but use test value plus a uniform jitter in\n [-1, 1] as starting point in each chain.\n * jitter+adapt_diag_grad:\n An experimental initialization method that uses information from gradients and samples\n during tuning.\n * advi+adapt_diag: Run ADVI and then adapt the resulting diagonal mass matrix based on the\n sample variance of the tuning samples.\n * advi: Run ADVI to estimate posterior mean and diagonal mass matrix.\n * advi_map: Initialize ADVI with MAP and use MAP as starting point.\n * map: Use the MAP as starting point. This is discouraged.\n * adapt_full: Adapt a dense mass matrix using the sample covariances. All chains use the\n test value (usually the prior mean) as starting point.\n * jitter+adapt_full: Same as ``adapt_full``, but use test value plus a uniform jitter in\n [-1, 1] as starting point in each chain.\n\n chains : int\n Number of jobs to start.\n initvals : optional, dict or list of dicts\n Dict or list of dicts with initial values to use instead of the defaults from `Model.initial_values`.\n The keys should be names of transformed random variables.\n n_init : int\n Number of iterations of initializer. Only works for 'ADVI' init methods.\n model : Model (optional if in ``with`` context)\n seeds : list\n Seed values for each chain.\n progressbar : bool\n Whether or not to display a progressbar for advi sampling.\n jitter_max_retries : int\n Maximum number of repeated attempts (per chain) at creating an initial matrix with uniform jitter\n that yields a finite probability. 
This applies to ``jitter+adapt_diag`` and ``jitter+adapt_full``\n init methods.\n **kwargs : keyword arguments\n Extra keyword arguments are forwarded to pymc.NUTS.\n\n Returns\n -------\n initial_points : list\n Starting points for each chain.\n nuts_sampler : ``pymc.step_methods.NUTS``\n Instantiated and initialized NUTS sampler object\n \"\"\"\n model = modelcontext(model)\n\n vars = kwargs.get(\"vars\", model.value_vars)\n if set(vars) != set(model.value_vars):\n raise ValueError(\"Must use init_nuts on all variables of a model.\")\n if not all_continuous(vars, model):\n raise ValueError(\"init_nuts can only be used for models with only \" \"continuous variables.\")\n\n if not isinstance(init, str):\n raise TypeError(\"init must be a string.\")\n\n if init is not None:\n init = init.lower()\n\n if init == \"auto\":\n init = \"jitter+adapt_diag\"\n\n if seeds is None:\n seeds = model.rng_seeder.randint(2 ** 30, dtype=np.int64, size=chains)\n if not isinstance(seeds, (list, tuple, np.ndarray)):\n raise ValueError(f\"The `seeds` must be array-like. 
Got {type(seeds)} instead.\")\n if len(seeds) != chains:\n raise ValueError(\n f\"Number of seeds ({len(seeds)}) does not match the number of chains ({chains}).\"\n )\n\n _log.info(f\"Initializing NUTS using {init}...\")\n\n cb = [\n pm.callbacks.CheckParametersConvergence(tolerance=1e-2, diff=\"absolute\"),\n pm.callbacks.CheckParametersConvergence(tolerance=1e-2, diff=\"relative\"),\n ]\n\n initial_points = _init_jitter(\n model,\n initvals,\n seeds=seeds,\n jitter=\"jitter\" in init,\n jitter_max_retries=jitter_max_retries,\n )\n\n apoints = [DictToArrayBijection.map(point) for point in initial_points]\n apoints_data = [apoint.data for apoint in apoints]\n\n if init == \"adapt_diag\":\n mean = np.mean(apoints_data, axis=0)\n var = np.ones_like(mean)\n n = len(var)\n potential = quadpotential.QuadPotentialDiagAdapt(n, mean, var, 10)\n elif init == \"jitter+adapt_diag\":\n mean = np.mean(apoints_data, axis=0)\n var = np.ones_like(mean)\n n = len(var)\n potential = quadpotential.QuadPotentialDiagAdapt(n, mean, var, 10)\n elif init == \"jitter+adapt_diag_grad\":\n mean = np.mean(apoints_data, axis=0)\n var = np.ones_like(mean)\n n = len(var)\n\n if tune is not None and tune > 250:\n stop_adaptation = tune - 50\n else:\n stop_adaptation = None\n\n potential = quadpotential.QuadPotentialDiagAdaptExp(\n n,\n mean,\n alpha=0.02,\n use_grads=True,\n stop_adaptation=stop_adaptation,\n )\n elif init == \"advi+adapt_diag\":\n approx = pm.fit(\n random_seed=seeds[0],\n n=n_init,\n method=\"advi\",\n model=model,\n callbacks=cb,\n progressbar=progressbar,\n obj_optimizer=pm.adagrad_window,\n )\n initial_points = list(approx.sample(draws=chains))\n std_apoint = approx.std.eval()\n cov = std_apoint ** 2\n mean = approx.mean.get_value()\n weight = 50\n n = len(cov)\n potential = quadpotential.QuadPotentialDiagAdapt(n, mean, cov, weight)\n elif init == \"advi\":\n approx = pm.fit(\n random_seed=seeds[0],\n n=n_init,\n method=\"advi\",\n model=model,\n callbacks=cb,\n 
progressbar=progressbar,\n obj_optimizer=pm.adagrad_window,\n )\n initial_points = list(approx.sample(draws=chains))\n cov = approx.std.eval() ** 2\n potential = quadpotential.QuadPotentialDiag(cov)\n elif init == \"advi_map\":\n start = pm.find_MAP(include_transformed=True)\n approx = pm.MeanField(model=model, start=start)\n pm.fit(\n random_seed=seeds[0],\n n=n_init,\n method=pm.KLqp(approx),\n callbacks=cb,\n progressbar=progressbar,\n obj_optimizer=pm.adagrad_window,\n )\n initial_points = list(approx.sample(draws=chains))\n cov = approx.std.eval() ** 2\n potential = quadpotential.QuadPotentialDiag(cov)\n elif init == \"map\":\n start = pm.find_MAP(include_transformed=True)\n cov = pm.find_hessian(point=start)\n initial_points = [start] * chains\n potential = quadpotential.QuadPotentialFull(cov)\n elif init == \"adapt_full\":\n mean = np.mean(apoints_data * chains, axis=0)\n initial_point = initial_points[0]\n initial_point_model_size = sum(initial_point[n.name].size for n in model.value_vars)\n cov = np.eye(initial_point_model_size)\n potential = quadpotential.QuadPotentialFullAdapt(initial_point_model_size, mean, cov, 10)\n elif init == \"jitter+adapt_full\":\n mean = np.mean(apoints_data, axis=0)\n initial_point = initial_points[0]\n initial_point_model_size = sum(initial_point[n.name].size for n in model.value_vars)\n cov = np.eye(initial_point_model_size)\n potential = quadpotential.QuadPotentialFullAdapt(initial_point_model_size, mean, cov, 10)\n else:\n raise ValueError(f\"Unknown initializer: {init}.\")\n\n step = pm.NUTS(potential=potential, model=model, **kwargs)\n\n return initial_points, step\n",
"# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom itertools import product\n\nimport aesara\nimport aesara.tensor as at\nimport numpy as np\nimport numpy.ma as ma\nimport numpy.testing as npt\nimport pandas as pd\nimport pytest\nimport scipy.sparse as sps\n\nfrom aesara.graph.basic import Constant, Variable, ancestors\nfrom aesara.tensor.random.basic import normal, uniform\nfrom aesara.tensor.random.op import RandomVariable\nfrom aesara.tensor.subtensor import AdvancedIncSubtensor, AdvancedIncSubtensor1\nfrom aesara.tensor.type import TensorType\nfrom aesara.tensor.var import TensorVariable\n\nimport pymc as pm\n\nfrom pymc.aesaraf import (\n _conversion_map,\n change_rv_size,\n extract_obs_data,\n pandas_to_array,\n rvs_to_value_vars,\n take_along_axis,\n walk_model,\n)\nfrom pymc.exceptions import ShapeError\nfrom pymc.vartypes import int_types\n\nFLOATX = str(aesara.config.floatX)\nINTX = str(_conversion_map[FLOATX])\n\n\ndef test_change_rv_size():\n loc = at.as_tensor_variable([1, 2])\n rv = normal(loc=loc)\n assert rv.ndim == 1\n assert tuple(rv.shape.eval()) == (2,)\n\n with pytest.raises(ShapeError, match=\"must be ≤1-dimensional\"):\n change_rv_size(rv, new_size=[[2, 3]])\n with pytest.raises(ShapeError, match=\"must be ≤1-dimensional\"):\n change_rv_size(rv, new_size=at.as_tensor_variable([[2, 3], [4, 5]]))\n\n rv_new = change_rv_size(rv, new_size=(3,), expand=True)\n assert rv_new.ndim == 2\n assert 
tuple(rv_new.shape.eval()) == (3, 2)\n\n # Make sure that the shape used to determine the expanded size doesn't\n # depend on the old `RandomVariable`.\n rv_new_ancestors = set(ancestors((rv_new,)))\n assert loc in rv_new_ancestors\n assert rv not in rv_new_ancestors\n\n rv_newer = change_rv_size(rv_new, new_size=(4,), expand=True)\n assert rv_newer.ndim == 3\n assert tuple(rv_newer.shape.eval()) == (4, 3, 2)\n\n # Make sure we avoid introducing a `Cast` by converting the new size before\n # constructing the new `RandomVariable`\n rv = normal(0, 1)\n new_size = np.array([4, 3], dtype=\"int32\")\n rv_newer = change_rv_size(rv, new_size=new_size, expand=False)\n assert rv_newer.ndim == 2\n assert isinstance(rv_newer.owner.inputs[1], Constant)\n assert tuple(rv_newer.shape.eval()) == (4, 3)\n\n rv = normal(0, 1)\n new_size = at.as_tensor(np.array([4, 3], dtype=\"int32\"))\n rv_newer = change_rv_size(rv, new_size=new_size, expand=True)\n assert rv_newer.ndim == 2\n assert tuple(rv_newer.shape.eval()) == (4, 3)\n\n rv = normal(0, 1)\n new_size = at.as_tensor(2, dtype=\"int32\")\n rv_newer = change_rv_size(rv, new_size=new_size, expand=True)\n assert rv_newer.ndim == 1\n assert tuple(rv_newer.shape.eval()) == (2,)\n\n\nclass TestBroadcasting:\n def test_make_shared_replacements(self):\n \"\"\"Check if pm.make_shared_replacements preserves broadcasting.\"\"\"\n\n with pm.Model() as test_model:\n test1 = pm.Normal(\"test1\", mu=0.0, sigma=1.0, size=(1, 10))\n test2 = pm.Normal(\"test2\", mu=0.0, sigma=1.0, size=(10, 1))\n\n # Replace test1 with a shared variable, keep test 2 the same\n replacement = pm.make_shared_replacements(\n test_model.initial_point, [test_model.test2], test_model\n )\n assert (\n test_model.test1.broadcastable\n == replacement[test_model.test1.tag.value_var].broadcastable\n )\n\n def test_metropolis_sampling(self):\n \"\"\"Check if the Metropolis sampler can handle broadcasting.\"\"\"\n with pm.Model() as test_model:\n test1 = pm.Normal(\"test1\", 
mu=0.0, sigma=1.0, size=(1, 10))\n test2 = pm.Normal(\"test2\", mu=test1, sigma=1.0, size=(10, 10))\n\n step = pm.Metropolis()\n # TODO FIXME: Assert whatever it is we're testing\n pm.sample(tune=5, draws=7, cores=1, step=step, compute_convergence_checks=False)\n\n\ndef _make_along_axis_idx(arr_shape, indices, axis):\n # compute dimensions to iterate over\n if str(indices.dtype) not in int_types:\n raise IndexError(\"`indices` must be an integer array\")\n shape_ones = (1,) * indices.ndim\n dest_dims = list(range(axis)) + [None] + list(range(axis + 1, indices.ndim))\n\n # build a fancy index, consisting of orthogonal aranges, with the\n # requested index inserted at the right location\n fancy_index = []\n for dim, n in zip(dest_dims, arr_shape):\n if dim is None:\n fancy_index.append(indices)\n else:\n ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim + 1 :]\n fancy_index.append(np.arange(n).reshape(ind_shape))\n\n return tuple(fancy_index)\n\n\nif hasattr(np, \"take_along_axis\"):\n np_take_along_axis = np.take_along_axis\nelse:\n\n def np_take_along_axis(arr, indices, axis):\n if arr.shape[axis] <= 32:\n # We can safely test with numpy's choose\n arr = np.moveaxis(arr, axis, 0)\n indices = np.moveaxis(indices, axis, 0)\n out = np.choose(indices, arr)\n return np.moveaxis(out, 0, axis)\n else:\n # numpy's choose cannot handle such a large axis so we\n # just use the implementation of take_along_axis. 
This is kind of\n # cheating because our implementation is the same as the one below\n if axis < 0:\n _axis = arr.ndim + axis\n else:\n _axis = axis\n if _axis < 0 or _axis >= arr.ndim:\n raise ValueError(f\"Supplied axis {axis} is out of bounds\")\n return arr[_make_along_axis_idx(arr.shape, indices, _axis)]\n\n\nclass TestTakeAlongAxis:\n def setup_class(self):\n self.inputs_buffer = dict()\n self.output_buffer = dict()\n self.func_buffer = dict()\n\n def _input_tensors(self, shape):\n ndim = len(shape)\n arr = TensorType(FLOATX, [False] * ndim)(\"arr\")\n indices = TensorType(INTX, [False] * ndim)(\"indices\")\n arr.tag.test_value = np.zeros(shape, dtype=FLOATX)\n indices.tag.test_value = np.zeros(shape, dtype=INTX)\n return arr, indices\n\n def get_input_tensors(self, shape):\n ndim = len(shape)\n try:\n return self.inputs_buffer[ndim]\n except KeyError:\n arr, indices = self._input_tensors(shape)\n self.inputs_buffer[ndim] = arr, indices\n return arr, indices\n\n def _output_tensor(self, arr, indices, axis):\n return take_along_axis(arr, indices, axis)\n\n def get_output_tensors(self, shape, axis):\n ndim = len(shape)\n try:\n return self.output_buffer[(ndim, axis)]\n except KeyError:\n arr, indices = self.get_input_tensors(shape)\n out = self._output_tensor(arr, indices, axis)\n self.output_buffer[(ndim, axis)] = out\n return out\n\n def _function(self, arr, indices, out):\n return aesara.function([arr, indices], [out])\n\n def get_function(self, shape, axis):\n ndim = len(shape)\n try:\n return self.func_buffer[(ndim, axis)]\n except KeyError:\n arr, indices = self.get_input_tensors(shape)\n out = self.get_output_tensors(shape, axis)\n func = self._function(arr, indices, out)\n self.func_buffer[(ndim, axis)] = func\n return func\n\n @staticmethod\n def get_input_values(shape, axis, samples):\n arr = np.random.randn(*shape).astype(FLOATX)\n size = list(shape)\n size[axis] = samples\n size = tuple(size)\n indices = np.random.randint(low=0, high=shape[axis], 
size=size, dtype=INTX)\n return arr, indices\n\n @pytest.mark.parametrize(\n [\"shape\", \"axis\", \"samples\"],\n product(\n [\n (1,),\n (3,),\n (3, 1),\n (3, 2),\n (1, 1),\n (1, 2),\n (40, 40), # choose fails here\n (5, 1, 1),\n (5, 1, 2),\n (5, 3, 1),\n (5, 3, 2),\n ],\n [0, -1],\n [1, 10],\n ),\n ids=str,\n )\n def test_take_along_axis(self, shape, axis, samples):\n arr, indices = self.get_input_values(shape, axis, samples)\n func = self.get_function(shape, axis)\n assert np.allclose(np_take_along_axis(arr, indices, axis=axis), func(arr, indices)[0])\n\n @pytest.mark.parametrize(\n [\"shape\", \"axis\", \"samples\"],\n product(\n [\n (1,),\n (3,),\n (3, 1),\n (3, 2),\n (1, 1),\n (1, 2),\n (40, 40), # choose fails here\n (5, 1, 1),\n (5, 1, 2),\n (5, 3, 1),\n (5, 3, 2),\n ],\n [0, -1],\n [1, 10],\n ),\n ids=str,\n )\n def test_take_along_axis_grad(self, shape, axis, samples):\n if axis < 0:\n _axis = len(shape) + axis\n else:\n _axis = axis\n # Setup the aesara function\n t_arr, t_indices = self.get_input_tensors(shape)\n t_out2 = aesara.grad(\n at.sum(self._output_tensor(t_arr ** 2, t_indices, axis)),\n t_arr,\n )\n func = aesara.function([t_arr, t_indices], [t_out2])\n\n # Test that the gradient gives the same output as what is expected\n arr, indices = self.get_input_values(shape, axis, samples)\n expected_grad = np.zeros_like(arr)\n slicer = [slice(None)] * len(shape)\n for i in range(indices.shape[axis]):\n slicer[axis] = i\n inds = indices[tuple(slicer)].reshape(shape[:_axis] + (1,) + shape[_axis + 1 :])\n inds = _make_along_axis_idx(shape, inds, _axis)\n expected_grad[inds] += 1\n expected_grad *= 2 * arr\n out = func(arr, indices)[0]\n assert np.allclose(out, expected_grad)\n\n @pytest.mark.parametrize(\"axis\", [-4, 4], ids=str)\n def test_axis_failure(self, axis):\n arr, indices = self.get_input_tensors((3, 1))\n with pytest.raises(ValueError):\n take_along_axis(arr, indices, axis=axis)\n\n def test_ndim_failure(self):\n arr = TensorType(FLOATX, 
[False] * 3)(\"arr\")\n indices = TensorType(INTX, [False] * 2)(\"indices\")\n arr.tag.test_value = np.zeros((1,) * arr.ndim, dtype=FLOATX)\n indices.tag.test_value = np.zeros((1,) * indices.ndim, dtype=INTX)\n with pytest.raises(ValueError):\n take_along_axis(arr, indices)\n\n def test_dtype_failure(self):\n arr = TensorType(FLOATX, [False] * 3)(\"arr\")\n indices = TensorType(FLOATX, [False] * 3)(\"indices\")\n arr.tag.test_value = np.zeros((1,) * arr.ndim, dtype=FLOATX)\n indices.tag.test_value = np.zeros((1,) * indices.ndim, dtype=FLOATX)\n with pytest.raises(IndexError):\n take_along_axis(arr, indices)\n\n\ndef test_extract_obs_data():\n\n with pytest.raises(TypeError):\n extract_obs_data(at.matrix())\n\n data = np.random.normal(size=(2, 3))\n data_at = at.as_tensor(data)\n mask = np.random.binomial(1, 0.5, size=(2, 3)).astype(bool)\n\n for val_at in (data_at, aesara.shared(data)):\n res = extract_obs_data(val_at)\n\n assert isinstance(res, np.ndarray)\n assert np.array_equal(res, data)\n\n # AdvancedIncSubtensor check\n data_m = np.ma.MaskedArray(data, mask)\n missing_values = data_at.type()[mask]\n constant = at.as_tensor(data_m.filled())\n z_at = at.set_subtensor(constant[mask.nonzero()], missing_values)\n\n assert isinstance(z_at.owner.op, (AdvancedIncSubtensor, AdvancedIncSubtensor1))\n\n res = extract_obs_data(z_at)\n\n assert isinstance(res, np.ndarray)\n assert np.ma.allequal(res, data_m)\n\n # AdvancedIncSubtensor1 check\n data = np.random.normal(size=(3,))\n data_at = at.as_tensor(data)\n mask = np.random.binomial(1, 0.5, size=(3,)).astype(bool)\n\n data_m = np.ma.MaskedArray(data, mask)\n missing_values = data_at.type()[mask]\n constant = at.as_tensor(data_m.filled())\n z_at = at.set_subtensor(constant[mask.nonzero()], missing_values)\n\n assert isinstance(z_at.owner.op, (AdvancedIncSubtensor, AdvancedIncSubtensor1))\n\n res = extract_obs_data(z_at)\n\n assert isinstance(res, np.ndarray)\n assert np.ma.allequal(res, data_m)\n\n\[email 
protected](\"input_dtype\", [\"int32\", \"int64\", \"float32\", \"float64\"])\ndef test_pandas_to_array(input_dtype):\n \"\"\"\n Ensure that pandas_to_array returns the dense array, masked array,\n graph variable, TensorVariable, or sparse matrix as appropriate.\n \"\"\"\n # Create the various inputs to the function\n sparse_input = sps.csr_matrix(np.eye(3)).astype(input_dtype)\n dense_input = np.arange(9).reshape((3, 3)).astype(input_dtype)\n\n input_name = \"input_variable\"\n aesara_graph_input = at.as_tensor(dense_input, name=input_name)\n pandas_input = pd.DataFrame(dense_input)\n\n # All the even numbers are replaced with NaN\n missing_numpy_input = np.array([[np.nan, 1, np.nan], [3, np.nan, 5], [np.nan, 7, np.nan]])\n missing_pandas_input = pd.DataFrame(missing_numpy_input)\n masked_array_input = ma.array(dense_input, mask=(np.mod(dense_input, 2) == 0))\n\n # Create a generator object. Apparently the generator object needs to\n # yield numpy arrays.\n square_generator = (np.array([i ** 2], dtype=int) for i in range(100))\n\n # Alias the function to be tested\n func = pandas_to_array\n\n #####\n # Perform the various tests\n #####\n # Check function behavior with dense arrays and pandas dataframes\n # without missing values\n for input_value in [dense_input, pandas_input]:\n func_output = func(input_value)\n assert isinstance(func_output, np.ndarray)\n assert func_output.shape == input_value.shape\n npt.assert_allclose(func_output, dense_input)\n\n # Check function behavior with sparse matrix inputs\n sparse_output = func(sparse_input)\n assert sps.issparse(sparse_output)\n assert sparse_output.shape == sparse_input.shape\n npt.assert_allclose(sparse_output.toarray(), sparse_input.toarray())\n\n # Check function behavior when using masked array inputs and pandas\n # objects with missing data\n for input_value in [missing_numpy_input, masked_array_input, missing_pandas_input]:\n func_output = func(input_value)\n assert isinstance(func_output, 
ma.core.MaskedArray)\n assert func_output.shape == input_value.shape\n npt.assert_allclose(func_output, masked_array_input)\n\n # Check function behavior with Aesara graph variable\n aesara_output = func(aesara_graph_input)\n assert isinstance(aesara_output, Variable)\n npt.assert_allclose(aesara_output.eval(), aesara_graph_input.eval())\n intX = pm.aesaraf._conversion_map[aesara.config.floatX]\n if dense_input.dtype == intX or dense_input.dtype == aesara.config.floatX:\n assert aesara_output.owner is None # func should not have added new nodes\n assert aesara_output.name == input_name\n else:\n assert aesara_output.owner is not None # func should have casted\n assert aesara_output.owner.inputs[0].name == input_name\n\n if \"float\" in input_dtype:\n assert aesara_output.dtype == aesara.config.floatX\n else:\n assert aesara_output.dtype == intX\n\n # Check function behavior with generator data\n generator_output = func(square_generator)\n\n # Output is wrapped with `pm.floatX`, and this unwraps\n wrapped = generator_output.owner.inputs[0]\n # Make sure the returned object has .set_gen and .set_default methods\n assert hasattr(wrapped, \"set_gen\")\n assert hasattr(wrapped, \"set_default\")\n # Make sure the returned object is an Aesara TensorVariable\n assert isinstance(wrapped, TensorVariable)\n\n\ndef test_pandas_to_array_pandas_index():\n data = pd.Index([1, 2, 3])\n result = pandas_to_array(data)\n expected = np.array([1, 2, 3])\n np.testing.assert_array_equal(result, expected)\n\n\ndef test_walk_model():\n d = at.vector(\"d\")\n b = at.vector(\"b\")\n c = uniform(0.0, d)\n c.name = \"c\"\n e = at.log(c)\n a = normal(e, b)\n a.name = \"a\"\n\n test_graph = at.exp(a + 1)\n res = list(walk_model((test_graph,)))\n assert a in res\n assert c not in res\n\n res = list(walk_model((test_graph,), walk_past_rvs=True))\n assert a in res\n assert c in res\n\n res = list(walk_model((test_graph,), walk_past_rvs=True, stop_at_vars={e}))\n assert a in res\n assert c not in 
res\n\n\ndef test_rvs_to_value_vars():\n\n with pm.Model() as m:\n a = pm.Uniform(\"a\", 0.0, 1.0)\n b = pm.Uniform(\"b\", 0, a + 1.0)\n c = pm.Normal(\"c\")\n d = at.log(c + b) + 2.0\n\n a_value_var = m.rvs_to_values[a]\n assert a_value_var.tag.transform\n\n b_value_var = m.rvs_to_values[b]\n c_value_var = m.rvs_to_values[c]\n\n (res,), replaced = rvs_to_value_vars((d,))\n\n assert res.owner.op == at.add\n log_output = res.owner.inputs[0]\n assert log_output.owner.op == at.log\n log_add_output = res.owner.inputs[0].owner.inputs[0]\n assert log_add_output.owner.op == at.add\n c_output = log_add_output.owner.inputs[0]\n\n # We make sure that the random variables were replaced\n # with their value variables\n assert c_output == c_value_var\n b_output = log_add_output.owner.inputs[1]\n assert b_output == b_value_var\n\n res_ancestors = list(walk_model((res,), walk_past_rvs=True))\n res_rv_ancestors = [\n v for v in res_ancestors if v.owner and isinstance(v.owner.op, RandomVariable)\n ]\n\n # There shouldn't be any `RandomVariable`s in the resulting graph\n assert len(res_rv_ancestors) == 0\n assert b_value_var in res_ancestors\n assert c_value_var in res_ancestors\n assert a_value_var not in res_ancestors\n\n (res,), replaced = rvs_to_value_vars((d,), apply_transforms=True)\n\n res_ancestors = list(walk_model((res,), walk_past_rvs=True))\n res_rv_ancestors = [\n v for v in res_ancestors if v.owner and isinstance(v.owner.op, RandomVariable)\n ]\n\n assert len(res_rv_ancestors) == 0\n assert a_value_var in res_ancestors\n assert b_value_var in res_ancestors\n assert c_value_var in res_ancestors\n"
] | [
[
"numpy.atleast_1d",
"numpy.asarray",
"numpy.broadcast_to"
],
[
"numpy.asarray",
"numpy.repeat",
"pandas.Timestamp.now"
],
[
"matplotlib.pyplot.title",
"numpy.linspace"
],
[
"numpy.asarray",
"numpy.broadcast",
"numpy.mean",
"numpy.random.randint",
"numpy.ones_like",
"numpy.arange",
"numpy.eye",
"numpy.stack",
"numpy.atleast_1d",
"numpy.argmax",
"numpy.repeat",
"numpy.zeros",
"numpy.divmod",
"numpy.argsort",
"numpy.array",
"numpy.random.RandomState",
"numpy.sum",
"numpy.random.seed",
"numpy.shape",
"numpy.vstack"
],
[
"numpy.ma.allequal",
"pandas.DataFrame",
"numpy.zeros_like",
"numpy.random.randn",
"numpy.moveaxis",
"numpy.random.randint",
"scipy.sparse.issparse",
"numpy.allclose",
"numpy.arange",
"numpy.eye",
"pandas.Index",
"numpy.choose",
"numpy.zeros",
"numpy.testing.assert_allclose",
"numpy.random.binomial",
"numpy.array",
"numpy.ma.MaskedArray",
"numpy.array_equal",
"numpy.testing.assert_array_equal",
"numpy.random.normal",
"numpy.mod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
zouguojian/FDN-Learning | [
"ef89cb7d6654d5ac2425621ec6b330652a281f16"
] | [
"data_/data_save.py"
] | [
"# -- coding: utf-8 --\n\nimport os\nimport csv\n\ndef go_though(open_file):\n for root, dirs, files in os.walk(r''+str(open_file)):\n for file in files:\n # # 获取文件所属目录\n # print(root)\n # 获取文件路径\n print(os.path.join(root, file))\n\ndef re_hour(hour):\n if len(str(hour))<2:return '0'+str(hour)\n else:return str(hour)\n\ndef re_month(month):\n if len(str(month))<2:return '0'+str(month)\n else:return str(month)\n\ndef re_day(day):\n if len(str(day))<2:return '0'+str(day)\n else:return str(day)\n\nfrom datetime import datetime\ndef re_time(time_list): #[year,month,day,hour]\n time =''.join([time_list[i]+'-' for i in range(len(time_list))]).strip('-')\n time = datetime.strptime(time, '%Y-%m-%d-%H')\n return time\n\nimport numpy as np\ndef witer_(open_file,day,month,year,format,writer):\n for d in range(day, 32):\n if os.path.exists(open_file + year + re_month(month) + re_day(d) + format):\n read = pd.read_csv(open_file + year + re_month(month) + re_day(d) + format)\n print(list(np.reshape(sites[:,0],newshape=(-1))))\n data=read[list(sites[:,0])]\n # return read.loc[read['城市'] == '上海'].values\n # print(open_file + year + re_month(month) + re_day(d) + format)\n\ndef go_though(open_file,day,month,year,format,write_file,write_name):\n file = open(write_file+write_name, 'w', encoding='utf-8')\n writer=csv.writer(file)\n writer.writerow(data_colum)\n for m in range(month,13):\n if day!=1:\n witer_(open_file, day, m, year, format, writer)\n day=1\n else:\n witer_(open_file, day, m, year, format, writer)\n file.close()\n return\n\nimport pandas as pd\ndef read_site(file):\n read=pd.read_excel(file)\n print(list(read.keys()))\n return 
read.loc[read['城市']=='上海'].values\n\ndata_colum=['time','site','AQI','PM2.5','PM2.5_24h','PM10','PM10_24h','SO2','SO2_24h','NO2','NO2_24h','O3','O3_24h','O3_8h','O3_8h_24h','CO','CO_24h']\nopen_file='/Users/guojianzou/Downloads/站点_20200101-20201231/china_sites_'\nopen_site='/Users/guojianzou/Downloads/站点列表-2021.01.01起.xlsx'\nwrite_file='/Users/guojianzou/Downloads/'\nwrite_name='2020.csv'\nday=5\nmonth=1\nyear='2020'\n\n#读取站点信息,包括:['监测点编码', '监测点名称', '城市', '经度', '纬度', '对照点']\nsites=read_site(open_site)\nprint(sites)\n\n# 遍历数据源,并开始存储\ngo_though(open_file,day,month,year,'.csv',write_file,write_name)\n"
] | [
[
"numpy.reshape",
"pandas.read_excel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
rcaneill/xgcm-1 | [
"b68a4c70fdb8ade2e7462a958c810a07b0fbcfdb"
] | [
"xgcm/test/test_metrics_ops_single_axis.py"
] | [
"from __future__ import print_function\nimport pytest\nimport xarray as xr\nimport numpy as np\n\nfrom xgcm.grid import Grid, Axis\nfrom xgcm.test.datasets import datasets_grid_metric\n\n\[email protected](\"funcname\", [\"interp\", \"diff\", \"min\", \"max\", \"cumsum\"])\[email protected](\"grid_type\", [\"B\", \"C\"])\[email protected](\"variable\", [\"tracer\", \"u\", \"v\"])\nclass TestParametrized:\n @pytest.mark.parametrize(\"axis\", [\"X\", \"Y\"])\n @pytest.mark.parametrize(\"metric_weighted\", [\"X\", (\"Y\",), (\"X\", \"Y\"), [\"X\", \"Y\"]])\n @pytest.mark.parametrize(\n \"periodic\", [\"True\", \"False\", {\"X\": True, \"Y\": False}, {\"X\": False, \"Y\": True}]\n )\n @pytest.mark.parametrize(\"boundary\", [\"fill\", \"extend\"])\n def test_weighted_metric(\n self, funcname, grid_type, variable, axis, metric_weighted, periodic, boundary\n ):\n \"\"\"tests the correct execution of weighted ops along a single axis\"\"\"\n # metric_weighted allows the interpolation of e.g. a surface flux to be conservative\n # It multiplies the values with a metric like the area, then performs interpolation\n # and divides by the same metric (area) for the new grid position\n ds, coords, metrics = datasets_grid_metric(grid_type)\n grid = Grid(ds, coords=coords, metrics=metrics, periodic=periodic)\n func = getattr(grid, funcname)\n\n metric = grid.get_metric(ds[variable], metric_weighted)\n expected_raw = func(ds[variable] * metric, axis, boundary=boundary)\n metric_new = grid.get_metric(expected_raw, metric_weighted)\n expected = expected_raw / metric_new\n new = func(\n ds[variable], axis, metric_weighted=metric_weighted, boundary=boundary\n )\n assert new.equals(expected)\n\n @pytest.mark.parametrize(\n \"multi_axis\", [\"X\", [\"X\"], (\"Y\"), [\"X\", \"Y\"], (\"Y\", \"X\")]\n )\n def test_weighted_metric_multi_axis(\n self, funcname, grid_type, variable, multi_axis, metric_weighted, boundary\n ):\n \"\"\"tests if the output for multiple axis is the same as when\n 
executing the single axis ops in serial\"\"\"\n ds, coords, metrics = datasets_grid_metric(grid_type)\n grid = Grid(ds, coords=coords, metrics=metrics)\n\n func = getattr(grid, funcname)\n expected = ds[variable]\n for ax in multi_axis:\n if isinstance(metric_weighted, dict):\n metric_weighted_axis = metric_weighted[ax]\n else:\n metric_weighted_axis = metric_weighted\n expected = func(\n expected,\n ax,\n metric_weighted=metric_weighted_axis,\n boundary=boundary,\n )\n\n new = func(\n ds[variable],\n multi_axis,\n metric_weighted=metric_weighted,\n boundary=boundary,\n )\n assert new.equals(expected)\n\n\[email protected](\n \"funcname\",\n [\"interp\", \"diff\", \"min\", \"max\", \"cumsum\", \"derivative\", \"cumint\"],\n)\[email protected](\"boundary\", [\"fill\", \"extend\"])\[email protected](\"fill_value\", [0, 10, None])\ndef test_boundary_global_input(funcname, boundary, fill_value):\n \"\"\"Test that globally defined boundary values result in\n the same output as when the parameters are defined on either\n the grid or axis methods\n \"\"\"\n ds, coords, metrics = datasets_grid_metric(\"C\")\n axis = \"X\"\n\n # Test results by globally specifying fill value/boundary on grid object\n grid_global = Grid(\n ds,\n coords=coords,\n metrics=metrics,\n periodic=False,\n boundary=boundary,\n fill_value=fill_value,\n )\n func_global = getattr(grid_global, funcname)\n global_result = func_global(ds.tracer, axis)\n\n # Test results by manually specifying fill value/boundary on grid method\n grid_manual = Grid(\n ds, coords=coords, metrics=metrics, periodic=False, boundary=boundary\n )\n func_manual = getattr(grid_manual, funcname)\n manual_result = func_manual(\n ds.tracer, axis, boundary=boundary, fill_value=fill_value\n )\n xr.testing.assert_allclose(global_result, manual_result)\n\n\ndef test_average_unmatched_missing():\n # Tests the behavior of grid.average on an array which has missing values, not present in the metric\n x = np.arange(10)\n data = 
xr.DataArray(np.ones(10), dims=\"x\", coords={\"x\": x})\n weights = data * 30\n ds = xr.Dataset({\"data\": data})\n ds = ds.assign_coords(weights=weights)\n # create an xgcm grid\n grid = Grid(ds, coords={\"X\": {\"center\": \"x\"}}, metrics={\"X\": [\"weights\"]})\n\n # average the unmasked array\n expected = grid.average(ds.data, \"X\")\n\n # now lets introduce a missing value in the data\n ds.data[6:8] = np.nan\n\n # assert that the result for both the full and the masked array is equal,\n # since both only have ones in them.\n xr.testing.assert_allclose(expected, grid.average(ds.data, \"X\"))\n\n\ndef test_derivative_uniform_grid():\n # this is a uniform grid\n # a non-uniform grid would provide a more rigorous test\n dx = 10.0\n dy = 10.0\n arr = [\n [1.0, 2.0, 4.0, 3.0],\n [4.0, 7.0, 1.0, 2.0],\n [3.0, 1.0, 0.0, 9.0],\n [8.0, 5.0, 2.0, 1.0],\n ]\n ds = xr.Dataset(\n {\"foo\": ((\"XC\", \"YC\"), arr)},\n coords={\n \"XC\": ((\"XC\",), [0.5, 1.5, 2.5, 3.5]),\n \"XG\": ((\"XG\",), [0, 1.0, 2.0, 3.0]),\n \"dXC\": ((\"XC\",), [dx, dx, dx, dx]),\n \"dXG\": ((\"XG\",), [dx, dx, dx, dx]),\n \"YC\": ((\"YC\",), [0.5, 1.5, 2.5, 3.5]),\n \"YG\": ((\"YG\",), [0, 1.0, 2.0, 3.0]),\n \"dYC\": ((\"YC\",), [dy, dy, dy, dy]),\n \"dYG\": ((\"YG\",), [dy, dy, dy, dy]),\n },\n )\n\n grid = Grid(\n ds,\n coords={\n \"X\": {\"center\": \"XC\", \"left\": \"XG\"},\n \"Y\": {\"center\": \"YC\", \"left\": \"YG\"},\n },\n metrics={(\"X\",): [\"dXC\", \"dXG\"], (\"Y\",): [\"dYC\", \"dYG\"]},\n periodic=True,\n )\n\n # Test x direction\n dfoo_dx = grid.derivative(ds.foo, \"X\")\n expected = grid.diff(ds.foo, \"X\") / dx\n assert dfoo_dx.equals(expected)\n\n # Test y direction\n dfoo_dy = grid.derivative(ds.foo, \"Y\")\n expected = grid.diff(ds.foo, \"Y\") / dy\n assert dfoo_dy.equals(expected)\n\n\ndef test_derivative_c_grid():\n # test derivatives with synthetic C grid data\n\n ds, coords, metrics = datasets_grid_metric(\"C\")\n grid = Grid(ds, coords=coords, metrics=metrics)\n\n # 
tracer point\n var = \"tracer\"\n test_axes = [\"X\", \"Y\", \"Z\"]\n test_dx = [\"dx_e\", \"dy_n\", \"dz_w\"]\n for ax, dx in zip(test_axes, test_dx):\n _run_single_derivative_test(grid, ax, ds[var], ds[dx])\n\n # zonal velocity point\n var = \"u\"\n test_dx = [\"dx_t\", \"dy_ne\", \"dz_w_e\"]\n for ax, dx in zip(test_axes, test_dx):\n _run_single_derivative_test(grid, ax, ds[var], ds[dx])\n\n # meridional velocity point\n var = \"v\"\n test_dx = [\"dx_ne\", \"dy_t\", \"dz_w_n\"]\n for ax, dx in zip(test_axes, test_dx):\n _run_single_derivative_test(grid, ax, ds[var], ds[dx])\n\n # vertical velocity point\n var = \"wt\"\n test_dx = [\"dx_e\", \"dy_n\", \"dz_t\"]\n for ax, dx in zip(test_axes, test_dx):\n _run_single_derivative_test(grid, ax, ds[var], ds[dx])\n\n\ndef test_derivative_b_grid():\n # test derivatives with synthetic B grid data\n\n ds, coords, metrics = datasets_grid_metric(\"B\")\n grid = Grid(ds, coords=coords, metrics=metrics)\n\n # tracer point\n var = \"tracer\"\n test_axes = [\"X\", \"Y\", \"Z\"]\n test_dx = [\"dx_e\", \"dy_n\", \"dz_w\"]\n for ax, dx in zip(test_axes, test_dx):\n _run_single_derivative_test(grid, ax, ds[var], ds[dx])\n\n # zonal velocity point\n var = \"u\"\n test_dx = [\"dx_n\", \"dy_e\", \"dz_w_ne\"]\n for ax, dx in zip(test_axes, test_dx):\n _run_single_derivative_test(grid, ax, ds[var], ds[dx])\n\n # meridional velocity point\n var = \"v\"\n test_dx = [\"dx_n\", \"dy_e\", \"dz_w_ne\"]\n for ax, dx in zip(test_axes, test_dx):\n _run_single_derivative_test(grid, ax, ds[var], ds[dx])\n\n # vertical velocity point\n var = \"wt\"\n test_dx = [\"dx_e\", \"dy_n\", \"dz_t\"]\n for ax, dx in zip(test_axes, test_dx):\n _run_single_derivative_test(grid, ax, ds[var], ds[dx])\n\n\n# run this for each axis and each field in dataset\ndef _run_single_derivative_test(grid, axis, fld, dx):\n\n dvar_dx = grid.derivative(fld, axis)\n expected = grid.diff(fld, axis) / dx\n\n assert dvar_dx.equals(expected.reset_coords(drop=True))\n"
] | [
[
"numpy.arange",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
syth0le/mat_mod_labs | [
"9c87e771beb75566bb824e34f4520e8b2ca0a4ce",
"9c87e771beb75566bb824e34f4520e8b2ca0a4ce"
] | [
"lab_2/graphics.py",
"lab_3/main.py"
] | [
"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy import interpolate\nfrom abc import ABCMeta, abstractmethod\n\nfrom utils.sorting import bubble_sort\n\n\nclass ABSMethods(metaclass=ABCMeta):\n def __init__(self, function):\n self.function = function\n\n @abstractmethod\n def drawGraphic(self, vectors):\n pass\n\n\nclass Lagranz(ABSMethods):\n\n def __call__(self, *args, **kwargs):\n call = self.function()\n self.drawGraphic(call)\n return call\n\n def counter(self, x, y, xl):\n z = 0\n for j in range(len(y)):\n p1 = 1\n p2 = 1\n for i in range(len(x)):\n if i == j:\n p1 = p1 * 1\n p2 = p2 * 1\n else:\n p1 = p1 * (xl - x[i])\n p2 = p2 * (x[j] - x[i])\n z = z + y[j] * p1 / p2\n return z\n\n def drawGraphic(self, vectors):\n\n for vector in vectors:\n vector = bubble_sort(vector)\n x = vector[0]\n y = vector[1]\n xl = np.linspace(np.min(x), np.max(x))\n yl = self.counter(x, y, xl)\n plt.scatter(x, y)\n plt.plot(xl, yl)\n\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title(\"Lagranz Method\")\n plt.show()\n\n\nclass InterpolationLinear(ABSMethods):\n\n def __call__(self, *args, **kwargs):\n call = self.function()\n self.drawGraphic(call)\n return call\n\n def counter(self, x, y, xl):\n yx = 0\n for i in range(len(x)):\n if x[i - 1] <= xl <= x[i]:\n yp = y[i] - y[i - 1]\n xp = x[i] - x[i - 1]\n yx = y[i] + ((yp / xp) * (xl - x[i]))\n break\n return yx\n\n def drawGraphic(self, vectors):\n for vector in vectors:\n vector = bubble_sort(vector)\n x = vector[0]\n y = vector[1]\n yl = [self.counter(x, y, i) for i in x]\n plt.scatter(x, y)\n plt.plot(x, yl)\n\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title(\"Piecewise linear interpolation Method\")\n plt.show()\n\n\nclass InterpolationParabolic(ABSMethods):\n\n def __call__(self, *args, **kwargs):\n call = self.function()\n self.drawGraphic(call)\n return call\n\n def counter(self, x, y, t):\n z = 0\n for i in range(len(x) - 1):\n if x[i] <= t <= x[i + 1]:\n M = np.array(\n [[x[i - 1] ** 2, x[i - 1], 1], 
[x[i] ** 2, x[i], 1], [x[i + 1] ** 2, x[i + 1], 1]])\n v = np.array([y[i - 1], y[i], y[i + 1]])\n solve = np.linalg.solve(M, v)\n z = solve[0] * t ** 2 + solve[1] * t + solve[2]\n i += 1\n return z\n\n def drawGraphic(self, vectors):\n for vector in vectors:\n vector = bubble_sort(vector)\n x = vector[0]\n y = vector[1]\n plt.scatter(x, y)\n xnew = np.linspace(np.min(x), np.max(x), 10000)\n ynew = [self.counter(x, y, i) for i in xnew]\n plt.plot(xnew, ynew)\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title(\"Piecewise parabolic interpolation Method\")\n plt.show()\n\n\nclass InterpolationSpline(ABSMethods):\n\n def __call__(self, *args, **kwargs):\n call = self.function()\n self.drawGraphic(call)\n return call\n\n def counter(self, x, y):\n tck = interpolate.splrep(x, y, s=0)\n xl = np.linspace(np.min(x), np.max(x))\n yl = interpolate.splev(xl, tck, der=0)\n return xl, yl\n\n def drawGraphic(self, vectors):\n for vector in vectors:\n vector = bubble_sort(vector)\n x = vector[0]\n y = vector[1]\n xl, yl = self.counter(x, y)\n plt.scatter(x, y)\n plt.plot(xl, yl)\n\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title(\"Spline interpolation Method\")\n plt.show()\n\n\nclass Graphics(ABSMethods):\n\n def __call__(self, *args, **kwargs):\n # call = self.function()\n # print(call)\n call = 0\n self.drawGraphic(call)\n return call\n\n def drawGraphic(self, call):\n print(\"\\n1 - Lagranz\\n2 - Linear\\n3 - Parabolic\\n4 - Spline\")\n command = int(input(\"Введите номер задания (из методички):\"))\n if command == 1:\n meth = Lagranz(self.function)\n meth()\n elif command == 2:\n meth = InterpolationLinear(self.function)\n meth()\n elif command == 3:\n meth = InterpolationParabolic(self.function)\n meth()\n elif command == 4:\n meth = InterpolationSpline(self.function)\n meth()\n else:\n print(\"Invalid command\")\n",
"import numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom utils.delimeter import Delimeter\nfrom utils.files_chain import FilesChain\nfrom utils.sorting import bubble_sort\n\n\nclass Approximation:\n\n def __init__(self, function):\n self.function = function\n\n def __call__(self, *args, **kwargs):\n call = self.function()\n self.drawGraphic(call)\n return call\n\n def drawGraphic(self, vectors):\n print(\"Введите степень аппроксимирующего полинома:\")\n degree = int(input())\n for vector in vectors:\n vector = bubble_sort(vector)\n x = vector[0]\n y = vector[1]\n self.approximation(x, y, degree)\n plt.scatter(x, y)\n\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title(\"Approximation\")\n plt.show()\n\n def approximation(self, x, y, degree):\n fp, residuals, rank, sv, rcond = np.polyfit(x, y, degree, full=True)\n f = np.poly1d(fp)\n fx = np.linspace(np.min(x), np.max(x), 10000)\n plt.plot(fx, f(fx))\n plt.grid(True)\n\n\n@Approximation\n@Delimeter\ndef main():\n FILEIO = FilesChain()\n return FILEIO.client_code()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"scipy.interpolate.splrep",
"numpy.linalg.solve",
"matplotlib.pyplot.title",
"matplotlib.pyplot.scatter",
"numpy.min",
"scipy.interpolate.splev",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.polyfit",
"numpy.poly1d",
"matplotlib.pyplot.title",
"matplotlib.pyplot.scatter",
"numpy.min",
"numpy.max",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
w86763777/Pytorch-Unified-FID-IS-Score | [
"6a2620d6da0faa66bb798aa47c7e0e49ef2032b6"
] | [
"pytorch_gan_metrics/calc_fid_stats.py"
] | [
"import argparse\nimport os\n\nimport numpy as np\nfrom torch.utils.data import DataLoader\n\nfrom . import ImageDataset\nfrom .core import get_inception_feature\n\n\ndef calc_and_save_stats(path, output, batch_size):\n dataset = ImageDataset(path, exts=['png', 'jpg'])\n loader = DataLoader(dataset, batch_size=batch_size, num_workers=4)\n acts, = get_inception_feature(loader, dims=[2048], verbose=True)\n\n mu = np.mean(acts, axis=0)\n sigma = np.cov(acts, rowvar=False)\n\n if os.path.dirname(output) != \"\":\n os.makedirs(os.path.dirname(output), exist_ok=True)\n np.savez_compressed(output, mu=mu, sigma=sigma)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"Pre-calculate statistics of images\")\n parser.add_argument(\"--path\", type=str, required=True,\n help='path to image directory')\n parser.add_argument(\"--output\", type=str, required=True,\n help=\"output path\")\n parser.add_argument(\"--batch_size\", type=int, default=50,\n help=\"batch size (default=50)\")\n args = parser.parse_args()\n\n calc_and_save_stats(args.path, args.output, args.batch_size)\n"
] | [
[
"numpy.savez_compressed",
"numpy.cov",
"torch.utils.data.DataLoader",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NeelGhoshal/probability | [
"df96f3d56eff92c6b06fbac68dc58e095e28fed6",
"df96f3d56eff92c6b06fbac68dc58e095e28fed6",
"df96f3d56eff92c6b06fbac68dc58e095e28fed6"
] | [
"tensorflow_probability/python/math/psd_kernels/exponentiated_quadratic.py",
"tensorflow_probability/python/bijectors/power_transform_test.py",
"tensorflow_probability/python/bijectors/bijector_properties_test.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The ExponentiatedQuadratic kernel.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.math.psd_kernels import positive_semidefinite_kernel as psd_kernel\nfrom tensorflow_probability.python.math.psd_kernels.internal import util\n\n\n__all__ = ['ExponentiatedQuadratic']\n\n\nclass ExponentiatedQuadratic(psd_kernel.AutoCompositeTensorPsdKernel):\n \"\"\"The ExponentiatedQuadratic kernel.\n\n Sometimes called the \"squared exponential\", \"Gaussian\" or \"radial basis\n function\", this kernel function has the form\n\n ```none\n k(x, y) = amplitude**2 * exp(-||x - y||**2 / (2 * length_scale**2))\n ```\n\n where the double-bars represent vector length (ie, Euclidean, or L2 norm).\n \"\"\"\n\n def __init__(self,\n amplitude=None,\n length_scale=None,\n feature_ndims=1,\n validate_args=False,\n name='ExponentiatedQuadratic'):\n \"\"\"Construct an ExponentiatedQuadratic kernel instance.\n\n Args:\n amplitude: floating point `Tensor` that controls the maximum value\n of the kernel. 
Must be broadcastable with `length_scale` and inputs to\n `apply` and `matrix` methods. Must be greater than zero. A value of\n `None` is treated like 1.\n Default value: None\n length_scale: floating point `Tensor` that controls how sharp or wide the\n kernel shape is. This provides a characteristic \"unit\" of length against\n which `||x - y||` can be compared for scale. Must be broadcastable with\n `amplitude` and inputs to `apply` and `matrix` methods. A value of\n `None` is treated like 1.\n Default value: None\n feature_ndims: Python `int` number of rightmost dims to include in the\n squared difference norm in the exponential.\n validate_args: If `True`, parameters are checked for validity despite\n possibly degrading runtime performance\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name):\n dtype = util.maybe_get_common_dtype(\n [amplitude, length_scale])\n self._amplitude = tensor_util.convert_nonref_to_tensor(\n amplitude, name='amplitude', dtype=dtype)\n self._length_scale = tensor_util.convert_nonref_to_tensor(\n length_scale, name='length_scale', dtype=dtype)\n super(ExponentiatedQuadratic, self).__init__(\n feature_ndims,\n dtype=dtype,\n name=name,\n validate_args=validate_args,\n parameters=parameters)\n\n @property\n def amplitude(self):\n \"\"\"Amplitude parameter.\"\"\"\n return self._amplitude\n\n @property\n def length_scale(self):\n \"\"\"Length scale parameter.\"\"\"\n return self._length_scale\n\n def _batch_shape(self):\n scalar_shape = tf.TensorShape([])\n return tf.broadcast_static_shape(\n scalar_shape if self.amplitude is None else self.amplitude.shape,\n scalar_shape if self.length_scale is None else self.length_scale.shape)\n\n def _batch_shape_tensor(self):\n return tf.broadcast_dynamic_shape(\n [] if self.amplitude is None else tf.shape(self.amplitude),\n [] if self.length_scale is None else tf.shape(self.length_scale))\n\n def _apply_with_distance(\n 
self, x1, x2, pairwise_square_distance, example_ndims=0):\n exponent = -0.5 * pairwise_square_distance\n if self.length_scale is not None:\n length_scale = tf.convert_to_tensor(self.length_scale)\n length_scale = util.pad_shape_with_ones(\n length_scale, example_ndims)\n exponent = exponent / length_scale**2\n\n if self.amplitude is not None:\n amplitude = tf.convert_to_tensor(self.amplitude)\n amplitude = util.pad_shape_with_ones(amplitude, example_ndims)\n exponent = exponent + 2. * tf.math.log(amplitude)\n\n return tf.exp(exponent)\n\n def _apply(self, x1, x2, example_ndims=0):\n pairwise_square_distance = util.sum_rightmost_ndims_preserving_shape(\n tf.math.squared_difference(x1, x2), self.feature_ndims)\n return self._apply_with_distance(\n x1, x2, pairwise_square_distance, example_ndims=example_ndims)\n\n def _matrix(self, x1, x2):\n pairwise_square_distance = util.pairwise_square_distance_matrix(\n x1, x2, self.feature_ndims)\n return self._apply_with_distance(\n x1, x2, pairwise_square_distance, example_ndims=2)\n\n def _tensor(self, x1, x2, x1_example_ndims, x2_example_ndims):\n pairwise_square_distance = util.pairwise_square_distance_tensor(\n x1, x2, self.feature_ndims, x1_example_ndims, x2_example_ndims)\n return self._apply_with_distance(\n x1, x2, pairwise_square_distance,\n example_ndims=(x1_example_ndims + x2_example_ndims))\n\n def _parameter_control_dependencies(self, is_init):\n if not self.validate_args:\n return []\n assertions = []\n for arg_name, arg in dict(amplitude=self.amplitude,\n length_scale=self.length_scale).items():\n if arg is not None and is_init != tensor_util.is_ref(arg):\n assertions.append(assert_util.assert_positive(\n arg,\n message='{} must be positive.'.format(arg_name)))\n return assertions\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for Bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nimport numpy as np\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow_probability.python.bijectors import bijector_test_util\nfrom tensorflow_probability.python.internal import test_util\n\n\n@test_util.test_all_tf_execution_regimes\nclass PowerTransformBijectorTest(test_util.TestCase):\n \"\"\"Tests correctness of the power transformation.\"\"\"\n\n dtype = np.float32\n use_static_shape = True\n\n def testBijector(self):\n c = 0.2\n bijector = tfb.PowerTransform(power=c, validate_args=True)\n self.assertStartsWith(bijector.name, 'power_transform')\n x = np.array([[[-1.], [2.], [-5. + 1e-4]]])\n y = (1. + x * c)**(1. / c)\n self.assertAllClose(y, self.evaluate(bijector.forward(x)))\n self.assertAllClose(x, self.evaluate(bijector.inverse(y)))\n self.assertAllClose(\n (c - 1.) 
* np.sum(np.log(y), axis=-1),\n self.evaluate(bijector.inverse_log_det_jacobian(y, event_ndims=1)))\n self.assertAllClose(\n self.evaluate(-bijector.inverse_log_det_jacobian(y, event_ndims=1)),\n self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=1)),\n rtol=1e-4,\n atol=0.)\n\n def testScalarCongruency(self):\n bijector = tfb.PowerTransform(power=0.2, validate_args=True)\n bijector_test_util.assert_scalar_congruency(\n bijector, lower_x=-2., upper_x=1.5, eval_func=self.evaluate, rtol=0.05)\n\n def testBijectiveAndFinite(self):\n bijector = tfb.PowerTransform(power=0.2, validate_args=True)\n x = np.linspace(-4.999, 10, num=10).astype(np.float32)\n y = np.logspace(0.001, 10, num=10).astype(np.float32)\n bijector_test_util.assert_bijective_and_finite(\n bijector, x, y, eval_func=self.evaluate, event_ndims=0, rtol=1e-3)\n\n def testDtype(self):\n bijector = tfb.PowerTransform(power=0.2, validate_args=True)\n x = self.make_input([-0.5, 1., 3.])\n y = self.make_input([0.3, 3., 1.2])\n self.assertIs(bijector.forward(x).dtype, x.dtype)\n self.assertIs(bijector.inverse(y).dtype, y.dtype)\n self.assertIs(\n bijector.forward_log_det_jacobian(x, event_ndims=0).dtype, x.dtype)\n self.assertIs(\n bijector.inverse_log_det_jacobian(y, event_ndims=0).dtype, y.dtype)\n\n\nif __name__ == '__main__':\n test_util.main()\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Property-based tests for TFP bijectors.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\nfrom absl.testing import parameterized\nimport hypothesis as hp\nfrom hypothesis import strategies as hps\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow_probability.python import experimental\nfrom tensorflow_probability.python.bijectors import hypothesis_testlib as bijector_hps\nfrom tensorflow_probability.python.bijectors import invert as invert_lib\nfrom tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import samplers\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow_probability.python.internal import test_util\nfrom tensorflow_probability.python.util.deferred_tensor import DeferredTensor\n\nfrom tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import\n\nTF2_FRIENDLY_BIJECTORS = (\n 'Ascending',\n 'BatchNormalization',\n # 
'CategoricalToDiscrete', TODO(b/137956955): Add support\n # for hypothesis testing\n 'CholeskyOuterProduct',\n 'Cumsum',\n 'DiscreteCosineTransform',\n 'Exp',\n 'Expm1',\n 'FillScaleTriL',\n 'FillTriangular',\n 'FrechetCDF',\n 'GeneralizedExtremeValueCDF',\n 'GeneralizedPareto',\n 'GompertzCDF',\n 'GumbelCDF',\n 'Identity',\n 'Inline',\n 'Invert',\n 'IteratedSigmoidCentered',\n 'KumaraswamyCDF',\n 'Log',\n 'Log1p',\n 'MatrixInverseTriL',\n 'MoyalCDF',\n 'NormalCDF',\n 'Ordered',\n 'Permute',\n 'Power',\n 'PowerTransform',\n 'RationalQuadraticSpline',\n 'RayleighCDF',\n 'Reciprocal',\n 'Reshape',\n 'Scale',\n 'ScaleMatvecDiag',\n 'ScaleMatvecLU',\n 'ScaleMatvecTriL',\n 'Shift',\n 'ShiftedGompertzCDF',\n 'Sigmoid',\n 'Sinh',\n 'SinhArcsinh',\n 'SoftClip',\n 'Softfloor',\n 'Softplus',\n 'Softsign',\n 'Square',\n 'Tanh',\n 'TransformDiagonal',\n 'Transpose',\n 'WeibullCDF',\n)\n\nBIJECTOR_PARAMS_NDIMS = {\n 'FrechetCDF': dict(loc=0, scale=0, concentration=0),\n 'GompertzCDF': dict(concentration=0, rate=0),\n 'GumbelCDF': dict(loc=0, scale=0),\n 'GeneralizedExtremeValueCDF': dict(loc=0, scale=0, concentration=0),\n 'GeneralizedPareto': dict(loc=0, scale=0, concentration=0),\n 'KumaraswamyCDF': dict(concentration1=0, concentration0=0),\n 'MoyalCDF': dict(loc=0, scale=0),\n 'Power': dict(power=0),\n 'RayleighCDF': dict(scale=0),\n 'Scale': dict(scale=0),\n 'ScaleMatvecDiag': dict(scale_diag=1),\n 'ScaleMatvecLU': dict(lower_upper=2, permutation=1),\n 'ScaleMatvecTriL': dict(scale_tril=2),\n 'Shift': dict(shift=0),\n 'ShiftedGompertzCDF': dict(concentration=0, rate=0),\n 'SinhArcsinh': dict(skewness=0, tailweight=0),\n 'Softfloor': dict(temperature=0),\n 'Softplus': dict(hinge_softness=0),\n 'RationalQuadraticSpline': dict(bin_widths=1, bin_heights=1, knot_slopes=1),\n 'WeibullCDF': dict(concentration=0, scale=0),\n}\n\nMUTEX_PARAMS = (\n set(['scale', 'log_scale']),\n)\n\nFLDJ = 'forward_log_det_jacobian'\nILDJ = 'inverse_log_det_jacobian'\n\nINVERT_LDJ = {FLDJ: ILDJ, 
ILDJ: FLDJ}\n\nNO_LDJ_GRADS_EXPECTED = {\n 'BatchNormalization': dict(beta={FLDJ, ILDJ}),\n 'FrechetCDF': dict(loc={ILDJ}),\n 'GeneralizedExtremeValueCDF': dict(loc={ILDJ}),\n 'GumbelCDF': dict(loc={ILDJ}),\n 'MoyalCDF': dict(loc={ILDJ}),\n 'Shift': dict(shift={FLDJ, ILDJ}),\n}\n\nTRANSFORM_DIAGONAL_ALLOWLIST = {\n 'BatchNormalization',\n 'DiscreteCosineTransform',\n 'Exp',\n 'Expm1',\n 'GompertzCDF',\n 'GumbelCDF',\n 'GeneralizedExtremeValueCDF',\n 'GeneralizedPareto',\n 'Identity',\n 'Inline',\n 'KumaraswamyCDF',\n 'MoyalCDF',\n 'NormalCDF',\n 'PowerTransform',\n 'Power',\n 'RayleighCDF',\n 'Reciprocal',\n 'Scale',\n 'ScaleMatvecDiag',\n 'ScaleMatvecLU',\n 'ScaleMatvecTriL',\n 'Shift',\n 'ShiftedGompertzCDF',\n 'Sigmoid',\n 'Sinh',\n 'SinhArcsinh',\n 'Softplus',\n 'Softsign',\n 'Square',\n 'Tanh',\n 'WeibullCDF',\n}\n\nAUTOVECTORIZATION_IS_BROKEN = [\n 'BatchNormalization', # Might (necessarily) violate shape semantics?\n]\n\nAUTOVECTORIZATION_RTOL = collections.defaultdict(lambda: 1e-5)\nAUTOVECTORIZATION_RTOL.update({\n 'Invert': 1e-2, # Can contain poorly-conditioned bijectors.\n 'MatvecLU': 1e-4, # TODO(b/156638569) tighten this.\n 'ScaleMatvecLU': 1e-2, # TODO(b/151041130) tighten this.\n 'ScaleMatvecTriL': 1e-3}) # TODO(b/150250388) tighten this.\nAUTOVECTORIZATION_ATOL = collections.defaultdict(lambda: 1e-5)\nAUTOVECTORIZATION_ATOL.update({\n 'ScaleMatvecLU': 1e-2, # TODO(b/151041130) tighten this.\n 'ScaleMatvecTriL': 1e-1}) # TODO(b/150250388) tighten this.\n\n\nCOMPOSITE_TENSOR_IS_BROKEN = [\n 'BatchNormalization', # tf.layers arg\n 'RationalQuadraticSpline', # TODO(b/185628453): Debug loss of static info.\n]\n\nCOMPOSITE_TENSOR_RTOL = collections.defaultdict(lambda: 2e-6)\nCOMPOSITE_TENSOR_RTOL.update({\n 'PowerTransform': 1e-5,\n})\nCOMPOSITE_TENSOR_ATOL = collections.defaultdict(lambda: 1e-6)\n\n\ndef is_invert(bijector):\n return isinstance(bijector, (tfb.Invert, invert_lib._Invert))\n\n\ndef is_transform_diagonal(bijector):\n return 
isinstance(bijector, tfb.TransformDiagonal)\n\n\ndef is_generalized_pareto(bijector):\n return isinstance(bijector, tfb.GeneralizedPareto)\n\n\n# pylint is unable to handle @hps.composite (e.g. complains \"No value for\n# argument '...' in function call\"), so disable this lint for the file.\n\n# pylint: disable=no-value-for-parameter\n\n\[email protected]\ndef broadcasting_params(draw,\n bijector_name,\n batch_shape,\n event_dim=None,\n enable_vars=False):\n \"\"\"Draws a dict of parameters which should yield the given batch shape.\"\"\"\n params_event_ndims = BIJECTOR_PARAMS_NDIMS.get(bijector_name, {})\n\n def _constraint(param):\n return constraint_for(bijector_name, param)\n\n return draw(\n tfp_hps.broadcasting_params(\n batch_shape,\n params_event_ndims,\n event_dim=event_dim,\n enable_vars=enable_vars,\n constraint_fn_for=_constraint,\n mutex_params=MUTEX_PARAMS))\n\n\n# TODO(b/141098791): Eliminate this.\[email protected]_composite_tensor\nclass CallableModule(tf.Module, experimental.AutoCompositeTensor):\n \"\"\"Convenience object for capturing variables closed over by Inline.\"\"\"\n\n def __init__(self, fn, varobj):\n self._fn = fn\n self._varobj = varobj\n\n def __call__(self, *args, **kwargs):\n return self._fn(*args, **kwargs)\n\n\[email protected]\ndef bijectors(draw, bijector_name=None, batch_shape=None, event_dim=None,\n enable_vars=False, allowed_bijectors=None, validate_args=True,\n return_duplicate=False):\n \"\"\"Strategy for drawing Bijectors.\n\n The emitted bijector may be a basic bijector or an `Invert` of a basic\n bijector, but not a compound like `Chain`.\n\n Args:\n draw: Hypothesis strategy sampler supplied by `@hps.composite`.\n bijector_name: Optional Python `str`. If given, the produced bijectors\n will all have this type. If omitted, Hypothesis chooses one from\n the allowlist `TF2_FRIENDLY_BIJECTORS`.\n batch_shape: An optional `TensorShape`. The batch shape of the resulting\n bijector. 
Hypothesis will pick one if omitted.\n event_dim: Optional Python int giving the size of each of the underlying\n distribution's parameters' event dimensions. This is shared across all\n parameters, permitting square event matrices, compatible location and\n scale Tensors, etc. If omitted, Hypothesis will choose one.\n enable_vars: TODO(bjp): Make this `True` all the time and put variable\n initialization in slicing_test. If `False`, the returned parameters are\n all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`\n `tfp.util.TransformedVariable`}\n allowed_bijectors: Optional list of `str` Bijector names to sample from.\n Bijectors not in this list will not be returned or instantiated as\n part of a meta-bijector (Chain, Invert, etc.). Defaults to\n `TF2_FRIENDLY_BIJECTORS`.\n validate_args: Python `bool`; whether to enable runtime checks.\n return_duplicate: Python `bool`: If `False` return a single bijector. If\n `True` return a tuple of two bijectors of the same type, instantiated with\n the same parameters.\n\n Returns:\n bijectors: A strategy for drawing bijectors with the specified `batch_shape`\n (or an arbitrary one if omitted).\n \"\"\"\n if allowed_bijectors is None:\n allowed_bijectors = TF2_FRIENDLY_BIJECTORS\n if bijector_name is None:\n bijector_name = draw(hps.sampled_from(allowed_bijectors))\n if batch_shape is None:\n batch_shape = draw(tfp_hps.shapes())\n if event_dim is None:\n event_dim = draw(hps.integers(min_value=2, max_value=6))\n if bijector_name == 'Invert':\n underlying_name = draw(\n hps.sampled_from(sorted(set(allowed_bijectors) - {'Invert'})))\n underlying = draw(\n bijectors(\n bijector_name=underlying_name,\n batch_shape=batch_shape,\n event_dim=event_dim,\n enable_vars=enable_vars,\n allowed_bijectors=allowed_bijectors,\n validate_args=validate_args))\n bijector_params = {'bijector': underlying}\n msg = 'Forming Invert bijector with underlying bijector {}.'\n hp.note(msg.format(underlying))\n elif bijector_name == 
'TransformDiagonal':\n underlying_name = draw(\n hps.sampled_from(sorted(\n set(allowed_bijectors) & set(TRANSFORM_DIAGONAL_ALLOWLIST))))\n underlying = draw(\n bijectors(\n bijector_name=underlying_name,\n batch_shape=(),\n event_dim=event_dim,\n enable_vars=enable_vars,\n allowed_bijectors=allowed_bijectors,\n validate_args=validate_args))\n bijector_params = {'diag_bijector': underlying}\n msg = 'Forming TransformDiagonal bijector with underlying bijector {}.'\n hp.note(msg.format(underlying))\n elif bijector_name == 'Inline':\n scale = draw(tfp_hps.maybe_variable(\n hps.sampled_from(np.float32([1., -1., 2, -2.])), enable_vars))\n b = tfb.Scale(scale=scale)\n\n bijector_params = dict(\n forward_fn=CallableModule(b.forward, b),\n inverse_fn=b.inverse,\n forward_log_det_jacobian_fn=lambda x: b.forward_log_det_jacobian( # pylint: disable=g-long-lambda\n x, event_ndims=b.forward_min_event_ndims),\n forward_min_event_ndims=b.forward_min_event_ndims,\n is_constant_jacobian=b.is_constant_jacobian,\n is_increasing=b._internal_is_increasing, # pylint: disable=protected-access\n )\n elif bijector_name == 'DiscreteCosineTransform':\n dct_type = hps.integers(min_value=2, max_value=3)\n bijector_params = {'dct_type': draw(dct_type)}\n elif bijector_name == 'GeneralizedPareto':\n concentration = hps.floats(min_value=-200., max_value=200)\n scale = hps.floats(min_value=1e-2, max_value=200)\n loc = hps.floats(min_value=-200, max_value=200)\n bijector_params = {'concentration': draw(concentration),\n 'scale': draw(scale),\n 'loc': draw(loc)}\n elif bijector_name == 'PowerTransform':\n power = hps.floats(min_value=1e-6, max_value=10.)\n bijector_params = {'power': draw(power)}\n elif bijector_name == 'Permute':\n event_ndims = draw(hps.integers(min_value=1, max_value=2))\n axis = hps.integers(min_value=-event_ndims, max_value=-1)\n # This is a permutation of dimensions within an axis.\n # (Contrast with `Transpose` below.)\n bijector_params = {\n 'axis': draw(axis),\n 
'permutation': draw(tfp_hps.maybe_variable(\n hps.permutations(np.arange(event_dim)), enable_vars,\n dtype=tf.int32))\n }\n elif bijector_name == 'Reshape':\n event_shape_out = draw(tfp_hps.shapes(min_ndims=1))\n # TODO(b/142135119): Wanted to draw general input and output shapes like the\n # following, but Hypothesis complained about filtering out too many things.\n # event_shape_in = draw(tfp_hps.shapes(min_ndims=1))\n # hp.assume(event_shape_out.num_elements() == event_shape_in.num_elements())\n event_shape_in = [event_shape_out.num_elements()]\n bijector_params = {'event_shape_out': event_shape_out,\n 'event_shape_in': event_shape_in}\n elif bijector_name == 'Transpose':\n event_ndims = draw(hps.integers(min_value=0, max_value=2))\n # This is a permutation of axes.\n # (Contrast with `Permute` above.)\n bijector_params = {'perm': draw(hps.permutations(np.arange(event_ndims)))}\n else:\n bijector_params = draw(\n broadcasting_params(bijector_name, batch_shape, event_dim=event_dim,\n enable_vars=enable_vars))\n ctor = getattr(tfb, bijector_name)\n hp.note('Forming {} bijector with params {}.'.format(\n bijector_name, bijector_params))\n bijector = ctor(validate_args=validate_args, **bijector_params)\n if not return_duplicate:\n return bijector\n return (bijector, ctor(validate_args=validate_args, **bijector_params))\n\n\ndef constrain_forward_shape(bijector, shape):\n \"\"\"Constrain the shape so it is compatible with bijector.forward.\n\n Args:\n bijector: A `Bijector`.\n shape: A TensorShape or compatible, giving the desired event shape.\n\n Returns:\n shape: A TensorShape, giving an event shape compatible with\n `bijector.forward`, loosely inspired by the input `shape`.\n \"\"\"\n if is_invert(bijector):\n return constrain_inverse_shape(bijector.bijector, shape=shape)\n\n # TODO(b/146897388): Enable bijectors with parameter-dependent support.\n support = bijector_hps.bijector_supports()[\n type(bijector).__name__].forward\n if support == 
tfp_hps.Support.VECTOR_SIZE_TRIANGULAR:\n # Need to constrain the shape.\n shape[-1] = int(shape[-1] * (shape[-1] + 1) / 2)\n if isinstance(bijector, tfb.Reshape):\n # Note: This relies on the out event shape being fully determined\n shape = tf.get_static_value(bijector._event_shape_in)\n return tf.TensorShape(shape)\n\n\ndef constrain_inverse_shape(bijector, shape):\n \"\"\"Constrain the shape so it is compatible with bijector.inverse.\n\n Args:\n bijector: A `Bijector`.\n shape: A TensorShape or compatible, giving the desired event shape.\n\n Returns:\n shape: A TensorShape, giving an event shape compatible with\n `bijector.inverse`, loosely inspired by the input `shape`.\n \"\"\"\n if is_invert(bijector):\n return constrain_forward_shape(bijector.bijector, shape=shape)\n if isinstance(bijector, tfb.Reshape):\n # Note: This relies on the out event shape being fully determined\n shape = tf.get_static_value(bijector._event_shape_out)\n return tf.TensorShape(shape)\n\n\[email protected]\ndef domain_tensors(draw, bijector, shape=None):\n \"\"\"Strategy for drawing Tensors in the domain of a bijector.\n\n If the bijector's domain is constrained, this proceeds by drawing an\n unconstrained Tensor and then transforming it to fit. The constraints are\n declared in `bijectors.hypothesis_testlib.bijector_supports`. The\n transformations are defined by `tfp_hps.constrainer`.\n\n Args:\n draw: Hypothesis strategy sampler supplied by `@hps.composite`.\n bijector: A `Bijector` in whose domain the Tensors will be.\n shape: An optional `TensorShape`. The shape of the resulting\n Tensors. 
Hypothesis will pick one if omitted.\n\n Returns:\n tensors: A strategy for drawing domain Tensors for the desired bijector.\n \"\"\"\n if is_invert(bijector):\n return draw(codomain_tensors(bijector.bijector, shape))\n elif is_transform_diagonal(bijector):\n return draw(domain_tensors(bijector.diag_bijector, shape))\n if shape is None:\n shape = draw(tfp_hps.shapes())\n bijector_name = type(bijector).__name__\n support = bijector_hps.bijector_supports()[bijector_name].forward\n if isinstance(bijector, tfb.PowerTransform):\n constraint_fn = bijector_hps.power_transform_constraint(bijector.power)\n elif isinstance(bijector, tfb.FrechetCDF):\n constraint_fn = bijector_hps.frechet_constraint(bijector.loc)\n elif isinstance(bijector, tfb.GeneralizedExtremeValueCDF):\n constraint_fn = bijector_hps.gev_constraint(bijector.loc,\n bijector.scale,\n bijector.concentration)\n else:\n constraint_fn = tfp_hps.constrainer(support)\n return draw(tfp_hps.constrained_tensors(constraint_fn, shape))\n\n\[email protected]\ndef codomain_tensors(draw, bijector, shape=None):\n \"\"\"Strategy for drawing Tensors in the codomain of a bijector.\n\n If the bijector's codomain is constrained, this proceeds by drawing an\n unconstrained Tensor and then transforming it to fit. The constraints are\n declared in `bijectors.hypothesis_testlib.bijector_supports`. The\n transformations are defined by `tfp_hps.constrainer`.\n\n Args:\n draw: Hypothesis strategy sampler supplied by `@hps.composite`.\n bijector: A `Bijector` in whose codomain the Tensors will be.\n shape: An optional `TensorShape`. The shape of the resulting\n Tensors. 
Hypothesis will pick one if omitted.\n\n Returns:\n tensors: A strategy for drawing codomain Tensors for the desired bijector.\n \"\"\"\n if is_invert(bijector):\n return draw(domain_tensors(bijector.bijector, shape))\n elif is_transform_diagonal(bijector):\n return draw(codomain_tensors(bijector.diag_bijector, shape))\n if shape is None:\n shape = draw(tfp_hps.shapes())\n bijector_name = type(bijector).__name__\n support = bijector_hps.bijector_supports()[bijector_name].inverse\n if is_generalized_pareto(bijector):\n constraint_fn = bijector_hps.generalized_pareto_constraint(\n bijector.loc, bijector.scale, bijector.concentration)\n else:\n constraint_fn = tfp_hps.constrainer(support)\n return draw(tfp_hps.constrained_tensors(constraint_fn, shape))\n\n\ndef assert_no_none_grad(bijector, method, wrt_vars, grads):\n for var, grad in zip(wrt_vars, grads):\n expect_grad = var.dtype not in (tf.int32, tf.int64)\n if 'log_det_jacobian' in method:\n if tensor_util.is_ref(var):\n # We check tensor_util.is_ref to account for xs/ys being in vars.\n var_name = var.name.rstrip('_0123456789:').split('/')[-1]\n else:\n var_name = '[arg]'\n to_check = bijector\n while is_invert(to_check) or is_transform_diagonal(to_check):\n to_check = to_check.bijector if is_invert(to_check) else to_check\n to_check = (to_check.diag_bijector\n if is_transform_diagonal(to_check) else to_check)\n to_check_method = INVERT_LDJ[method] if is_invert(bijector) else method\n if var_name == '[arg]' and bijector.is_constant_jacobian:\n expect_grad = False\n exempt_var_method = NO_LDJ_GRADS_EXPECTED.get(type(to_check).__name__, {})\n if to_check_method in exempt_var_method.get(var_name, ()):\n expect_grad = False\n\n if expect_grad != (grad is not None):\n raise AssertionError('{} `{}` -> {} grad for bijector {}'.format(\n 'Missing' if expect_grad else 'Unexpected', method, var, bijector))\n\n\ndef _ldj_tensor_conversions_allowed(bijector, is_forward):\n if is_invert(bijector):\n return 
_ldj_tensor_conversions_allowed(bijector.bijector, not is_forward)\n elif is_transform_diagonal(bijector):\n return _ldj_tensor_conversions_allowed(bijector.diag_bijector, is_forward)\n elif is_generalized_pareto(bijector):\n return max(\n _ldj_tensor_conversions_allowed(\n bijector._negative_concentration_bijector(), is_forward),\n _ldj_tensor_conversions_allowed(\n bijector._non_negative_concentration_bijector, is_forward))\n elif is_forward:\n return 2 if hasattr(bijector, '_forward_log_det_jacobian') else 4\n else:\n return 2 if hasattr(bijector, '_inverse_log_det_jacobian') else 4\n\n\n@test_util.test_all_tf_execution_regimes\nclass BijectorPropertiesTest(test_util.TestCase):\n\n def _draw_bijector(self, bijector_name, data,\n batch_shape=None, allowed_bijectors=None,\n validate_args=True):\n event_dim = data.draw(hps.integers(min_value=2, max_value=6))\n bijector = data.draw(\n bijectors(bijector_name=bijector_name, event_dim=event_dim,\n enable_vars=True, batch_shape=batch_shape,\n allowed_bijectors=allowed_bijectors,\n validate_args=validate_args))\n self.evaluate(tf.group(*[v.initializer for v in bijector.variables]))\n return bijector, event_dim\n\n def _draw_domain_tensor(self, bijector, data, event_dim, sample_shape=()):\n # TODO(axch): Would be nice to get rid of all this shape inference logic and\n # just rely on a notion of batch and event shape for bijectors, so we can\n # pass those through `domain_tensors` and `codomain_tensors` and use\n # `tensors_in_support`. 
However, `RationalQuadraticSpline` behaves weirdly\n # somehow and I got confused.\n codomain_event_shape = [event_dim] * bijector.inverse_min_event_ndims\n codomain_event_shape = constrain_inverse_shape(\n bijector, codomain_event_shape)\n shp = bijector.inverse_event_shape(codomain_event_shape)\n shp = functools.reduce(tensorshape_util.concatenate, [\n sample_shape,\n data.draw(\n tfp_hps.broadcast_compatible_shape(\n shp[:shp.ndims - bijector.forward_min_event_ndims])),\n shp[shp.ndims - bijector.forward_min_event_ndims:]])\n xs = tf.identity(data.draw(domain_tensors(bijector, shape=shp)), name='xs')\n\n return xs\n\n def _draw_codomain_tensor(self, bijector, data, event_dim, sample_shape=()):\n return self._draw_domain_tensor(tfb.Invert(bijector),\n data=data,\n event_dim=event_dim,\n sample_shape=sample_shape)\n\n @parameterized.named_parameters(\n {'testcase_name': bname, 'bijector_name': bname}\n for bname in TF2_FRIENDLY_BIJECTORS)\n @hp.given(hps.data())\n @tfp_hps.tfp_hp_settings()\n def testBijector(self, bijector_name, data):\n tfp_hps.guitar_skip_if_matches('Tanh', bijector_name, 'b/144163991')\n\n bijector, event_dim = self._draw_bijector(bijector_name, data)\n\n # Forward mapping: Check differentiation through forward mapping with\n # respect to the input and parameter variables. 
Also check that any\n # variables are not referenced overmuch.\n xs = self._draw_domain_tensor(bijector, data, event_dim)\n wrt_vars = [xs] + [v for v in bijector.trainable_variables\n if v.dtype.is_floating]\n with tf.GradientTape() as tape:\n with tfp_hps.assert_no_excessive_var_usage(\n 'method `forward` of {}'.format(bijector)):\n tape.watch(wrt_vars)\n # TODO(b/73073515): Fix graph mode gradients with bijector caching.\n ys = bijector.forward(xs + 0)\n grads = tape.gradient(ys, wrt_vars)\n assert_no_none_grad(bijector, 'forward', wrt_vars, grads)\n\n # For scalar bijectors, verify correctness of the _is_increasing method.\n # TODO(b/148459057): Except, don't verify Softfloor on Guitar because\n # of numerical problem.\n def exception(bijector):\n if not tfp_hps.running_under_guitar():\n return False\n if isinstance(bijector, tfb.Softfloor):\n return True\n if is_invert(bijector):\n return exception(bijector.bijector)\n return False\n if (bijector.forward_min_event_ndims == 0 and\n bijector.inverse_min_event_ndims == 0 and\n not exception(bijector)):\n dydx = grads[0]\n hp.note('dydx: {}'.format(dydx))\n isfinite = tf.math.is_finite(dydx)\n incr_or_slope_eq0 = bijector._internal_is_increasing() | tf.equal(dydx, 0) # pylint: disable=protected-access\n self.assertAllEqual(\n isfinite & incr_or_slope_eq0,\n isfinite & (dydx >= 0) | tf.zeros_like(incr_or_slope_eq0))\n\n # FLDJ: Check differentiation through forward log det jacobian with\n # respect to the input and parameter variables. 
Also check that any\n # variables are not referenced overmuch.\n event_ndims = data.draw(\n hps.integers(\n min_value=bijector.forward_min_event_ndims,\n max_value=xs.shape.ndims))\n with tf.GradientTape() as tape:\n max_permitted = _ldj_tensor_conversions_allowed(bijector, is_forward=True)\n with tfp_hps.assert_no_excessive_var_usage(\n 'method `forward_log_det_jacobian` of {}'.format(bijector),\n max_permissible=max_permitted):\n tape.watch(wrt_vars)\n # TODO(b/73073515): Fix graph mode gradients with bijector caching.\n ldj = bijector.forward_log_det_jacobian(xs + 0, event_ndims=event_ndims)\n grads = tape.gradient(ldj, wrt_vars)\n assert_no_none_grad(bijector, 'forward_log_det_jacobian', wrt_vars, grads)\n\n # Inverse mapping: Check differentiation through inverse mapping with\n # respect to the codomain \"input\" and parameter variables. Also check that\n # any variables are not referenced overmuch.\n ys = self._draw_codomain_tensor(bijector, data, event_dim)\n wrt_vars = [ys] + [v for v in bijector.trainable_variables\n if v.dtype.is_floating]\n with tf.GradientTape() as tape:\n with tfp_hps.assert_no_excessive_var_usage(\n 'method `inverse` of {}'.format(bijector)):\n tape.watch(wrt_vars)\n # TODO(b/73073515): Fix graph mode gradients with bijector caching.\n xs = bijector.inverse(ys + 0)\n grads = tape.gradient(xs, wrt_vars)\n assert_no_none_grad(bijector, 'inverse', wrt_vars, grads)\n\n # ILDJ: Check differentiation through inverse log det jacobian with respect\n # to the codomain \"input\" and parameter variables. 
Also check that any\n # variables are not referenced overmuch.\n event_ndims = data.draw(\n hps.integers(\n min_value=bijector.inverse_min_event_ndims,\n max_value=ys.shape.ndims))\n with tf.GradientTape() as tape:\n max_permitted = _ldj_tensor_conversions_allowed(\n bijector, is_forward=False)\n with tfp_hps.assert_no_excessive_var_usage(\n 'method `inverse_log_det_jacobian` of {}'.format(bijector),\n max_permissible=max_permitted):\n tape.watch(wrt_vars)\n # TODO(b/73073515): Fix graph mode gradients with bijector caching.\n ldj = bijector.inverse_log_det_jacobian(ys + 0, event_ndims=event_ndims)\n grads = tape.gradient(ldj, wrt_vars)\n assert_no_none_grad(bijector, 'inverse_log_det_jacobian', wrt_vars, grads)\n\n # Verify that `_is_permutation` implies constant zero Jacobian.\n if bijector._is_permutation:\n self.assertTrue(bijector._is_constant_jacobian)\n self.assertAllEqual(ldj, 0.)\n\n # Verify correctness of batch shape.\n xs_batch_shapes = tf.nest.map_structure(\n lambda x, nd: ps.shape(x)[:ps.rank(x) - nd],\n xs,\n bijector.inverse_event_ndims(event_ndims))\n empirical_batch_shape = functools.reduce(\n ps.broadcast_shape,\n nest.flatten_up_to(bijector.forward_min_event_ndims, xs_batch_shapes))\n batch_shape = bijector.experimental_batch_shape(y_event_ndims=event_ndims)\n if tensorshape_util.is_fully_defined(batch_shape):\n self.assertAllEqual(empirical_batch_shape, batch_shape)\n self.assertAllEqual(empirical_batch_shape,\n bijector.experimental_batch_shape_tensor(\n y_event_ndims=event_ndims))\n\n # Check that the outputs of forward_dtype and inverse_dtype match the dtypes\n # of the outputs of forward and inverse.\n self.assertAllEqualNested(ys.dtype, bijector.forward_dtype(xs.dtype))\n self.assertAllEqualNested(xs.dtype, bijector.inverse_dtype(ys.dtype))\n\n @parameterized.named_parameters({\n 'testcase_name': bname, 'bijector_name': bname\n } for bname in TF2_FRIENDLY_BIJECTORS)\n @hp.given(hps.data())\n @tfp_hps.tfp_hp_settings()\n def 
testParameterProperties(self, bijector_name, data):\n if tf.config.functions_run_eagerly() or not tf.executing_eagerly():\n self.skipTest('To reduce test weight, parameter properties tests run in '\n 'eager mode only.')\n\n non_trainable_params = (\n 'bijector', # Several.\n 'forward_fn', # Inline.\n 'inverse_fn', # Inline.\n 'forward_min_event_ndims', # Inline.\n 'inverse_min_event_ndims', # Inline.\n 'event_shape_out', # Reshape.\n 'event_shape_in', # Reshape.\n 'perm', # Transpose.\n 'rightmost_transposed_ndims', # Transpose.\n 'diag_bijector', # TransformDiagonal.\n 'diag_shift' # FillScaleTriL (doesn't support batch shape).\n )\n bijector, event_dim = self._draw_bijector(\n bijector_name, data,\n validate_args=True,\n allowed_bijectors=TF2_FRIENDLY_BIJECTORS)\n\n # Extract the full shape of an output from this bijector.\n xs = self._draw_domain_tensor(bijector, data, event_dim)\n ys = bijector.forward(xs)\n output_shape = ps.shape(ys)\n sample_and_batch_ndims = (ps.rank_from_shape(output_shape) -\n bijector.inverse_min_event_ndims)\n\n try:\n params = type(bijector).parameter_properties()\n params64 = type(bijector).parameter_properties(dtype=tf.float64)\n except NotImplementedError as e:\n self.skipTest(str(e))\n\n seeds = samplers.split_seed(test_util.test_seed(), n=len(params))\n new_parameters = {}\n for i, (param_name, param) in enumerate(params.items()):\n if param_name in non_trainable_params:\n continue\n\n # Check that the shape_fn is consistent with event_ndims.\n try:\n param_shape = param.shape_fn(sample_shape=output_shape)\n except NotImplementedError:\n self.skipTest('No shape function implemented for bijector {} '\n 'parameter {}.'.format(bijector_name, param_name))\n self.assertGreaterEqual(\n param.event_ndims,\n ps.rank_from_shape(param_shape) - sample_and_batch_ndims)\n\n if param.is_preferred:\n try:\n param_bijector = param.default_constraining_bijector_fn()\n except NotImplementedError:\n self.skipTest('No constraining bijector 
implemented for {} '\n 'parameter {}.'.format(bijector_name, param_name))\n unconstrained_shape = (\n param_bijector.inverse_event_shape_tensor(param_shape))\n unconstrained_param = samplers.normal(\n unconstrained_shape, seed=seeds[i])\n new_parameters[param_name] = param_bijector.forward(unconstrained_param)\n\n # Check that passing a float64 `eps` works with float64 parameters.\n b_float64 = params64[param_name].default_constraining_bijector_fn()\n b_float64(tf.cast(unconstrained_param, tf.float64))\n\n # Copy over any non-trainable parameters.\n new_parameters.update({\n k: v\n for (k, v) in bijector.parameters.items()\n if k in non_trainable_params\n })\n\n # Sanity check that we got valid parameters.\n new_parameters['validate_args'] = True\n new_bijector = type(bijector)(**new_parameters)\n self.evaluate(tf.group(*[v.initializer for v in new_bijector.variables]))\n xs = self._draw_domain_tensor(new_bijector, data, event_dim)\n self.evaluate(new_bijector.forward(xs))\n\n @parameterized.named_parameters(\n {'testcase_name': bname, 'bijector_name': bname}\n for bname in (set(TF2_FRIENDLY_BIJECTORS) -\n set(AUTOVECTORIZATION_IS_BROKEN)))\n @hp.given(hps.data())\n @tfp_hps.tfp_hp_settings()\n def testAutoVectorization(self, bijector_name, data):\n\n # TODO(b/150161911): reconcile numeric behavior of eager and graph mode.\n if tf.executing_eagerly():\n return\n\n bijector, event_dim = self._draw_bijector(\n bijector_name, data,\n batch_shape=[], # Avoid conflict with vmap sample dimension.\n validate_args=False, # Work around lack of `If` support in vmap.\n allowed_bijectors=(set(TF2_FRIENDLY_BIJECTORS) -\n set(AUTOVECTORIZATION_IS_BROKEN)))\n atol = AUTOVECTORIZATION_ATOL[bijector_name]\n rtol = AUTOVECTORIZATION_RTOL[bijector_name]\n\n # Forward\n n = 3\n xs = self._draw_domain_tensor(bijector, data, event_dim, sample_shape=[n])\n ys = bijector.forward(xs)\n vectorized_ys = tf.vectorized_map(bijector.forward, xs,\n fallback_to_while_loop=False)\n 
self.assertAllClose(*self.evaluate((ys, vectorized_ys)),\n atol=atol, rtol=rtol)\n\n # FLDJ\n event_ndims = data.draw(\n hps.integers(\n min_value=bijector.forward_min_event_ndims,\n max_value=ps.rank_from_shape(xs.shape) - 1))\n fldj_fn = functools.partial(bijector.forward_log_det_jacobian,\n event_ndims=event_ndims)\n vectorized_fldj = tf.vectorized_map(fldj_fn, xs,\n fallback_to_while_loop=False)\n fldj = tf.broadcast_to(fldj_fn(xs), tf.shape(vectorized_fldj))\n self.assertAllClose(*self.evaluate((fldj, vectorized_fldj)),\n atol=atol, rtol=rtol)\n\n # Inverse\n ys = self._draw_codomain_tensor(bijector, data, event_dim, sample_shape=[n])\n xs = bijector.inverse(ys)\n vectorized_xs = tf.vectorized_map(bijector.inverse, ys,\n fallback_to_while_loop=False)\n self.assertAllClose(*self.evaluate((xs, vectorized_xs)),\n atol=atol, rtol=rtol)\n\n # ILDJ\n event_ndims = data.draw(\n hps.integers(\n min_value=bijector.inverse_min_event_ndims,\n max_value=ps.rank_from_shape(ys.shape) - 1))\n ildj_fn = functools.partial(bijector.inverse_log_det_jacobian,\n event_ndims=event_ndims)\n vectorized_ildj = tf.vectorized_map(ildj_fn, ys,\n fallback_to_while_loop=False)\n ildj = tf.broadcast_to(ildj_fn(ys), tf.shape(vectorized_ildj))\n self.assertAllClose(*self.evaluate((ildj, vectorized_ildj)),\n atol=atol, rtol=rtol)\n\n @parameterized.named_parameters(\n {'testcase_name': bname, 'bijector_name': bname}\n for bname in TF2_FRIENDLY_BIJECTORS)\n @hp.given(hps.data())\n @tfp_hps.tfp_hp_settings()\n def testHashing(self, bijector_name, data):\n bijector_1, bijector_2 = data.draw(\n bijectors(bijector_name=bijector_name,\n enable_vars=True, return_duplicate=True))\n self.assertEqual(hash(bijector_1), hash(bijector_2))\n\n @parameterized.named_parameters(\n {'testcase_name': bname, 'bijector_name': bname}\n for bname in TF2_FRIENDLY_BIJECTORS)\n @hp.given(hps.data())\n @tfp_hps.tfp_hp_settings()\n def testEquality(self, bijector_name, data):\n bijector_1, bijector_2 = data.draw(\n 
bijectors(bijector_name=bijector_name,\n enable_vars=True, return_duplicate=True))\n self.assertEqual(bijector_1, bijector_2)\n self.assertFalse(bijector_1 != bijector_2) # pylint: disable=g-generic-assert\n\n @parameterized.named_parameters(\n {'testcase_name': bname, 'bijector_name': bname}\n for bname in (set(TF2_FRIENDLY_BIJECTORS) -\n set(COMPOSITE_TENSOR_IS_BROKEN)))\n @hp.given(hps.data())\n @tfp_hps.tfp_hp_settings()\n def testCompositeTensor(self, bijector_name, data):\n\n bijector, event_dim = self._draw_bijector(\n bijector_name, data,\n batch_shape=[],\n validate_args=True,\n allowed_bijectors=(set(TF2_FRIENDLY_BIJECTORS) -\n set(COMPOSITE_TENSOR_IS_BROKEN)))\n\n if type(bijector) is invert_lib._Invert: # pylint: disable=unidiomatic-typecheck\n if isinstance(bijector.bijector, tf.__internal__.CompositeTensor):\n raise TypeError('`_Invert` should wrap only non-`CompositeTensor` '\n 'bijectors.')\n self.skipTest('`_Invert` bijectors are not `CompositeTensor`s.')\n\n if not tf.executing_eagerly():\n bijector = tf.nest.map_structure(\n lambda x: (tf.convert_to_tensor(x) # pylint: disable=g-long-lambda\n if isinstance(x, DeferredTensor) else x),\n bijector,\n expand_composites=True)\n\n self.assertIsInstance(bijector, tf.__internal__.CompositeTensor)\n flat = tf.nest.flatten(bijector, expand_composites=True)\n unflat = tf.nest.pack_sequence_as(bijector, flat, expand_composites=True)\n\n # Compare forward maps before and after compositing.\n n = 3\n xs = self._draw_domain_tensor(bijector, data, event_dim, sample_shape=[n])\n before_ys = bijector.forward(xs)\n after_ys = unflat.forward(xs)\n self.assertAllClose(*self.evaluate((before_ys, after_ys)))\n\n # Compare inverse maps before and after compositing.\n ys = self._draw_codomain_tensor(bijector, data, event_dim, sample_shape=[n])\n before_xs = bijector.inverse(ys)\n after_xs = unflat.inverse(ys)\n self.assertAllClose(*self.evaluate((before_xs, after_xs)))\n\n # Input to tf.function\n self.assertAllClose(\n 
before_ys,\n tf.function(lambda b: b.forward(xs))(bijector),\n rtol=COMPOSITE_TENSOR_RTOL[bijector_name],\n atol=COMPOSITE_TENSOR_ATOL[bijector_name])\n\n # Forward mapping: Check differentiation through forward mapping with\n # respect to the input and parameter variables. Also check that any\n # variables are not referenced overmuch.\n xs = self._draw_domain_tensor(bijector, data, event_dim)\n wrt_vars = [xs] + [v for v in bijector.trainable_variables\n if v.dtype.is_floating]\n with tf.GradientTape() as tape:\n tape.watch(wrt_vars)\n # TODO(b/73073515): Fix graph mode gradients with bijector caching.\n ys = bijector.forward(xs + 0)\n grads = tape.gradient(ys, wrt_vars)\n assert_no_none_grad(bijector, 'forward', wrt_vars, grads)\n\n\ndef ensure_nonzero(x):\n return tf.where(x < 1e-6, tf.constant(1e-3, x.dtype), x)\n\n\nCONSTRAINTS = {\n 'concentration':\n tfp_hps.softplus_plus_eps(),\n 'concentration0':\n tfp_hps.softplus_plus_eps(),\n 'concentration1':\n tfp_hps.softplus_plus_eps(),\n 'hinge_softness':\n tfp_hps.softplus_plus_eps(),\n 'power':\n # Restrict to positive since `Invert(Power(...))` tests the negation.\n tfp_hps.softplus_plus_eps(),\n 'rate':\n tfp_hps.softplus_plus_eps(),\n 'scale':\n tfp_hps.softplus_plus_eps(),\n 'tailweight':\n tfp_hps.softplus_plus_eps(),\n 'temperature':\n tfp_hps.softplus_plus_eps(eps=0.5),\n 'Scale.scale':\n tfp_hps.softplus_plus_eps(),\n 'ScaleMatvecDiag.scale_diag':\n tfp_hps.softplus_plus_eps(),\n 'ScaleMatvecTriL.scale_tril':\n tfp_hps.lower_tril_positive_definite,\n # Lower bound concentration to 1e-1 to avoid\n # overflow for the inverse.\n 'ShiftedGompertzCDF.concentration':\n lambda x: tf.math.softplus(x) + 1e-1,\n 'bin_widths':\n bijector_hps.spline_bin_size_constraint,\n 'bin_heights':\n bijector_hps.spline_bin_size_constraint,\n 'knot_slopes':\n bijector_hps.spline_slope_constraint,\n 'lower_upper':\n lambda x: tf.linalg.set_diag(x, ensure_nonzero(tf.linalg.diag_part(x))),\n 'permutation':\n lambda x: 
tf.math.top_k(x, k=x.shape[-1]).indices,\n}\n\n\ndef constraint_for(bijector_name=None, param=None):\n if param is not None:\n return CONSTRAINTS.get('{}.{}'.format(bijector_name, param),\n CONSTRAINTS.get(param, tfp_hps.identity_fn))\n return CONSTRAINTS.get(bijector_name, tfp_hps.identity_fn)\n\n\nif __name__ == '__main__':\n np.set_printoptions(floatmode='unique', precision=None)\n test_util.main()\n"
] | [
[
"tensorflow.compat.v2.exp",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.broadcast_static_shape",
"tensorflow.compat.v2.math.squared_difference",
"tensorflow.compat.v2.math.log",
"tensorflow.compat.v2.TensorShape"
],
[
"numpy.logspace",
"numpy.log",
"numpy.array",
"numpy.linspace"
],
[
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.math.softplus",
"tensorflow.compat.v2.TensorShape",
"tensorflow.compat.v2.linalg.diag_part",
"numpy.arange",
"tensorflow.compat.v2.math.is_finite",
"tensorflow.compat.v2.vectorized_map",
"numpy.float32",
"tensorflow.compat.v2.group",
"tensorflow.python.util.nest.flatten_up_to",
"tensorflow.compat.v2.math.top_k",
"tensorflow.compat.v2.equal",
"tensorflow.compat.v2.config.functions_run_eagerly",
"tensorflow.compat.v2.nest.pack_sequence_as",
"tensorflow.compat.v2.nest.flatten",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.get_static_value",
"numpy.set_printoptions",
"tensorflow.compat.v2.GradientTape",
"tensorflow.compat.v2.cast"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jonaswagner2826/MECH6327 | [
"2b55aaf6f9e1bcf5cc684f5c853cadec26acf9d2"
] | [
"InClass&Examples/InClass_20210303.py"
] | [
"\n\n\n\"\"\"\nIn Class Ex:\nConstrained \\infty-norm minimization\n\nProblem:\n minimize:\n \\norm{x}_\\infty\n subject to:\n A x \\leq b\n\nNotes:\n \\norm{x}_\\infty = \\max_i \\abs{x_i}\n = \\max_i \\{x_i, -x_i\\}\n = \\max_i \\{x_1, \\dots, x_n, -x_1, \\dots, -x_n\\}\n\nEpigraph Form:\n minimize:\n t\n subject to:\n \\max_i \\{x_1, \\dots, x_n, -x_1, \\dots, -x_n\\} \\leq t\n Ax \\leq b\n\nEquivelent Form:\n minimize:\n t\n subject to:\n x_i \\leq t, i = 1, \\dots, n\n -x_i \\leq t, i = 1, \\dots, n\n Ax \\leq b\n\nEquivelent Form:\n minimize:\n t\n subject to:\n - 1 t \\leq x \\leq 1 t\n Ax \\leq b\n\"\"\"\n\n\nimport cvxpy as cp\nimport numpy as np\n\n\nn = 10\nm = 100\n\nA = np.random.rand(m, n)\nb = np.random.rand(m, 1) - 1\n\n\nX = cp.Variable((n, n))\nprob = cp.Problem(cp.Minimize(cp.max(cp.sum(cp.abs(X)))),\n [A @ X <= b])\n\n\n# Print result.\nprint(\"The optimal value is\", prob.value)\nprint(\"A solution X is\")\nprint(X.value)\n\n\n# doesn't work..... don't really feel like troubleshooting though\n"
] | [
[
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yotamitai/Agent-Disagreements | [
"858583db448c5621273824527f1f4181ebf55a2b"
] | [
"disagreements/disagreement.py"
] | [
"from copy import deepcopy\n\nimport cv2\nimport imageio\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom os.path import join\nfrom PIL import ImageFont, ImageDraw, Image\n\nfrom disagreements.common.utils import make_clean_dirs, save_image, create_video\nfrom disagreements.logging_info import log\nfrom disagreements.get_trajectories import trajectory_importance_max_min\n\n\ndef get_trajectories(trace):\n \"\"\"for each trajectory of agent 2 - find corresponding trajectory of agent 1\"\"\"\n for i, a2_traj in enumerate(trace.a2_trajectories):\n start_idx, end_idx = a2_traj[0].id[1], a2_traj[-1].id[1]\n a1_traj = trace.states[start_idx:end_idx + 1]\n a1_traj_q_values = [x.action_values for x in a1_traj]\n a2_traj_q_values = [x.action_values for x in a2_traj]\n a1_traj_indexes = [x.id[1] for x in a1_traj]\n a2_traj_indexes = list(range(start_idx, end_idx + 1))\n dt = DisagreementTrajectory(trace.disagreement_indexes[i], a1_traj_indexes,\n a2_traj_indexes, trace.trajectory_length, trace.episode, i,\n a1_traj_q_values, a2_traj_q_values,\n trace.a1_values_for_a2_states[i],\n trace.a2_values_for_a1_states[start_idx:end_idx + 1],\n trace.agent_ratio)\n trace.a1_trajectory_indexes.append(a1_traj_indexes)\n trace.disagreement_trajectories.append(dt)\n\n\ndef get_frames(trace, s1_indexes, s2_indexes, s2_traj, mark_position=None):\n a1_frames = [trace.states[x].image for x in s1_indexes]\n a2_frames = [trace.a2_trajectories[s2_traj][x - min(s2_indexes)].image for x in s2_indexes]\n assert len(a1_frames) == trace.trajectory_length, 'Error in highlight frame length'\n assert len(a2_frames) == trace.trajectory_length, 'Error in highlight frame length'\n da_index = trace.trajectory_length // 2 - 1\n if mark_position:\n \"\"\"mark disagreement state\"\"\"\n a1_frames[da_index] = mark_agent(a1_frames[da_index], text='Disagreement',\n position=mark_position)\n a2_frames[da_index] = a1_frames[da_index]\n return a1_frames, a2_frames\n\n\nclass State(object):\n def 
__init__(self, idx, episode, obs, state, action_values, img, features, **kwargs):\n self.observation = obs\n self.image = img\n self.state = state\n self.action_values = action_values\n self.features = features\n self.kwargs = kwargs\n self.id = (episode, idx)\n\n def plot_image(self):\n plt.imshow(self.image)\n plt.show()\n\n def save_image(self, path, name):\n imageio.imwrite(path + '/' + name + '.png', self.image)\n\n\nclass DisagreementTrajectory(object):\n def __init__(self, da_index, a1_states, a2_states, horizon, episode, i, a1_s_a_values,\n a2_s_a_values, a1_values_for_a2_states, a2_values_for_a1_states, agent_ratio):\n self.a1_states = a1_states\n self.a2_states = a2_states\n self.episode = episode\n self.trajectory_index = i\n self.horizon = horizon\n self.da_index = da_index\n self.disagreement_score = None\n self.importance = None\n self.state_importance_list = []\n self.agent_ratio = agent_ratio\n self.a1_s_a_values = a1_s_a_values\n self.a2_s_a_values = a2_s_a_values\n self.a1_values_for_a2_states = a1_values_for_a2_states\n self.a2_values_for_a1_states = a2_values_for_a1_states\n self.importance_funcs = {\n \"max_min\": trajectory_importance_max_min,\n \"max_avg\": trajectory_importance_max_min,\n \"avg\": trajectory_importance_max_min,\n \"avg_delta\": trajectory_importance_max_min,\n }\n\n def calculate_state_disagreement_extent(self, importance):\n self.state_importance = importance\n da_idx = self.da_index\n traj_da_idx = self.a1_states.index(da_idx)\n s1_vals, s2_vals = self.a1_s_a_values[traj_da_idx], self.a2_s_a_values[traj_da_idx]\n if importance == 'sb':\n return self.second_best_confidence(s1_vals, s2_vals)\n elif importance == 'bety':\n return self.better_than_you_confidence(s1_vals, s2_vals)\n\n def calculate_trajectory_importance(self, trace, i, importance):\n \"\"\"calculate trajectory score\"\"\"\n s_i, e_i = self.a1_states[0], self.a1_states[-1]\n self.trajectory_importance = importance\n rel_idx = e_i - s_i\n if importance == 
\"last_state\":\n s1, s2 = trace.states[e_i], trace.a2_trajectories[i][rel_idx]\n return self.trajectory_importance_last_state(s1, s2, rel_idx)\n else:\n return self.get_trajectory_importance(importance, rel_idx)\n\n def get_trajectory_importance(self, importance, end):\n \"\"\"state values\"\"\"\n s1_a1_vals = self.a1_s_a_values\n s1_a2_vals = self.a2_values_for_a1_states\n s2_a1_vals = self.a1_values_for_a2_states[:end + 1]\n s2_a2_vals = self.a2_s_a_values[:end + 1]\n \"\"\"calculate value of all individual states in both trajectories,\n as ranked by both agents\"\"\"\n traj1_states_importance, traj2_states_importance = [], []\n for i in range(len(s1_a1_vals)):\n traj1_states_importance.append(self.get_state_value(s1_a1_vals[i], s1_a2_vals[i]))\n traj2_states_importance.append(self.get_state_value(s2_a1_vals[i], s2_a2_vals[i]))\n \"\"\"calculate score of trajectories\"\"\"\n traj1_score = self.importance_funcs[importance](traj1_states_importance)\n traj2_score = self.importance_funcs[importance](traj2_states_importance)\n \"\"\"return the difference between them. 
bigger == greater disagreement\"\"\"\n return abs(traj1_score - traj2_score)\n\n def trajectory_importance_last_state(self, s1, s2, idx):\n if s1.image.tolist() == s2.image.tolist(): return 0\n \"\"\"state values\"\"\"\n s1_a1_vals = self.a1_s_a_values[-1]\n s1_a2_vals = self.a2_values_for_a1_states[-1]\n s2_a1_vals = self.a1_values_for_a2_states[idx]\n s2_a2_vals = self.a2_s_a_values[idx]\n \"\"\"the value of the state is defined by the best available action from it\"\"\"\n s1_score = max(s1_a1_vals) * self.agent_ratio + max(s1_a2_vals)\n s2_score = max(s2_a1_vals) * self.agent_ratio + max(s2_a2_vals)\n return abs(s1_score - s2_score)\n\n def second_best_confidence(self, a1_vals, a2_vals):\n \"\"\"compare best action to second-best action\"\"\"\n sorted_1 = sorted(a1_vals, reverse=True)\n sorted_2 = sorted(a2_vals, reverse=True)\n a1_diff = sorted_1[0] - sorted_1[1] * self.agent_ratio\n a2_diff = sorted_2[0] - sorted_2[1]\n return a1_diff + a2_diff\n\n def better_than_you_confidence(self, a1_vals, a2_vals):\n a1_diff = (max(a1_vals) - a1_vals[np.argmax(a2_vals)]) * self.agent_ratio\n a2_diff = max(a2_vals) - a2_vals[np.argmax(a1_vals)]\n return a1_diff + a2_diff\n\n def get_state_value(self, a1_vals, a2_vals):\n \"\"\"\n the value of the state is defined by the best available action from it, as this is\n calculated by estimated future returns\n \"\"\"\n return max(a1_vals) * self.agent_ratio + max(a2_vals)\n\n def normalize_q_values(self, a1_max, a1_min, a2_max, a2_min):\n self.a1_s_a_values = (np.array(self.a1_s_a_values) - a1_min) / (a1_max - a1_min)\n self.a2_s_a_values = (np.array(self.a2_s_a_values) - a2_min) / (a2_max - a2_min)\n self.a1_values_for_a2_states = (np.array(self.a1_values_for_a2_states) - a1_min) / (\n a1_max - a1_min)\n self.a2_values_for_a1_states = (np.array(self.a2_values_for_a1_states) - a2_min) / (\n a2_max - a2_min)\n\n\ndef disagreement(timestep, trace, env2, a1, a2, obs, s):\n trajectory_states, trajectory_scores = \\\n 
disagreement_states(trace, env2, a2, timestep, obs, s)\n a1.interface.update_trace(trace, a1, timestep, trajectory_states, trajectory_scores )\n\n\ndef save_disagreements(a1_DAs, a2_DAs, output_dir, fps):\n highlight_frames_dir = join(output_dir, \"highlight_frames\")\n video_dir = join(output_dir, \"videos\")\n make_clean_dirs(video_dir)\n make_clean_dirs(join(video_dir, 'temp'))\n make_clean_dirs(highlight_frames_dir)\n dir = join(video_dir, 'temp')\n\n height, width, layers = a1_DAs[0][0].shape\n size = (width, height)\n trajectory_length = len(a1_DAs[0])\n da_idx = trajectory_length // 2\n for hl_i in range(len(a1_DAs)):\n for img_i in range(len(a1_DAs[hl_i])):\n save_image(highlight_frames_dir, \"a1_DA{}_Frame{}\".format(str(hl_i), str(img_i)),\n a1_DAs[hl_i][img_i])\n save_image(highlight_frames_dir, \"a2_DA{}_Frame{}\".format(str(hl_i), str(img_i)),\n a2_DAs[hl_i][img_i])\n\n \"\"\"up to disagreement\"\"\"\n create_video('together' + str(hl_i), highlight_frames_dir, dir, \"a1_DA\" + str(hl_i), size,\n da_idx, fps, add_pause=[0,0])\n \"\"\"from disagreement\"\"\"\n name1, name2 = \"a1_DA\" + str(hl_i), \"a2_DA\" + str(hl_i)\n create_video(name1, highlight_frames_dir, dir, name1, size,\n trajectory_length, fps, start=da_idx, add_pause=[0, 0])\n create_video(name2, highlight_frames_dir, dir, name2, size,\n trajectory_length, fps, start=da_idx, add_pause=[0, 0])\n return video_dir\n\n\n# def get_pre_disagreement_states(t, horizon, states):\n# start = t - (horizon // 2) + 1\n# pre_disagreement_states = []\n# if start < 0:\n# pre_disagreement_states = [states[0] for _ in range(abs(start))]\n# start = 0\n# pre_disagreement_states = pre_disagreement_states + states[start:]\n# return pre_disagreement_states\n\n\ndef disagreement_states(trace, env, agent, timestep, obs, s):\n horizon, da_rewards = env.args.horizon, []\n start = timestep - (horizon // 2) + 1\n if start < 0: start = 0\n trajectory_states = trace.states[start:]\n da_state = 
deepcopy(trajectory_states[-1])\n da_state.action_values = agent.interface.get_state_action_values(agent, s)\n trajectory_states[-1] = da_state\n done = False\n next_timestep = timestep + 1\n for step in range(next_timestep, next_timestep + (horizon // 2)):\n if done: break\n a = agent.interface.get_next_action(agent, obs, s)\n obs, r, done, info = env.step(a)\n s = agent.interface.get_state_from_obs(agent, obs)\n s_a_values = agent.interface.get_state_action_values(agent, s)\n frame = env.render(mode='rgb_array')\n features = agent.interface.get_features(env)\n state_obj = State(step, trace.episode, obs, s, s_a_values, frame, features)\n trajectory_states.append(state_obj)\n da_rewards.append(r)\n agent.interface.da_states_functionality(trace, params=s_a_values)\n return trajectory_states, da_rewards\n\n\ndef get_top_k_disagreements(traces, args):\n \"\"\"obtain the N-most important trajectories\"\"\"\n top_k_diverse_trajectories, discarded_context = [], []\n \"\"\"get all trajectories\"\"\"\n all_trajectories = []\n for trace in traces:\n all_trajectories += [t for t in trace.disagreement_trajectories]\n sorted_trajectories = sorted(all_trajectories, key=lambda x: x.importance, reverse=True)\n \"\"\"select trajectories\"\"\"\n seen_indexes = {i: [] for i in range(len(traces))}\n for d in sorted_trajectories:\n t_indexes = d.a1_states\n intersecting_indexes = set(seen_indexes[d.episode]).intersection(set(t_indexes))\n if len(intersecting_indexes) > args.similarity_limit:\n discarded_context.append(d)\n continue\n seen_indexes[d.episode] += t_indexes\n top_k_diverse_trajectories.append(d)\n if len(top_k_diverse_trajectories) == args.n_disagreements:\n break\n\n if not len(top_k_diverse_trajectories) == args.n_disagreements:\n top_k_diverse_trajectories += discarded_context\n top_k_diverse_trajectories = top_k_diverse_trajectories[:args.n_disagreements]\n\n log(f'Chosen disagreements:')\n for d in top_k_diverse_trajectories:\n log(f'Name: 
({d.episode},{d.da_index})')\n\n return top_k_diverse_trajectories\n\n\ndef make_same_length(trajectories, horizon, traces):\n \"\"\"make all trajectories the same length\"\"\"\n for d in trajectories:\n if len(d.a1_states) < horizon:\n \"\"\"insert to start of video\"\"\"\n da_traj_idx = d.a1_states.index(d.da_index)\n for _ in range((horizon // 2) - da_traj_idx - 1):\n d.a1_states.insert(0, d.a1_states[0])\n d.a2_states.insert(0, d.a1_states[0])\n \"\"\"insert to end of video\"\"\"\n while len(d.a1_states) < horizon:\n last_idx = d.a1_states[-1]\n if last_idx < len(traces[d.episode].states) - 1:\n last_idx += 1\n d.a1_states.append(last_idx)\n else:\n d.a1_states.append(last_idx)\n\n for _ in range(horizon - len(d.a2_states)):\n d.a2_states.append(d.a2_states[-1])\n return trajectories\n\n\ndef mark_agent(img, action=None, text=None, position=None, color=255, thickness=2):\n assert position, 'Error - No position provided for marking agent'\n img2 = img.copy()\n top_left = (position[0], position[1])\n bottom_right = (position[0] + 30, position[1] + 15)\n cv2.rectangle(img2, top_left, bottom_right, color, thickness)\n\n \"\"\"add action text\"\"\"\n if action or text:\n font = ImageFont.truetype('Roboto-Regular.ttf', 20)\n text = text or f'Chosen action: {ACTION_DICT[action]}'\n image = Image.fromarray(img2, 'RGB')\n draw = ImageDraw.Draw(image)\n draw.text((40, 40), text, (255, 255, 255), font=font)\n img_array = np.asarray(image)\n return img_array\n\n return img2\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.asarray",
"numpy.argmax",
"numpy.array",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mumingpo/590op-final-project | [
"95851d2f430cdc24b9d834c6a50b069fc637a8ed"
] | [
"src/data_utils.py"
] | [
"import numpy as np\n\ndef read_data(path, M, N, offset=0):\n \"\"\"\n Read u.data in the default format.\n The memory cost associated with a [943, 1682] matrix of floats are not big,\n so we can still do this.\n \"Might\" run into trouble for larger datasets,\n where we will need to handle things in batches.\n \n Params:\n M: number of users\n N: number of movies\n offset: center of ratings (to assist in regularization.\n \n Return:\n arr: [M, N] matrix of user ratings of movies\n omega: [M, N] matrix indicating where user rating is valid\n \"\"\"\n\n arr = np.zeros([M, N], dtype=np.float)\n omega = np.full([M, N], False, dtype=np.bool)\n\n with open(path, \"rt\") as f:\n for line in f:\n if line == \"\":\n continue\n # fields are \"user\", \"movie\", \"rating\", and \"timestamp\" respectively in order,\n # delimited by '\\t'\n fields = line.split('\\t')\n if len(fields) != 4:\n raise ValueError(\"Data corruption: line contains {}\".format(fields))\n\n user, movie = [int(field) - 1 for field in fields[:2]]\n rating = int(fields[2])\n arr[user][movie] = rating - offset\n omega[user][movie] = True\n \n return arr, omega"
] | [
[
"numpy.zeros",
"numpy.full"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Alessi0X/GraKeL | [
"222fcdde4071afebeb3d7bd724b1f979ede8df7f"
] | [
"grakel/kernels/kernel.py"
] | [
"\"\"\"The main class file representing a kernel.\"\"\"\n# Author: Ioannis Siglidis <[email protected]>\n# License: BSD 3 clause\nimport collections\nimport warnings\nimport copy\n\nimport numpy as np\n\nfrom sklearn.base import BaseEstimator\nfrom sklearn.base import TransformerMixin\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.utils.validation import check_is_fitted\nfrom sklearn.externals import joblib\n\nfrom grakel.graph import Graph\nfrom grakel.kernels._c_functions import k_to_ij_triangular\nfrom grakel.kernels._c_functions import k_to_ij_rectangular\n\n# Python 2/3 cross-compatibility import\nfrom six import iteritems\ntry:\n import itertools.imap as map\nexcept ImportError:\n pass\n\n\nclass Kernel(BaseEstimator, TransformerMixin):\n \"\"\"A general class for graph kernels.\n\n At default a kernel is considered as pairwise. Doing so the coder that\n adds a new kernel, possibly only needs to overwrite the attributes:\n `parse_input` and `pairwise_operation` on the new kernel object.\n\n Parameters\n ----------\n n_jobs : int or None, optional\n Defines the number of jobs of a joblib.Parallel objects needed for parallelization\n or None for direct execution.\n\n normalize : bool, optional\n Normalize the output of the graph kernel.\n\n verbose : bool, optional\n Define if messages will be printed on stdout.\n\n Attributes\n ----------\n X : list\n Stores the input that occurs from parse input, on fit input data.\n Default format of the list objects is `grakel.graph.graph`.\n\n _graph_format : str\n Stores in which type the graphs will need to be stored.\n\n _verbose : bool\n Defines if two print arguments on stdout.\n\n _normalize : bool\n Defines if normalization will be applied on the kernel matrix.\n\n _valid_parameters : set\n Holds the default valid parameters names for initialization.\n\n _method_calling : int\n An inside enumeration defines which method calls another method.\n - 1 stands for fit\n - 2 stands for fit_transform\n - 3 
stands for transform\n\n _parallel : sklearn.external.joblib.Parallel or None\n A Parallel initialized object to imply parallelization to kernel execution.\n The use of this object depends on the implementation of each base kernel.\n\n \"\"\"\n\n X = None\n _graph_format = \"dictionary\"\n _method_calling = 0\n\n def __init__(self,\n n_jobs=None,\n normalize=False,\n verbose=False):\n \"\"\"`__init__` for `kernel` object.\"\"\"\n self.verbose = verbose\n self.n_jobs = n_jobs\n self.normalize = normalize\n self._initialized = dict(n_jobs=False)\n\n def fit(self, X, y=None):\n \"\"\"Fit a dataset, for a transformer.\n\n Parameters\n ----------\n X : iterable\n Each element must be an iterable with at most three features and at\n least one. The first that is obligatory is a valid graph structure\n (adjacency matrix or edge_dictionary) while the second is\n node_labels and the third edge_labels (that fitting the given graph\n format). The train samples.\n\n y : None\n There is no need of a target in a transformer, yet the pipeline API\n requires this parameter.\n\n Returns\n -------\n self : object\n Returns self.\n\n \"\"\"\n self._is_transformed = False\n self._method_calling = 1\n\n # Parameter initialization\n self.initialize()\n\n # Input validation and parsing\n if X is None:\n raise ValueError('`fit` input cannot be None')\n else:\n self.X = self.parse_input(X)\n\n # Return the transformer\n return self\n\n def transform(self, X):\n \"\"\"Calculate the kernel matrix, between given and fitted dataset.\n\n Parameters\n ----------\n X : iterable\n Each element must be an iterable with at most three features and at\n least one. The first that is obligatory is a valid graph structure\n (adjacency matrix or edge_dictionary) while the second is\n node_labels and the third edge_labels (that fitting the given graph\n format). 
If None the kernel matrix is calculated upon fit data.\n The test samples.\n\n Returns\n -------\n K : numpy array, shape = [n_targets, n_input_graphs]\n corresponding to the kernel matrix, a calculation between\n all pairs of graphs between target an features\n\n \"\"\"\n self._method_calling = 3\n # Check is fit had been called\n check_is_fitted(self, ['X'])\n\n # Input validation and parsing\n if X is None:\n raise ValueError('`transform` input cannot be None')\n else:\n Y = self.parse_input(X)\n\n # Transform - calculate kernel matrix\n km = self._calculate_kernel_matrix(Y)\n self._Y = Y\n\n # Self transform must appear before the diagonal call on normilization\n self._is_transformed = True\n if self.normalize:\n X_diag, Y_diag = self.diagonal()\n km /= np.sqrt(np.outer(Y_diag, X_diag))\n return km\n\n def fit_transform(self, X):\n \"\"\"Fit and transform, on the same dataset.\n\n Parameters\n ----------\n X : iterable\n Each element must be an iterable with at most three features and at\n least one. The first that is obligatory is a valid graph structure\n (adjacency matrix or edge_dictionary) while the second is\n node_labels and the third edge_labels (that fitting the given graph\n format). 
If None the kernel matrix is calculated upon fit data.\n The test samples.\n\n y : None\n There is no need of a target in a transformer, yet the pipeline API\n requires this parameter.\n\n Returns\n -------\n K : numpy array, shape = [n_targets, n_input_graphs]\n corresponding to the kernel matrix, a calculation between\n all pairs of graphs between target an features\n\n \"\"\"\n self._method_calling = 2\n self.fit(X)\n\n # Transform - calculate kernel matrix\n km = self._calculate_kernel_matrix()\n\n self._X_diag = np.diagonal(km)\n if self.normalize:\n return km / np.sqrt(np.outer(self._X_diag, self._X_diag))\n else:\n return km\n\n def _calculate_kernel_matrix(self, Y=None):\n \"\"\"Calculate the kernel matrix given a target_graph and a kernel.\n\n Each a matrix is calculated between all elements of Y on the rows and\n all elements of X on the columns.\n\n Parameters\n ----------\n Y : list, default=None\n A list of graph type objects. If None kernel is calculated between\n X and itself.\n\n Returns\n -------\n K : numpy array, shape = [n_targets, n_inputs]\n The kernel matrix: a calculation between all pairs of graphs\n between targets and inputs. If Y is None targets and inputs\n are the taken from self.X. 
Otherwise Y corresponds to targets\n and self.X to inputs.\n\n \"\"\"\n if Y is None:\n K = np.zeros(shape=(len(self.X), len(self.X)))\n if self._parallel is None:\n cache = list()\n for (i, x) in enumerate(self.X):\n K[i, i] = self.pairwise_operation(x, x)\n for (j, y) in enumerate(cache):\n K[j, i] = self.pairwise_operation(y, x)\n cache.append(x)\n else:\n dim = len(self.X)\n n_jobs, nsamples = self._n_jobs, ((dim+1)*(dim))//2\n\n def kij(k):\n return k_to_ij_triangular(k, dim)\n\n split = [iter(((i, j), (self.X[i], self.X[j])) for i, j in\n map(kij, range(*rg))) for rg in indexes(n_jobs, nsamples)]\n\n self._parallel(joblib.delayed(assign)(s, K, self.pairwise_operation) for s in split)\n K = np.triu(K) + np.triu(K, 1).T\n\n else:\n K = np.zeros(shape=(len(Y), len(self.X)))\n if self._parallel is None:\n for (j, y) in enumerate(Y):\n for (i, x) in enumerate(self.X):\n K[j, i] = self.pairwise_operation(y, x)\n else:\n dim_X, dim_Y = len(self.X), len(Y)\n n_jobs, nsamples = self._n_jobs, (dim_X * dim_Y)\n\n def kij(k):\n return k_to_ij_rectangular(k, dim_X)\n\n split = [iter(((j, i), (Y[j], self.X[i])) for i, j in\n map(kij, range(*rg))) for rg in indexes(n_jobs, nsamples)]\n\n self._parallel(joblib.delayed(assign)(s, K, self.pairwise_operation) for s in split)\n return K\n\n def diagonal(self):\n \"\"\"Calculate the kernel matrix diagonal of the fit/transformed data.\n\n Parameters\n ----------\n None.\n\n Returns\n -------\n X_diag : np.array\n The diagonal of the kernel matrix between the fitted data.\n This consists of each element calculated with itself.\n\n Y_diag : np.array\n The diagonal of the kernel matrix, of the transform.\n This consists of each element calculated with itself.\n\n \"\"\"\n # Check is fit had been called\n check_is_fitted(self, ['X'])\n try:\n check_is_fitted(self, ['_X_diag'])\n except NotFittedError:\n # Calculate diagonal of X\n self._X_diag = np.empty(shape=(len(self.X),))\n for (i, x) in enumerate(self.X):\n self._X_diag[i] = 
self.pairwise_operation(x, x)\n\n try:\n # If transform has happened return both diagonals\n check_is_fitted(self, ['_Y'])\n Y_diag = np.empty(shape=(len(self._Y),))\n for (i, y) in enumerate(self._Y):\n Y_diag[i] = self.pairwise_operation(y, y)\n\n return self._X_diag, Y_diag\n except NotFittedError:\n # Else just return both X_diag\n return self._X_diag\n\n def parse_input(self, X):\n \"\"\"Parse the given input and raise errors if it is invalid.\n\n Parameters\n ----------\n X : iterable\n For the input to pass the test, we must have:\n Each element must be an iterable with at most three features and at\n least one. The first that is obligatory is a valid graph structure\n (adjacency matrix or edge_dictionary) while the second is\n node_labels and the third edge_labels (that correspond to the given\n graph format). A valid input also consists of graph type objects.\n\n Returns\n -------\n Xp : list\n List of graph type objects.\n\n \"\"\"\n if not isinstance(X, collections.Iterable):\n raise TypeError('input must be an iterable\\n')\n else:\n Xp = list()\n for (i, x) in enumerate(iter(X)):\n is_iter = isinstance(x, collections.Iterable)\n if is_iter:\n x = list(x)\n if is_iter and len(x) in [0, 1, 2, 3]:\n if len(x) == 0:\n warnings.warn('Ignoring empty element' +\n 'on index: '+str(i)+'..')\n continue\n elif len(x) == 1:\n Xp.append(Graph(x[0], {}, {},\n self._graph_format))\n elif len(x) == 2:\n Xp.append(Graph(x[0], x[1], {}, self._graph_format))\n else:\n Xp.append(Graph(x[0], x[1], x[2], self._graph_format))\n elif type(x) is Graph:\n Xp.append(x)\n else:\n raise TypeError('Each element of X must have at least ' +\n 'one and at most 3 elements.\\n')\n if len(Xp) == 0:\n raise ValueError('Parsed input is empty.')\n return Xp\n\n def initialize(self):\n \"\"\"Initialize all transformer arguments, needing initialisation.\"\"\"\n if not self._initialized[\"n_jobs\"]:\n if type(self.n_jobs) is not int and self.n_jobs is not None:\n raise ValueError('n_jobs 
parameter must be an int '\n 'indicating the number of jobs as in joblib or None')\n elif self.n_jobs is None:\n self._parallel = None\n else:\n self._parallel = joblib.Parallel(n_jobs=self.n_jobs,\n backend=\"threading\",\n pre_dispatch='all')\n self._n_jobs = self._parallel._effective_n_jobs()\n self._initialized[\"n_jobs\"] = True\n\n def pairwise_operation(self, x, y):\n \"\"\"Calculate a pairwise kernel between two elements.\n\n Parameters\n ----------\n x, y : Object\n Objects as occur from parse_input.\n\n Returns\n -------\n kernel : number\n The kernel value.\n\n \"\"\"\n raise NotImplementedError('Pairwise operation is not implemented!')\n\n def set_params(self, **params):\n \"\"\"Call the parent method.\"\"\"\n if len(self._initialized):\n # Copy the parameters\n params = copy.deepcopy(params)\n\n # Iterate over the parameters\n for key, value in iteritems(params):\n key, delim, sub_key = key.partition('__')\n if delim:\n if sub_key in self._initialized:\n self._initialized[sub_key] = False\n elif key in self._initialized:\n self._initialized[key] = False\n\n # Set parameters\n super(Kernel, self).set_params(**params)\n\n\ndef indexes(n_jobs, nsamples):\n \"\"\"Distribute samples accross n_jobs.\"\"\"\n n_jobs = n_jobs\n\n if n_jobs >= nsamples:\n for i in range(nsamples):\n yield (i, i+1)\n else:\n ns = nsamples/n_jobs\n start = 0\n for i in range(n_jobs-1):\n end = start + ns\n yield (int(start), int(end))\n start = end\n yield (int(start), nsamples)\n\n\ndef assign(data, K, pairwise_operation):\n \"\"\"Assign list values of an iterable to a numpy array while calculating a pairwise operation.\"\"\"\n for d in data:\n K[d[0][0], d[0][1]] = pairwise_operation(d[1][0], d[1][1])\n"
] | [
[
"sklearn.utils.validation.check_is_fitted",
"sklearn.externals.joblib.delayed",
"numpy.outer",
"numpy.triu",
"numpy.diagonal",
"sklearn.externals.joblib.Parallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
atharvamh/Predicting-IDC-in-Breast-Cancer | [
"b9b4dff53256042947749c0ad0ba0536e9984a5a"
] | [
"data_preprocess.py"
] | [
"import os\nimport numpy as np\nimport pandas as pd\nimport cv2\nimport matplotlib.pyplot as plt\nimport fnmatch\n\nfrom sklearn.model_selection import train_test_split\nfrom glob import glob\n\nimagePatches = glob('./IDC_regular_ps50_idx5/**/*.png',recursive = True)\n\ndef multiplot():\n plt.rcParams['figure.figsize'] = (10.0, 10.0)\n plt.subplots_adjust(wspace=0, hspace=0)\n count = 0\n for i in imagePatches[0:20]:\n im = cv2.imread(i)\n im = cv2.resize(im,(50,50))\n plt.subplot(5,4,count+1)\n plt.imshow(cv2.cvtColor(im,cv2.COLOR_BGR2RGB));plt.axis('off')\n count += 1\n \nimages_zero = '*class0.png'\nimages_one = '*class1.png'\nclass_zero = fnmatch.filter(imagePatches,images_zero)\nclass_one = fnmatch.filter(imagePatches,images_one)\n\ndef process_images(lower,upper):\n X = []\n Y = []\n\n WIDTH = 224\n HEIGHT = 224\n for img in imagePatches[lower:upper]:\n fullim = cv2.imread(img)\n X.append(cv2.resize((fullim),(WIDTH,HEIGHT),interpolation = cv2.INTER_CUBIC))\n\n if img in class_zero:\n Y.append(0)\n elif img in class_one:\n Y.append(1)\n else:\n return\n \n return X,Y\n \nX,Y = process_images(0,5000)\ndf = pd.DataFrame()\ndf['images'] = X\ndf['labels'] = Y\nX2=df[\"images\"]\nY2=df[\"labels\"]\nX2=np.array(X2)\nimgs0=[]\nimgs1=[]\nimgs0 = X2[Y2==0]\nimgs1 = X2[Y2==1]\n\ndef Datainfo(a,b):\n print('Total number of images: {}'.format(len(a)))\n print('Number of IDC(-) Images: {}'.format(np.sum(b==0)))\n print('Number of IDC(+) Images: {}'.format(np.sum(b==1)))\n print('Percentage of positive images: {:.2f}%'.format(100*np.mean(b)))\n print('Image shape (Width, Height, Channels): {}'.format(a[0].shape))\n \nX = np.array(X)\nX = X/255.0\n\nX_train, X_test, Y_train, Y_test = train_test_split(X,Y,test_size=0.2)\nY_trainHot = tf.keras.utils.to_categorical(Y_train,num_classes=2)\nY_testHot = tf.keras.utils.to_categorical(Y_test,num_classes=2)\n"
] | [
[
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"matplotlib.pyplot.subplot",
"numpy.mean",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplots_adjust",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
BBarrow31500/molssi-best-practices | [
"737d46aaab087bb9d5c45d2d76bc6f2e879fa060"
] | [
"molecool/io/xyz.py"
] | [
"\"\"\"\nThis module reads and writes xyz files.\n\"\"\"\n\nimport os\nimport numpy as np\n\ndef open_xyz(file_location):\n \n # Open an xyz file and return symbols and coordinates.\n xyz_file = np.genfromtxt(fname=file_location, skip_header=2, dtype='unicode')\n symbols = xyz_file[:,0]\n coords = (xyz_file[:,1:])\n coords = coords.astype(np.float)\n return symbols, coords\n\ndef write_xyz(file_location, symbols, coordinates):\n \n # Write an xyz file given a file location, symbols, and coordinates.\n num_atoms = len(symbols)\n \n with open(file_location, 'w+') as f:\n f.write('{}\\n'.format(num_atoms))\n f.write('XYZ file\\n')\n \n for i in range(num_atoms):\n f.write('{}\\t{}\\t{}\\t{}\\n'.format(symbols[i], \n coordinates[i,0], coordinates[i,1], coordinates[i,2]))\n"
] | [
[
"numpy.genfromtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
paroj/async-ev-cnn | [
"f7e70e75b07f43afef8ffd7eaf6f43ddefab0ae0"
] | [
"cython_setup.py"
] | [
"from sys import platform\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Build import cythonize\nimport numpy\n\n\next_modules = [\n Extension(\n \"src.libs.cutils\",\n [\"src/libs/cutils.pyx\"],\n extra_compile_args=['/openmp' if platform == \"win32\" else '-fopenmp']\n )\n]\n\nsetup(\n ext_modules=cythonize(ext_modules),\n include_dirs=[numpy.get_include()],\n)"
] | [
[
"numpy.get_include"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ryanpdwyer/sigutils | [
"341513f403dee0b2c49e9630c86f0483c0d2d359"
] | [
"sigutils/plot.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n\n- We should have plotting functions\n\n bode_ba(ba, ...)\n Takes an analog transfer function in ba form\n bode_z(b, a=1, fs, ...)\n Takes a digital transfer function in z form. Is fs, nyq, or dt preferred?\n bode_zpk(zpk, fs?, ...)\n Use zpk form (or state space?)\n bode_s(sympy_expression, var, ...)\n Takes a sympy expression, var, evaulates it at 2*pi*f...\n bode_f(func, ...)\n Takes a function, which will be evaluated to determine the response\n\n\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nfrom collections import Counter\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import signal\n\nfrom sigutils._util import (freqresp, freqz, mag_phase)\n\n\ndef adjust_y_ticks(ax, delta):\n \"\"\"Adjust the y-axis tick marks on ax to the spacing delta.\"\"\"\n ylim = np.array(ax.get_ylim()) / delta\n ymin = ylim[0] // 1 # Round towards - infinity\n ymax = -(-ylim[1] // 1) # Trick to round towards + infinity\n # Note: this rounds away from zero so we never make the axis limits smaller\n ax_new_lim = np.array([ymin, ymax]) * delta\n ax_new_ticks = np.arange(ax_new_lim[0], ax_new_lim[1] + 1, delta)\n ax.set_ybound(*ax_new_lim)\n ax.set_yticks(ax_new_ticks)\n\n\ndef adjust_ylim_ticks(ax, ylim):\n if ylim is not None:\n ax.set_ylim(ylim[0], ylim[1])\n if len(ylim) == 3:\n adjust_y_ticks(ax, ylim[2])\n\n\n# def adjust_x_ticks(ax, delta):\n# xlim = np.array(ax.get_xlim()) / delta\n# xmin = xlim[0] // 1\n# xmax = -(-xlim[1] // 1)\n# ax_new_ticks = np.arange(xmin, xmax + delta*0.5, delta)\n# ax_new_ticks[]\n\n\ndef find_crossings(x, a=0):\n \"\"\"Return array indices where x - a changes sign.\n\n See http://stackoverflow.com/a/29674950/2823213\"\"\"\n x = np.atleast_1d(x)\n return np.where(np.diff(np.signbit(x - a).astype(int)))[0]\n\n\ndef find_repeated_roots(x):\n \"\"\"\"\"\"\n cnt = Counter()\n x_iterable = list(x)\n while x_iterable != []:\n xi = x_iterable[0]\n 
compared_equal = np.isclose(xi, x_iterable)\n equal_indices = np.nonzero(compared_equal)[0]\n for i in equal_indices[::-1]:\n x_iterable.pop(i)\n\n cnt[xi] = np.sum(compared_equal)\n\n return {key: val for key, val in cnt.items() if val > 1}\n\n\ndef _x_per_inch(ax):\n \"\"\"Conversion factor between the plot x variable and the figure width.\n\n For example, \"\"\"\n xlim = ax.get_xlim()\n return (xlim[1] - xlim[0]) / ax.get_figure().get_figwidth()\n\n\ndef _y_per_inch(ax):\n ylim = ax.get_ylim()\n return (ylim[1] - ylim[0]) / ax.get_figure().get_figheight()\n\n\ndef _font_pt_to_inches(x):\n \"\"\"Convert points to inches (1 inch = 72 points).\"\"\"\n return x / 72.\n\n\ndef magtime(freq, resp, t, impulse, freq_lim=None, freq_log=False, dB=True,\n mag_lim=None, step=False, stem=False, figax=None, rcParams={}):\n \"\"\"\"\"\"\n mag, _ = mag_phase(resp, dB=dB)\n\n rcParamsDefault = {'figure.figsize' : (8,6),\n 'lines.linewidth': 1.5,\n 'figure.dpi' : 300,\n 'savefig.dpi' : 300,\n 'axes.labelsize' : 12,}\n rcParamsDefault.update(rcParams)\n\n if figax is None:\n with mpl.rc_context(rcParamsDefault):\n fig, (ax1, ax2) = plt.subplots(nrows=2)\n else:\n fig, (ax1, ax2) = figax\n\n ax1.yaxis.grid(True, linestyle='-', color='.8', zorder=0)\n\n if freq_log:\n ax1.semilogx(freq, mag)\n else:\n ax1.plot(freq, mag)\n\n if dB:\n ax1.set_ylabel('Magnitude [dB]')\n else:\n ax1.set_ylabel('Magnitude')\n\n ax1.set_xlim(freq[0], freq[-1])\n\n if step:\n y = np.cumsum(impulse)\n h_lines = [0, 1]\n else:\n y = impulse\n h_lines = [0]\n\n if stem:\n ax2.stem(t, y, linestyle='-', markerfmt='.', basefmt='k-')\n else:\n ax2.plot(t, y)\n\n ax2.set_xlim(t.min(), t.max())\n ax2.hlines(h_lines, t.min(), t.max(), color='0.8', zorder=0)\n ax1.set_xlabel(\"Frequency\")\n ax2.set_xlabel(\"Time / Samples\")\n\n return fig, (ax1, ax2)\n\n\ndef iir_impulse(b, a, N=1000, prob=0.005):\n freq, resp = freqz(b, a, fs=1, xlim=None, N=N, xlog=False)\n bandwidth = np.sum(np.abs(resp)) * (freq[1] - 
freq[0])\n\n n = int(1/(6*bandwidth))\n difference = 1\n i1 = 0\n\n while difference >= prob:\n impulse = i1\n n *= 1.5\n x = np.zeros(2*n+1)\n x[0] = 1\n i1 = signal.lfilter(b, a, x)\n difference = 1 - np.sum(abs(impulse)) / np.sum(abs(i1))\n\n return impulse\n\n\ndef impulse_z(b, a, fs=1, N=1000, prob=0.005):\n a = np.atleast_1d(a)\n\n if a.size == 1:\n impulse = b/a\n else:\n impulse = iir_impulse(b, a, N=N, prob=prob)\n\n t = np.arange(impulse.size) / fs\n\n return t, impulse\n\n\ndef magtime_z(b, a=1, fs=1, freq_lim=None, N=1000, freq_log=False, dB=True,\n mag_lim=None, prob=0.005, step=False, centered=False, stem=False,\n figax=None, rcParams={}):\n \"\"\"Plot the frequency domain (magnitude vs. frequency) and time domain\n (impulse or step) response for a digital filter.\n\n Parameters\n ----------\n b: \"\"\"\n freq, resp = freqz(b, a, fs=fs, xlim=freq_lim, N=N, xlog=freq_log)\n t, impulse = impulse_z(b, a, fs, N=N, prob=prob)\n\n figax = magtime(freq, resp, t, impulse, freq_lim=freq_lim,\n freq_log=freq_log, dB=dB, mag_lim=mag_lim, step=step,\n stem=stem, figax=figax, rcParams=rcParams)\n\n return figax\n\n\ndef magtime_firs(bs, fs=1, freq_lim=None, N=1000, freq_log=False, dB=True,\n mag_lim=None, prob=0.005, step=False, centered=False,\n stem=False, figax=None, rcParams={}):\n for b in bs:\n figax = magtime_z(b, a=1, fs=fs,\n freq_lim=freq_lim, N=N, freq_log=freq_log,\n dB=dB, mag_lim=mag_lim, prob=prob, step=step,\n centered=centered, stem=stem, figax=figax,\n rcParams=rcParams)\n\n return figax\n\n\ndef magtime_zz(systems, fs=1, freq_lim=None, N=1000, freq_log=False, dB=True,\n mag_lim=None, prob=0.005, step=False, centered=False,\n stem=False, figax=None, rcParams={}):\n for system in systems:\n b = system[0]\n if len(system) == 1:\n a = 1\n elif len(system) == 2:\n a = system[1]\n else:\n raise ValueError(\n \"Digital system ({0}) has more than two elements.\".format(\n system))\n\n figax = magtime_z(b, a, freq_lim=freq_lim, N=N, freq_log=freq_log,\n 
dB=dB, mag_lim=mag_lim, prob=prob, step=step,\n centered=centered, stem=stem, figax=figax,\n rcParams=rcParams)\n\n return figax\n\n\n# To do: should db=True be an option?\ndef bode(freq, resp, xlim=None, xlog=True, mag_lim=None, phase_lim=None,\n gain_point=None, figax=None, rcParams=None):\n \"\"\"Make a nice bode plot for the given frequency, magnitude, and phase data.\n\n Parameters\n ----------\n freq : array_like\n Array of frequencies used for the Bode plot\n resp : array_like\n Complex response evaluated at the frequencies in freq\n xlim : tuple of (x_min, x_max), optional\n Minimum and maximum values (x_min, x_max) of the plot's x-axis\n xlog : bool, optional\n Use a log (True) or linear (False) scale for the x-axis\n mag_lim : tuple of (mag_min, mag_max, mag_delta), optional\n A three element tuple containing the magnitude axis minimum, maximum\n and tick spacing\n phase_lim : tuple of (phase_min, phase_max, phase_delta), optional\n A three element tuple containing the phase axis minimum, maximum\n and tick spacing\n gain_point : float, optional\n If given, draws a vertical line on the bode plot when the gain crosses\n this point.\n figax : tuple of (fig, (ax1, ax2)), optional\n The figure and axes to create the plot on, if given. 
If omitted, a new\n figure and axes are created\n rcParams : dictionary, optional\n matplotlib rc settings dictionary\n\n Returns\n -------\n figax : tuple of (fig, (ax1, ax2))\n The figure and axes of the bode plot\n\n \"\"\"\n mag, phase = mag_phase(resp, dB=True, degrees=True)\n\n rcParamsDefault = {'figure.figsize' : (8,6),\n 'lines.linewidth': 1.5,\n 'figure.dpi' : 300,\n 'savefig.dpi' : 300,\n 'axes.labelsize' : 12,}\n\n if rcParams is not None:\n rcParamsDefault.update(rcParams)\n\n with mpl.rc_context(rcParamsDefault):\n if figax is None:\n fig, (ax1, ax2) = plt.subplots(nrows=2)\n else:\n fig, (ax1, ax2) = figax\n\n # Light grey major y gridlines\n ax1.yaxis.grid(True, linestyle='-', color='.8')\n ax2.yaxis.grid(True, linestyle='-', color='.8')\n\n if xlog:\n ax1.semilogx(freq, mag)\n ax2.semilogx(freq, phase)\n else:\n ax1.plot(freq, mag)\n ax2.plot(freq, phase)\n\n if xlim is not None:\n ax1.set_xlim(*xlim)\n ax2.set_xlim(*xlim)\n\n adjust_ylim_ticks(ax1, mag_lim)\n adjust_ylim_ticks(ax2, phase_lim)\n\n if gain_point is not None:\n # Would be nice to switch this for high-pass applications\n gain_index = find_crossings(mag, gain_point)\n for i in gain_index:\n ax1.axvline(x=freq[i], color='k', linestyle='--')\n ax2.axvline(x=freq[i], color='k', linestyle='--')\n\n ax1.set_ylabel('Magnitude [dB]')\n ax2.set_ylabel('Phase [deg.]')\n ax2.set_xlabel('Frequency')\n fig.tight_layout()\n return fig, (ax1, ax2)\n\n\ndef bodes(freq, resp, xlim=None, xlog=True, mag_lim=None, phase_lim=None,\n gain_point=None, figax=None, rcParams=None):\n \"\"\"Make a nice bode plot for several filters at once.\n\n Parameters\n ----------\n freq : list of arrays\n frequencies used for the Bode plot\n resp : list of array\n Complex response evaluated at the frequencies in freq\n xlim : tuple of (x_min, x_max), optional\n Minimum and maximum values (x_min, x_max) of the plot's x-axis\n xlog : bool, optional\n Use a log (True) or linear (False) scale for the x-axis\n mag_lim : tuple 
of (mag_min, mag_max, mag_delta), optional\n A three element tuple containing the magnitude axis minimum, maximum\n and tick spacing\n phase_lim : tuple of (phase_min, phase_max, phase_delta), optional\n A three element tuple containing the phase axis minimum, maximum\n and tick spacing\n gain_point : float, optional\n If given, draws a vertical line on the bode plot when the gain crosses\n this point.\n figax : tuple of (fig, (ax1, ax2)), optional\n The figure and axes to create the plot on, if given. If omitted, a new\n figure and axes are created\n rcParams : dictionary, optional\n matplotlib rc settings dictionary\n\n Returns\n -------\n figax : tuple of (fig, (ax1, ax2))\n The figure and axes of the bode plot\n\n \"\"\"\n for f, r in zip(freq, resp):\n figax = bode(f, r, xlim=xlim, xlog=xlog, mag_lim=mag_lim,\n phase_lim=phase_lim, gain_point=gain_point,\n figax=figax, rcParams=rcParams)\n\n return figax\n\n\ndef bode_sys(system, xlim=None, N=10000, xlog=True, mag_lim=None,\n phase_lim=None, gain_point=None, figax=None, rcParams=None):\n \"\"\"Make a nice bode plot for the given system.\n\n Parameters\n ----------\n system : an instance of the LTI class or a tuple describing the system.\n The following gives the number of elements in the tuple and\n the interpretation:\n\n * 2 (num, den)\n * 3 (zeros, poles, gain)\n * 4 (A, B, C, D)\n xlim : tuple of (x_min, x_max), optional\n Minimum and maximum values (x_min, x_max) of the plot's x-axis\n N : int, optional\n The number of points to calculate the system response at\n xlog : bool, optional\n Use a log (True) or linear (False) scale for the x-axis\n mag_lim : tuple of (mag_min, mag_max, mag_delta), optional\n A three element tuple containing the magnitude axis minimum, maximum\n and tick spacing\n phase_lim : tuple of (phase_min, phase_max, phase_delta), optional\n A three element tuple containing the phase axis minimum, maximum\n and tick spacing\n gain_point : float, optional\n If given, draws a vertical line 
on the bode plot at this point\n figax : tuple of (fig, (ax1, ax2)), optional\n The figure and axes to create the plot on, if given. If omitted, a new\n figure and axes are created\n rcParams : dictionary, optional\n matplotlib rc settings dictionary\"\"\"\n freq, resp = freqresp(system, xlim=xlim, N=N, xlog=xlog)\n\n return bode(freq, resp, xlim=xlim, xlog=xlog, mag_lim=mag_lim,\n phase_lim=phase_lim, gain_point=gain_point,\n figax=figax, rcParams=rcParams)\n\n\ndef bode_syss(systems, xlim=None, N=10000, xlog=True, mag_lim=None,\n phase_lim=None, gain_point=None, figax=None, rcParams=None):\n \"\"\"Make a nice bode plot for the given system.\n\n Parameters\n ----------\n systems : an iterable containing instances of the LTI class or a tuple\n describing the system. The following gives the number of elements\n in the tuple and the interpretation:\n\n * 2 (num, den)\n * 3 (zeros, poles, gain)\n * 4 (A, B, C, D)\n xlim : tuple of (x_min, x_max), optional\n Minimum and maximum values (x_min, x_max) of the plot's x-axis\n N : int, optional\n The number of points to calculate the system response at\n xlog : bool, optional\n Use a log (True) or linear (False) scale for the x-axis\n mag_lim : tuple of (mag_min, mag_max, mag_delta), optional\n A three element tuple containing the magnitude axis minimum, maximum\n and tick spacing\n phase_lim : tuple of (phase_min, phase_max, phase_delta), optional\n A three element tuple containing the phase axis minimum, maximum\n and tick spacing\n gain_point : float, optional\n If given, draws a vertical line on the bode plot at this point\n figax : tuple of (fig, (ax1, ax2)), optional\n The figure and axes to create the plot on, if given. 
If omitted, a new\n figure and axes are created\n rcParams : dictionary, optional\n matplotlib rc settings dictionary\"\"\"\n for system in systems:\n figax = bode_sys(system, xlim=xlim, N=N, xlog=xlog, mag_lim=mag_lim,\n phase_lim=phase_lim, gain_point=gain_point,\n figax=figax, rcParams=rcParams)\n return figax\n\n\ndef bode_z(b, a=1, fs=1, xlim=None, N=1000, xlog=False, mag_lim=None,\n phase_lim=None, gain_point=None, figax=None, rcParams=None):\n \"\"\"Make a nice bode plot for a discrete time system.\n\n Parameters\n ----------\nb : array_like\n The numerator coefficient vector in a 1-D sequence.\na : array_like\n The denominator coefficient vector in a 1-D sequence. If ``a[0]``\n is not 1, then both `a` and `b` are normalized by ``a[0]``.\n\n \"\"\"\n freq, resp = freqz(b=b, a=a, fs=fs, xlim=xlim, N=N, xlog=xlog)\n\n return bode(freq, resp, xlim=xlim, xlog=xlog, mag_lim=mag_lim,\n phase_lim=phase_lim, gain_point=gain_point,\n figax=figax, rcParams=rcParams)\n\n\ndef bode_firs(bs, fs=1, xlim=None, N=1000, xlog=False, mag_lim=None,\n phase_lim=None, gain_point=None, figax=None, rcParams=None):\n for b in bs:\n figax = bode_z(b, a=1, fs=fs, xlim=xlim, N=N, xlog=xlog,\n mag_lim=mag_lim, phase_lim=phase_lim,\n gain_point=gain_point, figax=figax,\n rcParams=rcParams)\n return figax\n\n\ndef bode_zz(systems, fs=1, xlim=None, N=1000, xlog=False, mag_lim=None,\n phase_lim=None, gain_point=None, figax=None, rcParams=None):\n \"\"\"\"\"\"\n for system in systems:\n b = system[0]\n if len(system) == 1:\n a = 1\n elif len(system) == 2:\n a = system[1]\n else:\n raise ValueError(\n \"Digital system ({0}) has more than two elements.\".format(\n system))\n\n figax = bode_z(b, a, fs=fs, xlim=xlim, N=N, xlog=xlog,\n mag_lim=mag_lim, phase_lim=phase_lim,\n gain_point=gain_point, figax=figax,\n rcParams=rcParams)\n\n return figax\n\n\ndef bode_an_dig(analogs, digitals, fs, xlim=None, N=1000, xlog=False,\n mag_lim=None, phase_lim=None, gain_point=None, figax=None,\n 
rcParams=None):\n \"\"\"Plots analog and digital systems together on the same axes.\"\"\"\n\n figax = bode_syss(analogs, N=N, xlim=xlim, xlog=xlog, mag_lim=mag_lim,\n phase_lim=phase_lim, gain_point=gain_point,\n figax=figax, rcParams=rcParams)\n\n bode_zz(digitals, fs=fs, xlim=xlim, xlog=xlog, mag_lim=mag_lim,\n phase_lim=phase_lim, gain_point=gain_point,\n figax=figax, rcParams=rcParams)\n\n return figax\n\n\ndef _pole_zero(z, p, k, xlim=None, ylim=None, figax=None, rcParams=None):\n z = np.atleast_1d(z)\n p = np.atleast_1d(p)\n\n rcParamsDefault = {'figure.figsize' : (6,6),\n 'lines.linewidth': 1.5,\n 'figure.dpi' : 300,\n 'savefig.dpi' : 300,\n 'axes.labelsize' : 12,}\n if rcParams is not None:\n rcParamsDefault.update(rcParams)\n\n with mpl.rc_context(rcParamsDefault):\n if figax is None:\n fig, ax = plt.subplots()\n else:\n fig, ax = figax\n\n markersize = mpl.rcParams['lines.markersize']\n markeredgewidth = mpl.rcParams['lines.markeredgewidth']\n\n zeros, = ax.plot(z.real, z.imag, linewidth=0, marker='o',\n markerfacecolor=None,)\n poles, = ax.plot(p.real, p.imag, linewidth=0, color=zeros.get_color(),\n marker ='x', markeredgewidth=3.5*markeredgewidth,\n markersize=markersize*1.5)\n\n ax.set_xlim(-1.5, 1.5)\n ax.set_ylim(-1.5, 1.5)\n circ = plt.Circle((0, 0), radius=1, linewidth=1,\n fill=False, color='gray')\n ax.axvline(0, color='k')\n ax.axhline(0, color='k')\n ax.add_patch(circ)\n ax.grid()\n\n x_per_inch = _x_per_inch(ax)\n y_per_inch = _y_per_inch(ax)\n\n m_f = mpl.rcParams['font.size']\n m_z = zeros.get_markersize()\n\n m_inch_z = _font_pt_to_inches(m_z/2. + m_f/2.)\n\n m_x_z = m_inch_z * x_per_inch\n m_y_z = m_inch_z * y_per_inch\n\n m_p = poles.get_markersize()\n m_inch_p = _font_pt_to_inches(m_p/2. 
+ m_f/2.)\n m_x_p = m_inch_p * x_per_inch\n m_y_p = m_inch_z * y_per_inch\n\n rep_zeros = find_repeated_roots(z)\n rep_poles = find_repeated_roots(p)\n\n for pt, val in rep_zeros.items():\n ax.text(pt.real + m_x_z, pt.imag + m_y_z, str(val))\n\n for pt, val in rep_poles.items():\n ax.text(pt.real + m_x_p, pt.imag + m_y_p, str(val))\n\n return fig, ax\n\n\ndef pole_zero(sys, xlim=None, ylim=None, figax=None, rcParams=None):\n if len(sys) == 2:\n z, p, k = signal.tf2zpk(*sys)\n elif len(sys) == 3:\n z, p, k = sys\n elif len(sys) == 4:\n z, p, k = signal.ss2zpk(*sys)\n else:\n ValueError(\"\"\"\\\nsys must have 2 (transfer function), 3 (zeros, poles, gain),\nor 4 (state space) elements. sys is: {}\"\"\".format(sys))\n\n return _pole_zero(z, p, k, xlim=xlim, ylim=ylim, figax=figax,\n rcParams=rcParams)\n\n\ndef nyquist(freq, resp, freq_lim=None, xlim=None, ylim=None,\n figax=None, rcParams=None):\n if rcParams is None:\n rcParams = {'figure.figsize': (6, 6),\n 'lines.linewidth': 1.5,\n 'figure.dpi': 300,\n 'savefig.dpi': 300,\n 'font.size': 12}\n\n with mpl.rc_context(rcParams):\n if figax is None:\n fig, ax = plt.subplots()\n else:\n fig, ax = figax\n\n if freq_lim is not None:\n resp = resp[np.logical_and(freq > freq_lim[0], freq < freq_lim[1])]\n else:\n resp = resp\n\n ax.plot(resp.real, resp.imag, '-')\n circ = plt.Circle((0, 0), radius=1, linewidth=1, fill=False,\n color='gray')\n ax.axvline(0, color='0.5')\n ax.axhline(0, color='0.5')\n ax.add_patch(circ)\n ax.grid()\n\n if xlim is not None:\n xlim = list(ax.get_xlim())\n if xlim[0] > -1.1:\n xlim[0] = -1.1\n\n ax.set_xlim(xlim)\n else:\n ax.set_xlim(xlim)\n\n if ylim is not None:\n ax.set_ylim(ylim)\n\n return fig, ax\n"
] | [
[
"numpy.signbit",
"matplotlib.rc_context",
"numpy.abs",
"numpy.nonzero",
"numpy.logical_and",
"numpy.arange",
"scipy.signal.tf2zpk",
"numpy.cumsum",
"matplotlib.pyplot.subplots",
"numpy.atleast_1d",
"matplotlib.pyplot.Circle",
"scipy.signal.ss2zpk",
"scipy.signal.lfilter",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
cplan082-tech/project_csi4103 | [
"987d19deacc8cb36c473adf33335fc63ae4aab10"
] | [
"datasets/plot.py"
] | [
"import csv\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef read_csv_file(name):\n file = open(name)\n type(file)\n csvreader = csv.reader(file)\n\n header = []\n header = next(csvreader) #First line of CSV is name of headers\n\n \"Populates array with rows from CSV [[shoulder_angle_1,elbow_angle_2],...]\"\n rows = []\n for row in csvreader:\n rows.append(row)\n\n file.close()\n return rows\n\ndef plot(rows):\n for row in rows:\n x = float(row[0])\n y = float(row[1])\n plt.scatter(x,y,s=1.5, color='black')\n plt.axis('off')\n fig = plt.gcf()\n fig.savefig('scanned_image.png',dpi=100)\n plt.show()\n return\n\ndef main():\n rows = read_csv_file('xycoordinates.csv') #change name to actual file name\n plot(rows)\n\nif __name__ == \"__main__\":\n\tmain()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shahviraj/pgdgan | [
"97fb63547144f02d76ef0c384b9e2fbbb90c9d50"
] | [
"src/dcgan_utils.py"
] | [
"# Files of this project is modified versions of 'https://github.com/AshishBora/csgm', which\n#comes with the MIT licence: https://github.com/AshishBora/csgm/blob/master/LICENSE\n\n\"\"\"Utils for the DCGAN model\nFile based on : https://github.com/carpedm20/DCGAN-tensorflow/blob/master/utils.py\nIt comes with the following license: https://github.com/carpedm20/DCGAN-tensorflow/blob/master/LICENSE\n\"\"\"\n# pylint: skip-file\n\nfrom __future__ import division\nimport math\nimport json\nimport random\nimport pprint\nimport scipy.misc\nimport numpy as np\nfrom time import gmtime, strftime\n\npp = pprint.PrettyPrinter()\n\nget_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1])\n\ndef get_image(image_path, image_size, is_crop=True, resize_w=64, is_grayscale = False):\n return transform(imread(image_path, is_grayscale), image_size, is_crop, resize_w)\n\ndef save_images(images, size, image_path):\n return imsave(inverse_transform(images), size, image_path)\n\ndef imread(path, is_grayscale = False):\n if (is_grayscale):\n return scipy.misc.imread(path, flatten = True).astype(np.float)\n else:\n return scipy.misc.imread(path).astype(np.float)\n\ndef merge_images(images, size):\n return inverse_transform(images)\n\ndef merge(images, size):\n h, w = images.shape[1], images.shape[2]\n img = np.zeros((h * size[0], w * size[1], 3))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[j*h:j*h+h, i*w:i*w+w, :] = image\n\n return img\n\ndef imsave(images, size, path):\n return scipy.misc.imsave(path, merge(images, size))\n\ndef center_crop(x, crop_h, crop_w=None, resize_w=64):\n if crop_w is None:\n crop_w = crop_h\n h, w = x.shape[:2]\n j = int(round((h - crop_h)/2.))\n i = int(round((w - crop_w)/2.))\n return scipy.misc.imresize(x[j:j+crop_h, i:i+crop_w],\n [resize_w, resize_w])\n\ndef transform(image, npx=64, is_crop=True, resize_w=64):\n # npx : # of pixels width/height of image\n if is_crop:\n cropped_image = 
center_crop(image, npx, resize_w=resize_w)\n else:\n cropped_image = image\n return np.array(cropped_image)/127.5 - 1.\n\ndef inverse_transform(images):\n return (images+1.)/2.\n\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LYHTHU/MixMatch-pytorch | [
"a738cc95aae88f76761aeeb405201bc7ae200e7d"
] | [
"dataset/cifar10.py"
] | [
"import numpy as np\nfrom PIL import Image\n\nimport torchvision\nimport torch\n\nclass TransformTwice:\n def __init__(self, transform):\n self.transform = transform\n\n def __call__(self, inp):\n out1 = self.transform(inp)\n out2 = self.transform(inp)\n return out1, out2\n\ndef get_cifar10(root, n_labeled,\n transform_train=None, transform_val=None,\n download=True):\n\n base_dataset = torchvision.datasets.CIFAR10(root, train=True, download=download)\n train_labeled_idxs, train_unlabeled_idxs, val_idxs = train_val_split(base_dataset.targets, int(n_labeled/10))\n\n train_labeled_dataset = CIFAR10_labeled(root, train_labeled_idxs, train=True, transform=transform_train)\n train_unlabeled_dataset = CIFAR10_unlabeled(root, train_unlabeled_idxs, train=True, transform=TransformTwice(transform_train))\n val_dataset = CIFAR10_labeled(root, val_idxs, train=True, transform=transform_val, download=True)\n test_dataset = CIFAR10_labeled(root, train=False, transform=transform_val, download=True)\n\n print (f\"#Labeled: {len(train_labeled_idxs)} #Unlabeled: {len(train_unlabeled_idxs)} #Val: {len(val_idxs)}\")\n return train_labeled_dataset, train_unlabeled_dataset, val_dataset, test_dataset\n \n\ndef train_val_split(labels, n_labeled_per_class):\n labels = np.array(labels)\n train_labeled_idxs = []\n train_unlabeled_idxs = []\n val_idxs = []\n\n for i in range(10):\n idxs = np.where(labels == i)[0]\n np.random.shuffle(idxs)\n train_labeled_idxs.extend(idxs[:n_labeled_per_class])\n train_unlabeled_idxs.extend(idxs[n_labeled_per_class:-500])\n val_idxs.extend(idxs[-500:])\n np.random.shuffle(train_labeled_idxs)\n np.random.shuffle(train_unlabeled_idxs)\n np.random.shuffle(val_idxs)\n\n return train_labeled_idxs, train_unlabeled_idxs, val_idxs\n\ncifar10_mean = (0.4914, 0.4822, 0.4465) # equals np.mean(train_set.train_data, axis=(0,1,2))/255\ncifar10_std = (0.2471, 0.2435, 0.2616) # equals np.std(train_set.train_data, axis=(0,1,2))/255\n\ndef normalise(x, mean=cifar10_mean, 
std=cifar10_std):\n x, mean, std = [np.array(a, np.float32) for a in (x, mean, std)]\n x -= mean*255\n x *= 1.0/(255*std)\n return x\n\ndef transpose(x, source='NHWC', target='NCHW'):\n return x.transpose([source.index(d) for d in target]) \n\ndef pad(x, border=4):\n return np.pad(x, [(0, 0), (border, border), (border, border)], mode='reflect')\n\nclass RandomPadandCrop(object):\n \"\"\"Crop randomly the image.\n\n Args:\n output_size (tuple or int): Desired output size. If int, square crop\n is made.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n if isinstance(output_size, int):\n self.output_size = (output_size, output_size)\n else:\n assert len(output_size) == 2\n self.output_size = output_size\n\n def __call__(self, x):\n x = pad(x, 4)\n\n h, w = x.shape[1:]\n new_h, new_w = self.output_size\n\n top = np.random.randint(0, h - new_h)\n left = np.random.randint(0, w - new_w)\n\n x = x[:, top: top + new_h, left: left + new_w]\n\n return x\n\nclass RandomFlip(object):\n \"\"\"Flip randomly the image.\n \"\"\"\n def __call__(self, x):\n if np.random.rand() < 0.5:\n x = x[:, :, ::-1]\n\n return x.copy()\n\nclass GaussianNoise(object):\n \"\"\"Add gaussian noise to the image.\n \"\"\"\n def __call__(self, x):\n c, h, w = x.shape\n x += np.random.randn(c, h, w) * 0.15\n return x\n\nclass ToTensor(object):\n \"\"\"Transform the image to tensor.\n \"\"\"\n def __call__(self, x):\n x = torch.from_numpy(x)\n return x\n\nclass CIFAR10_labeled(torchvision.datasets.CIFAR10):\n\n def __init__(self, root, indexs=None, train=True,\n transform=None, target_transform=None,\n download=False):\n super(CIFAR10_labeled, self).__init__(root, train=train,\n transform=transform, target_transform=target_transform,\n download=download)\n if indexs is not None:\n self.data = self.data[indexs]\n self.targets = np.array(self.targets)[indexs]\n self.data = transpose(normalise(self.data))\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index 
(int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n img, target = self.data[index], self.targets[index]\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n \n\nclass CIFAR10_unlabeled(CIFAR10_labeled):\n\n def __init__(self, root, indexs, train=True,\n transform=None, target_transform=None,\n download=False):\n super(CIFAR10_unlabeled, self).__init__(root, indexs, train=train,\n transform=transform, target_transform=target_transform,\n download=download)\n self.targets = np.array([-1 for i in range(len(self.targets))])\n "
] | [
[
"numpy.pad",
"torch.from_numpy",
"numpy.random.shuffle",
"numpy.random.randn",
"numpy.random.rand",
"numpy.array",
"numpy.where",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lvwuyunlifan/Tensorflow_to_learn_DL | [
"c534f36b580990342219b300c418ae13c070b9a5"
] | [
"chapter4/defined_cross.py"
] | [
"#--*--coding: utf-8 --*--\n\nimport tensorflow as tf\nfrom numpy.random import RandomState\n\n\nbacth_size = 8\n\n# 两个输入节点\nx = tf.placeholder(tf.float32, shape=[None, 2], name='x-input')\n# 回归问题一般只有一个输出节点\ny_ = tf.placeholder(tf.float32, shape=[None, 1], name='y-output')\n\n# 定义了一个单层的神经网络前向传播的过程, 这里就是简单的加权和\nw1 = tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1))\ny = tf.matmul(x, w1)\n\n# 定义预测多了和与预测少了的成本\nloss_more = 10\nloss_less = 1\n# 损失函数\nloss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y-y_)*loss_more, (y_-y)*loss_less))\n'''\n tf.greater的输入为两个变量,比较变量的每个元素的大小,返回大True,小False\n tf.where输入三个变量,第一个为选择条件,True时选择第二个参数,False时选第三个参数\n'''\n# 优化器\noptimiter = tf.train.AdamOptimizer(0.001).minimize(loss)\n\n# 通过随机数生成一个模拟数据集\nrdm = RandomState(1)\ndataset_size = 128\nX = rdm.rand(dataset_size, 2)\n\n# 设置回归的正确值为两个输入的和加上一个随机数。之所以要加上一个随机量是为了加入不可预测的噪音,\n# 否则不同的损失函数的意义就不大了,因为不同的损失函数都会在能完全预测正确的时候最低。\n# 一般来说噪音为一个均值为0的小量,所以这里的噪音设置为-0.05~0.05的随机数\nY = [[x1 + x2 + rdm.rand()/10.0-0.05] for (x1, x2) in X]\n\n# 训练神经网络\nwith tf.Session() as sess:\n init = tf.global_variables_initializer()\n sess.run(init)\n\n epoch = 10000\n for i in range(epoch):\n start = (i * bacth_size) % dataset_size\n end = min(start+bacth_size, dataset_size)\n\n sess.run(optimiter, feed_dict={x:X[start:end], y_:Y[start:end]})\n if i % 500 == 0:\n total_loss = sess.run(loss, feed_dict={x:X, y_:Y}) \n print('After %d loss is %g' % (i, total_loss)) \n print(sess.run(w1))"
] | [
[
"tensorflow.matmul",
"tensorflow.greater",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"numpy.random.RandomState",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
ramaneswaran/quickvision | [
"ff494ea9c6ae09c129603b35236f314b25d56d27"
] | [
"quickvision/datasets/classification.py"
] | [
"# Add code for Mapping using dataframe containaing id and target.\n# Port from pytorch_cnn_trainer\n# https://github.com/oke-aditya/pytorch_cnn_trainer\n\nimport torchvision\nfrom torchvision import datasets\nfrom torch.utils.data import Dataset\nimport os\nimport torch\nfrom PIL import Image\n\n__all__ = [\"create_folder_dataset\", \"CSVSingleLabelDataset\"]\n\n\ndef create_folder_dataset(root_dir, transforms, split: float = 0.8, **kwargs):\n \"\"\"\n Creates Train and Validation Dataset from a Root folder\n Arrange dataset as follows: -\n root/class_a/image01.png\n root/class_b/image01.png\n\n Creates train and validation dataset from this root dir.\n This applies same transforms to both train and validation\n\n Args:\n root_dir : Root directory of the dataset to read from\n transforms: Transforms to be applied to train and validation datasets.\n split: Float number denoting percentage of train items\n\n \"\"\"\n complete_dataset = datasets.ImageFolder(root_dir, transform=transforms)\n train_split = len(complete_dataset) * split\n valid_split = len(complete_dataset) * (1 - split)\n\n train_set, valid_set = torch.utils.data.random_split(complete_dataset, [train_split, valid_split])\n return train_set, valid_set\n\n\nclass CSVSingleLabelDataset(Dataset):\n \"\"\"\n Creates Torchvision Dataset From CSV File.\n Args:\n df: DataFrame with 2 columns ``image_id`` and ``target``.\n data_dir: Directory from where data is to be read.\n image_id: Column name which has IDs of the images.\n target: target column name.\n transform: Trasforms to apply while creating Dataset.\n img_type: Type of the image like `png` or `jpg` etc.\n \"\"\"\n\n def __init__(self, df, data_dir, image_id, target, transform, img_type):\n super().__init__()\n self.df = df\n self.data_dir = data_dir\n self.image_id = image_id\n self.target = target\n self.transform = transform\n self.img_type = img_type\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, idx):\n img_name = 
self.df[self.image_id][idx]\n label = self.df[self.target][idx]\n\n img_path = os.path.join(self.data_dir, str(img_name) + f'.{self.img_type}')\n image = Image.open(img_path)\n image = self.transform(image)\n\n return image, label\n"
] | [
[
"torch.utils.data.random_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bakwadunka/dunka3 | [
"265ec0964087bac524da9a3f3b07bc483a466c63"
] | [
"utils.py"
] | [
"import os\nimport pickle\nimport torch\nimport numpy as np\n\ndef save(toBeSaved, filename, mode='wb'):\n dirname = os.path.dirname(filename)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n file = open(filename, mode)\n pickle.dump(toBeSaved, file)\n file.close()\n\ndef load(filename, mode='rb'):\n file = open(filename, mode)\n loaded = pickle.load(file)\n file.close()\n return loaded\n\ndef pad_sents(sents, pad_token):\n sents_padded = []\n lens = get_lens(sents)\n max_len = max(lens)\n sents_padded = [sents[i] + [pad_token] * (max_len - l) for i, l in enumerate(lens)]\n return sents_padded\n\ndef sort_sents(sents, reverse=True):\n sents.sort(key=(lambda s: len(s)), reverse=reverse)\n return sents\n\ndef get_mask(sents, unmask_idx=1, mask_idx=0):\n lens = get_lens(sents)\n max_len = max(lens)\n mask = [([unmask_idx] * l + [mask_idx] * (max_len - l)) for l in lens]\n return mask\n\ndef get_lens(sents):\n return [len(sent) for sent in sents]\n\ndef get_max_len(sents):\n max_len = max([len(sent) for sent in sents])\n return max_len\n\ndef truncate_sents(sents, length):\n sents = [sent[:length] for sent in sents]\n return sents\n\ndef get_loss_weight(labels, label_order):\n nums = [np.sum(labels == lo) for lo in label_order]\n loss_weight = torch.tensor([n / len(labels) for n in nums])\n return loss_weight\n"
] | [
[
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lalonderodney/D-Caps | [
"47050505170472abe1ea36e50903ea06054fcf07"
] | [
"utils.py"
] | [
"import os\nimport errno\n\nimport tensorflow as tf\nfrom keras import backend as K\n\ndef safe_mkdir(dir_to_make: str) -> None:\n '''\n Attempts to make a directory following the Pythonic EAFP strategy which prevents race conditions.\n\n :param dir_to_make: The directory path to attempt to make.\n :return: None\n '''\n try:\n os.makedirs(dir_to_make)\n except OSError as e:\n if e.errno != errno.EEXIST:\n print('ERROR: Unable to create directory: {}'.format(dir_to_make), e)\n raise\n\ndef as_keras_metric(method):\n import functools\n @functools.wraps(method)\n def wrapper(self, args, **kwargs):\n \"\"\" Wrapper for turning tensorflow metrics into keras metrics \"\"\"\n value, update_op = method(self, args, **kwargs)\n K.get_session().run(tf.local_variables_initializer())\n with tf.control_dependencies([update_op]):\n value = tf.identity(value)\n return value\n return wrapper"
] | [
[
"tensorflow.identity",
"tensorflow.local_variables_initializer",
"tensorflow.control_dependencies"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
antofuller/configaformers | [
"293253cd35d96c8a24c4004ba3d24fc6dc85a260",
"293253cd35d96c8a24c4004ba3d24fc6dc85a260"
] | [
"norm_module.py",
"stream_module.py"
] | [
"import torch\nfrom torch import nn\nfrom utils import set_default\n\n\n# This module is dedicated to Norm Macdonald\n# Implementations from https://github.com/lucidrains/x-transformer\n\nclass RMSNorm(nn.Module):\n def __init__(self, dim, eps=1e-8):\n super().__init__()\n self.scale = dim ** -0.5\n self.eps = eps\n self.g = nn.Parameter(torch.ones(dim))\n\n def forward(self, x):\n _norm = torch.norm(x, dim=-1, keepdim=True) * self.scale\n return x / _norm.clamp(min=self.eps) * self.g\n\n\nclass ScaleNorm(nn.Module):\n def __init__(self, dim, eps=1e-5):\n super().__init__()\n self.scale = dim ** -0.5\n self.eps = eps\n self.g = nn.Parameter(torch.ones(1))\n\n def forward(self, x):\n norm = torch.norm(x, dim=-1, keepdim=True) * self.scale\n return x / norm.clamp(min=self.eps) * self.g\n\n\ndef init_norm(_key, _config, _dim):\n if _key not in _config:\n norm_bool = False\n norm_function = False\n else:\n assert type(_config[_key]) == str, f\"{_config[_key]} is type {type(_config[_key])}, but should be a string!\"\n norm_bool = True\n norm_function = get_norm(norm_type=_config[_key], dim=_dim)\n\n return norm_bool, norm_function\n\n\ndef get_norm(norm_type: str, dim: int):\n # TODO: Batch norm may involve rearranging\n norm_type = norm_type.lower() # Make lowercase\n if norm_type == 'layer_norm':\n return nn.LayerNorm(dim)\n\n elif norm_type == 'rms_norm':\n return RMSNorm(dim)\n\n elif norm_type == 'scale_norm':\n return ScaleNorm(dim)\n\n else:\n print(f\"Norm: {norm_type} not available.\")\n\n\nclass Norm(nn.Module):\n def __init__(self,\n config,\n _streams,\n ):\n super().__init__()\n \"\"\"\n Norm module\n \"\"\"\n # Configure input(s) and output(s)\n self.input_name = set_default(_look='input_name', _dict=config, _default='x')\n self.output_name = set_default(_look='output_name', _dict=config, _default='x')\n\n self.input_dim = _streams[self.input_name][-1]\n input_shape = _streams[self.input_name]\n\n # Configuring norm\n norm_name = 
set_default(_look='norm_type', _dict=config, _default='layer_norm')\n self.norm = get_norm(norm_type=norm_name, dim=self.input_dim)\n\n # Prepare streams info\n self.streams_in_module = {'inputs': [[self.input_name, input_shape],\n ],\n\n 'outputs': [[self.output_name, input_shape],\n ]\n }\n\n def forward(self, _data):\n _data[self.output_name] = self.norm(_data[self.input_name])\n return _data\n\n\nclass ScaleAlongDimension(nn.Module):\n def __init__(self,\n config,\n _streams,\n ):\n super().__init__()\n \"\"\"\n Learned scale used in as a weighted residual, or for scaling mha heads (see NormFormer)\n \"\"\"\n # Configure input(s) and output(s)\n self.input_name = set_default(_look='input_name', _dict=config, _default='x')\n self.dim_to_scale = set_default(_look='dim_to_scale', _dict=config, _default=2, _type=int)\n self.output_name = set_default(_look='output_name', _dict=config, _default='x')\n\n self.input_shape = _streams[self.input_name]\n assert self.dim_to_scale > 0, f'dim_to_scale must be greater than 0!'\n assert self.dim_to_scale <= len(self.input_shape), f'dim_to_scale must less than or equal to the number of ' \\\n f'input dimensions!'\n num_params = self.input_shape[self.dim_to_scale]\n\n # Initialize gate to 1\n self.scale = nn.Parameter(torch.ones(num_params), requires_grad=True)\n\n # Built einsum input strings\n self.einsum_in_1 = 'abcdef' # max of 6 dims\n self.einsum_in_1 = self.einsum_in_1[:len(self.input_shape)]\n self.einsum_in_2 = self.einsum_in_1[self.dim_to_scale]\n\n print(f\"{self.einsum_in_1},{self.einsum_in_2}->{self.einsum_in_1}\")\n\n # Prepare streams info\n self.streams_in_module = {'inputs': [[self.input_name, self.input_shape],\n ],\n\n 'outputs': [[self.output_name, self.input_shape],\n ]\n }\n\n def forward(self, _data):\n _data[self.output_name] = torch.einsum(f'{self.einsum_in_1},{self.einsum_in_2}->{self.einsum_in_1}', _data[self.input_name], self.scale)\n return _data",
"import torch\nfrom torch import nn\nfrom utils import set_default\n\n\nclass MakeStream(nn.Module):\n def __init__(self,\n config,\n _streams,\n ):\n super().__init__()\n \"\"\"\n Make data stream\n \"\"\"\n # Configure input(s) and output(s)\n self.input_name = set_default(_look='input_name', _dict=config, _default='x')\n self.input_shape = _streams[self.input_name]\n\n assert 'output_name' in config.keys(), f\"When making a stream, 'output_name' must be given!\"\n self.output_name = config['output_name']\n\n # Prepare streams info\n self.streams_in_module = {'inputs': [[self.input_name, self.input_shape],\n ],\n\n 'outputs': [[self.output_name, self.input_shape],\n ]\n }\n\n def forward(self, _data):\n _data[self.output_name] = _data[self.input_name].clone()\n return _data\n\n\nclass MergeStreams(nn.Module):\n def __init__(self,\n config,\n _streams,\n ):\n super().__init__()\n \"\"\"\n Merge data streams via element-wise add, subtract, or multiply\n \"\"\"\n # Configure input(s) and output(s)\n self.input_name_1 = set_default(_look='input_name_1', _dict=config, _default='x')\n self.input_name_2 = set_default(_look='input_name_2', _dict=config, _default='x')\n self.output_name = set_default(_look='output_name', _dict=config, _default='x')\n self.merge_name = set_default(_look='merge_type', _dict=config, _default='add')\n\n self.input_shape_1 = _streams[self.input_name_1]\n self.input_shape_2 = _streams[self.input_name_2]\n\n assert (self.merge_name == 'add') or (self.merge_name == 'multiply') or (self.merge_name == 'subtract'), \\\n f\"Merge stream operations available are: 'add', 'multiply', and 'subtract'!\"\n\n if len(self.input_shape_1) < len(self.input_shape_2):\n self.output_shape = self.input_shape_2\n else:\n self.output_shape = self.input_shape_1\n\n # Prepare streams info\n self.streams_in_module = {'inputs': [[self.input_name_1, self.input_shape_1],\n [self.input_name_2, self.input_shape_2],\n ],\n\n 'outputs': [[self.output_name, 
self.output_shape],\n ]\n }\n\n def forward(self, _data):\n if self.merge_name == 'add':\n _data[self.output_name] = _data[self.input_name_1] + _data[self.input_name_2]\n elif self.merge_name == 'subtract':\n _data[self.output_name] = _data[self.input_name_1] - _data[self.input_name_2]\n elif self.merge_name == 'multiply':\n _data[self.output_name] = _data[self.input_name_1] * _data[self.input_name_2]\n else:\n print(f'{self.merge_name} did not match any options.')\n return _data\n\n\nclass CutStream(nn.Module):\n def __init__(self,\n config,\n _streams,\n ):\n super().__init__()\n \"\"\"\n IN TESTING\n Cut data stream\n \"\"\"\n # Configure input(s) and output(s)\n self.input_name = set_default(_look='input_name', _dict=config, _default='x')\n self.output_name = set_default(_look='output_name', _dict=config, _default='x')\n\n assert 'start' in config.keys(), f\"Cut_stream must be given a starting index!\"\n assert 'end' in config.keys(), f\"Cut_stream must be given an ending index!\"\n assert 'cut_dim' in config.keys(), f\"Cut_stream must be given a dimension which is cut!\"\n\n self.start = config['start']\n self.end = config['end']\n self.cut_dim = config['cut_dim']\n\n self.input_shape = _streams[self.input_name]\n\n if (type(self.start) == int) and (type(self.end) == int):\n cut_dim_output = self.end - self.start\n elif self.start == 0:\n cut_dim_output = self.end\n else:\n cut_dim_output = f\"{self.end} - {self.start}\"\n\n self.output_shape = self.input_shape.copy()\n self.output_shape[self.cut_dim] = cut_dim_output\n\n # Prepare streams info\n self.streams_in_module = {'inputs': [[self.input_name, self.input_shape],\n ],\n\n 'outputs': [[self.output_name, self.output_shape],\n ]\n }\n\n def forward(self, _data):\n if type(self.start) == int:\n start_idx = self.start\n else:\n start_idx = _data['input_sizes'][self.start]\n\n if type(self.end) == int:\n end_idx = self.end\n else:\n end_idx = _data['input_sizes'][self.end]\n\n if self.cut_dim == 0:\n 
_data[self.output_name] = _data[self.input_name][start_idx:end_idx, ...]\n\n elif self.cut_dim == 1:\n _data[self.output_name] = _data[self.input_name][:, start_idx:end_idx, ...]\n\n elif self.cut_dim == 2:\n _data[self.output_name] = _data[self.input_name][:, :, start_idx:end_idx, ...]\n\n elif self.cut_dim == 3:\n _data[self.output_name] = _data[self.input_name][:, :, :, start_idx:end_idx]\n\n else:\n print('cut_stream only supports up to 4 dimensional data')\n\n return _data\n\n\nclass CatStreams(nn.Module):\n def __init__(self,\n config,\n _streams,\n ):\n super().__init__()\n \"\"\"\n IN TESTING\n Concatenate data streams\n \"\"\"\n # Configure input(s) and output(s)\n self.output_name = set_default(_look='output_name', _dict=config, _default='x')\n\n assert 'input_list' in config.keys(), f\"Cat_streams must be given 'input_list'!\"\n assert 'cat_dim' in config.keys(), f\"Cat_streams must be given 'cat_dim'\"\n\n self.input_list = config['input_list']\n self.cat_dim = config['cat_dim']\n\n input_streams = []\n cat_dim_out = 0\n for input_name in self.input_list:\n input_shape = _streams[input_name]\n input_streams.append([input_name, input_shape])\n cat_dim_out += input_shape[self.cat_dim]\n\n self.output_shape = input_streams[0][1].copy() # copy the shape of the first input stream\n self.output_shape[self.cat_dim] = cat_dim_out # update the dimension that is concatenated\n\n # Prepare streams info\n self.streams_in_module = {'inputs': input_streams,\n\n 'outputs': [[self.output_name, self.output_shape],\n ]\n }\n\n def forward(self, _data):\n # collect input streams\n temp_input_list = []\n for input_name in self.input_list:\n temp_input_list.append(_data[input_name])\n\n _data[self.output_name] = torch.cat(temp_input_list, dim=self.cat_dim)\n del temp_input_list\n\n return _data"
] | [
[
"torch.einsum",
"torch.norm",
"torch.ones",
"torch.nn.LayerNorm"
],
[
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
achilleas-k/mne-python | [
"0078e1af13a92ab47498dd167bc5ec73be864427"
] | [
"mne/tests/test_cov.py"
] | [
"# Author: Alexandre Gramfort <[email protected]>\n# Denis Engemann <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport itertools as itt\n\nfrom numpy.testing import (assert_array_almost_equal, assert_array_equal,\n assert_equal, assert_allclose)\nimport pytest\nimport numpy as np\nfrom scipy import linalg\n\nfrom mne.cov import (regularize, whiten_evoked,\n _auto_low_rank_model,\n prepare_noise_cov, compute_whitener,\n _regularized_covariance)\n\nfrom mne import (read_cov, write_cov, Epochs, merge_events,\n find_events, compute_raw_covariance,\n compute_covariance, read_evokeds, compute_proj_raw,\n pick_channels_cov, pick_types, make_ad_hoc_cov,\n make_fixed_length_events)\nfrom mne.datasets import testing\nfrom mne.fixes import _get_args\nfrom mne.io import read_raw_fif, RawArray, read_raw_ctf\nfrom mne.io.pick import _DATA_CH_TYPES_SPLIT\nfrom mne.preprocessing import maxwell_filter\nfrom mne.rank import _compute_rank_int\nfrom mne.tests.common import assert_snr\nfrom mne.utils import (_TempDir, requires_version, run_tests_if_main,\n catch_logging)\n\nbase_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')\ncov_fname = op.join(base_dir, 'test-cov.fif')\ncov_gz_fname = op.join(base_dir, 'test-cov.fif.gz')\ncov_km_fname = op.join(base_dir, 'test-km-cov.fif')\nraw_fname = op.join(base_dir, 'test_raw.fif')\nave_fname = op.join(base_dir, 'test-ave.fif')\nerm_cov_fname = op.join(base_dir, 'test_erm-cov.fif')\nhp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')\n\nctf_fname = op.join(testing.data_path(download=False), 'CTF',\n 'testdata_ctf.ds')\n\n\[email protected]('proj', (True, False))\[email protected]('pca', (True, 'white', False))\ndef test_compute_whitener(proj, pca):\n \"\"\"Test properties of compute_whitener.\"\"\"\n raw = read_raw_fif(raw_fname).crop(0, 3).load_data()\n raw.pick_types(eeg=True, exclude=())\n if proj:\n raw.apply_proj()\n else:\n raw.del_proj()\n with pytest.warns(RuntimeWarning, match='Too 
few samples'):\n cov = compute_raw_covariance(raw)\n W, _, C = compute_whitener(cov, raw.info, pca=pca, return_colorer=True,\n verbose='error')\n n_channels = len(raw.ch_names)\n n_reduced = len(raw.ch_names)\n rank = n_channels - len(raw.info['projs'])\n n_reduced = rank if pca is True else n_channels\n assert W.shape == C.shape[::-1] == (n_reduced, n_channels)\n # round-trip mults\n round_trip = np.dot(W, C)\n if pca is True:\n assert_allclose(round_trip, np.eye(n_reduced), atol=1e-7)\n elif pca == 'white':\n # Our first few rows/cols are zeroed out in the white space\n assert_allclose(round_trip[-rank:, -rank:],\n np.eye(rank), atol=1e-7)\n else:\n assert pca is False\n assert_allclose(round_trip, np.eye(n_channels), atol=0.05)\n\n\ndef test_cov_mismatch():\n \"\"\"Test estimation with MEG<->Head mismatch.\"\"\"\n raw = read_raw_fif(raw_fname).crop(0, 5).load_data()\n events = find_events(raw, stim_channel='STI 014')\n raw.pick_channels(raw.ch_names[:5])\n raw.add_proj([], remove_existing=True)\n epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0., preload=True)\n for kind in ('shift', 'None'):\n epochs_2 = epochs.copy()\n # This should be fine\n compute_covariance([epochs, epochs_2])\n if kind == 'shift':\n epochs_2.info['dev_head_t']['trans'][:3, 3] += 0.001\n else: # None\n epochs_2.info['dev_head_t'] = None\n pytest.raises(ValueError, compute_covariance, [epochs, epochs_2])\n compute_covariance([epochs, epochs_2], on_mismatch='ignore')\n with pytest.raises(RuntimeWarning, match='transform mismatch'):\n compute_covariance([epochs, epochs_2], on_mismatch='warn')\n pytest.raises(ValueError, compute_covariance, epochs,\n on_mismatch='x')\n # This should work\n epochs.info['dev_head_t'] = None\n epochs_2.info['dev_head_t'] = None\n compute_covariance([epochs, epochs_2], method=None)\n\n\ndef test_cov_order():\n \"\"\"Test covariance ordering.\"\"\"\n raw = read_raw_fif(raw_fname)\n raw.set_eeg_reference(projection=True)\n info = raw.info\n # add MEG channel 
with low enough index number to affect EEG if\n # order is incorrect\n info['bads'] += ['MEG 0113']\n ch_names = [info['ch_names'][pick]\n for pick in pick_types(info, meg=False, eeg=True)]\n cov = read_cov(cov_fname)\n # no avg ref present warning\n prepare_noise_cov(cov, info, ch_names, verbose='error')\n # big reordering\n cov_reorder = cov.copy()\n order = np.random.RandomState(0).permutation(np.arange(len(cov.ch_names)))\n cov_reorder['names'] = [cov['names'][ii] for ii in order]\n cov_reorder['data'] = cov['data'][order][:, order]\n # Make sure we did this properly\n _assert_reorder(cov_reorder, cov, order)\n # Now check some functions that should get the same result for both\n # regularize\n with pytest.raises(ValueError, match='rank, if str'):\n regularize(cov, info, rank='foo')\n with pytest.raises(TypeError, match='rank must be'):\n regularize(cov, info, rank=False)\n with pytest.raises(TypeError, match='rank must be'):\n regularize(cov, info, rank=1.)\n cov_reg = regularize(cov, info, rank='full')\n cov_reg_reorder = regularize(cov_reorder, info, rank='full')\n _assert_reorder(cov_reg_reorder, cov_reg, order)\n # prepare_noise_cov\n cov_prep = prepare_noise_cov(cov, info, ch_names)\n cov_prep_reorder = prepare_noise_cov(cov, info, ch_names)\n _assert_reorder(cov_prep, cov_prep_reorder,\n order=np.arange(len(cov_prep['names'])))\n # compute_whitener\n whitener, w_ch_names, n_nzero = compute_whitener(\n cov, info, return_rank=True)\n assert whitener.shape[0] == whitener.shape[1]\n whitener_2, w_ch_names_2, n_nzero_2 = compute_whitener(\n cov_reorder, info, return_rank=True)\n assert_array_equal(w_ch_names_2, w_ch_names)\n assert_allclose(whitener_2, whitener)\n assert n_nzero == n_nzero_2\n # with pca\n assert n_nzero < whitener.shape[0]\n whitener_pca, w_ch_names_pca, n_nzero_pca = compute_whitener(\n cov, info, pca=True, return_rank=True)\n assert_array_equal(w_ch_names_pca, w_ch_names)\n assert n_nzero_pca == n_nzero\n assert whitener_pca.shape == 
(n_nzero_pca, len(w_ch_names))\n # whiten_evoked\n evoked = read_evokeds(ave_fname)[0]\n evoked_white = whiten_evoked(evoked, cov)\n evoked_white_2 = whiten_evoked(evoked, cov_reorder)\n assert_allclose(evoked_white_2.data, evoked_white.data)\n\n\ndef _assert_reorder(cov_new, cov_orig, order):\n \"\"\"Check that we get the same result under reordering.\"\"\"\n inv_order = np.argsort(order)\n assert_array_equal([cov_new['names'][ii] for ii in inv_order],\n cov_orig['names'])\n assert_allclose(cov_new['data'][inv_order][:, inv_order],\n cov_orig['data'], atol=1e-20)\n\n\ndef test_ad_hoc_cov():\n \"\"\"Test ad hoc cov creation and I/O.\"\"\"\n tempdir = _TempDir()\n out_fname = op.join(tempdir, 'test-cov.fif')\n evoked = read_evokeds(ave_fname)[0]\n cov = make_ad_hoc_cov(evoked.info)\n cov.save(out_fname)\n assert 'Covariance' in repr(cov)\n cov2 = read_cov(out_fname)\n assert_array_almost_equal(cov['data'], cov2['data'])\n std = dict(grad=2e-13, mag=10e-15, eeg=0.1e-6)\n cov = make_ad_hoc_cov(evoked.info, std)\n cov.save(out_fname)\n assert 'Covariance' in repr(cov)\n cov2 = read_cov(out_fname)\n assert_array_almost_equal(cov['data'], cov2['data'])\n\n\ndef test_io_cov():\n \"\"\"Test IO for noise covariance matrices.\"\"\"\n tempdir = _TempDir()\n cov = read_cov(cov_fname)\n cov['method'] = 'empirical'\n cov['loglik'] = -np.inf\n cov.save(op.join(tempdir, 'test-cov.fif'))\n cov2 = read_cov(op.join(tempdir, 'test-cov.fif'))\n assert_array_almost_equal(cov.data, cov2.data)\n assert_equal(cov['method'], cov2['method'])\n assert_equal(cov['loglik'], cov2['loglik'])\n assert 'Covariance' in repr(cov)\n\n cov2 = read_cov(cov_gz_fname)\n assert_array_almost_equal(cov.data, cov2.data)\n cov2.save(op.join(tempdir, 'test-cov.fif.gz'))\n cov2 = read_cov(op.join(tempdir, 'test-cov.fif.gz'))\n assert_array_almost_equal(cov.data, cov2.data)\n\n cov['bads'] = ['EEG 039']\n cov_sel = pick_channels_cov(cov, exclude=cov['bads'])\n assert cov_sel['dim'] == (len(cov['data']) - 
len(cov['bads']))\n assert cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim'])\n cov_sel.save(op.join(tempdir, 'test-cov.fif'))\n\n cov2 = read_cov(cov_gz_fname)\n assert_array_almost_equal(cov.data, cov2.data)\n cov2.save(op.join(tempdir, 'test-cov.fif.gz'))\n cov2 = read_cov(op.join(tempdir, 'test-cov.fif.gz'))\n assert_array_almost_equal(cov.data, cov2.data)\n\n # test warnings on bad filenames\n cov_badname = op.join(tempdir, 'test-bad-name.fif.gz')\n with pytest.warns(RuntimeWarning, match='-cov.fif'):\n write_cov(cov_badname, cov)\n with pytest.warns(RuntimeWarning, match='-cov.fif'):\n read_cov(cov_badname)\n\n\[email protected]('method', (None, ['empirical']))\ndef test_cov_estimation_on_raw(method):\n \"\"\"Test estimation from raw (typically empty room).\"\"\"\n tempdir = _TempDir()\n raw = read_raw_fif(raw_fname, preload=True)\n cov_mne = read_cov(erm_cov_fname)\n\n # The pure-string uses the more efficient numpy-based method, the\n # the list gets triaged to compute_covariance (should be equivalent\n # but use more memory)\n with pytest.warns(None): # can warn about EEG ref\n cov = compute_raw_covariance(raw, tstep=None, method=method,\n rank='full')\n assert_equal(cov.ch_names, cov_mne.ch_names)\n assert_equal(cov.nfree, cov_mne.nfree)\n assert_snr(cov.data, cov_mne.data, 1e4)\n\n # tstep=0.2 (default)\n with pytest.warns(None): # can warn about EEG ref\n cov = compute_raw_covariance(raw, method=method, rank='full')\n assert_equal(cov.nfree, cov_mne.nfree - 119) # cutoff some samples\n assert_snr(cov.data, cov_mne.data, 1e2)\n\n # test IO when computation done in Python\n cov.save(op.join(tempdir, 'test-cov.fif')) # test saving\n cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))\n assert cov_read.ch_names == cov.ch_names\n assert cov_read.nfree == cov.nfree\n assert_array_almost_equal(cov.data, cov_read.data)\n\n # test with a subset of channels\n raw_pick = raw.copy().pick_channels(raw.ch_names[:5])\n raw_pick.info.normalize_proj()\n cov = 
compute_raw_covariance(raw_pick, tstep=None, method=method,\n rank='full')\n assert cov_mne.ch_names[:5] == cov.ch_names\n assert_snr(cov.data, cov_mne.data[:5, :5], 1e4)\n cov = compute_raw_covariance(raw_pick, method=method, rank='full')\n assert_snr(cov.data, cov_mne.data[:5, :5], 90) # cutoff samps\n # make sure we get a warning with too short a segment\n raw_2 = read_raw_fif(raw_fname).crop(0, 1)\n with pytest.warns(RuntimeWarning, match='Too few samples'):\n cov = compute_raw_covariance(raw_2, method=method)\n # no epochs found due to rejection\n pytest.raises(ValueError, compute_raw_covariance, raw, tstep=None,\n method='empirical', reject=dict(eog=200e-6))\n # but this should work\n cov = compute_raw_covariance(raw.copy().crop(0, 10.),\n tstep=None, method=method,\n reject=dict(eog=1000e-6),\n verbose='error')\n\n\[email protected]\n@requires_version('sklearn', '0.15')\ndef test_cov_estimation_on_raw_reg():\n \"\"\"Test estimation from raw with regularization.\"\"\"\n raw = read_raw_fif(raw_fname, preload=True)\n raw.info['sfreq'] /= 10.\n raw = RawArray(raw._data[:, ::10].copy(), raw.info) # decimate for speed\n cov_mne = read_cov(erm_cov_fname)\n with pytest.warns(RuntimeWarning, match='Too few samples'):\n # XXX don't use \"shrunk\" here, for some reason it makes Travis 2.7\n # hang... \"diagonal_fixed\" is much faster. 
Use long epochs for speed.\n cov = compute_raw_covariance(raw, tstep=5., method='diagonal_fixed')\n assert_snr(cov.data, cov_mne.data, 5)\n\n\ndef _assert_cov(cov, cov_desired, tol=0.005, nfree=True):\n assert_equal(cov.ch_names, cov_desired.ch_names)\n err = (linalg.norm(cov.data - cov_desired.data, ord='fro') /\n linalg.norm(cov.data, ord='fro'))\n assert err < tol, '%s >= %s' % (err, tol)\n if nfree:\n assert_equal(cov.nfree, cov_desired.nfree)\n\n\[email protected]\[email protected]('rank', ('full', None))\ndef test_cov_estimation_with_triggers(rank):\n \"\"\"Test estimation from raw with triggers.\"\"\"\n tempdir = _TempDir()\n raw = read_raw_fif(raw_fname)\n raw.set_eeg_reference(projection=True).load_data()\n events = find_events(raw, stim_channel='STI 014')\n event_ids = [1, 2, 3, 4]\n reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)\n\n # cov with merged events and keep_sample_mean=True\n events_merged = merge_events(events, event_ids, 1234)\n epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,\n baseline=(-0.2, -0.1), proj=True,\n reject=reject, preload=True)\n\n cov = compute_covariance(epochs, keep_sample_mean=True)\n _assert_cov(cov, read_cov(cov_km_fname))\n\n # Test with tmin and tmax (different but not too much)\n cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)\n assert np.all(cov.data != cov_tmin_tmax.data)\n err = (linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /\n linalg.norm(cov_tmin_tmax.data, ord='fro'))\n assert err < 0.05\n\n # cov using a list of epochs and keep_sample_mean=True\n epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,\n baseline=(-0.2, -0.1), proj=True, reject=reject)\n for ev_id in event_ids]\n cov2 = compute_covariance(epochs, keep_sample_mean=True)\n assert_array_almost_equal(cov.data, cov2.data)\n assert cov.ch_names == cov2.ch_names\n\n # cov with keep_sample_mean=False using a list of epochs\n cov = compute_covariance(epochs, keep_sample_mean=False)\n _assert_cov(cov, 
read_cov(cov_fname), nfree=False)\n\n method_params = {'empirical': {'assume_centered': False}}\n pytest.raises(ValueError, compute_covariance, epochs,\n keep_sample_mean=False, method_params=method_params)\n pytest.raises(ValueError, compute_covariance, epochs,\n keep_sample_mean=False, method='shrunk', rank=rank)\n\n # test IO when computation done in Python\n cov.save(op.join(tempdir, 'test-cov.fif')) # test saving\n cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))\n _assert_cov(cov, cov_read, 1e-5)\n\n # cov with list of epochs with different projectors\n epochs = [Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,\n baseline=(-0.2, -0.1), proj=True),\n Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,\n baseline=(-0.2, -0.1), proj=False)]\n # these should fail\n pytest.raises(ValueError, compute_covariance, epochs)\n pytest.raises(ValueError, compute_covariance, epochs, projs=None)\n # these should work, but won't be equal to above\n with pytest.warns(RuntimeWarning, match='Too few samples'):\n cov = compute_covariance(epochs, projs=epochs[0].info['projs'])\n with pytest.warns(RuntimeWarning, match='Too few samples'):\n cov = compute_covariance(epochs, projs=[])\n\n # test new dict support\n epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.01, tmax=0,\n proj=True, reject=reject, preload=True)\n with pytest.warns(RuntimeWarning, match='Too few samples'):\n compute_covariance(epochs)\n with pytest.warns(RuntimeWarning, match='Too few samples'):\n compute_covariance(epochs, projs=[])\n pytest.raises(TypeError, compute_covariance, epochs, projs='foo')\n pytest.raises(TypeError, compute_covariance, epochs, projs=['foo'])\n\n\ndef test_arithmetic_cov():\n \"\"\"Test arithmetic with noise covariance matrices.\"\"\"\n cov = read_cov(cov_fname)\n cov_sum = cov + cov\n assert_array_almost_equal(2 * cov.nfree, cov_sum.nfree)\n assert_array_almost_equal(2 * cov.data, cov_sum.data)\n assert cov.ch_names == cov_sum.ch_names\n\n cov += cov\n 
assert_array_almost_equal(cov_sum.nfree, cov.nfree)\n assert_array_almost_equal(cov_sum.data, cov.data)\n assert cov_sum.ch_names == cov.ch_names\n\n\ndef test_regularize_cov():\n \"\"\"Test cov regularization.\"\"\"\n raw = read_raw_fif(raw_fname)\n raw.info['bads'].append(raw.ch_names[0]) # test with bad channels\n noise_cov = read_cov(cov_fname)\n # Regularize noise cov\n reg_noise_cov = regularize(noise_cov, raw.info,\n mag=0.1, grad=0.1, eeg=0.1, proj=True,\n exclude='bads', rank='full')\n assert noise_cov['dim'] == reg_noise_cov['dim']\n assert noise_cov['data'].shape == reg_noise_cov['data'].shape\n assert np.mean(noise_cov['data'] < reg_noise_cov['data']) < 0.08\n # make sure all args are represented\n assert set(_DATA_CH_TYPES_SPLIT) - set(_get_args(regularize)) == set()\n\n\ndef test_whiten_evoked():\n \"\"\"Test whitening of evoked data.\"\"\"\n evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),\n proj=True)\n cov = read_cov(cov_fname)\n\n ###########################################################################\n # Show result\n picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,\n exclude='bads')\n\n noise_cov = regularize(cov, evoked.info, grad=0.1, mag=0.1, eeg=0.1,\n exclude='bads', rank='full')\n\n evoked_white = whiten_evoked(evoked, noise_cov, picks, diag=True)\n whiten_baseline_data = evoked_white.data[picks][:, evoked.times < 0]\n mean_baseline = np.mean(np.abs(whiten_baseline_data), axis=1)\n assert np.all(mean_baseline < 1.)\n assert np.all(mean_baseline > 0.2)\n\n # degenerate\n cov_bad = pick_channels_cov(cov, include=evoked.ch_names[:10])\n pytest.raises(RuntimeError, whiten_evoked, evoked, cov_bad, picks)\n\n\ndef test_regularized_covariance():\n \"\"\"Test unchanged data with regularized_covariance.\"\"\"\n evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),\n proj=True)\n data = evoked.data.copy()\n # check that input data remain unchanged. 
gh-5698\n _regularized_covariance(data)\n assert_allclose(data, evoked.data, atol=1e-20)\n\n\n@requires_version('sklearn', '0.15')\ndef test_auto_low_rank():\n \"\"\"Test probabilistic low rank estimators.\"\"\"\n n_samples, n_features, rank = 400, 10, 5\n sigma = 0.1\n\n def get_data(n_samples, n_features, rank, sigma):\n rng = np.random.RandomState(42)\n W = rng.randn(n_features, n_features)\n X = rng.randn(n_samples, rank)\n U, _, _ = linalg.svd(W.copy())\n X = np.dot(X, U[:, :rank].T)\n\n sigmas = sigma * rng.rand(n_features) + sigma / 2.\n X += rng.randn(n_samples, n_features) * sigmas\n return X\n\n X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,\n sigma=sigma)\n method_params = {'iter_n_components': [4, 5, 6]}\n cv = 3\n n_jobs = 1\n mode = 'factor_analysis'\n rescale = 1e8\n X *= rescale\n est, info = _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,\n method_params=method_params,\n cv=cv)\n assert_equal(info['best'], rank)\n\n X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,\n sigma=sigma)\n method_params = {'iter_n_components': [n_features + 5]}\n msg = ('You are trying to estimate %i components on matrix '\n 'with %i features.') % (n_features + 5, n_features)\n with pytest.warns(RuntimeWarning, match=msg):\n _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,\n method_params=method_params, cv=cv)\n\n\[email protected]\[email protected]('rank', ('full', None, 'info'))\n@requires_version('sklearn', '0.15')\ndef test_compute_covariance_auto_reg(rank):\n \"\"\"Test automated regularization.\"\"\"\n raw = read_raw_fif(raw_fname, preload=True)\n raw.resample(100, npad='auto') # much faster estimation\n events = find_events(raw, stim_channel='STI 014')\n event_ids = [1, 2, 3, 4]\n reject = dict(mag=4e-12)\n\n # cov with merged events and keep_sample_mean=True\n events_merged = merge_events(events, event_ids, 1234)\n # we need a few channels for numerical reasons in PCA/FA\n picks = pick_types(raw.info, meg='mag', 
eeg=False)[:10]\n raw.pick_channels([raw.ch_names[pick] for pick in picks])\n raw.info.normalize_proj()\n epochs = Epochs(\n raw, events_merged, 1234, tmin=-0.2, tmax=0,\n baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True)\n epochs = epochs.crop(None, 0)[:5]\n\n method_params = dict(factor_analysis=dict(iter_n_components=[3]),\n pca=dict(iter_n_components=[3]))\n\n covs = compute_covariance(epochs, method='auto',\n method_params=method_params,\n return_estimators=True, rank=rank)\n # make sure regularization produces structured differencess\n diag_mask = np.eye(len(epochs.ch_names)).astype(bool)\n off_diag_mask = np.invert(diag_mask)\n for cov_a, cov_b in itt.combinations(covs, 2):\n if (cov_a['method'] == 'diagonal_fixed' and\n # here we have diagnoal or no regularization.\n cov_b['method'] == 'empirical' and rank == 'full'):\n\n assert not np.any(cov_a['data'][diag_mask] ==\n cov_b['data'][diag_mask])\n\n # but the rest is the same\n assert_array_equal(cov_a['data'][off_diag_mask],\n cov_b['data'][off_diag_mask])\n\n else:\n # and here we have shrinkage everywhere.\n assert not np.any(cov_a['data'][diag_mask] ==\n cov_b['data'][diag_mask])\n\n assert not np.any(cov_a['data'][diag_mask] ==\n cov_b['data'][diag_mask])\n\n logliks = [c['loglik'] for c in covs]\n assert np.diff(logliks).max() <= 0 # descending order\n\n methods = ['empirical', 'ledoit_wolf', 'oas', 'shrunk', 'shrinkage']\n if rank == 'full':\n methods.extend(['factor_analysis', 'pca'])\n with catch_logging() as log:\n cov3 = compute_covariance(epochs, method=methods,\n method_params=method_params, projs=None,\n return_estimators=True, rank=rank,\n verbose=True)\n log = log.getvalue().split('\\n')\n if rank is None:\n assert 'Not doing PCA for MAG.' 
in log\n assert 'Reducing data rank from 10 -> 7' in log\n else:\n assert 'Reducing' not in log\n method_names = [cov['method'] for cov in cov3]\n best_bounds = [-45, -35]\n bounds = [-55, -45] if rank == 'full' else best_bounds\n for method in set(methods) - {'empirical', 'shrunk'}:\n this_lik = cov3[method_names.index(method)]['loglik']\n assert bounds[0] < this_lik < bounds[1]\n this_lik = cov3[method_names.index('shrunk')]['loglik']\n assert best_bounds[0] < this_lik < best_bounds[1]\n this_lik = cov3[method_names.index('empirical')]['loglik']\n bounds = [-110, -100] if rank == 'full' else best_bounds\n assert bounds[0] < this_lik < bounds[1]\n\n assert_equal({c['method'] for c in cov3}, set(methods))\n\n cov4 = compute_covariance(epochs, method=methods,\n method_params=method_params, projs=None,\n return_estimators=False, rank=rank)\n assert cov3[0]['method'] == cov4['method'] # ordering\n\n # invalid prespecified method\n pytest.raises(ValueError, compute_covariance, epochs, method='pizza')\n\n # invalid scalings\n pytest.raises(ValueError, compute_covariance, epochs, method='shrunk',\n scalings=dict(misc=123))\n\n\ndef _cov_rank(cov, info, proj=True):\n # ignore warnings about rank mismatches: sometimes we will intentionally\n # violate the computed/info assumption, such as when using SSS with\n # `rank='full'`\n with pytest.warns(None):\n return _compute_rank_int(cov, info=info, proj=proj)\n\n\[email protected](scope='module')\ndef raw_epochs_events():\n \"\"\"Create raw, epochs, and events for tests.\"\"\"\n raw = read_raw_fif(raw_fname).set_eeg_reference(projection=True).crop(0, 3)\n raw = maxwell_filter(raw, regularize=None) # heavily reduce the rank\n assert raw.info['bads'] == [] # no bads\n events = make_fixed_length_events(raw)\n epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)\n return (raw, epochs, events)\n\n\n@requires_version('sklearn', '0.15')\[email protected]('rank', (None, 'full', 'info'))\ndef test_low_rank_methods(rank, 
raw_epochs_events):\n \"\"\"Test low-rank covariance matrix estimation.\"\"\"\n epochs = raw_epochs_events[1]\n sss_proj_rank = 139 # 80 MEG + 60 EEG - 1 proj\n n_ch = 366\n methods = ('empirical', 'diagonal_fixed', 'oas')\n bounds = {\n 'None': dict(empirical=(-6000, -5000),\n diagonal_fixed=(-1500, -500),\n oas=(-700, -600)),\n 'full': dict(empirical=(-9000, -8000),\n diagonal_fixed=(-2000, -1600),\n oas=(-1600, -1000)),\n 'info': dict(empirical=(-6000, -5000),\n diagonal_fixed=(-700, -600),\n oas=(-700, -600)),\n }\n with pytest.warns(RuntimeWarning, match='Too few samples'):\n covs = compute_covariance(\n epochs, method=methods, return_estimators=True, rank=rank,\n verbose=True)\n for cov in covs:\n method = cov['method']\n these_bounds = bounds[str(rank)][method]\n this_rank = _cov_rank(cov, epochs.info, proj=(rank != 'full'))\n if rank == 'full' and method != 'empirical':\n assert this_rank == n_ch\n else:\n assert this_rank == sss_proj_rank\n assert these_bounds[0] < cov['loglik'] < these_bounds[1], \\\n (rank, method)\n\n\n@requires_version('sklearn', '0.15')\ndef test_low_rank_cov(raw_epochs_events):\n \"\"\"Test additional properties of low rank computations.\"\"\"\n raw, epochs, events = raw_epochs_events\n sss_proj_rank = 139 # 80 MEG + 60 EEG - 1 proj\n n_ch = 366\n proj_rank = 365 # one EEG proj\n with pytest.warns(RuntimeWarning, match='Too few samples'):\n emp_cov = compute_covariance(epochs)\n # Test equivalence with mne.cov.regularize subspace\n with pytest.raises(ValueError, match='are dependent.*must equal'):\n regularize(emp_cov, epochs.info, rank=None, mag=0.1, grad=0.2)\n assert _cov_rank(emp_cov, epochs.info) == sss_proj_rank\n reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')\n assert _cov_rank(reg_cov, epochs.info) == proj_rank\n with pytest.warns(RuntimeWarning, match='exceeds the theoretical'):\n _compute_rank_int(reg_cov, info=epochs.info)\n del reg_cov\n with catch_logging() as log:\n reg_r_cov = regularize(emp_cov, 
epochs.info, proj=True, rank=None,\n verbose=True)\n log = log.getvalue()\n assert 'jointly' in log\n assert _cov_rank(reg_r_cov, epochs.info) == sss_proj_rank\n reg_r_only_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)\n assert _cov_rank(reg_r_only_cov, epochs.info) == sss_proj_rank\n assert_allclose(reg_r_only_cov['data'], reg_r_cov['data'])\n del reg_r_only_cov, reg_r_cov\n\n # test that rank=306 is same as rank='full'\n epochs_meg = epochs.copy().pick_types()\n assert len(epochs_meg.ch_names) == 306\n epochs_meg.info.update(bads=[], projs=[])\n cov_full = compute_covariance(epochs_meg, method='oas',\n rank='full', verbose='error')\n assert _cov_rank(cov_full, epochs_meg.info) == 306\n with pytest.deprecated_call(match='int is deprecated'):\n cov_dict = compute_covariance(epochs_meg, method='oas', rank=306)\n assert _cov_rank(cov_dict, epochs_meg.info) == 306\n assert_allclose(cov_full['data'], cov_dict['data'])\n cov_dict = compute_covariance(epochs_meg, method='oas',\n rank=dict(meg=306), verbose='error')\n assert _cov_rank(cov_dict, epochs_meg.info) == 306\n assert_allclose(cov_full['data'], cov_dict['data'])\n\n # Work with just EEG data to simplify projection / rank reduction\n raw = raw.copy().pick_types(meg=False, eeg=True)\n n_proj = 2\n raw.add_proj(compute_proj_raw(raw, n_eeg=n_proj))\n n_ch = len(raw.ch_names)\n rank = n_ch - n_proj - 1 # plus avg proj\n assert len(raw.info['projs']) == 3\n epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)\n assert len(raw.ch_names) == n_ch\n emp_cov = compute_covariance(epochs, rank='full', verbose='error')\n assert _cov_rank(emp_cov, epochs.info) == rank\n reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')\n assert _cov_rank(reg_cov, epochs.info) == rank\n reg_r_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)\n assert _cov_rank(reg_r_cov, epochs.info) == rank\n dia_cov = compute_covariance(epochs, rank=None, method='diagonal_fixed',\n verbose='error')\n assert 
_cov_rank(dia_cov, epochs.info) == rank\n assert_allclose(dia_cov['data'], reg_cov['data'])\n # test our deprecation: can simply remove later\n epochs.pick_channels(epochs.ch_names[:103])\n # degenerate\n with pytest.raises(ValueError, match='can.*only be used with rank=\"full\"'):\n compute_covariance(epochs, rank=None, method='pca')\n with pytest.raises(ValueError, match='can.*only be used with rank=\"full\"'):\n compute_covariance(epochs, rank=None, method='factor_analysis')\n\n\[email protected]_testing_data\n@requires_version('sklearn', '0.15')\ndef test_cov_ctf():\n \"\"\"Test basic cov computation on ctf data with/without compensation.\"\"\"\n raw = read_raw_ctf(ctf_fname).crop(0., 2.).load_data()\n events = make_fixed_length_events(raw, 99999)\n assert len(events) == 2\n ch_names = [raw.info['ch_names'][pick]\n for pick in pick_types(raw.info, meg=True, eeg=False,\n ref_meg=False)]\n\n for comp in [0, 1]:\n raw.apply_gradient_compensation(comp)\n epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)\n with pytest.warns(RuntimeWarning, match='Too few samples'):\n noise_cov = compute_covariance(epochs, tmax=0.,\n method=['empirical'])\n prepare_noise_cov(noise_cov, raw.info, ch_names)\n\n raw.apply_gradient_compensation(0)\n epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)\n with pytest.warns(RuntimeWarning, match='Too few samples'):\n noise_cov = compute_covariance(epochs, tmax=0., method=['empirical'])\n raw.apply_gradient_compensation(1)\n\n # TODO This next call in principle should fail.\n prepare_noise_cov(noise_cov, raw.info, ch_names)\n\n # make sure comps matrices was not removed from raw\n assert raw.info['comps'], 'Comps matrices removed'\n\n\nrun_tests_if_main()\n"
] | [
[
"numpy.dot",
"numpy.testing.assert_equal",
"numpy.abs",
"numpy.invert",
"numpy.eye",
"numpy.testing.assert_array_equal",
"numpy.all",
"numpy.mean",
"numpy.any",
"scipy.linalg.norm",
"numpy.testing.assert_allclose",
"numpy.diff",
"numpy.argsort",
"numpy.random.RandomState",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
}
] |
masap/optuna | [
"f56cea87c4771d53b39f441e727d733dd1785557",
"f56cea87c4771d53b39f441e727d733dd1785557",
"f56cea87c4771d53b39f441e727d733dd1785557",
"f56cea87c4771d53b39f441e727d733dd1785557"
] | [
"optuna/samplers/_tpe/parzen_estimator.py",
"tests/integration_tests/test_chainer.py",
"benchmarks/kurobako/problems/wfg/shape_functions.py",
"tests/integration_tests/test_pytorch_distributed.py"
] | [
"from typing import Callable\nfrom typing import Dict\nfrom typing import NamedTuple\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom optuna import distributions\nfrom optuna._imports import _LazyImport\nfrom optuna.distributions import BaseDistribution\n\n\nif TYPE_CHECKING:\n import scipy.special as special\n import scipy.stats as stats\nelse:\n special = _LazyImport(\"scipy.special\")\n stats = _LazyImport(\"scipy.stats\")\n\n\nEPS = 1e-12\nSIGMA0_MAGNITUDE = 0.2\n\n_DISTRIBUTION_CLASSES = (\n distributions.CategoricalDistribution,\n distributions.FloatDistribution,\n distributions.IntDistribution,\n)\n\n\nclass _ParzenEstimatorParameters(\n NamedTuple(\n \"_ParzenEstimatorParameters\",\n [\n (\"consider_prior\", bool),\n (\"prior_weight\", Optional[float]),\n (\"consider_magic_clip\", bool),\n (\"consider_endpoints\", bool),\n (\"weights\", Callable[[int], np.ndarray]),\n (\"multivariate\", bool),\n ],\n )\n):\n pass\n\n\nclass _ParzenEstimator:\n def __init__(\n self,\n observations: Dict[str, np.ndarray],\n search_space: Dict[str, BaseDistribution],\n parameters: _ParzenEstimatorParameters,\n predetermined_weights: Optional[np.ndarray] = None,\n ) -> None:\n\n self._search_space = search_space\n self._parameters = parameters\n self._n_observations = next(iter(observations.values())).size\n if predetermined_weights is not None:\n assert self._n_observations == len(predetermined_weights)\n self._weights = self._calculate_weights(predetermined_weights)\n\n self._low: Dict[str, Optional[float]] = {}\n self._high: Dict[str, Optional[float]] = {}\n self._q: Dict[str, Optional[float]] = {}\n for param_name, dist in search_space.items():\n if isinstance(dist, distributions.CategoricalDistribution):\n low = high = q = None\n else:\n low, high, q = self._calculate_parzen_bounds(dist)\n self._low[param_name] = low\n self._high[param_name] = high\n self._q[param_name] = q\n\n # `_low`, `_high`, `_q` 
are needed for transformation.\n observations = self._transform_to_uniform(observations)\n\n # Transformed `observations` might be needed for following operations.\n self._sigmas0 = self._precompute_sigmas0(observations)\n\n self._mus: Dict[str, Optional[np.ndarray]] = {}\n self._sigmas: Dict[str, Optional[np.ndarray]] = {}\n self._categorical_weights: Dict[str, Optional[np.ndarray]] = {}\n categorical_weights: Optional[np.ndarray]\n for param_name, dist in search_space.items():\n param_observations = observations[param_name]\n if isinstance(dist, distributions.CategoricalDistribution):\n mus = sigmas = None\n categorical_weights = self._calculate_categorical_params(\n param_observations, param_name\n )\n else:\n mus, sigmas = self._calculate_numerical_params(param_observations, param_name)\n categorical_weights = None\n self._mus[param_name] = mus\n self._sigmas[param_name] = sigmas\n self._categorical_weights[param_name] = categorical_weights\n\n def sample(self, rng: np.random.RandomState, size: int) -> Dict[str, np.ndarray]:\n\n samples_dict = {}\n active = rng.choice(len(self._weights), size, p=self._weights)\n\n for param_name, dist in self._search_space.items():\n\n if isinstance(dist, distributions.CategoricalDistribution):\n categorical_weights = self._categorical_weights[param_name]\n assert categorical_weights is not None\n weights = categorical_weights[active, :]\n samples = _ParzenEstimator._sample_from_categorical_dist(rng, weights)\n\n else:\n # We restore parameters of parzen estimators.\n low = self._low[param_name]\n high = self._high[param_name]\n mus = self._mus[param_name]\n sigmas = self._sigmas[param_name]\n assert low is not None\n assert high is not None\n assert mus is not None\n assert sigmas is not None\n\n # We sample from truncnorm.\n trunc_low = (low - mus[active]) / sigmas[active]\n trunc_high = (high - mus[active]) / sigmas[active]\n samples = np.full((), fill_value=high + 1.0, dtype=np.float64)\n while (samples >= high).any():\n 
samples = np.where(\n samples < high,\n samples,\n stats.truncnorm.rvs(\n trunc_low,\n trunc_high,\n size=size,\n loc=mus[active],\n scale=sigmas[active],\n random_state=rng,\n ),\n )\n samples_dict[param_name] = samples\n samples_dict = self._transform_from_uniform(samples_dict)\n return samples_dict\n\n def log_pdf(self, samples_dict: Dict[str, np.ndarray]) -> np.ndarray:\n\n samples_dict = self._transform_to_uniform(samples_dict)\n n_observations = len(self._weights)\n n_samples = next(iter(samples_dict.values())).size\n if n_samples == 0:\n return np.asarray([], dtype=float)\n\n # When the search space is one CategoricalDistribution, we use the faster processing,\n # whose computation result is equivalent to the general one.\n if len(self._search_space.items()) == 1:\n param_name, dist = list(self._search_space.items())[0]\n if isinstance(dist, distributions.CategoricalDistribution):\n samples = samples_dict[param_name]\n categorical_weights = self._categorical_weights[param_name]\n assert categorical_weights is not None\n ret = np.log(np.inner(categorical_weights.T, self._weights))[samples]\n return ret\n\n # We compute log pdf (component_log_pdf)\n # for each sample in samples_dict (of size n_samples)\n # for each component of `_MultivariateParzenEstimator` (of size n_observations).\n component_log_pdf = np.zeros((n_samples, n_observations))\n for param_name, dist in self._search_space.items():\n samples = samples_dict[param_name]\n if isinstance(dist, distributions.CategoricalDistribution):\n categorical_weights = self._categorical_weights[param_name]\n assert categorical_weights is not None\n log_pdf = np.log(categorical_weights.T[samples, :])\n else:\n # We restore parameters of parzen estimators.\n low = np.asarray(self._low[param_name])\n high = np.asarray(self._high[param_name])\n q = self._q[param_name]\n mus = self._mus[param_name]\n sigmas = self._sigmas[param_name]\n assert low is not None\n assert high is not None\n assert mus is not None\n assert 
sigmas is not None\n\n cdf_func = _ParzenEstimator._normal_cdf\n p_accept = cdf_func(high, mus, sigmas) - cdf_func(low, mus, sigmas)\n if q is None:\n distance = samples[:, None] - mus\n mahalanobis = distance / np.maximum(sigmas, EPS)\n z = np.sqrt(2 * np.pi) * sigmas\n coefficient = 1 / z / p_accept\n log_pdf = -0.5 * mahalanobis**2 + np.log(coefficient)\n else:\n upper_bound = np.minimum(samples + q / 2.0, high)\n lower_bound = np.maximum(samples - q / 2.0, low)\n cdf = cdf_func(upper_bound[:, None], mus[None], sigmas[None]) - cdf_func(\n lower_bound[:, None], mus[None], sigmas[None]\n )\n log_pdf = np.log(cdf + EPS) - np.log(p_accept + EPS)\n component_log_pdf += log_pdf\n ret = special.logsumexp(component_log_pdf + np.log(self._weights), axis=1)\n return ret\n\n def _calculate_weights(self, predetermined_weights: Optional[np.ndarray]) -> np.ndarray:\n\n # We decide the weights.\n consider_prior = self._parameters.consider_prior\n prior_weight = self._parameters.prior_weight\n weights_func = self._parameters.weights\n n_observations = self._n_observations\n\n if n_observations == 0:\n consider_prior = True\n\n if predetermined_weights is None:\n w = weights_func(n_observations)[:n_observations]\n else:\n w = predetermined_weights[:n_observations]\n\n if consider_prior:\n # TODO(HideakiImamura) Raise `ValueError` if the weight function returns an ndarray of\n # unexpected size.\n weights = np.zeros(n_observations + 1)\n weights[:-1] = w\n weights[-1] = prior_weight\n else:\n weights = w\n weights /= weights.sum()\n return weights\n\n def _calculate_parzen_bounds(\n self, distribution: BaseDistribution\n ) -> Tuple[Optional[float], Optional[float], Optional[float]]:\n\n # We calculate low and high.\n if isinstance(distribution, distributions.FloatDistribution):\n if distribution.log:\n low = np.log(distribution.low)\n high = np.log(distribution.high)\n q = None\n elif distribution.step is not None:\n q = distribution.step\n low = distribution.low - 0.5 * q\n high 
= distribution.high + 0.5 * q\n else:\n low = distribution.low\n high = distribution.high\n q = None\n elif isinstance(distribution, distributions.IntDistribution):\n if distribution.log:\n low = np.log(distribution.low - 0.5)\n high = np.log(distribution.high + 0.5)\n q = None\n else:\n q = distribution.step\n low = distribution.low - 0.5 * q\n high = distribution.high + 0.5 * q\n else:\n distribution_list = [\n distributions.CategoricalDistribution.__name__,\n distributions.FloatDistribution.__name__,\n distributions.IntDistribution.__name__,\n ]\n raise NotImplementedError(\n \"The distribution {} is not implemented. \"\n \"The parameter distribution should be one of the {}\".format(\n distribution, distribution_list\n )\n )\n\n assert low < high\n\n return low, high, q\n\n def _transform_to_uniform(self, samples_dict: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:\n\n transformed = {}\n for param_name, samples in samples_dict.items():\n distribution = self._search_space[param_name]\n\n assert isinstance(distribution, _DISTRIBUTION_CLASSES)\n if isinstance(\n distribution,\n (distributions.FloatDistribution, distributions.IntDistribution),\n ):\n if distribution.log:\n samples = np.log(samples)\n\n transformed[param_name] = samples\n return transformed\n\n def _transform_from_uniform(\n self, samples_dict: Dict[str, np.ndarray]\n ) -> Dict[str, np.ndarray]:\n\n transformed = {}\n for param_name, samples in samples_dict.items():\n distribution = self._search_space[param_name]\n\n assert isinstance(distribution, _DISTRIBUTION_CLASSES)\n if isinstance(distribution, distributions.FloatDistribution):\n if distribution.log:\n transformed[param_name] = np.exp(samples)\n elif distribution.step is not None:\n q = self._q[param_name]\n assert q is not None\n samples = np.round((samples - distribution.low) / q) * q + distribution.low\n transformed[param_name] = np.asarray(\n np.clip(samples, distribution.low, distribution.high)\n )\n else:\n transformed[param_name] = 
samples\n elif isinstance(distribution, distributions.IntDistribution):\n if distribution.log:\n samples = np.round(np.exp(samples))\n transformed[param_name] = np.asarray(\n np.clip(samples, distribution.low, distribution.high)\n )\n else:\n q = self._q[param_name]\n assert q is not None\n samples = np.round((samples - distribution.low) / q) * q + distribution.low\n transformed[param_name] = np.asarray(\n np.clip(samples, distribution.low, distribution.high)\n )\n elif isinstance(distribution, distributions.CategoricalDistribution):\n transformed[param_name] = samples\n\n return transformed\n\n def _precompute_sigmas0(self, observations: Dict[str, np.ndarray]) -> Optional[float]:\n\n n_observations = next(iter(observations.values())).size\n n_observations = max(n_observations, 1)\n n_params = len(observations)\n\n # If it is univariate, there is no need to precompute sigmas0, so this method returns None.\n if not self._parameters.multivariate:\n return None\n\n # We use Scott's rule for bandwidth selection if the number of parameters > 1.\n # This rule was used in the BOHB paper.\n # TODO(kstoneriv3): The constant factor SIGMA0_MAGNITUDE=0.2 might not be optimal.\n return SIGMA0_MAGNITUDE * n_observations ** (-1.0 / (n_params + 4))\n\n def _calculate_categorical_params(\n self, observations: np.ndarray, param_name: str\n ) -> np.ndarray:\n\n # TODO(kstoneriv3): This the bandwidth selection rule might not be optimal.\n observations = observations.astype(int)\n n_observations = self._n_observations\n consider_prior = self._parameters.consider_prior\n prior_weight = self._parameters.prior_weight\n distribution = self._search_space[param_name]\n assert isinstance(distribution, distributions.CategoricalDistribution)\n choices = distribution.choices\n\n if n_observations == 0:\n consider_prior = True\n\n if consider_prior:\n shape = (n_observations + 1, len(choices))\n assert prior_weight is not None\n value = prior_weight / (n_observations + 1)\n else:\n shape = 
(n_observations, len(choices))\n assert prior_weight is not None\n value = prior_weight / n_observations\n weights = np.full(shape, fill_value=value)\n weights[np.arange(n_observations), observations] += 1\n weights /= weights.sum(axis=1, keepdims=True)\n return weights\n\n def _calculate_numerical_params(\n self, observations: np.ndarray, param_name: str\n ) -> Tuple[np.ndarray, np.ndarray]:\n\n n_observations = self._n_observations\n consider_prior = self._parameters.consider_prior\n consider_endpoints = self._parameters.consider_endpoints\n consider_magic_clip = self._parameters.consider_magic_clip\n multivariate = self._parameters.multivariate\n sigmas0 = self._sigmas0\n low = self._low[param_name]\n high = self._high[param_name]\n assert low is not None\n assert high is not None\n assert len(observations) == self._n_observations\n\n if n_observations == 0:\n consider_prior = True\n\n prior_mu = 0.5 * (low + high)\n prior_sigma = 1.0 * (high - low)\n\n if consider_prior:\n mus = np.empty(n_observations + 1)\n mus[:n_observations] = observations\n mus[n_observations] = prior_mu\n sigmas = np.empty(n_observations + 1)\n else:\n mus = observations\n sigmas = np.empty(n_observations)\n\n if multivariate:\n assert sigmas0 is not None\n sigmas[:] = sigmas0 * (high - low)\n else:\n assert sigmas0 is None\n sorted_indices = np.argsort(mus)\n sorted_mus = mus[sorted_indices]\n sorted_mus_with_endpoints = np.empty(len(mus) + 2, dtype=float)\n sorted_mus_with_endpoints[0] = low\n sorted_mus_with_endpoints[1:-1] = sorted_mus\n sorted_mus_with_endpoints[-1] = high\n\n sorted_sigmas = np.maximum(\n sorted_mus_with_endpoints[1:-1] - sorted_mus_with_endpoints[0:-2],\n sorted_mus_with_endpoints[2:] - sorted_mus_with_endpoints[1:-1],\n )\n\n if not consider_endpoints and sorted_mus_with_endpoints.shape[0] >= 4:\n sorted_sigmas[0] = sorted_mus_with_endpoints[2] - sorted_mus_with_endpoints[1]\n sorted_sigmas[-1] = sorted_mus_with_endpoints[-2] - sorted_mus_with_endpoints[-3]\n\n 
sigmas[:] = sorted_sigmas[np.argsort(sorted_indices)]\n\n # We adjust the range of the 'sigmas' according to the 'consider_magic_clip' flag.\n maxsigma = 1.0 * (high - low)\n if consider_magic_clip:\n minsigma = 1.0 * (high - low) / min(100.0, (1.0 + len(mus)))\n else:\n minsigma = EPS\n sigmas = np.asarray(np.clip(sigmas, minsigma, maxsigma))\n\n if consider_prior:\n sigmas[n_observations] = prior_sigma\n\n return mus, sigmas\n\n @staticmethod\n def _normal_cdf(x: np.ndarray, mu: np.ndarray, sigma: np.ndarray) -> np.ndarray:\n\n mu, sigma = map(np.asarray, (mu, sigma))\n denominator = x - mu\n numerator = np.maximum(np.sqrt(2) * sigma, EPS)\n z = denominator / numerator\n return 0.5 * (1 + special.erf(z))\n\n @staticmethod\n def _sample_from_categorical_dist(\n rng: np.random.RandomState, probabilities: np.ndarray\n ) -> np.ndarray:\n\n n_samples = probabilities.shape[0]\n rnd_quantile = rng.rand(n_samples)\n cum_probs = np.cumsum(probabilities, axis=1)\n return np.sum(cum_probs < rnd_quantile[..., None], axis=1)\n",
"from collections import namedtuple\nimport math\nimport typing\nfrom unittest.mock import Mock\nfrom unittest.mock import patch\n\nimport chainer\nimport chainer.links as L\nfrom chainer.training import triggers\nimport numpy as np\nimport pytest\n\nimport optuna\nfrom optuna.integration.chainer import ChainerPruningExtension\nfrom optuna.testing.integration import DeterministicPruner\n\n\nclass FixedValueDataset(chainer.dataset.DatasetMixin):\n\n size = 16\n\n def __len__(self) -> int:\n\n return self.size\n\n def get_example(self, i: int) -> typing.Tuple[np.ndarray, np.signedinteger]:\n\n return np.array([1.0], np.float32), np.intc(0)\n\n\ndef test_chainer_pruning_extension_trigger() -> None:\n\n study = optuna.create_study()\n trial = study.ask()\n\n extension = ChainerPruningExtension(trial, \"main/loss\", (1, \"epoch\"))\n assert isinstance(extension._pruner_trigger, triggers.IntervalTrigger)\n extension = ChainerPruningExtension(trial, \"main/loss\", triggers.IntervalTrigger(1, \"epoch\"))\n assert isinstance(extension._pruner_trigger, triggers.IntervalTrigger)\n extension = ChainerPruningExtension(\n trial, \"main/loss\", triggers.ManualScheduleTrigger(1, \"epoch\")\n )\n assert isinstance(extension._pruner_trigger, triggers.ManualScheduleTrigger)\n\n with pytest.raises(TypeError):\n ChainerPruningExtension(trial, \"main/loss\", triggers.TimeTrigger(1.0)) # type: ignore\n\n\ndef test_chainer_pruning_extension() -> None:\n def objective(trial: optuna.trial.Trial) -> float:\n\n model = L.Classifier(chainer.Sequential(L.Linear(None, 2)))\n optimizer = chainer.optimizers.Adam()\n optimizer.setup(model)\n\n train_iter = chainer.iterators.SerialIterator(FixedValueDataset(), 16)\n updater = chainer.training.StandardUpdater(train_iter, optimizer)\n trainer = chainer.training.Trainer(updater, (1, \"epoch\"))\n trainer.extend(\n optuna.integration.chainer.ChainerPruningExtension(trial, \"main/loss\", (1, \"epoch\"))\n )\n\n 
trainer.run(show_loop_exception_msg=False)\n return 1.0\n\n study = optuna.create_study(pruner=DeterministicPruner(True))\n study.optimize(objective, n_trials=1)\n assert study.trials[0].state == optuna.trial.TrialState.PRUNED\n\n study = optuna.create_study(pruner=DeterministicPruner(False))\n study.optimize(objective, n_trials=1)\n assert study.trials[0].state == optuna.trial.TrialState.COMPLETE\n assert study.trials[0].value == 1.0\n\n\ndef test_chainer_pruning_extension_observation_nan() -> None:\n\n study = optuna.create_study(pruner=DeterministicPruner(True))\n trial = study.ask()\n extension = ChainerPruningExtension(trial, \"main/loss\", (1, \"epoch\"))\n\n MockTrainer = namedtuple(\"MockTrainer\", (\"observation\", \"updater\"))\n MockUpdater = namedtuple(\"MockUpdater\", (\"epoch\"))\n trainer = MockTrainer(observation={\"main/loss\": float(\"nan\")}, updater=MockUpdater(1))\n\n with patch.object(extension, \"_observation_exists\", Mock(return_value=True)) as mock:\n with pytest.raises(optuna.TrialPruned):\n extension(trainer) # type: ignore\n assert mock.call_count == 1\n\n\ndef test_observation_exists() -> None:\n\n study = optuna.create_study()\n trial = study.ask()\n MockTrainer = namedtuple(\"MockTrainer\", (\"observation\",))\n trainer = MockTrainer(observation={\"OK\": 0})\n\n # Trigger is deactivated. Return False whether trainer has observation or not.\n with patch.object(triggers.IntervalTrigger, \"__call__\", Mock(return_value=False)) as mock:\n extension = ChainerPruningExtension(trial, \"NG\", (1, \"epoch\"))\n assert extension._observation_exists(trainer) is False # type: ignore\n extension = ChainerPruningExtension(trial, \"OK\", (1, \"epoch\"))\n assert extension._observation_exists(trainer) is False # type: ignore\n assert mock.call_count == 2\n\n # Trigger is activated. 
Return True if trainer has observation.\n with patch.object(triggers.IntervalTrigger, \"__call__\", Mock(return_value=True)) as mock:\n extension = ChainerPruningExtension(trial, \"NG\", (1, \"epoch\"))\n assert extension._observation_exists(trainer) is False # type: ignore\n extension = ChainerPruningExtension(trial, \"OK\", (1, \"epoch\"))\n assert extension._observation_exists(trainer) is True # type: ignore\n assert mock.call_count == 2\n\n\ndef test_get_float_value() -> None:\n\n assert 1.0 == ChainerPruningExtension._get_float_value(1.0)\n assert 1.0 == ChainerPruningExtension._get_float_value(chainer.Variable(np.array([1.0])))\n assert math.isnan(ChainerPruningExtension._get_float_value(float(\"nan\")))\n with pytest.raises(TypeError):\n ChainerPruningExtension._get_float_value([]) # type: ignore\n",
"import abc\n\nimport numpy as np\n\n\nclass BaseShapeFunction(object, metaclass=abc.ABCMeta):\n def __init__(self, n_objectives: int) -> None:\n\n self._n_objectives = n_objectives\n\n def __call__(self, m: int, x: np.ndarray) -> float:\n assert 1 <= m <= self.n_objectives\n assert x.shape == (self.n_objectives - 1,)\n return self._call(m, x)\n\n @abc.abstractmethod\n def _call(self, m: int, x: np.ndarray) -> float:\n raise NotImplementedError\n\n @property\n def n_objectives(self) -> int:\n\n return self._n_objectives\n\n\nclass LinearShapeFunction(BaseShapeFunction):\n def _call(self, m: int, x: np.ndarray) -> float:\n\n if m == 1:\n return x[:-1].prod()\n\n if m == self.n_objectives:\n return 1 - x[0]\n\n return x[: self.n_objectives - m].prod() * (1.0 - x[self.n_objectives - m])\n\n\nclass ConvexShapeFunction(BaseShapeFunction):\n def _call(self, m: int, x: np.ndarray) -> float:\n\n if m == 1:\n return (\n 1\n - np.cos(\n x * np.pi / 2,\n )\n )[:-1].prod()\n\n if m == self.n_objectives:\n return 1 - np.sin(x[0] * np.pi / 2.0)\n\n return (1.0 - np.cos(x * np.pi / 2.0))[: self.n_objectives - m].prod() * (\n 1.0 - np.sin(x[self.n_objectives - m] * np.pi / 2.0)\n )\n\n\nclass ConcaveShapeFunction(BaseShapeFunction):\n def _call(self, m: int, x: np.ndarray) -> float:\n\n if m == 1:\n return np.sin(x * np.pi / 2.0)[:-1].prod()\n\n if m == self.n_objectives:\n return np.cos(x[0] * np.pi / 2.0)\n\n return np.sin(x * np.pi / 2.0)[: self.n_objectives - m].prod() * np.cos(\n x[self.n_objectives - m] * np.pi / 2.0\n )\n\n\nclass MixedConvexOrConcaveShapeFunction(BaseShapeFunction):\n def __init__(self, n_objectives: int, alpha: float, n_segments: int) -> None:\n super().__init__(n_objectives)\n self._alpha = alpha\n self._n_segments = n_segments\n\n def _call(self, m: int, x: np.ndarray) -> float:\n if m == self.n_objectives:\n two_A_pi = 2 * self._n_segments * np.pi\n return np.power(\n 1 - x[0] - np.cos(two_A_pi * x[0] + np.pi / 2.0) / two_A_pi, self._alpha\n )\n\n 
raise ValueError(\"m should be the number of objectives\")\n\n\nclass DisconnectedShapeFunction(BaseShapeFunction):\n def __init__(\n self, n_objectives: int, alpha: float, beta: float, n_disconnected_regions: int\n ) -> None:\n super().__init__(n_objectives)\n self._alpha = alpha\n self._beta = beta\n self._n_disconnected_regions = n_disconnected_regions\n\n def _call(self, m: int, x: np.ndarray) -> float:\n if m == self.n_objectives:\n return (\n 1\n - np.power(x[0], self._alpha)\n * np.cos(self._n_disconnected_regions * np.power(x[0], self._beta) * np.pi) ** 2\n )\n\n raise ValueError(\"m should be the number of objectives\")\n",
"import datetime\nimport itertools\nimport os\nfrom typing import Optional\n\nimport pytest\nimport torch\nimport torch.distributed as dist\n\nimport optuna\nfrom optuna.integration import TorchDistributedTrial\nfrom optuna.testing.integration import DeterministicPruner\nfrom optuna.testing.storage import STORAGE_MODES\nfrom optuna.testing.storage import StorageSupplier\n\n\[email protected](scope=\"session\", autouse=True)\ndef init_process_group() -> None:\n\n if \"OMPI_COMM_WORLD_SIZE\" not in os.environ:\n pytest.skip(\"This test is expected to be launch with mpirun.\")\n\n # This function is automatically called at the beginning of the pytest session.\n os.environ[\"WORLD_SIZE\"] = os.environ[\"OMPI_COMM_WORLD_SIZE\"]\n os.environ[\"RANK\"] = os.environ[\"OMPI_COMM_WORLD_RANK\"]\n os.environ[\"MASTER_ADDR\"] = \"127.0.0.1\"\n os.environ[\"MASTER_PORT\"] = \"20000\"\n\n dist.init_process_group(\"gloo\", timeout=datetime.timedelta(seconds=15))\n\n\ndef test_torch_distributed_trial_experimental_warning() -> None:\n with pytest.warns(optuna.exceptions.ExperimentalWarning):\n if dist.get_rank() == 0:\n study = optuna.create_study()\n TorchDistributedTrial(study.ask())\n else:\n TorchDistributedTrial(None)\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\ndef test_torch_distributed_trial_invalid_argument() -> None:\n with pytest.raises(ValueError):\n if dist.get_rank() == 0:\n TorchDistributedTrial(None)\n else:\n study = optuna.create_study()\n TorchDistributedTrial(study.ask())\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"storage_mode\", STORAGE_MODES)\ndef test_suggest_float(storage_mode: str) -> None:\n with StorageSupplier(storage_mode) as storage:\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage)\n trial = TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n x1 = trial.suggest_float(\"x\", 0, 1)\n assert 0 <= x1 <= 1\n\n x2 = 
trial.suggest_float(\"x\", 0, 1)\n assert x1 == x2\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"storage_mode\", STORAGE_MODES)\ndef test_suggest_uniform(storage_mode: str) -> None:\n with StorageSupplier(storage_mode) as storage:\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage)\n trial = TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n x1 = trial.suggest_uniform(\"x\", 0, 1)\n assert 0 <= x1 <= 1\n\n x2 = trial.suggest_uniform(\"x\", 0, 1)\n assert x1 == x2\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"storage_mode\", STORAGE_MODES)\ndef test_suggest_loguniform(storage_mode: str) -> None:\n with StorageSupplier(storage_mode) as storage:\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage)\n trial = TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n x1 = trial.suggest_loguniform(\"x\", 1e-7, 1)\n assert 1e-7 <= x1 <= 1\n\n x2 = trial.suggest_loguniform(\"x\", 1e-7, 1)\n assert x1 == x2\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"storage_mode\", STORAGE_MODES)\ndef test_suggest_discrete_uniform(storage_mode: str) -> None:\n with StorageSupplier(storage_mode) as storage:\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage)\n trial = TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n x1 = trial.suggest_discrete_uniform(\"x\", 0, 10, 2)\n assert 0 <= x1 <= 10\n assert x1 % 2 == 0\n\n x2 = trial.suggest_discrete_uniform(\"x\", 0, 10, 2)\n assert x1 == x2\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"storage_mode\", STORAGE_MODES)\ndef test_suggest_int(storage_mode: str) -> None:\n with StorageSupplier(storage_mode) as storage:\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage)\n trial = 
TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n x1 = trial.suggest_int(\"x\", 0, 10)\n assert 0 <= x1 <= 10\n\n x2 = trial.suggest_int(\"x\", 0, 10)\n assert x1 == x2\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"storage_mode\", STORAGE_MODES)\ndef test_suggest_categorical(storage_mode: str) -> None:\n with StorageSupplier(storage_mode) as storage:\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage)\n trial = TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n x1 = trial.suggest_categorical(\"x\", (\"a\", \"b\", \"c\"))\n assert x1 in {\"a\", \"b\", \"c\"}\n\n x2 = trial.suggest_categorical(\"x\", (\"a\", \"b\", \"c\"))\n assert x1 == x2\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"storage_mode\", STORAGE_MODES)\ndef test_report(storage_mode: str) -> None:\n with StorageSupplier(storage_mode) as storage:\n study: Optional[optuna.study.Study] = None\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage)\n trial = TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n trial.report(1, 0)\n\n if dist.get_rank() == 0:\n assert study is not None\n study.trials[0].intermediate_values[0] == 1\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"storage_mode\", STORAGE_MODES)\ndef test_report_nan(storage_mode: str) -> None:\n with StorageSupplier(storage_mode) as storage:\n study: Optional[optuna.study.Study] = None\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage)\n trial = TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n with pytest.raises(TypeError):\n trial.report(\"abc\", 0) # type: ignore\n\n if dist.get_rank() == 0:\n assert study is not None\n assert len(study.trials[0].intermediate_values) == 0\n\n\[email 
protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\n \"storage_mode, is_pruning\", itertools.product(STORAGE_MODES, [False, True])\n)\ndef test_should_prune(storage_mode: str, is_pruning: bool) -> None:\n with StorageSupplier(storage_mode) as storage:\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage, pruner=DeterministicPruner(is_pruning))\n trial = TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n trial.report(1, 0)\n assert trial.should_prune() == is_pruning\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"storage_mode\", STORAGE_MODES)\ndef test_user_attrs(storage_mode: str) -> None:\n with StorageSupplier(storage_mode) as storage:\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage)\n trial = TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n trial.set_user_attr(\"dataset\", \"mnist\")\n trial.set_user_attr(\"batch_size\", 128)\n\n assert trial.user_attrs[\"dataset\"] == \"mnist\"\n assert trial.user_attrs[\"batch_size\"] == 128\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\ndef test_user_attrs_with_exception() -> None:\n with StorageSupplier(\"sqlite\") as storage:\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage)\n trial = TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n with pytest.raises(TypeError):\n trial.set_user_attr(\"not serializable\", torch.Tensor([1, 2]))\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"storage_mode\", STORAGE_MODES)\ndef test_system_attrs(storage_mode: str) -> None:\n with StorageSupplier(storage_mode) as storage:\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage)\n trial = TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n trial.set_system_attr(\"dataset\", 
\"mnist\")\n trial.set_system_attr(\"batch_size\", 128)\n\n assert trial.system_attrs[\"dataset\"] == \"mnist\"\n assert trial.system_attrs[\"batch_size\"] == 128\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\ndef test_system_attrs_with_exception() -> None:\n with StorageSupplier(\"sqlite\") as storage:\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage)\n trial = TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n with pytest.raises(TypeError):\n trial.set_system_attr(\"not serializable\", torch.Tensor([1, 2]))\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"storage_mode\", STORAGE_MODES)\ndef test_number(storage_mode: str) -> None:\n with StorageSupplier(storage_mode) as storage:\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage)\n trial = TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n assert trial.number == 0\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"storage_mode\", STORAGE_MODES)\ndef test_datetime_start(storage_mode: str) -> None:\n with StorageSupplier(storage_mode) as storage:\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage)\n trial = TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n assert isinstance(trial.datetime_start, datetime.datetime)\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"storage_mode\", STORAGE_MODES)\ndef test_params(storage_mode: str) -> None:\n with StorageSupplier(storage_mode) as storage:\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage)\n trial = TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n trial.suggest_float(\"f\", 0, 1)\n trial.suggest_int(\"i\", 0, 1)\n trial.suggest_categorical(\"c\", (\"a\", \"b\", \"c\"))\n\n params = 
trial.params\n assert 0 <= params[\"f\"] <= 1\n assert 0 <= params[\"i\"] <= 1\n assert params[\"c\"] in {\"a\", \"b\", \"c\"}\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"storage_mode\", STORAGE_MODES)\ndef test_distributions(storage_mode: str) -> None:\n with StorageSupplier(storage_mode) as storage:\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage)\n trial = TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n trial.suggest_float(\"u\", 0, 1)\n trial.suggest_float(\"lu\", 1e-7, 1, log=True)\n trial.suggest_float(\"du\", 0, 1, step=0.5)\n trial.suggest_int(\"i\", 0, 1)\n trial.suggest_int(\"il\", 1, 128, log=True)\n trial.suggest_categorical(\"c\", (\"a\", \"b\", \"c\"))\n\n distributions = trial.distributions\n assert distributions[\"u\"] == optuna.distributions.FloatDistribution(0, 1)\n assert distributions[\"lu\"] == optuna.distributions.FloatDistribution(1e-7, 1, log=True)\n assert distributions[\"du\"] == optuna.distributions.FloatDistribution(0, 1, step=0.5)\n assert distributions[\"i\"] == optuna.distributions.IntDistribution(0, 1)\n assert distributions[\"il\"] == optuna.distributions.IntDistribution(1, 128, log=True)\n assert distributions[\"c\"] == optuna.distributions.CategoricalDistribution((\"a\", \"b\", \"c\"))\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"storage_mode\", STORAGE_MODES)\ndef test_updates_properties(storage_mode: str) -> None:\n \"\"\"Check for any distributed deadlock following a property read.\"\"\"\n with StorageSupplier(storage_mode) as storage:\n if dist.get_rank() == 0:\n study = optuna.create_study(storage=storage)\n trial = TorchDistributedTrial(study.ask())\n else:\n trial = TorchDistributedTrial(None)\n\n trial.suggest_float(\"f\", 0, 1)\n trial.suggest_int(\"i\", 0, 1)\n trial.suggest_categorical(\"c\", (\"a\", \"b\", \"c\"))\n\n property_names = [\n p\n for p in 
dir(TorchDistributedTrial)\n if isinstance(getattr(TorchDistributedTrial, p), property)\n ]\n\n # Rank 0 can read properties without deadlock.\n if dist.get_rank() == 0:\n [getattr(trial, p) for p in property_names]\n\n dist.barrier()\n\n # Same with rank 1.\n if dist.get_rank() == 1:\n [getattr(trial, p) for p in property_names]\n\n dist.barrier()\n"
] | [
[
"numpy.log",
"numpy.maximum",
"numpy.sqrt",
"numpy.minimum",
"numpy.clip",
"numpy.asarray",
"numpy.arange",
"numpy.inner",
"numpy.cumsum",
"numpy.full",
"numpy.round",
"numpy.argsort",
"numpy.exp",
"numpy.zeros",
"numpy.sum",
"numpy.empty"
],
[
"numpy.array",
"numpy.intc"
],
[
"numpy.cos",
"numpy.power",
"numpy.sin"
],
[
"torch.distributed.get_rank",
"torch.Tensor",
"torch.distributed.barrier"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ericlearning/Progressive-Image-Translation-Network | [
"972c54dfdbc4c065328f7fc54b2b47c2cefcc609"
] | [
"others/paired/progressive/dataset.py"
] | [
"import os\nimport torch\nimport random\nimport numpy as np\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader\nfrom PIL import Image\n\nclass Dataset():\n\tdef __init__(self, train_dir, basic_types = None, shuffle = True):\n\t\tself.train_dir = train_dir\n\t\tself.basic_types = basic_types\n\t\tself.shuffle = shuffle\n\n\tdef get_loader(self, sz, bs, num_workers = 1):\n\t\tif(self.basic_types == 'Pix2Pix'):\n\t\t\tdt = {\n\t\t\t\t'input' : transforms.Compose([\n\t\t\t\t\ttransforms.Resize((sz, sz)),\n\t\t\t\t\ttransforms.ToTensor(),\n\t\t\t\t\ttransforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n\t\t\t\t]),\n\t\t\t\t'target' : transforms.Compose([\n\t\t\t\t\ttransforms.Resize((sz, sz)),\n\t\t\t\t\ttransforms.ToTensor(),\n\t\t\t\t\ttransforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n\t\t\t\t])\n\t\t\t}\n\t\t\tinput_transform = dt['input']\n\t\t\ttarget_transform = dt['target']\n\n\t\t\ttrain_dataset = Pix2Pix_Dataset(self.train_dir[0], self.train_dir[1], input_transform, target_transform)\n\t\t\ttrain_loader = DataLoader(train_dataset, batch_size = bs, shuffle = self.shuffle, num_workers = num_workers)\n\n\t\t\treturns = (train_loader)\n\n\t\treturn returns\n\nclass Pix2Pix_Dataset():\n\tdef __init__(self, input_dir, target_dir, input_transform, target_transform):\n\t\tself.input_dir = input_dir\n\t\tself.target_dir = target_dir\n\t\tself.input_transform = input_transform\n\t\tself.target_transform = target_transform\n\n\t\tself.image_name_list = []\n\t\tfor file in os.listdir(input_dir):\n\t\t\tif(file.endswith('.png') or file.endswith('.jpeg') or file.endswith('.jpg') or file.endswith('.bmp')):\n\t\t\t\tself.image_name_list.append(file)\n\n\tdef __len__(self):\n\t\treturn len(self.image_name_list)\n\n\tdef __getitem__(self, idx):\n\t\tif(self.target_dir == None):\n\t\t\tinput_img = Image.open(os.path.join(self.input_dir, self.image_name_list[idx]))\n\t\t\ttarget_img = input_img.copy()\n\t\telse:\n\t\t\tinput_img = 
Image.open(os.path.join(self.input_dir, self.image_name_list[idx]))\n\t\t\ttarget_img = Image.open(os.path.join(self.target_dir, self.image_name_list[idx]))\n\n\t\tinput_img = self.input_transform(input_img)\n\t\ttarget_img = self.target_transform(target_img)\n\n\t\tsample = (input_img, target_img)\n\t\treturn sample"
] | [
[
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lim0606/pytorch-ardae-rl | [
"6e861d8f09ee27fa8f7b42d1eb209788c93395fa"
] | [
"main_ardae.py"
] | [
"\"\"\"\nPyTorch code for SAC-AR-DAE. Copied and modified from PyTorch code for SAC-NF (Mazoure et al., 2019): https://arxiv.org/abs/1905.06893\n\"\"\"\n\nimport os\nimport sys\nimport argparse\nimport time\nimport datetime\nimport itertools\nimport random\nimport pickle\nimport glob\n\nimport gym\nimport numpy as np\nimport torch\nfrom sac_ardae import SAC\nfrom normalized_actions import NormalizedActions\nfrom replay_memory import ReplayMemory\nimport pandas as pd\ntry:\n import pybulletgym\nexcept:\n print('No PyBullet Gym. Skipping...')\nfrom utils import logging, get_time, print_args\nfrom utils import save_checkpoint, load_checkpoint\n\nfrom tensorboardX import SummaryWriter\n\n\nparser = argparse.ArgumentParser(description='PyTorch code for SAC-AR-DAE (Lim et al. 2020, https://arxiv.org/abs/2006.05164)')\nparser.add_argument('--env-name', default=\"Ant-v2\",\n help='name of the environment to run')\nparser.add_argument('--eval', type=bool, default=True,\n help='Evaluates a policy a policy every 10 episode (default:True)')\nparser.add_argument('--gamma', type=float, default=0.99, metavar='G',\n help='discount factor for reward (default: 0.99)')\nparser.add_argument('--tau', type=float, default=0.005, metavar='G',\n help='target smoothing coefficient(tau) (default: 0.005)')\nparser.add_argument('--lr', type=float, default=0.0003, metavar='G',\n help='learning rate (default: 0.0003)')\nparser.add_argument('--num_enc_layers', type=int, default=1,\n help='number of fc layers in stochastic policy (default: 1)')\nparser.add_argument('--num_fc_layers', type=int, default=1,\n help='number of fc layers in stochastic policy (default: 1)')\nparser.add_argument('--policy_nonlin', default='relu',\n help='nonlinear function in stochastic policy (default: relu)')\nparser.add_argument('--policy_type', default='mlp',\n choices=['mlp', 'wnres', 'res', 'mlpdeep', 'wnresdeep', 'resdeep'],\n help='type of fc network in stochastic policy network (default: 
mlp)')\nparser.add_argument('--alpha', type=float, default=0.05, metavar='G',\n help='Temperature parameter alpha determines the relative importance of the entropy term against the reward (default: 0.2)')\nparser.add_argument('--automatic_entropy_tuning', type=bool, default=False, metavar='G',\n help='Temperature parameter alpha automaically adjusted.')\n#parser.add_argument('--seed', type=int, default=456, metavar='N',\n# help='random seed (default: 456)')\nparser.add_argument('--batch_size', type=int, default=256, metavar='N',\n help='batch size (default: 256)')\nparser.add_argument('--num_steps', type=int, default=3000001, metavar='N',\n help='maximum number of steps (default: 1000000)')\nparser.add_argument('--hidden_size', type=int, default=256, metavar='N',\n help='hidden size (default: 256)')\nparser.add_argument('--noise_size', type=int, default=10, metavar='N',\n help='noise size (default: 10)')\nparser.add_argument('--updates_per_step', type=int, default=1, metavar='N',\n help='model updates per simulator step (default: 1)')\nparser.add_argument('--start_steps', type=int, default=10000, metavar='N',\n help='Steps sampling random actions (default: 10000)')\nparser.add_argument('--target_update_interval', type=int, default=1, metavar='N',\n help='Value target update per no. 
of updates per step (default: 1)')\nparser.add_argument('--hadamard',type=int,default=1)\nparser.add_argument('--replay_size', type=int, default=1000000, metavar='N',\n help='size of replay buffer (default: 10000000)')\nparser.add_argument('--cuda', action=\"store_true\",\n help='run on CUDA (default: False)')\nparser.add_argument('--cache', default='experiments', type=str)\nparser.add_argument('--experiment', default=None, help='name of experiment')\nparser.add_argument('--nb_evals', type=int, default=10,\n help='nb of evaluations')\nparser.add_argument('--resume', dest='resume', action='store_true', default=True,\n help='flag to resume the experiments')\nparser.add_argument('--no-resume', dest='resume', action='store_false', default=True,\n help='flag to resume the experiments')\nparser.add_argument('--exp-num', type=int, default=0,\n help='experiment number')\n\n# jacobian clamping\nparser.add_argument('--lmbd', type=float, default=0,\n help='')\nparser.add_argument('--nu', type=float, default=0,\n help='')\nparser.add_argument('--eta', type=float, default=0,\n help='')\nparser.add_argument('--num-pert-samples', type=int, default=0,\n help='')\nparser.add_argument('--jac-act', default='tanh',\n choices=['none', 'tanh'],\n help='')\n\n# seed\nparser.add_argument('--seed', type=int, default=456, metavar='N',\n help='random seed (default: 456)')\n\n# log\nparser.add_argument('--log-interval', type=int, default=100,\n help='log print-out interval (step)')\nparser.add_argument('--eval-interval', type=int, default=10000,\n help='eval interval (step)')\nparser.add_argument('--ckpt-interval', type=int, default=10000,\n help='checkpoint interval (step)')\n\n# grad q network\nparser.add_argument('--gqnet_num_layers', type=int, default=1,\n help='number of layers in grad q network (default: 1)')\nparser.add_argument('--gqnet_nonlin', default='relu',\n help='nonlinear function in grad q network (default: relu)')\nparser.add_argument('--q-optimizer', default='adam',\n 
choices=['sgd', 'adam', 'amsgrad', 'rmsprop'],\n help='optimization methods: sgd | adam | amsgrad | rmsprop ')\nparser.add_argument('--q-beta1', type=float, default=0.5, help='beta1 for adam or adam-amsgrad. default=0.5') # adam\nparser.add_argument('--q-momentum', type=float, default=0.5, help='momentum for std or rmsprop. default=0.5') # sgd or rmsprop\n#parser.add_argument('--q-lr', type=float, default=0.0001, help='initial learning rate')\n\n# use mean subtraction\nparser.add_argument('--mean-sub-method', default='none',\n choices=['none', 'entms'],\n help='mean subtraction method')\nparser.add_argument('--mean-upd-method', default='exp',\n choices=['exp', 'avg'],\n help='mean update method')\nparser.add_argument('--mean-sub-tau', type=float, default=0.005,\n help='target smoothing coefficient(tau) (default: 0.005)')\n\n# use partition function estimation\nparser.add_argument('--use-ptfnc', default=0, type=int,\n help='use partition function estimation. if 0, do not estimate. if > 0, use as the number of samples')\nparser.add_argument('--ptflogvar', type=int, default=-2.,\n help='logvar of the proposal distribution in the partition function')\n\n# cdae\nparser.add_argument('--dae-type', default='grad',\n choices=['grad', 'wnresgrad', 'resgrad', 'argrad'],\n help='type of dae')\nparser.add_argument('--dae-norm', default='none',\n choices=['none'],\n help='normalization method in cdae encoders')\nparser.add_argument('--dae-nonlin', default='softplus',\n help='nonlinear function in dae (default: softplus)')\nparser.add_argument('--dae_num_layers', type=int, default=1,\n help='number of layers in dae (default: 1)')\nparser.add_argument('--dae-enc-ctx', default='false',\n choices=['true', 'false', 'part'],\n help='dae enc architectures: true | false | part ')\nparser.add_argument('--dae-ctx-type', default='state',\n choices=['state', 'hidden'],\n help='condition methods: state | hidden ')\nparser.add_argument('--std-scale', type=float, default=1.0,\n help='std 
scaling for denoising autoencoder')\nparser.add_argument('--delta', type=float, default=0.1,\n help='std sampling distribution')\nparser.add_argument('--num-cdae-updates', type=int, default=1,\n help='number of cdae updates')\nparser.add_argument('--train-nz-cdae', type=int, default=100, metavar='N',\n help='the number of z samples per data point (default: 100)')\nparser.add_argument('--train-nstd-cdae', type=int, default=10, metavar='N',\n help='the number of std samples per data point (default: 10)')\nparser.add_argument('--d-optimizer', default='adam',\n choices=['sgd', 'adam', 'amsgrad', 'rmsprop'],\n help='optimization methods: sgd | adam | amsgrad | rmsprop ')\nparser.add_argument('--d-beta1', type=float, default=0.5, help='beta1 for adam or adam-amsgrad. default=0.5') # adam\nparser.add_argument('--d-momentum', type=float, default=0.5, help='momentum for std or rmsprop. default=0.5') # sgd or rmsprop\nparser.add_argument('--d-lr', type=float, default=0.0001, help='initial learning rate')\n#parser.add_argument('--clip-preact', type=float, default=25., help='clipping preactivation in cosh(preact) function.')\n\nargs = parser.parse_args()\nargs.hadamard = bool(args.hadamard)\n#assert not (args.lmbd > 0 and args.nu > 0)\nassert args.use_ptfnc >= 0\n\n# set env\nif args.env_name == 'Humanoidrllab':\n from rllab.envs.mujoco.humanoid_env import HumanoidEnv\n from rllab.envs.normalized_env import normalize\n env = normalize(HumanoidEnv())\n max_episode_steps = float('inf')\n if args.seed >= 0:\n global seed_\n seed_ = args.seed\nelse:\n env = gym.make(args.env_name)\n max_episode_steps=env._max_episode_steps\n env=NormalizedActions(env)\n if args.seed >= 0:\n env.seed(args.seed)\nif args.seed >= 0:\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n torch.random.manual_seed(args.seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(args.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = 
False\n\n# set args\nargs.num_actions = env.action_space.shape[0]\nargs.max_action = env.action_space.high\nargs.min_action = env.action_space.low\n\n# set cache folder\nif args.cache is None:\n args.cache = 'experiments'\nif args.experiment is None:\n args.experiment = '-'.join(['sac-ardae'#,\n '{}'.format(\n '-{}-{}{}'.format(\n args.mean_sub_method,\n args.mean_upd_method,\n '-t{}'.format(args.mean_sub_tau) if args.mean_upd_method == 'exp' else '',\n ) if args.mean_sub_method != 'none' else ''),\n 'p{}{}'.format(\n args.use_ptfnc,\n '-lv{}'.format(args.ptflogvar) if args.ptflogvar != -2. else '',\n ),\n 'jg{}-nu{}-et{}-n{}-a{}'.format(\n args.lmbd,\n args.nu,\n args.eta,\n args.num_pert_samples,\n args.jac_act,\n ),\n 'm{}-ma{}-menh{}-mfnh{}'.format(\n args.policy_type,\n args.policy_nonlin,\n args.num_enc_layers,\n args.num_fc_layers,\n ),\n 'qa{}-qnh{}'.format(\n args.gqnet_nonlin,\n args.gqnet_num_layers),\n 'd{}-da{}-dnh{}-dc{}{}'.format(\n args.dae_type,\n args.dae_nonlin,\n args.dae_num_layers,\n args.dae_ctx_type,\n '' if args.dae_enc_ctx == 'false' else ('-dencctx' if args.dae_enc_ctx == 'true' else '-dencctx-pt'), #'-dencctx' if args.dae_enc_ctx == 'true' else '',\n ),\n 'nsz{}'.format(args.noise_size),\n 'sstep{}'.format(args.start_steps),\n 'a{}'.format(args.alpha),\n 'ssc{}'.format(args.std_scale), # std scale\n 'del{}'.format(args.delta),\n 'nd{}'.format(args.num_cdae_updates),\n 'nzc{}'.format(args.train_nz_cdae),\n 'nstd{}'.format(args.train_nstd_cdae),\n 'd{}-bt1{}'.format(args.d_optimizer, args.d_beta1) if args.d_optimizer in ['adam', 'amsgrad'] else 'd{}-mt{}'.format(args.d_optimizer, args.d_momentum),\n 'dlr{}'.format(args.d_lr),\n 'q{}-qt1{}'.format(args.q_optimizer, args.q_beta1) if args.q_optimizer in ['adam', 'amsgrad'] else 'q{}-mt{}'.format(args.q_optimizer, args.q_momentum),\n 'mlr{}'.format(args.lr),\n 'seed{}'.format(args.seed),\n 'exp{}'.format(args.exp_num),\n ])\nargs.path = os.path.join(args.cache, args.experiment)\nif 
args.resume:\n listing = glob.glob(args.path+'-19*') + glob.glob(args.path+'-20*')\n if len(listing) == 0:\n args.path = '{}-{}'.format(args.path, get_time())\n else:\n path_sorted = sorted(listing, key=lambda x: datetime.datetime.strptime(x, args.path+'-%y%m%d-%H:%M:%S'))\n args.path = path_sorted[-1]\n pass\nelse:\n args.path = '{}-{}'.format(args.path, get_time())\nos.system('mkdir -p {}'.format(args.path))\n\n# print args\nlogging(str(args), path=args.path)\n\n# init tensorboard\nwriter = SummaryWriter(args.path)\n\n# print config\nconfiguration_setup='SAC-AR-DAE'\nconfiguration_setup+='\\n'\nconfiguration_setup+=print_args(args)\n#for arg in vars(args):\n# configuration_setup+=' {} : {}'.format(str(arg),str(getattr(args, arg)))\n# configuration_setup+='\\n'\nlogging(configuration_setup, path=args.path)\n\n# init sac\nagent = SAC(env.observation_space.shape[0], env.action_space, args)\nlogging(\"----------------------------------------\", path=args.path)\nlogging(str(agent.critic), path=args.path)\nlogging(\"----------------------------------------\", path=args.path)\nlogging(str(agent.policy), path=args.path)\nlogging(\"----------------------------------------\", path=args.path)\nlogging(str(agent.cdae), path=args.path)\nlogging(\"----------------------------------------\", path=args.path)\n\n# memory\nmemory = ReplayMemory(args.replay_size)\n\n# resume\nargs.start_episode = 1\nargs.offset_time = 0 # elapsed\nargs.total_numsteps = 0\nargs.updates = 0\nargs.eval_steps = 0\nargs.ckpt_steps = 0\nagent.load_model(args)\nmemory.load(os.path.join(args.path, 'replay_memory'), 'pkl')\n\n# Training Loop\ntotal_numsteps = args.total_numsteps # 0\nupdates = args.updates # 0\neval_steps = args.eval_steps # 0\nckpt_steps = args.ckpt_steps # 0\nstart_episode = args.start_episode # 1\noffset_time = args.offset_time # 0\nstart_time = time.time()\nif 'dataframe' in args:\n df = args.dataframe\nelse:\n df = pd.DataFrame(columns=[\"total_steps\", \"score_eval\", 
\"time_so_far\"])\n\nfor i_episode in itertools.count(start_episode):\n episode_reward = 0\n episode_steps = 0\n done = False\n state = env.reset()\n\n while not done:\n if args.start_steps > total_numsteps:\n action = np.random.uniform(env.action_space.low,env.action_space.high,env.action_space.shape[0]) # Sample random action\n else:\n action = agent.select_action(state) # Sample action from policy\n if len(memory) > args.start_steps:\n # Number of updates per step in environment\n for i in range(args.updates_per_step):\n # Update parameters of all the networks\n (critic_1_loss, critic_2_loss,\n policy_loss,\n cdae_loss,\n cdae_info,\n ) = agent.update_parameters(memory, args.batch_size, updates)\n updates += 1\n\n # log\n if updates % args.log_interval == 0:\n lmbd = cdae_info['lmbd']\n _action = cdae_info['action']\n __action = _action.view(-1)#.numpy()\n _mean_action = torch.mean(__action).item()\n _med_action = torch.median(__action).item()\n logvar_qa = torch.log(torch.var(_action, dim=1) + 1e-10) # bsz x zdim\n _logvar_qa = logvar_qa.view(-1).numpy()\n _mean_logvar_qa = torch.mean(logvar_qa).item()\n _med_logvar_qa = torch.median(logvar_qa).item()\n __logvar_qa = logvar_qa.view(logvar_qa.size(0), -1).numpy()\n\n logging(\"Episode: {}\"\n \", update: {}\"\n \", critic_1 loss: {:.3f}\"\n \", critic_2 loss: {:.3f}\"\n \", cdae loss {:.3f}\"\n \", lmbd {:.2f}\"\n \", logvar_action: {:.3f}\"\n .format(\n i_episode,\n updates,\n critic_1_loss,\n critic_2_loss,\n cdae_loss,\n lmbd,\n _mean_logvar_qa,\n ), path=args.path)\n\n writer.add_scalar('train/critic_1/loss/update', critic_1_loss, updates)\n writer.add_scalar('train/critic_2/loss/update', critic_2_loss, updates)\n writer.add_scalar('train/cdae/loss/update', cdae_loss, updates)\n writer.add_scalar('train/policy/lmbd/update', lmbd, updates)\n writer.add_scalar('train/action/logvar/mean/update', _mean_logvar_qa, updates)\n else:\n cdae_loss = 0\n _mean_logvar_qa = 0\n\n next_state, reward, done, _ = 
env.step(action) # Step\n episode_steps += 1\n total_numsteps += 1\n eval_steps += 1\n ckpt_steps += 1\n episode_reward += reward\n\n # Ignore the \"done\" signal if it comes from hitting the time horizon.\n # (https://github.com/openai/spinningup/blob/master/spinup/algos/sac/sac.py)\n mask = 1 if episode_steps == max_episode_steps else float(not done)\n\n memory.push(state, action, reward, next_state, mask) # Append transition to memory\n\n state = next_state\n\n elapsed = round((time.time() - start_time + offset_time),2)\n logging(\"Episode: {}\"\n \", time (sec): {}\"\n \", total numsteps: {}\"\n \", episode steps: {}\"\n \", reward: {}\"\n .format(\n i_episode,\n elapsed,\n total_numsteps,\n episode_steps,\n round(episode_reward, 2),\n ), path=args.path)\n writer.add_scalar('train/ep_reward/episode', episode_reward, i_episode)\n writer.add_scalar('train/ep_reward/step', episode_reward, total_numsteps)\n\n # evaluation\n if eval_steps>=args.eval_interval or total_numsteps > args.num_steps:\n logging('evaluation time', path=args.path)\n r=[]\n for _ in range(args.nb_evals):\n state = env.reset()\n episode_reward = 0\n done = False\n while not done:\n action = agent.select_action(state, eval=True)\n\n next_state, reward, done, _ = env.step(action)\n episode_reward += reward\n\n state = next_state\n r.append(episode_reward)\n mean_reward=np.mean(r)\n\n # add to data frame\n res = {\"total_steps\": total_numsteps,\n \"score_eval\": mean_reward,\n \"time_so_far\": round((time.time() - start_time),2)}\n df = df.append(res, ignore_index=True)\n\n # add to log\n logging(\"----------------------------------------\", path=args.path)\n logging(\"Test Episode: {}, mean reward: {}, ep reward: {}\"\n .format(\n i_episode, round(mean_reward, 2), round(episode_reward, 2),\n ), path=args.path)\n logging(\"----------------------------------------\", path=args.path)\n writer.add_scalar('test/ep_reward/mean/step', mean_reward, total_numsteps)\n 
writer.add_scalar('test/ep_reward/episode/step', episode_reward, total_numsteps)\n\n # writer\n writer.flush()\n\n # reset count\n eval_steps%=args.eval_interval\n\n if ckpt_steps>=args.ckpt_interval and args.ckpt_interval > 0:\n training_info = {\n 'start_episode': i_episode+1,\n 'offset_time': round((time.time() - start_time + offset_time),2),\n 'total_numsteps': total_numsteps,\n 'updates': updates,\n 'eval_steps': eval_steps,\n 'ckpt_steps': ckpt_steps,\n 'dataframe': df,\n }\n agent.save_model(training_info)\n memory.save(os.path.join(args.path, 'replay_memory'), 'pkl')\n ckpt_steps%=args.ckpt_interval\n\n if total_numsteps > args.num_steps:\n break\n\nenv.close()\n"
] | [
[
"torch.mean",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.median",
"torch.random.manual_seed",
"pandas.DataFrame",
"numpy.mean",
"torch.cuda.is_available",
"torch.var",
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
rezeck/grf_transport | [
"64d68dc18b950575c19e44d91ade87b01a1e9ab0"
] | [
"script/eval_1_mean_plot.py"
] | [
"#!/usr/bin/env python\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.stats\nimport sys\n\n\ndef mean_confidence_interval(data, confidence=0.95):\n a = 1.0 * np.array(data)\n n = len(a)\n m, se = np.mean(a), scipy.stats.sem(a)\n h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)\n return m, m-h, m+h\n\n\nprefix = \"exp_adaptability/default/\"\n# prefix = \"exp_adaptability/robot_failure/\"\n# prefix = \"exp_adaptability/goal_change/\"\nexperiments = [\"exp_000.npy\", \"exp_001.npy\", \"exp_002.npy\", \"exp_003.npy\", \"exp_004.npy\", \"exp_005.npy\", \"exp_006.npy\", \"exp_007.npy\", \"exp_008.npy\", \"exp_009.npy\"]\ndata_vel = []\ndata_time = []\ndata_dist = []\nend = 440000\nfor experiment in experiments:\n print(\"Reading file: \" + prefix+experiment)\n d = np.load(prefix+experiment)\n print(d.shape)\n data_time.append(d[:end, 0])\n data_dist.append(d[:end, 1])\n data_vel.append(d[:end, 2])\n\ndata_time = np.transpose(np.array(data_time))\ndata_dist = np.transpose(np.array(data_dist))\ndata_vel = np.transpose(np.array(data_vel))\nprint(data_time.shape)\nprint(data_dist.shape)\nprint(data_vel.shape)\n\n# data = []\n# print(\"processing...\")\n# for i in range(0, end):\n# m_dist, l_dist, u_dist = mean_confidence_interval(data_dist[i, :])\n# m_vel, l_vel, u_vel = mean_confidence_interval(data_vel[i, :])\n# data.append([data_time[i, 0], m_dist, l_dist, u_dist, m_vel, l_vel, u_vel])\n# data = np.array(data)\n# print(\"saving...\")\n# np.save(\"exp_adaptability/default.npy\", data)\n# exit()\n\n\ndata_dft = np.load(\"exp_adaptability/default.npy\")\ndata_rfai = np.load(\"exp_adaptability/robot_failure.npy\")\ndata_gch = np.load(\"exp_adaptability/goal_change.npy\")\ndatas = [data_dft, data_rfai, data_gch]\n\n# Ploting data\n# fig = plt.figure(dpi=200)\n# ax = fig.add_subplot(111)\n# color = 'tab:red'\n# ax.plot(data[:, 0], data[:, 5]*100, \"--\", color=color, linewidth=0.4)\n# ax.plot(data[:, 0], data[:, 6]*100, \"--\", color=color, 
linewidth=0.4)\n# ax.plot(data[:, 0], data[:, 4]*100, label='object velocity', color=color)\n# ax.set_xlabel('time (seconds)')\n# ax.set_ylabel('object velocity (cm/s)', color=color)\n# ax.tick_params(axis='y', labelcolor=color)\n# ax.set_ylim([0, 3.2])\n\n\n# ax2 = ax.twinx()\n# color = 'tab:blue'\n# ax2.set_ylabel('distance to goal (m)', color=color)\n# ax2.plot(data[:, 0], data[:, 2], \"--\", color=color, linewidth=0.2)\n# ax2.plot(data[:, 0], data[:, 3], \"--\", color=color, linewidth=0.2)\n# ax2.plot(data[:, 0], data[:, 1], label='distance to goal', color=color)\n# ax2.tick_params(axis='y', labelcolor=color)\n# ax2.set_ylim([0, 2.2])\n\n# ax.set_title(\"Convergence Analyses\")\n\n# # plt.savefig(figfile, dpi=200)\n# plt.show()\n\n\nfig, axs = plt.subplots(3, sharex=True, sharey=False,\n gridspec_kw={'hspace': 0.1})\n\nfor i in range(0, 3):\n axs[i].set_rasterized(True)\n ax = axs[i]\n color = 'tab:red'\n data = datas[i]\n ax.plot(data[:, 0], data[:, 5]*100, \"--\",\n color=color, linewidth=0.2, alpha=0.1)\n ax.plot(data[:, 0], data[:, 6]*100, \"--\",\n color=color, linewidth=0.2, alpha=0.1)\n ax.plot(data[:, 0], data[:, 4]*100, label='object velocity', color=color)\n if i == 1:\n ax.set_ylabel('Object velocity (cm/s)', color=color, fontsize=14)\n ax.tick_params(axis='y', labelcolor=color)\n ax.set_ylim([0, 3.2])\n\n ax2 = ax.twinx()\n ax2.set_rasterized(True)\n color = 'tab:blue'\n if i == 1:\n ax2.set_ylabel('Distance to goal (m)', color=color, fontsize=14)\n ax2.plot(data[:, 0], data[:, 2], \"--\",\n color=color, linewidth=0.8, alpha=1.0)\n ax2.plot(data[:, 0], data[:, 3], \"--\",\n color=color, linewidth=0.8, alpha=1.0)\n\n ax2.plot(data[:, 0], data[:, 1], label='distance to goal', color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n ax2.set_ylim([0, 2.2])\n\naxs[0].set_title(\"Convergence Analyses\", fontsize=16)\naxs[2].set_xlabel('Time (seconds)', fontsize=14)\n\n\nplt.savefig(\"adaptability.pdf\", dpi=200)\nplt.show()\n"
] | [
[
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.mean",
"numpy.load",
"numpy.array",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jizhouh/deepcell-tf | [
"491ece59f5024d73429477ebdcb437a6e67d766b",
"491ece59f5024d73429477ebdcb437a6e67d766b",
"491ece59f5024d73429477ebdcb437a6e67d766b",
"491ece59f5024d73429477ebdcb437a6e67d766b",
"491ece59f5024d73429477ebdcb437a6e67d766b",
"491ece59f5024d73429477ebdcb437a6e67d766b",
"491ece59f5024d73429477ebdcb437a6e67d766b"
] | [
"deepcell/running.py",
"deepcell/layers/normalization.py",
"deepcell/applications/cytoplasm_segmentation_test.py",
"deepcell/callbacks.py",
"deepcell/image_generators/cropping.py",
"deepcell/utils/backbone_utils_test.py",
"deepcell/layers/location.py"
] | [
"# Copyright 2016-2021 The Van Valen Lab at the California Institute of\n# Technology (Caltech), with support from the Paul Allen Family Foundation,\n# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.\n# All rights reserved.\n#\n# Licensed under a modified Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE\n#\n# The Work provided may be used for non-commercial academic purposes only.\n# For any other use of the Work, including commercial use, please contact:\n# [email protected]\n#\n# Neither the name of Caltech nor the names of its contributors may be used\n# to endorse or promote products derived from this software without specific\n# prior written permission.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions for running convolutional neural networks\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.models import Model\n\nfrom deepcell.utils.data_utils import trim_padding\n\n\ndef get_cropped_input_shape(images,\n num_crops=4,\n receptive_field=61,\n data_format=None):\n \"\"\"Calculate the input_shape for models to process cropped sub-images.\n\n Args:\n images (numpy.array): numpy array of original data\n num_crops (int): number of slices for the x and y axis\n to create sub-images\n receptive_field (int): the receptive field of the neural network.\n 
data_format (str): \"channels_first\" or \"channels_last\"\n\n Returns:\n tuple: new ``input_shape`` for model to process sub-images.\n \"\"\"\n if data_format is None:\n data_format = K.image_data_format()\n if data_format == 'channels_first':\n channel_axis = 1\n row_axis = len(images.shape) - 2\n col_axis = len(images.shape) - 1\n else:\n channel_axis = len(images.shape) - 1\n row_axis = len(images.shape) - 3\n col_axis = len(images.shape) - 2\n\n channel_dim = images.shape[channel_axis]\n\n # Split the frames into quarters, as the full image size is too large\n crop_x = images.shape[row_axis] // num_crops + (receptive_field - 1)\n crop_y = images.shape[col_axis] // num_crops + (receptive_field - 1)\n\n if images.ndim == 5:\n input_shape = (images.shape[row_axis - 1], crop_x, crop_y, channel_dim)\n else:\n input_shape = (crop_x, crop_y, channel_dim)\n\n # switch to channels_first if necessary\n if channel_axis == 1:\n input_shape = tuple([input_shape[-1]] + list(input_shape[:-1]))\n\n return input_shape\n\n\ndef get_padding_layers(model):\n \"\"\"Get all names of padding layers in a model\n\n Args:\n model (tensorflow.keras.Model): Keras model\n\n Returns:\n list: list of names of padding layers inside model\n \"\"\"\n padding_layers = []\n for layer in model.layers:\n if 'padding' in layer.name:\n padding_layers.append(layer.name)\n elif isinstance(layer, Model):\n padding_layers.extend(get_padding_layers(layer))\n return padding_layers\n\n\ndef process_whole_image(model, images, num_crops=4, receptive_field=61, padding=None):\n \"\"\"Slice images into num_crops * num_crops pieces, and use the model to\n process each small image.\n\n Args:\n model (tensorflow.keras.Model): model that will process each small image\n images (numpy.array): numpy array that is too big for model.predict\n num_crops (int): number of slices for the x and y axis\n to create sub-images\n receptive_field (int): receptive field used by model,\n required to pad images\n padding (str): type 
of padding for input images,\n one of {'reflect', 'zero'}.\n\n Returns:\n numpy.array: model outputs for each sub-image\n\n Raises:\n ValueError: invalid padding value\n ValueError: model input shape is different than expected_input_shape\n \"\"\"\n if K.image_data_format() == 'channels_first':\n channel_axis = 1\n row_axis = len(images.shape) - 2\n col_axis = len(images.shape) - 1\n else:\n channel_axis = len(images.shape) - 1\n row_axis = len(images.shape) - 3\n col_axis = len(images.shape) - 2\n\n if not padding:\n padding_layers = get_padding_layers(model)\n if padding_layers:\n padding = 'reflect' if 'reflect' in padding_layers[0] else 'zero'\n\n if str(padding).lower() not in {'reflect', 'zero'}:\n raise ValueError('Expected `padding_mode` to be either `zero` or '\n '`reflect`. Got ', padding)\n\n # Split the frames into quarters, as the full image size is too large\n crop_x = images.shape[row_axis] // num_crops\n crop_y = images.shape[col_axis] // num_crops\n\n # Set up receptive field window for padding\n win_x, win_y = (receptive_field - 1) // 2, (receptive_field - 1) // 2\n\n # instantiate matrix for model output\n model_output_shape = tuple(list(model.layers[-1].output_shape)[1:])\n if channel_axis == 1:\n output = np.zeros(tuple([images.shape[0], model_output_shape[0]] +\n list(images.shape[2:])))\n else:\n output = np.zeros(tuple(list(images.shape[0:-1]) +\n [model_output_shape[-1]]))\n\n expected_input_shape = get_cropped_input_shape(\n images, num_crops, receptive_field)\n\n if expected_input_shape != model.input_shape[1:]:\n raise ValueError('Expected model.input_shape to be {}. Got {}. 
Use '\n '`get_cropped_input_shape()` to recreate your model '\n ' with the proper input_shape'.format(\n expected_input_shape, model.input_shape[1:]))\n\n # pad the images only in the x and y axes\n pad_width = []\n for i in range(len(images.shape)):\n if i == row_axis:\n pad_width.append((win_x, win_x))\n elif i == col_axis:\n pad_width.append((win_y, win_y))\n else:\n pad_width.append((0, 0))\n\n if str(padding).lower() == 'reflect':\n padded_images = np.pad(images, pad_width, mode='reflect')\n else:\n padded_images = np.pad(images, pad_width, mode='constant', constant_values=0)\n\n for i in range(num_crops):\n for j in range(num_crops):\n e, f = i * crop_x, (i + 1) * crop_x + 2 * win_x\n g, h = j * crop_y, (j + 1) * crop_y + 2 * win_y\n\n if images.ndim == 5:\n if channel_axis == 1:\n predicted = model.predict(padded_images[:, :, :, e:f, g:h])\n else:\n predicted = model.predict(padded_images[:, :, e:f, g:h, :])\n else:\n if channel_axis == 1:\n predicted = model.predict(padded_images[:, :, e:f, g:h])\n else:\n predicted = model.predict(padded_images[:, e:f, g:h, :])\n\n # if using skip_connections, get the final model output\n if isinstance(predicted, list):\n predicted = predicted[-1]\n\n # if the model uses padding, trim the output images to proper shape\n # if model does not use padding, images should already be correct\n if padding:\n predicted = trim_padding(predicted, win_x, win_y)\n\n a, b = i * crop_x, (i + 1) * crop_x\n c, d = j * crop_y, (j + 1) * crop_y\n\n if images.ndim == 5:\n if channel_axis == 1:\n output[:, :, :, a:b, c:d] = predicted\n else:\n output[:, :, a:b, c:d, :] = predicted\n else:\n if channel_axis == 1:\n output[:, :, a:b, c:d] = predicted\n else:\n output[:, a:b, c:d, :] = predicted\n\n return output\n",
"# Copyright 2016-2021 The Van Valen Lab at the California Institute of\n# Technology (Caltech), with support from the Paul Allen Family Foundation,\n# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.\n# All rights reserved.\n#\n# Licensed under a modified Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE\n#\n# The Work provided may be used for non-commercial academic purposes only.\n# For any other use of the Work, including commercial use, please contact:\n# [email protected]\n#\n# Neither the name of Caltech nor the names of its contributors may be used\n# to endorse or promote products derived from this software without specific\n# prior written permission.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Layers to noramlize input images for 2D and 3D images\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras import activations\nfrom tensorflow.keras import constraints\nfrom tensorflow.keras import initializers\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras.layers import Layer, InputSpec\nfrom tensorflow.python.keras.utils import conv_utils\n\n\nclass ImageNormalization2D(Layer):\n \"\"\"Image Normalization layer for 2D data.\n\n Args:\n norm_method (str): Normalization method to 
use, one of:\n \"std\", \"max\", \"whole_image\", None.\n filter_size (int): The length of the convolution window.\n data_format (str): A string, one of ``channels_last`` (default)\n or ``channels_first``. The ordering of the dimensions in the\n inputs. ``channels_last`` corresponds to inputs with shape\n ``(batch, height, width, channels)`` while ``channels_first``\n corresponds to inputs with shape\n ``(batch, channels, height, width)``.\n activation (function): Activation function to use.\n If you don't specify anything, no activation is applied\n (ie. \"linear\" activation: ``a(x) = x``).\n use_bias (bool): Whether the layer uses a bias.\n kernel_initializer (function): Initializer for the ``kernel`` weights\n matrix, used for the linear transformation of the inputs.\n bias_initializer (function): Initializer for the bias vector. If None,\n the default initializer will be used.\n kernel_regularizer (function): Regularizer function applied to the\n ``kernel`` weights matrix.\n bias_regularizer (function): Regularizer function applied to the\n bias vector.\n activity_regularizer (function): Regularizer function applied to.\n kernel_constraint (function): Constraint function applied to\n the ``kernel`` weights matrix.\n bias_constraint (function): Constraint function applied to the\n bias vector.\n \"\"\"\n def __init__(self,\n norm_method='std',\n filter_size=61,\n data_format=None,\n activation=None,\n use_bias=False,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n self.valid_modes = {'std', 'max', None, 'whole_image'}\n if norm_method not in self.valid_modes:\n raise ValueError('Invalid `norm_method`: \"{}\". 
'\n 'Use one of {}.'.format(\n norm_method, self.valid_modes))\n if 'trainable' not in kwargs:\n kwargs['trainable'] = False\n super(ImageNormalization2D, self).__init__(\n activity_regularizer=regularizers.get(activity_regularizer),\n **kwargs)\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.input_spec = InputSpec(ndim=4) # hardcoded for 2D data\n\n self.filter_size = filter_size\n self.norm_method = norm_method\n self.data_format = conv_utils.normalize_data_format(data_format)\n\n if self.data_format == 'channels_first':\n self.channel_axis = 1\n else:\n self.channel_axis = 3 # hardcoded for 2D data\n\n if isinstance(self.norm_method, str):\n self.norm_method = self.norm_method.lower()\n\n def build(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape)\n if len(input_shape) != 4:\n raise ValueError('Inputs should have rank 4, '\n 'received input shape: %s' % input_shape)\n if self.data_format == 'channels_first':\n channel_axis = 1\n else:\n channel_axis = -1\n if input_shape.dims[channel_axis].value is None:\n raise ValueError('The channel dimension of the inputs '\n 'should be defined. 
Found `None`.')\n input_dim = int(input_shape[channel_axis])\n self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})\n\n kernel_shape = (self.filter_size, self.filter_size, input_dim, 1)\n # self.kernel = self.add_weight(\n # name='kernel',\n # shape=kernel_shape,\n # initializer=self.kernel_initializer,\n # regularizer=self.kernel_regularizer,\n # constraint=self.kernel_constraint,\n # trainable=False,\n # dtype=self.compute_dtype)\n\n W = K.ones(kernel_shape, dtype=self.compute_dtype)\n W = W / K.cast(K.prod(K.int_shape(W)), dtype=self.compute_dtype)\n self.kernel = W\n # self.set_weights([W])\n\n if self.use_bias:\n self.bias = self.add_weight(\n name='bias',\n shape=(self.filter_size, self.filter_size),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n trainable=False,\n dtype=self.compute_dtype)\n else:\n self.bias = None\n\n self.built = True\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n return tensor_shape.TensorShape(input_shape)\n\n def _average_filter(self, inputs):\n # Depthwise convolution on CPU is only supported for NHWC format\n if self.data_format == 'channels_first':\n inputs = K.permute_dimensions(inputs, pattern=[0, 2, 3, 1])\n outputs = tf.nn.depthwise_conv2d(inputs, self.kernel, [1, 1, 1, 1],\n padding='SAME', data_format='NHWC')\n if self.data_format == 'channels_first':\n outputs = K.permute_dimensions(outputs, pattern=[0, 3, 1, 2])\n return outputs\n\n def _window_std_filter(self, inputs, epsilon=K.epsilon()):\n c1 = self._average_filter(inputs)\n c2 = self._average_filter(K.square(inputs))\n output = K.sqrt(c2 - c1 * c1) + epsilon\n return output\n\n def call(self, inputs):\n if not self.norm_method:\n outputs = inputs\n\n elif self.norm_method == 'whole_image':\n axes = [2, 3] if self.channel_axis == 1 else [1, 2]\n outputs = inputs - K.mean(inputs, axis=axes, keepdims=True)\n outputs = outputs / 
(K.std(inputs, axis=axes, keepdims=True) + K.epsilon())\n\n elif self.norm_method == 'std':\n outputs = inputs - self._average_filter(inputs)\n outputs = outputs / self._window_std_filter(outputs)\n\n elif self.norm_method == 'max':\n outputs = inputs / K.max(inputs)\n outputs = outputs - self._average_filter(outputs)\n\n else:\n raise NotImplementedError('\"{}\" is not a valid norm_method'.format(\n self.norm_method))\n\n return outputs\n\n def get_config(self):\n config = {\n 'norm_method': self.norm_method,\n 'filter_size': self.filter_size,\n 'data_format': self.data_format,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint)\n }\n base_config = super(ImageNormalization2D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass ImageNormalization3D(Layer):\n \"\"\"Image Normalization layer for 3D data.\n\n Args:\n norm_method (str): Normalization method to use, one of:\n \"std\", \"max\", \"whole_image\", None.\n filter_size (int): The length of the convolution window.\n data_format (str): A string, one of ``channels_last`` (default)\n or ``channels_first``. The ordering of the dimensions in the\n inputs. 
``channels_last`` corresponds to inputs with shape\n ``(batch, height, width, channels)`` while ``channels_first``\n corresponds to inputs with shape\n ``(batch, channels, height, width)``.\n activation (function): Activation function to use.\n If you don't specify anything, no activation is applied\n (ie. \"linear\" activation: ``a(x) = x``).\n use_bias (bool): Whether the layer uses a bias.\n kernel_initializer (function): Initializer for the ``kernel`` weights\n matrix, used for the linear transformation of the inputs.\n bias_initializer (function): Initializer for the bias vector. If None,\n the default initializer will be used.\n kernel_regularizer (function): Regularizer function applied to the\n ``kernel`` weights matrix.\n bias_regularizer (function): Regularizer function applied to the\n bias vector.\n activity_regularizer (function): Regularizer function applied to.\n kernel_constraint (function): Constraint function applied to\n the ``kernel`` weights matrix.\n bias_constraint (function): Constraint function applied to the\n bias vector.\n \"\"\"\n def __init__(self,\n norm_method='std',\n filter_size=61,\n data_format=None,\n activation=None,\n use_bias=False,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n self.valid_modes = {'std', 'max', None, 'whole_image'}\n if norm_method not in self.valid_modes:\n raise ValueError('Invalid `norm_method`: \"{}\". 
'\n 'Use one of {}.'.format(\n norm_method, self.valid_modes))\n if 'trainable' not in kwargs:\n kwargs['trainable'] = False\n super(ImageNormalization3D, self).__init__(\n activity_regularizer=regularizers.get(activity_regularizer),\n **kwargs)\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.input_spec = InputSpec(ndim=5) # hardcoded for 3D data\n\n self.filter_size = filter_size\n self.norm_method = norm_method\n self.data_format = conv_utils.normalize_data_format(data_format)\n\n if self.data_format == 'channels_first':\n self.channel_axis = 1\n else:\n self.channel_axis = 4 # hardcoded for 3D data\n\n if isinstance(self.norm_method, str):\n self.norm_method = self.norm_method.lower()\n\n def build(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape)\n if len(input_shape) != 5:\n raise ValueError('Inputs should have rank 5, '\n 'received input shape: %s' % input_shape)\n if self.data_format == 'channels_first':\n channel_axis = 1\n else:\n channel_axis = -1\n if input_shape.dims[channel_axis].value is None:\n raise ValueError('The channel dimension of the inputs '\n 'should be defined, found None: %s' % input_shape)\n input_dim = int(input_shape[channel_axis])\n self.input_spec = InputSpec(ndim=5, axes={channel_axis: input_dim})\n\n if self.data_format == 'channels_first':\n depth = int(input_shape[2])\n else:\n depth = int(input_shape[1])\n kernel_shape = (depth, self.filter_size, self.filter_size, input_dim, 1)\n\n # self.kernel = self.add_weight(\n # 'kernel',\n # shape=kernel_shape,\n # initializer=self.kernel_initializer,\n # 
regularizer=self.kernel_regularizer,\n # constraint=self.kernel_constraint,\n # trainable=False,\n # dtype=self.compute_dtype)\n\n W = K.ones(kernel_shape, dtype=self.compute_dtype)\n W = W / K.cast(K.prod(K.int_shape(W)), dtype=self.compute_dtype)\n self.kernel = W\n # self.set_weights([W])\n\n if self.use_bias:\n self.bias = self.add_weight(\n name='bias',\n shape=(depth, self.filter_size, self.filter_size),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n trainable=False,\n dtype=self.compute_dtype)\n else:\n self.bias = None\n\n self.built = True\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n return tensor_shape.TensorShape(input_shape)\n\n def _average_filter(self, inputs):\n if self.data_format == 'channels_first':\n inputs = K.permute_dimensions(inputs, pattern=[0, 2, 3, 4, 1])\n # TODO: conv3d vs depthwise_conv2d?\n outputs = tf.nn.conv3d(inputs, self.kernel, [1, 1, 1, 1, 1],\n padding='SAME', data_format='NDHWC')\n\n if self.data_format == 'channels_first':\n outputs = K.permute_dimensions(outputs, pattern=[0, 4, 1, 2, 3])\n return outputs\n\n def _window_std_filter(self, inputs, epsilon=K.epsilon()):\n c1 = self._average_filter(inputs)\n c2 = self._average_filter(K.square(inputs))\n output = K.sqrt(c2 - c1 * c1) + epsilon\n return output\n\n def call(self, inputs):\n if not self.norm_method:\n outputs = inputs\n\n elif self.norm_method == 'whole_image':\n axes = [3, 4] if self.channel_axis == 1 else [2, 3]\n outputs = inputs - K.mean(inputs, axis=axes, keepdims=True)\n outputs = outputs / (K.std(inputs, axis=axes, keepdims=True) + K.epsilon())\n\n elif self.norm_method == 'std':\n outputs = inputs - self._average_filter(inputs)\n outputs = outputs / self._window_std_filter(outputs)\n\n elif self.norm_method == 'max':\n outputs = inputs / K.max(inputs)\n outputs = outputs - self._average_filter(outputs)\n\n else:\n raise 
NotImplementedError('\"{}\" is not a valid norm_method'.format(\n self.norm_method))\n\n return outputs\n\n def get_config(self):\n config = {\n 'norm_method': self.norm_method,\n 'filter_size': self.filter_size,\n 'data_format': self.data_format,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint)\n }\n base_config = super(ImageNormalization3D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n",
"# Copyright 2016-2021 The Van Valen Lab at the California Institute of\n# Technology (Caltech), with support from the Paul Allen Family Foundation,\n# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.\n# All rights reserved.\n#\n# Licensed under a modified Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE\n#\n# The Work provided may be used for non-commercial academic purposes only.\n# For any other use of the Work, including commercial use, please contact:\n# [email protected]\n#\n# Neither the name of Caltech nor the names of its contributors may be used\n# to endorse or promote products derived from this software without specific\n# prior written permission.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for CytoplasmSegmentationModel\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.platform import test\nimport numpy as np\n\nfrom deepcell.model_zoo import PanopticNet\nfrom deepcell.applications import CytoplasmSegmentation\n\n\nclass TestCytoplasmSegmentation(test.TestCase):\n\n def test_cytoplasm_app(self):\n with self.cached_session():\n model = PanopticNet(\n 'resnet50',\n input_shape=(128, 128, 1),\n norm_method='whole_image',\n num_semantic_heads=2,\n num_semantic_classes=[1, 1],\n location=True,\n include_top=True,\n lite=True,\n use_imagenet=False,\n interpolation='bilinear')\n app = 
CytoplasmSegmentation(model)\n\n # test output shape\n shape = app.model.output_shape\n self.assertIsInstance(shape, list)\n self.assertEqual(len(shape), 2)\n self.assertEqual(len(shape[0]), 4)\n self.assertEqual(len(shape[1]), 4)\n\n # test predict\n x = np.random.rand(1, 500, 500, 1)\n y = app.predict(x)\n self.assertEqual(x.shape, y.shape)\n",
"# Copyright 2016-2021 The Van Valen Lab at the California Institute of\n# Technology (Caltech), with support from the Paul Allen Family Foundation,\n# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.\n# All rights reserved.\n#\n# Licensed under a modified Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE\n#\n# The Work provided may be used for non-commercial academic purposes only.\n# For any other use of the Work, including commercial use, please contact:\n# [email protected]\n#\n# Neither the name of Caltech nor the names of its contributors may be used\n# to endorse or promote products derived from this software without specific\n# prior written permission.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Custom Callbacks for DeepCell\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport timeit\n\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\n\n\nclass InferenceTimer(tf.keras.callbacks.Callback):\n \"\"\"Callback to log inference speed per epoch.\"\"\"\n\n def __init__(self, samples=100):\n super(InferenceTimer, self).__init__()\n self._samples = int(samples)\n self._batch_times = []\n self._samples_seen = []\n self._timer = None\n\n def on_predict_begin(self, epoch, logs=None):\n self._batch_times = []\n self._samples_seen = []\n\n def on_predict_batch_begin(self, batch, logs=None):\n 
self._timer = timeit.default_timer()\n\n def on_predict_batch_end(self, batch, logs=None):\n t = timeit.default_timer() - self._timer\n self._batch_times.append(t)\n outputs = logs.get('outputs', np.empty((1,)))\n if isinstance(self.model.output_shape, list):\n outputs = outputs[0]\n self._samples_seen.append(outputs.shape[0])\n\n def on_predict_end(self, logs=None):\n total_samples = np.sum(self._samples_seen)\n\n per_sample = [t / float(s) for t, s in\n zip(self._batch_times, self._samples_seen)]\n\n avg = np.mean(per_sample)\n std = np.std(per_sample)\n\n print('Average inference speed per sample for %s total samples: '\n '%0.5fs ± %0.5fs.' % (total_samples, avg, std))\n\n def on_epoch_end(self, epoch, logs=None):\n shape = tuple([self._samples] + list(self.model.input_shape[1:]))\n test_batch = np.random.random(shape)\n self.model.predict(test_batch, callbacks=self)\n",
"# Copyright 2016-2021 The Van Valen Lab at the California Institute of\n# Technology (Caltech), with support from the Paul Allen Family Foundation,\n# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.\n# All rights reserved.\n#\n# Licensed under a modified Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE\n#\n# The Work provided may be used for non-commercial academic purposes only.\n# For any other use of the Work, including commercial use, please contact:\n# [email protected]\n#\n# Neither the name of Caltech nor the names of its contributors may be used\n# to endorse or promote products derived from this software without specific\n# prior written permission.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Semantic segmentation data generators with cropping.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport warnings\n\nimport numpy as np\n\nfrom tensorflow.python.keras.preprocessing.image import array_to_img\n\nfrom deepcell.image_generators import SemanticDataGenerator, SemanticIterator\n\ntry:\n import scipy\n # scipy.linalg cannot be accessed until explicitly imported\n from scipy import linalg\n # scipy.ndimage cannot be accessed until explicitly imported\n from scipy import ndimage\nexcept ImportError:\n scipy = None\n\nfrom deepcell.image_generators import _transform_masks\n\n\nclass 
CroppingIterator(SemanticIterator):\n \"\"\"Iterator yielding data from Numpy arrays (X and y).\n\n Args:\n train_dict (dict): Consists of numpy arrays for ``X`` and ``y``.\n image_data_generator (ImageDataGenerator): For random transformations\n and normalization.\n batch_size (int): Size of a batch.\n min_objects (int): Images with fewer than ``min_objects`` are ignored.\n shuffle (bool): Whether to shuffle the data between epochs.\n seed (int): Random seed for data shuffling.\n data_format (str): A string, one of ``channels_last`` (default)\n or ``channels_first``. The ordering of the dimensions in the\n inputs. ``channels_last`` corresponds to inputs with shape\n ``(batch, height, width, channels)`` while ``channels_first``\n corresponds to inputs with shape\n ``(batch, channels, height, width)``.\n save_to_dir (str): Optional directory where to save the pictures\n being yielded, in a viewable format. This is useful\n for visualizing the random transformations being\n applied, for debugging purposes.\n save_prefix (str): Prefix to use for saving sample\n images (if ``save_to_dir`` is set).\n save_format (str): Format to use for saving sample images\n (if ``save_to_dir`` is set).\n crop_size (tuple): Optional parameter specifying size of crop to take from image\n \"\"\"\n def __init__(self,\n train_dict,\n image_data_generator,\n batch_size=1,\n shuffle=False,\n transforms=['outer-distance'],\n transforms_kwargs={},\n seed=None,\n min_objects=3,\n data_format='channels_last',\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n crop_size=None):\n\n super(CroppingIterator, self).__init__(\n train_dict=train_dict,\n image_data_generator=image_data_generator,\n batch_size=batch_size,\n shuffle=shuffle,\n transforms=transforms,\n transforms_kwargs=transforms_kwargs,\n seed=seed,\n min_objects=min_objects,\n data_format=data_format,\n save_to_dir=save_to_dir,\n save_prefix=save_prefix,\n save_format=save_format,\n )\n\n # set output size of image based on 
crop_size\n if crop_size is not None:\n output_size = crop_size\n\n else:\n output_size = self.x.shape[1:3] if self.channel_axis == 3 else self.x.shape[2:4]\n\n self.output_size = output_size\n\n def _get_batches_of_transformed_samples(self, index_array):\n # set output size based on output shape and # of channels\n if self.channel_axis == 3:\n x_shape = tuple([len(index_array)] + list(self.output_size) + [self.x.shape[3]])\n else:\n x_shape = tuple([len(index_array)] + [self.x.shape[1]] + list(self.output_size))\n\n batch_x = np.zeros(x_shape, dtype=self.x.dtype)\n batch_y = []\n\n for i, j in enumerate(index_array):\n x = self.x[j]\n\n # _transform_labels expects batch dimension\n y_semantic_list = self._transform_labels(self.y[j:j + 1])\n\n # initialize batch_y\n if len(batch_y) == 0:\n for ys in y_semantic_list:\n if self.data_format == 'channels_first':\n shape = tuple([len(index_array), ys.shape[1]] + list(self.output_size))\n else:\n shape = tuple([len(index_array)] + list(self.output_size) + [ys.shape[-1]])\n batch_y.append(np.zeros(shape, dtype=ys.dtype))\n\n # random_transform does not expect batch dimension\n y_semantic_list = [ys[0] for ys in y_semantic_list]\n\n # Apply transformation\n x, y_semantic_list = self.image_data_generator.random_transform(\n x, y_semantic_list)\n\n x = self.image_data_generator.standardize(x)\n\n batch_x[i] = x\n\n for k, ys in enumerate(y_semantic_list):\n batch_y[k][i] = ys\n\n if self.save_to_dir:\n for i, j in enumerate(index_array):\n if self.data_format == 'channels_first':\n img_x = np.expand_dims(batch_x[i, 0, ...], 0)\n else:\n img_x = np.expand_dims(batch_x[i, ..., 0], -1)\n img = array_to_img(img_x, self.data_format, scale=True)\n fname = '{prefix}_{index}_{hash}.{format}'.format(\n prefix=self.save_prefix,\n index=j,\n hash=np.random.randint(1e4),\n format=self.save_format)\n img.save(os.path.join(self.save_to_dir, fname))\n\n if self.y is not None:\n # Save argmax of y batch\n for k, y_sem in 
enumerate(batch_y):\n if y_sem[i].shape[self.channel_axis - 1] == 1:\n img_y = y_sem[i]\n else:\n img_y = np.argmax(y_sem[i],\n axis=self.channel_axis - 1)\n img_y = np.expand_dims(img_y,\n axis=self.channel_axis - 1)\n img = array_to_img(img_y, self.data_format, scale=True)\n fname = 'y_{sem}_{prefix}_{index}_{hash}.{format}'.format(\n sem=k,\n prefix=self.save_prefix,\n index=j,\n hash=np.random.randint(1e4),\n format=self.save_format)\n img.save(os.path.join(self.save_to_dir, fname))\n\n return batch_x, batch_y\n\n\nclass CroppingDataGenerator(SemanticDataGenerator):\n \"\"\"Generates batches of tensor image data with real-time data augmentation.\n The data will be looped over (in batches).\n\n Args:\n ffeaturewise_center (bool): Set input mean to 0 over the dataset,\n feature-wise.\n samplewise_center (bool): Set each sample mean to 0.\n featurewise_std_normalization (bool): Divide inputs by std\n of the dataset, feature-wise.\n samplewise_std_normalization (bool): Divide each input by its std.\n zca_epsilon (float): Epsilon for ZCA whitening. 
Default is 1e-6.\n zca_whitening (bool): Apply ZCA whitening.\n rotation_range (int): Degree range for random rotations.\n width_shift_range (float): 1-D array-like or int\n\n - float: fraction of total width, if < 1, or pixels if >= 1.\n - 1-D array-like: random elements from the array.\n - int: integer number of pixels from interval\n ``(-width_shift_range, +width_shift_range)``\n - With ``width_shift_range=2`` possible values are integers\n ``[-1, 0, +1]``, same as with ``width_shift_range=[-1, 0, +1]``,\n while with ``width_shift_range=1.0`` possible values are floats\n in the interval [-1.0, +1.0).\n\n height_shift_range: Float, 1-D array-like or int\n\n - float: fraction of total height, if < 1, or pixels if >= 1.\n - 1-D array-like: random elements from the array.\n - int: integer number of pixels from interval\n ``(-height_shift_range, +height_shift_range)``\n - With ``height_shift_range=2`` possible values\n are integers ``[-1, 0, +1]``,\n same as with ``height_shift_range=[-1, 0, +1]``,\n while with ``height_shift_range=1.0`` possible values are floats\n in the interval [-1.0, +1.0).\n\n shear_range (float): Shear Intensity\n (Shear angle in counter-clockwise direction in degrees)\n zoom_range (float): float or [lower, upper], Range for random zoom.\n If a float, ``[lower, upper] = [1-zoom_range, 1+zoom_range]``.\n channel_shift_range (float): range for random channel shifts.\n fill_mode (str): One of {\"constant\", \"nearest\", \"reflect\" or \"wrap\"}.\n\n Default is 'nearest'. 
Points outside the boundaries of the input\n are filled according to the given mode:\n\n - 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)\n - 'nearest': aaaaaaaa|abcd|dddddddd\n - 'reflect': abcddcba|abcd|dcbaabcd\n - 'wrap': abcdabcd|abcd|abcdabcd\n\n cval (float): Value used for points outside the boundaries\n when ``fill_mode = \"constant\"``.\n horizontal_flip (bool): Randomly flip inputs horizontally.\n vertical_flip (bool): Randomly flip inputs vertically.\n rescale: rescaling factor. Defaults to None. If None or 0, no rescaling\n is applied, otherwise we multiply the data by the value provided\n (before applying any other transformation).\n preprocessing_function: function that will be implied on each input.\n The function will run after the image is resized and augmented.\n The function should take one argument:\n one image (Numpy tensor with rank 3),\n and should output a Numpy tensor with the same shape.\n data_format (str): A string, one of ``channels_last`` (default)\n or ``channels_first``. The ordering of the dimensions in the\n inputs. 
``channels_last`` corresponds to inputs with shape\n ``(batch, height, width, channels)`` while ``channels_first``\n corresponds to inputs with shape\n ``(batch, channels, height, width)``.\n validation_split (float): Fraction of images reserved for validation\n (strictly between 0 and 1).\n \"\"\"\n\n def __init__(self,\n featurewise_center=False,\n samplewise_center=False,\n featurewise_std_normalization=False,\n samplewise_std_normalization=False,\n zca_whitening=False,\n zca_epsilon=1e-6,\n rotation_range=0,\n width_shift_range=0.,\n height_shift_range=0.,\n brightness_range=None,\n shear_range=0.,\n zoom_range=0.,\n channel_shift_range=0.,\n fill_mode='nearest',\n cval=0.,\n horizontal_flip=False,\n vertical_flip=False,\n rescale=None,\n preprocessing_function=None,\n data_format='channels_last',\n validation_split=0.0,\n interpolation_order=1,\n crop_size=None,\n dtype='float32'):\n\n super(CroppingDataGenerator, self).__init__(\n featurewise_center=featurewise_center,\n samplewise_center=samplewise_center,\n featurewise_std_normalization=featurewise_std_normalization,\n samplewise_std_normalization=samplewise_std_normalization,\n zca_whitening=zca_whitening,\n zca_epsilon=zca_epsilon,\n rotation_range=rotation_range,\n width_shift_range=width_shift_range,\n height_shift_range=height_shift_range,\n brightness_range=brightness_range,\n shear_range=shear_range,\n zoom_range=zoom_range,\n channel_shift_range=channel_shift_range,\n fill_mode=fill_mode,\n cval=cval,\n horizontal_flip=horizontal_flip,\n vertical_flip=vertical_flip,\n rescale=rescale,\n preprocessing_function=preprocessing_function,\n data_format=data_format,\n validation_split=validation_split,\n dtype=dtype)\n\n if crop_size is not None:\n if not isinstance(crop_size, (tuple, list)):\n raise ValueError(\"Crop size must be a list or tuple of row/col dimensions\")\n\n self.crop_size = crop_size\n\n # tensorflow does not initialize interpolation_order, so we'll do it here\n self.interpolation_order = 
interpolation_order\n\n def flow(self,\n train_dict,\n batch_size=1,\n transforms=['outer-distance'],\n transforms_kwargs={},\n min_objects=3,\n shuffle=True,\n seed=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png'):\n \"\"\"Generates batches of augmented/normalized data with given arrays.\n\n Args:\n train_dict (dict): Consists of numpy arrays for ``X`` and ``y``.\n batch_size (int): Size of a batch. Defaults to 1.\n shuffle (bool): Whether to shuffle the data between epochs.\n Defaults to ``True``.\n seed (int): Random seed for data shuffling.\n min_objects (int): Minumum number of objects allowed per image\n save_to_dir (str): Optional directory where to save the pictures\n being yielded, in a viewable format. This is useful\n for visualizing the random transformations being\n applied, for debugging purposes.\n save_prefix (str): Prefix to use for saving sample\n images (if ``save_to_dir`` is set).\n save_format (str): Format to use for saving sample images\n (if ``save_to_dir`` is set).\n\n Returns:\n CroppingIterator: An ``Iterator`` yielding tuples of ``(x, y)``,\n where ``x`` is a numpy array of image data and ``y`` is list of\n numpy arrays of transformed masks of the same shape.\n \"\"\"\n return CroppingIterator(\n train_dict,\n self,\n batch_size=batch_size,\n transforms=transforms,\n transforms_kwargs=transforms_kwargs,\n shuffle=shuffle,\n min_objects=min_objects,\n seed=seed,\n data_format=self.data_format,\n save_to_dir=save_to_dir,\n save_prefix=save_prefix,\n save_format=save_format,\n crop_size=self.crop_size)\n\n def get_random_transform(self, img_shape, seed=None):\n transform_parameters = super(CroppingDataGenerator, self).get_random_transform(\n img_shape=img_shape, seed=seed)\n\n crop_indices = None\n if self.crop_size is not None:\n\n img_dims = img_shape[1:] if self.channel_axis == 1 else img_shape[:2]\n if img_dims == self.crop_size:\n # don't need to crop\n pass\n elif img_dims[0] == self.crop_size[0] or img_dims[1] == 
self.crop_size[1]:\n raise ValueError('crop_size must be a subset of both axes or exactly '\n ' equal to image dims')\n elif img_dims[0] < self.crop_size[0] or img_dims[1] < self.crop_size[1]:\n raise ValueError('Crop dimensions must be smaller than image dimensions')\n else:\n row_start = np.random.randint(0, img_dims[0] - self.crop_size[0])\n col_start = np.random.randint(0, img_dims[1] - self.crop_size[1])\n crop_indices = ([row_start, row_start + self.crop_size[0]],\n [col_start, col_start + self.crop_size[1]])\n\n transform_parameters['crop_indices'] = crop_indices\n\n return transform_parameters\n\n def apply_transform(self, x, transform_parameters):\n\n if transform_parameters['crop_indices'] is not None:\n row_indices, col_indices = transform_parameters['crop_indices']\n if self.channel_axis == 1:\n x = x[:, row_indices[0]:row_indices[1], col_indices[0]:col_indices[1]]\n else:\n x = x[row_indices[0]:row_indices[1], col_indices[0]:col_indices[1], :]\n\n x = super(CroppingDataGenerator, self).apply_transform(\n x=x, transform_parameters=transform_parameters)\n return x\n\n def fit(self, x, augment=False, rounds=1, seed=None):\n \"\"\"Fits the data generator to some sample data.\n This computes the internal data stats related to the\n data-dependent transformations, based on an array of sample data.\n Only required if `featurewise_center` or\n `featurewise_std_normalization` or `zca_whitening` are set to True.\n When `rescale` is set to a value, rescaling is applied to\n sample data before computing the internal data stats.\n\n Args:\n x: Sample data. 
Should have rank 4.\n In case of grayscale data,\n the channels axis should have value 1, in case\n of RGB data, it should have value 3, and in case\n of RGBA data, it should have value 4.\n augment: Boolean (default: False).\n Whether to fit on randomly augmented samples.\n rounds: Int (default: 1).\n If using data augmentation (`augment=True`),\n this is how many augmentation passes over the data to use.\n seed: Int (default: None). Random seed.\n \"\"\"\n x = np.asarray(x, dtype=self.dtype)\n if x.ndim != 4:\n raise ValueError('Input to `.fit()` should have rank 4. '\n 'Got array with shape: ' + str(x.shape))\n if x.shape[self.channel_axis] not in {1, 3, 4}:\n warnings.warn(\n 'Expected input to be images (as Numpy array) '\n 'following the data format convention \"' +\n self.data_format + '\" (channels on axis ' +\n str(self.channel_axis) + '), i.e. expected '\n 'either 1, 3 or 4 channels on axis ' +\n str(self.channel_axis) + '. '\n 'However, it was passed an array with shape ' +\n str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +\n ' channels).')\n\n if seed is not None:\n np.random.seed(seed)\n\n x = np.copy(x)\n if self.rescale:\n x *= self.rescale\n\n if augment:\n # adjust output shape to account for cropping in generator\n if self.crop_size is not None:\n if self.channel_axis == 1:\n x_crop_shape = [x.shape[1]] + list(self.crop_size)\n else:\n x_crop_shape = list(self.crop_size) + [x.shape[3]]\n\n ax = np.zeros(\n tuple([rounds * x.shape[0]] + x_crop_shape),\n dtype=self.dtype)\n else:\n ax = np.zeros(\n tuple([rounds * x.shape[0]] + list(x.shape)[1:]),\n dtype=self.dtype)\n\n for r in range(rounds):\n for i in range(x.shape[0]):\n ax[i + r * x.shape[0]] = self.random_transform(x[i])\n x = ax\n\n if self.featurewise_center:\n self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))\n broadcast_shape = [1, 1, 1]\n broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]\n self.mean = np.reshape(self.mean, broadcast_shape)\n x -= 
self.mean\n\n if self.featurewise_std_normalization:\n self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))\n broadcast_shape = [1, 1, 1]\n broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]\n self.std = np.reshape(self.std, broadcast_shape)\n x /= (self.std + 1e-6)\n\n if self.zca_whitening:\n if scipy is None:\n raise ImportError('Using zca_whitening requires SciPy. '\n 'Install SciPy.')\n flat_x = np.reshape(\n x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))\n sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]\n u, s, _ = linalg.svd(sigma)\n s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)\n self.principal_components = (u * s_inv).dot(u.T)\n",
"# Copyright 2016-2021 The Van Valen Lab at the California Institute of\n# Technology (Caltech), with support from the Paul Allen Family Foundation,\n# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.\n# All rights reserved.\n#\n# Licensed under a modified Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE\n#\n# The Work provided may be used for non-commercial academic purposes only.\n# For any other use of the Work, including commercial use, please contact:\n# [email protected]\n#\n# Neither the name of Caltech nor the names of its contributors may be used\n# to endorse or promote products derived from this software without specific\n# prior written permission.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for backbone_utils\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.framework import test_util as tf_test_util\nfrom tensorflow.python.platform import test\n\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.models import Model\nfrom tensorflow.python.keras import keras_parameterized\n\nfrom deepcell.utils import backbone_utils\n\n\nclass TestBackboneUtils(keras_parameterized.TestCase):\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n 
@parameterized.named_parameters(\n *tf_test_util.generate_combinations_with_testcase_name(\n data_format=[\n # 'channels_first',\n 'channels_last']))\n def test_get_featurenet_backbone(self, data_format):\n backbone = 'featurenet'\n input_shape = (256, 256, 3)\n inputs = Input(shape=input_shape)\n with self.cached_session():\n K.set_image_data_format(data_format)\n model, output_dict = backbone_utils.get_backbone(\n backbone, inputs, return_dict=True)\n assert isinstance(output_dict, dict)\n assert all(k.startswith('C') for k in output_dict)\n assert isinstance(model, Model)\n\n # No imagenet weights for featurenet backbone\n with self.assertRaises(ValueError):\n backbone_utils.get_backbone(backbone, inputs, use_imagenet=True)\n\n # @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters(\n *tf_test_util.generate_combinations_with_testcase_name(\n data_format=[\n # 'channels_first',\n 'channels_last']))\n def test_get_featurenet3d_backbone(self, data_format):\n backbone = 'featurenet3d'\n input_shape = (40, 256, 256, 3)\n inputs = Input(shape=input_shape)\n with self.cached_session():\n K.set_image_data_format(data_format)\n model, output_dict = backbone_utils.get_backbone(\n backbone, inputs, return_dict=True)\n assert isinstance(output_dict, dict)\n assert all(k.startswith('C') for k in output_dict)\n assert isinstance(model, Model)\n\n # No imagenet weights for featurenet backbone\n with self.assertRaises(ValueError):\n backbone_utils.get_backbone(backbone, inputs, use_imagenet=True)\n\n # @keras_parameterized.run_with_all_model_types\n # @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters(\n *tf_test_util.generate_combinations_with_testcase_name(\n backbone=[\n 'resnet50',\n 'resnet101',\n 'resnet152',\n 'resnet50v2',\n 'resnet101v2',\n 'resnet152v2',\n # 'resnext50',\n # 'resnext101',\n 'vgg16',\n 'vgg19',\n 'densenet121',\n 'densenet169',\n 'densenet201',\n 'mobilenet',\n 'mobilenetv2',\n 'efficientnetb0',\n 
'efficientnetb1',\n 'efficientnetb2',\n 'efficientnetb3',\n 'efficientnetb4',\n 'efficientnetb5',\n 'efficientnetb6',\n 'efficientnetb7',\n 'nasnet_large',\n 'nasnet_mobile']))\n def test_get_backbone(self, backbone):\n with self.cached_session():\n K.set_image_data_format('channels_last')\n inputs = Input(shape=(256, 256, 3))\n model, output_dict = backbone_utils.get_backbone(\n backbone, inputs, return_dict=True)\n assert isinstance(output_dict, dict)\n assert all(k.startswith('C') for k in output_dict)\n assert isinstance(model, Model)\n\n def test_invalid_backbone(self):\n inputs = Input(shape=(4, 2, 3))\n with self.assertRaises(ValueError):\n backbone_utils.get_backbone('bad', inputs, return_dict=True)\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2016-2021 The Van Valen Lab at the California Institute of\n# Technology (Caltech), with support from the Paul Allen Family Foundation,\n# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.\n# All rights reserved.\n#\n# Licensed under a modified Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE\n#\n# The Work provided may be used for non-commercial academic purposes only.\n# For any other use of the Work, including commercial use, please contact:\n# [email protected]\n#\n# Neither the name of Caltech nor the names of its contributors may be used\n# to endorse or promote products derived from this software without specific\n# prior written permission.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Layers to encode location data\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.layers import Layer\nfrom tensorflow.python.keras.utils import conv_utils\nfrom tensorflow.python.framework import tensor_shape\n\n\nlogger = tf.get_logger()\n\n\nclass Location2D(Layer):\n \"\"\"Location Layer for 2D cartesian coordinate locations.\n\n Args:\n data_format (str): A string, one of ``channels_last`` (default)\n or ``channels_first``. The ordering of the dimensions in the\n inputs. 
``channels_last`` corresponds to inputs with shape\n ``(batch, height, width, channels)`` while ``channels_first``\n corresponds to inputs with shape\n ``(batch, channels, height, width)``.\n \"\"\"\n def __init__(self, data_format=None, **kwargs):\n in_shape = kwargs.pop('in_shape', None)\n if in_shape is not None:\n logger.warn('in_shape (from deepcell.layerse.location) is '\n 'deprecated and will be removed in a future version.')\n super(Location2D, self).__init__(**kwargs)\n self.data_format = conv_utils.normalize_data_format(data_format)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n channel_axis = 1 if self.data_format == 'channels_first' else 3\n input_shape[channel_axis] = 2\n return tensor_shape.TensorShape(input_shape)\n\n def call(self, inputs):\n input_shape = K.shape(inputs)\n if self.data_format == 'channels_first':\n x = K.arange(0, input_shape[2], dtype=inputs.dtype)\n y = K.arange(0, input_shape[3], dtype=inputs.dtype)\n else:\n x = K.arange(0, input_shape[1], dtype=inputs.dtype)\n y = K.arange(0, input_shape[2], dtype=inputs.dtype)\n\n x = x / K.max(x)\n y = y / K.max(y)\n\n loc_x, loc_y = tf.meshgrid(x, y, indexing='ij')\n\n if self.data_format == 'channels_first':\n loc = K.stack([loc_x, loc_y], axis=0)\n else:\n loc = K.stack([loc_x, loc_y], axis=-1)\n\n location = K.expand_dims(loc, axis=0)\n if self.data_format == 'channels_first':\n location = K.permute_dimensions(location, pattern=[0, 2, 3, 1])\n\n location = tf.tile(location, [input_shape[0], 1, 1, 1])\n\n if self.data_format == 'channels_first':\n location = K.permute_dimensions(location, pattern=[0, 3, 1, 2])\n\n return location\n\n def get_config(self):\n config = {\n 'data_format': self.data_format\n }\n base_config = super(Location2D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Location3D(Layer):\n \"\"\"Location Layer for 3D cartesian coordinate locations.\n\n Args:\n 
data_format (str): A string, one of ``channels_last`` (default)\n or ``channels_first``. The ordering of the dimensions in the\n inputs. ``channels_last`` corresponds to inputs with shape\n ``(batch, height, width, channels)`` while ``channels_first``\n corresponds to inputs with shape\n ``(batch, channels, height, width)``.\n \"\"\"\n def __init__(self, data_format=None, **kwargs):\n in_shape = kwargs.pop('in_shape', None)\n if in_shape is not None:\n logger.warn('in_shape (from deepcell.layerse.location) is '\n 'deprecated and will be removed in a future version.')\n super(Location3D, self).__init__(**kwargs)\n self.data_format = conv_utils.normalize_data_format(data_format)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n channel_axis = 1 if self.data_format == 'channels_first' else 4\n input_shape[channel_axis] = 3\n return tensor_shape.TensorShape(input_shape)\n\n def call(self, inputs):\n input_shape = K.shape(inputs)\n\n if self.data_format == 'channels_first':\n z = K.arange(0, input_shape[2], dtype=inputs.dtype)\n x = K.arange(0, input_shape[3], dtype=inputs.dtype)\n y = K.arange(0, input_shape[4], dtype=inputs.dtype)\n else:\n z = K.arange(0, input_shape[1], dtype=inputs.dtype)\n x = K.arange(0, input_shape[2], dtype=inputs.dtype)\n y = K.arange(0, input_shape[3], dtype=inputs.dtype)\n\n x = x / K.max(x)\n y = y / K.max(y)\n z = z / K.max(z)\n\n loc_z, loc_x, loc_y = tf.meshgrid(z, x, y, indexing='ij')\n\n if self.data_format == 'channels_first':\n loc = K.stack([loc_z, loc_x, loc_y], axis=0)\n else:\n loc = K.stack([loc_z, loc_x, loc_y], axis=-1)\n\n location = K.expand_dims(loc, axis=0)\n\n if self.data_format == 'channels_first':\n location = K.permute_dimensions(location, pattern=[0, 2, 3, 4, 1])\n\n location = tf.tile(location, [input_shape[0], 1, 1, 1, 1])\n\n if self.data_format == 'channels_first':\n location = K.permute_dimensions(location, pattern=[0, 4, 1, 2, 3])\n\n return 
location\n\n def get_config(self):\n config = {\n 'data_format': self.data_format\n }\n base_config = super(Location3D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n"
] | [
[
"numpy.pad",
"tensorflow.keras.backend.image_data_format"
],
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.keras.constraints.serialize",
"tensorflow.keras.regularizers.serialize",
"tensorflow.keras.backend.ones",
"tensorflow.nn.depthwise_conv2d",
"tensorflow.keras.backend.permute_dimensions",
"tensorflow.keras.backend.int_shape",
"tensorflow.keras.backend.max",
"tensorflow.keras.backend.square",
"tensorflow.keras.layers.InputSpec",
"tensorflow.keras.initializers.get",
"tensorflow.keras.backend.sqrt",
"tensorflow.keras.initializers.serialize",
"tensorflow.nn.conv3d",
"tensorflow.keras.constraints.get",
"tensorflow.keras.activations.serialize",
"tensorflow.keras.regularizers.get",
"tensorflow.python.keras.utils.conv_utils.normalize_data_format",
"tensorflow.keras.backend.std",
"tensorflow.keras.backend.mean",
"tensorflow.keras.activations.get",
"tensorflow.keras.backend.epsilon"
],
[
"numpy.random.rand"
],
[
"numpy.random.random",
"numpy.std",
"numpy.mean",
"numpy.sum",
"numpy.empty"
],
[
"numpy.dot",
"scipy.linalg.svd",
"numpy.expand_dims",
"numpy.sqrt",
"numpy.random.seed",
"numpy.asarray",
"numpy.reshape",
"numpy.copy",
"numpy.std",
"numpy.mean",
"numpy.argmax",
"numpy.zeros",
"tensorflow.python.keras.preprocessing.image.array_to_img",
"numpy.random.randint"
],
[
"tensorflow.python.framework.test_util.generate_combinations_with_testcase_name",
"tensorflow.keras.backend.set_image_data_format",
"tensorflow.python.platform.test.main",
"tensorflow.keras.layers.Input"
],
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.keras.backend.permute_dimensions",
"tensorflow.keras.backend.max",
"tensorflow.get_logger",
"tensorflow.keras.backend.shape",
"tensorflow.keras.backend.expand_dims",
"tensorflow.python.keras.utils.conv_utils.normalize_data_format",
"tensorflow.keras.backend.arange",
"tensorflow.meshgrid",
"tensorflow.keras.backend.stack",
"tensorflow.tile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"1.4",
"2.2",
"1.13",
"2.3",
"2.4",
"1.5",
"1.7",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
wumpus/poliastro | [
"6ef314f3b80528018ce489fd51d26db106daac91",
"6ef314f3b80528018ce489fd51d26db106daac91",
"6ef314f3b80528018ce489fd51d26db106daac91"
] | [
"src/poliastro/twobody/angles.py",
"tests/tests_twobody/test_perturbations.py",
"tests/tests_plotting/test_static.py"
] | [
"\"\"\"Angles and anomalies.\n\n\"\"\"\nimport numpy as np\nfrom astropy import coordinates, units as u\n\nfrom poliastro import constants\nfrom poliastro.core.angles import (\n D_to_M as D_to_M_fast,\n D_to_nu as D_to_nu_fast,\n E_to_M as E_to_M_fast,\n E_to_nu as E_to_nu_fast,\n F_to_M as F_to_M_fast,\n F_to_nu as F_to_nu_fast,\n M_to_D as M_to_D_fast,\n M_to_E as M_to_E_fast,\n M_to_F as M_to_F_fast,\n fp_angle as fp_angle_fast,\n nu_to_D as nu_to_D_fast,\n nu_to_E as nu_to_E_fast,\n nu_to_F as nu_to_F_fast,\n)\n\n\[email protected]_input(D=u.rad)\ndef D_to_nu(D):\n \"\"\"True anomaly from parabolic eccentric anomaly.\n\n Parameters\n ----------\n D : ~astropy.units.Quantity\n Eccentric anomaly.\n\n Returns\n -------\n nu : ~astropy.units.Quantity\n True anomaly.\n\n Notes\n -----\n Taken from Farnocchia, Davide, Davide Bracali Cioci, and Andrea Milani.\n \"Robust resolution of Kepler’s equation in all eccentricity regimes.\"\n Celestial Mechanics and Dynamical Astronomy 116, no. 1 (2013): 21-34.\n \"\"\"\n return (D_to_nu_fast(D.to(u.rad).value) * u.rad).to(D.unit)\n\n\[email protected]_input(nu=u.rad)\ndef nu_to_D(nu):\n \"\"\"Parabolic eccentric anomaly from true anomaly.\n\n Parameters\n ----------\n nu : ~astropy.units.Quantity\n True anomaly.\n\n Returns\n -------\n D : ~astropy.units.Quantity\n Hyperbolic eccentric anomaly.\n\n Notes\n -----\n Taken from Farnocchia, Davide, Davide Bracali Cioci, and Andrea Milani.\n \"Robust resolution of Kepler’s equation in all eccentricity regimes.\"\n Celestial Mechanics and Dynamical Astronomy 116, no. 1 (2013): 21-34.\n \"\"\"\n return (nu_to_D_fast(nu.to(u.rad).value) * u.rad).to(nu.unit)\n\n\[email protected]_input(nu=u.rad, ecc=u.one)\ndef nu_to_E(nu, ecc):\n \"\"\"Eccentric anomaly from true anomaly.\n\n .. 
versionadded:: 0.4.0\n\n Parameters\n ----------\n nu : ~astropy.units.Quantity\n True anomaly.\n ecc : ~astropy.units.Quantity\n Eccentricity.\n\n Returns\n -------\n E : ~astropy.units.Quantity\n Eccentric anomaly.\n\n \"\"\"\n return (nu_to_E_fast(nu.to(u.rad).value, ecc.value) * u.rad).to(nu.unit)\n\n\[email protected]_input(nu=u.rad, ecc=u.one)\ndef nu_to_F(nu, ecc):\n \"\"\"Hyperbolic eccentric anomaly from true anomaly.\n\n Parameters\n ----------\n nu : ~astropy.units.Quantity\n True anomaly.\n ecc : ~astropy.units.Quantity\n Eccentricity (>1).\n\n Returns\n -------\n F : ~astropy.units.Quantity\n Hyperbolic eccentric anomaly.\n\n Note\n -----\n Taken from Curtis, H. (2013). *Orbital mechanics for engineering students*. 167\n\n \"\"\"\n return (nu_to_F_fast(nu.to(u.rad).value, ecc.value) * u.rad).to(nu.unit)\n\n\[email protected]_input(E=u.rad, ecc=u.one)\ndef E_to_nu(E, ecc):\n \"\"\"True anomaly from eccentric anomaly.\n\n .. versionadded:: 0.4.0\n\n Parameters\n ----------\n E : ~astropy.units.Quantity\n Eccentric anomaly.\n ecc : ~astropy.units.Quantity\n Eccentricity.\n\n Returns\n -------\n nu : ~astropy.units.Quantity\n True anomaly.\n\n \"\"\"\n return (E_to_nu_fast(E.to(u.rad).value, ecc.value) * u.rad).to(E.unit)\n\n\[email protected]_input(F=u.rad, ecc=u.one)\ndef F_to_nu(F, ecc):\n \"\"\"True anomaly from hyperbolic eccentric anomaly.\n\n Parameters\n ----------\n F : ~astropy.units.Quantity\n Hyperbolic eccentric anomaly.\n ecc : ~astropy.units.Quantity\n Eccentricity (>1).\n\n Returns\n -------\n nu : ~astropy.units.Quantity\n True anomaly.\n\n \"\"\"\n return (F_to_nu_fast(F.to(u.rad).value, ecc.value) * u.rad).to(F.unit)\n\n\[email protected]_input(M=u.rad, ecc=u.one)\ndef M_to_E(M, ecc):\n \"\"\"Eccentric anomaly from mean anomaly.\n\n .. 
versionadded:: 0.4.0\n\n Parameters\n ----------\n M : ~astropy.units.Quantity\n Mean anomaly.\n ecc : ~astropy.units.Quantity\n Eccentricity.\n\n Returns\n -------\n E : ~astropy.units.Quantity\n Eccentric anomaly.\n\n \"\"\"\n return (M_to_E_fast(M.to(u.rad).value, ecc.value) * u.rad).to(M.unit)\n\n\[email protected]_input(M=u.rad, ecc=u.one)\ndef M_to_F(M, ecc):\n \"\"\"Hyperbolic eccentric anomaly from mean anomaly.\n\n Parameters\n ----------\n M : ~astropy.units.Quantity\n Mean anomaly.\n ecc : ~astropy.units.Quantity\n Eccentricity (>1).\n\n Returns\n -------\n F : ~astropy.units.Quantity\n Hyperbolic eccentric anomaly.\n\n \"\"\"\n return (M_to_F_fast(M.to(u.rad).value, ecc.value) * u.rad).to(M.unit)\n\n\[email protected]_input(M=u.rad, ecc=u.one)\ndef M_to_D(M):\n \"\"\"Parabolic eccentric anomaly from mean anomaly.\n\n Parameters\n ----------\n M : ~astropy.units.Quantity\n Mean anomaly.\n\n Returns\n -------\n D : ~astropy.units.Quantity\n Parabolic eccentric anomaly.\n\n \"\"\"\n return (M_to_D_fast(M.to(u.rad).value) * u.rad).to(M.unit)\n\n\[email protected]_input(E=u.rad, ecc=u.one)\ndef E_to_M(E, ecc):\n \"\"\"Mean anomaly from eccentric anomaly.\n\n .. 
versionadded:: 0.4.0\n\n Parameters\n ----------\n E : ~astropy.units.Quantity\n Eccentric anomaly.\n ecc : ~astropy.units.Quantity\n Eccentricity.\n\n Returns\n -------\n M : ~astropy.units.Quantity\n Mean anomaly.\n\n \"\"\"\n return (E_to_M_fast(E.to(u.rad).value, ecc.value) * u.rad).to(E.unit)\n\n\[email protected]_input(F=u.rad, ecc=u.one)\ndef F_to_M(F, ecc):\n \"\"\"Mean anomaly from eccentric anomaly.\n\n Parameters\n ----------\n F : ~astropy.units.Quantity\n Hyperbolic eccentric anomaly.\n ecc : ~astropy.units.Quantity\n Eccentricity (>1).\n\n Returns\n -------\n M : ~astropy.units.Quantity\n Mean anomaly.\n\n \"\"\"\n return (F_to_M_fast(F.to(u.rad).value, ecc.value) * u.rad).to(F.unit)\n\n\[email protected]_input(D=u.rad, ecc=u.one)\ndef D_to_M(D):\n \"\"\"Mean anomaly from eccentric anomaly.\n\n Parameters\n ----------\n D : ~astropy.units.Quantity\n Parabolic eccentric anomaly.\n\n Returns\n -------\n M : ~astropy.units.Quantity\n Mean anomaly.\n\n \"\"\"\n return (D_to_M_fast(D.to(u.rad).value) * u.rad).to(D.unit)\n\n\[email protected]_input(nu=u.rad, ecc=u.one)\ndef fp_angle(nu, ecc):\n \"\"\"Flight path angle.\n\n .. versionadded:: 0.4.0\n\n Parameters\n ----------\n nu : ~astropy.units.Quantity\n True anomaly.\n ecc : ~astropy.units.Quantity\n Eccentricity.\n\n Note\n -----\n Algorithm taken from Vallado 2007, pp. 
113.\n\n \"\"\"\n return (fp_angle_fast(nu.to(u.rad).value, ecc.value) * u.rad).to(nu.unit)\n\n\[email protected]_input(ltan=u.hourangle)\ndef raan_from_ltan(epoch, ltan=12.0):\n \"\"\"RAAN angle from LTAN for SSO around the earth\n\n Parameters\n ----------\n epoch : ~astropy.time.Time\n Value of time to calculate the RAAN for\n ltan: ~astropy.units.Quantity\n Decimal hour between 0 and 24\n\n Returns\n -------\n RAAN: ~astropy.units.Quantity\n Right ascension of the ascending node angle in GCRS\n\n Note\n ----\n Calculations of the sun mean longitude and equation of time\n follow \"Fundamentals of Astrodynamics and Applications\"\n Fourth edition by Vallado, David A.\n \"\"\"\n\n T_UT1 = ((epoch.ut1 - constants.J2000).value / 36525.0) * u.deg\n T_TDB = ((epoch.tdb - constants.J2000).value / 36525.0) * u.deg\n\n # Apparent sun position\n sun_position = coordinates.get_sun(epoch)\n\n # Calculate the sun apparent local time\n salt = sun_position.ra + 12 * u.hourangle\n\n # Use the equation of time to calculate the mean sun local time (fictional sun without anomalies)\n\n # sun mean anomaly\n M_sun = 357.5291092 * u.deg + 35999.05034 * T_TDB\n\n # sun mean longitude\n l_sun = 280.460 * u.deg + 36000.771 * T_UT1\n l_ecliptic_part2 = 1.914666471 * u.deg * np.sin(\n M_sun\n ) + 0.019994643 * u.deg * np.sin(2 * M_sun)\n l_ecliptic = l_sun + l_ecliptic_part2\n\n eq_time = (\n -l_ecliptic_part2\n + 2.466 * u.deg * np.sin(2 * l_ecliptic)\n - 0.0053 * u.deg * np.sin(4 * l_ecliptic)\n )\n\n # Calculate sun mean local time\n\n smlt = salt + eq_time\n\n # Desired angle between sun and ascending node\n alpha = (coordinates.Angle(ltan).wrap_at(24 * u.hourangle)).to(u.rad)\n\n # Use the mean sun local time calculate needed RAAN for given LTAN\n raan = smlt + alpha\n return raan\n",
"import functools\n\nimport numpy as np\nimport pytest\nfrom astropy import units as u\nfrom astropy.coordinates import Angle, solar_system_ephemeris\nfrom astropy.tests.helper import assert_quantity_allclose\nfrom astropy.time import Time\nfrom numpy.linalg import norm\n\nfrom poliastro.atmosphere import COESA76\nfrom poliastro.bodies import Earth, Moon, Sun\nfrom poliastro.constants import H0_earth, Wdivc_sun, rho0_earth\nfrom poliastro.core.elements import rv2coe\nfrom poliastro.core.perturbations import (\n J2_perturbation,\n J3_perturbation,\n atmospheric_drag_exponential,\n atmospheric_drag_model,\n radiation_pressure,\n third_body,\n)\nfrom poliastro.ephem import build_ephem_interpolant\nfrom poliastro.twobody import Orbit\nfrom poliastro.twobody.events import LithobrakeEvent\nfrom poliastro.twobody.propagation import cowell\n\n\[email protected]\ndef test_J2_propagation_Earth():\n # from Curtis example 12.2:\n r0 = np.array([-2384.46, 5729.01, 3050.46]) # km\n v0 = np.array([-7.36138, -2.98997, 1.64354]) # km/s\n\n orbit = Orbit.from_vectors(Earth, r0 * u.km, v0 * u.km / u.s)\n\n tofs = [48.0] * u.h\n rr, vv = cowell(\n Earth.k,\n orbit.r,\n orbit.v,\n tofs,\n ad=J2_perturbation,\n J2=Earth.J2.value,\n R=Earth.R.to(u.km).value,\n )\n\n k = Earth.k.to(u.km ** 3 / u.s ** 2).value\n\n _, _, _, raan0, argp0, _ = rv2coe(k, r0, v0)\n _, _, _, raan, argp, _ = rv2coe(k, rr[0].to(u.km).value, vv[0].to(u.km / u.s).value)\n\n raan_variation_rate = (raan - raan0) / tofs[0].to(u.s).value # type: ignore\n argp_variation_rate = (argp - argp0) / tofs[0].to(u.s).value # type: ignore\n\n raan_variation_rate = (raan_variation_rate * u.rad / u.s).to(u.deg / u.h)\n argp_variation_rate = (argp_variation_rate * u.rad / u.s).to(u.deg / u.h)\n\n assert_quantity_allclose(raan_variation_rate, -0.172 * u.deg / u.h, rtol=1e-2)\n assert_quantity_allclose(argp_variation_rate, 0.282 * u.deg / u.h, rtol=1e-2)\n\n\[email protected]\[email protected](\n \"test_params\",\n [\n {\n \"inc\": 
0.2618 * u.rad,\n \"da_max\": 43.2 * u.m,\n \"dinc_max\": 3.411e-5,\n \"decc_max\": 3.549e-5,\n },\n {\n \"inc\": 0.7854 * u.rad,\n \"da_max\": 135.8 * u.m,\n \"dinc_max\": 2.751e-5,\n \"decc_max\": 9.243e-5,\n },\n {\n \"inc\": 1.3090 * u.rad,\n \"da_max\": 58.7 * u.m,\n \"dinc_max\": 0.79e-5,\n \"decc_max\": 10.02e-5,\n },\n {\n \"inc\": 1.5708 * u.rad,\n \"da_max\": 96.1 * u.m,\n \"dinc_max\": 0.0,\n \"decc_max\": 17.04e-5,\n },\n ],\n)\ndef test_J3_propagation_Earth(test_params):\n # Nai-ming Qi, Qilong Sun, Yong Yang, (2018) \"Effect of J3 perturbation on satellite position in LEO\",\n # Aircraft Engineering and Aerospace Technology, Vol. 90 Issue: 1,\n # pp.74-86, https://doi.org/10.1108/AEAT-03-2015-0092\n a_ini = 8970.667 * u.km\n ecc_ini = 0.25 * u.one\n raan_ini = 1.047 * u.rad\n nu_ini = 0.0 * u.rad\n argp_ini = 1.0 * u.rad\n inc_ini = test_params[\"inc\"]\n\n k = Earth.k.to(u.km ** 3 / u.s ** 2).value\n\n orbit = Orbit.from_classical(\n Earth, a_ini, ecc_ini, inc_ini, raan_ini, argp_ini, nu_ini\n )\n\n tofs = np.linspace(0, 10.0 * u.day, 1000)\n r_J2, v_J2 = cowell(\n Earth.k,\n orbit.r,\n orbit.v,\n tofs,\n ad=J2_perturbation,\n J2=Earth.J2.value,\n R=Earth.R.to(u.km).value,\n rtol=1e-8,\n )\n\n def a_J2J3(t0, u_, k_):\n j2 = J2_perturbation(t0, u_, k_, J2=Earth.J2.value, R=Earth.R.to(u.km).value)\n j3 = J3_perturbation(t0, u_, k_, J3=Earth.J3.value, R=Earth.R.to(u.km).value)\n return j2 + j3\n\n r_J3, v_J3 = cowell(Earth.k, orbit.r, orbit.v, tofs, ad=a_J2J3, rtol=1e-8)\n\n a_values_J2 = np.array(\n [\n rv2coe(k, ri, vi)[0] / (1.0 - rv2coe(k, ri, vi)[1] ** 2)\n for ri, vi in zip(r_J2.to(u.km).value, v_J2.to(u.km / u.s).value)\n ]\n )\n a_values_J3 = np.array(\n [\n rv2coe(k, ri, vi)[0] / (1.0 - rv2coe(k, ri, vi)[1] ** 2)\n for ri, vi in zip(r_J3.to(u.km).value, v_J3.to(u.km / u.s).value)\n ]\n )\n da_max = np.max(np.abs(a_values_J2 - a_values_J3))\n\n ecc_values_J2 = np.array(\n [\n rv2coe(k, ri, vi)[1]\n for ri, vi in zip(r_J2.to(u.km).value, 
v_J2.to(u.km / u.s).value)\n ]\n )\n ecc_values_J3 = np.array(\n [\n rv2coe(k, ri, vi)[1]\n for ri, vi in zip(r_J3.to(u.km).value, v_J3.to(u.km / u.s).value)\n ]\n )\n decc_max = np.max(np.abs(ecc_values_J2 - ecc_values_J3))\n\n inc_values_J2 = np.array(\n [\n rv2coe(k, ri, vi)[2]\n for ri, vi in zip(r_J2.to(u.km).value, v_J2.to(u.km / u.s).value)\n ]\n )\n inc_values_J3 = np.array(\n [\n rv2coe(k, ri, vi)[2]\n for ri, vi in zip(r_J3.to(u.km).value, v_J3.to(u.km / u.s).value)\n ]\n )\n dinc_max = np.max(np.abs(inc_values_J2 - inc_values_J3))\n\n assert_quantity_allclose(dinc_max, test_params[\"dinc_max\"], rtol=1e-1, atol=1e-7)\n assert_quantity_allclose(decc_max, test_params[\"decc_max\"], rtol=1e-1, atol=1e-7)\n try:\n assert_quantity_allclose(da_max * u.km, test_params[\"da_max\"])\n except AssertionError:\n pytest.xfail(\"this assertion disagrees with the paper\")\n\n\[email protected]\ndef test_atmospheric_drag_exponential():\n # http://farside.ph.utexas.edu/teaching/celestial/Celestialhtml/node94.html#sair (10.148)\n # given the expression for \\dot{r} / r, aproximate \\Delta r \\approx F_r * \\Delta t\n\n R = Earth.R.to(u.km).value\n k = Earth.k.to(u.km ** 3 / u.s ** 2).value\n\n # parameters of a circular orbit with h = 250 km (any value would do, but not too small)\n orbit = Orbit.circular(Earth, 250 * u.km)\n r0, _ = orbit.rv()\n r0 = r0.to(u.km).value\n\n # parameters of a body\n C_D = 2.2 # dimentionless (any value would do)\n A_over_m = ((np.pi / 4.0) * (u.m ** 2) / (100 * u.kg)).to_value(\n u.km ** 2 / u.kg\n ) # km^2/kg\n B = C_D * A_over_m\n\n # parameters of the atmosphere\n rho0 = rho0_earth.to(u.kg / u.km ** 3).value # kg/km^3\n H0 = H0_earth.to(u.km).value # km\n tof = 100000 # s\n\n dr_expected = -B * rho0 * np.exp(-(norm(r0) - R) / H0) * np.sqrt(k * norm(r0)) * tof\n # assuming the atmospheric decay during tof is small,\n # dr_expected = F_r * tof (Newton's integration formula), where\n # F_r = -B rho(r) |r|^2 sqrt(k / |r|^3) = -B rho(r) 
sqrt(k |r|)\n\n rr, _ = cowell(\n Earth.k,\n orbit.r,\n orbit.v,\n [tof] * u.s,\n ad=atmospheric_drag_exponential,\n R=R,\n C_D=C_D,\n A_over_m=A_over_m,\n H0=H0,\n rho0=rho0,\n )\n\n assert_quantity_allclose(\n norm(rr[0].to(u.km).value) - norm(r0), dr_expected, rtol=1e-2\n )\n\n\[email protected]\ndef test_atmospheric_demise():\n # Test an orbital decay that hits Earth. No analytic solution.\n R = Earth.R.to(u.km).value\n\n orbit = Orbit.circular(Earth, 230 * u.km)\n t_decay = 48.2179 * u.d # not an analytic value\n\n # parameters of a body\n C_D = 2.2 # dimentionless (any value would do)\n A_over_m = ((np.pi / 4.0) * (u.m ** 2) / (100 * u.kg)).to_value(\n u.km ** 2 / u.kg\n ) # km^2/kg\n\n # parameters of the atmosphere\n rho0 = rho0_earth.to(u.kg / u.km ** 3).value # kg/km^3\n H0 = H0_earth.to(u.km).value # km\n\n tofs = [365] * u.d # actually hits the ground a bit after day 48\n\n lithobrake_event = LithobrakeEvent(R)\n events = [lithobrake_event]\n\n rr, _ = cowell(\n Earth.k,\n orbit.r,\n orbit.v,\n tofs,\n ad=atmospheric_drag_exponential,\n R=R,\n C_D=C_D,\n A_over_m=A_over_m,\n H0=H0,\n rho0=rho0,\n events=events,\n )\n\n assert_quantity_allclose(norm(rr[0].to(u.km).value), R, atol=1) # below 1km\n\n assert_quantity_allclose(lithobrake_event.last_t, t_decay, rtol=1e-2)\n\n # make sure having the event not firing is ok\n tofs = [1] * u.d\n lithobrake_event = LithobrakeEvent(R)\n events = [lithobrake_event]\n\n rr, _ = cowell(\n Earth.k,\n orbit.r,\n orbit.v,\n tofs,\n ad=atmospheric_drag_exponential,\n R=R,\n C_D=C_D,\n A_over_m=A_over_m,\n H0=H0,\n rho0=rho0,\n events=events,\n )\n\n assert lithobrake_event.last_t == tofs[-1]\n\n\[email protected]\ndef test_atmospheric_demise_coesa76():\n # Test an orbital decay that hits Earth. 
No analytic solution.\n R = Earth.R.to(u.km).value\n\n orbit = Orbit.circular(Earth, 250 * u.km)\n t_decay = 7.17 * u.d\n\n # parameters of a body\n C_D = 2.2 # dimentionless (any value would do)\n A_over_m = ((np.pi / 4.0) * (u.m ** 2) / (100 * u.kg)).to_value(\n u.km ** 2 / u.kg\n ) # km^2/kg\n\n tofs = [365] * u.d\n\n lithobrake_event = LithobrakeEvent(R)\n events = [lithobrake_event]\n\n coesa76 = COESA76()\n\n rr, _ = cowell(\n Earth.k,\n orbit.r,\n orbit.v,\n tofs,\n ad=atmospheric_drag_model,\n R=R,\n C_D=C_D,\n A_over_m=A_over_m,\n model=coesa76,\n events=events,\n )\n\n assert_quantity_allclose(norm(rr[0].to(u.km).value), R, atol=1) # below 1km\n\n assert_quantity_allclose(lithobrake_event.last_t, t_decay, rtol=1e-2)\n\n\[email protected]\ndef test_cowell_works_with_small_perturbations():\n r0 = [-2384.46, 5729.01, 3050.46] * u.km\n v0 = [-7.36138, -2.98997, 1.64354] * u.km / u.s\n\n r_expected = [\n 13179.39566663877121754922,\n -13026.25123408228319021873,\n -9852.66213692844394245185,\n ] * u.km\n v_expected = (\n [2.78170542314378943516, 3.21596786944631274352, 0.16327165546278937791]\n * u.km\n / u.s\n )\n\n initial = Orbit.from_vectors(Earth, r0, v0)\n\n def accel(t0, state, k):\n v_vec = state[3:]\n norm_v = (v_vec * v_vec).sum() ** 0.5\n return 1e-5 * v_vec / norm_v\n\n final = initial.propagate(3 * u.day, method=cowell, ad=accel)\n\n assert_quantity_allclose(final.r, r_expected)\n assert_quantity_allclose(final.v, v_expected)\n\n\[email protected]\ndef test_cowell_converges_with_small_perturbations():\n r0 = [-2384.46, 5729.01, 3050.46] * u.km\n v0 = [-7.36138, -2.98997, 1.64354] * u.km / u.s\n\n initial = Orbit.from_vectors(Earth, r0, v0)\n\n def accel(t0, state, k):\n v_vec = state[3:]\n norm_v = (v_vec * v_vec).sum() ** 0.5\n return 0.0 * v_vec / norm_v\n\n final = initial.propagate(initial.period, method=cowell, ad=accel)\n\n assert_quantity_allclose(final.r, initial.r)\n assert_quantity_allclose(final.v, initial.v)\n\n\nmoon_heo = {\n 
\"body\": Moon,\n \"tof\": 60 * u.day,\n \"raan\": -0.06 * u.deg,\n \"argp\": 0.15 * u.deg,\n \"inc\": 0.08 * u.deg,\n \"orbit\": [\n 26553.4 * u.km,\n 0.741 * u.one,\n 63.4 * u.deg,\n 0.0 * u.deg,\n -10.12921 * u.deg,\n 0.0 * u.rad,\n ],\n \"period\": 28 * u.day,\n}\n\nmoon_leo = {\n \"body\": Moon,\n \"tof\": 60 * u.day,\n \"raan\": -2.18 * 1e-4 * u.deg,\n \"argp\": 15.0 * 1e-3 * u.deg,\n \"inc\": 6.0 * 1e-4 * u.deg,\n \"orbit\": [\n 6678.126 * u.km,\n 0.01 * u.one,\n 28.5 * u.deg,\n 0.0 * u.deg,\n 0.0 * u.deg,\n 0.0 * u.rad,\n ],\n \"period\": 28 * u.day,\n}\n\nmoon_geo = {\n \"body\": Moon,\n \"tof\": 60 * u.day,\n \"raan\": 6.0 * u.deg,\n \"argp\": -11.0 * u.deg,\n \"inc\": 6.5 * 1e-3 * u.deg,\n \"orbit\": [\n 42164.0 * u.km,\n 0.0001 * u.one,\n 1 * u.deg,\n 0.0 * u.deg,\n 0.0 * u.deg,\n 0.0 * u.rad,\n ],\n \"period\": 28 * u.day,\n}\n\nsun_heo = {\n \"body\": Sun,\n \"tof\": 200 * u.day,\n \"raan\": -0.10 * u.deg,\n \"argp\": 0.2 * u.deg,\n \"inc\": 0.1 * u.deg,\n \"orbit\": [\n 26553.4 * u.km,\n 0.741 * u.one,\n 63.4 * u.deg,\n 0.0 * u.deg,\n -10.12921 * u.deg,\n 0.0 * u.rad,\n ],\n \"period\": 365 * u.day,\n}\n\nsun_leo = {\n \"body\": Sun,\n \"tof\": 200 * u.day,\n \"raan\": -6.0 * 1e-3 * u.deg,\n \"argp\": 0.02 * u.deg,\n \"inc\": -1.0 * 1e-4 * u.deg,\n \"orbit\": [\n 6678.126 * u.km,\n 0.01 * u.one,\n 28.5 * u.deg,\n 0.0 * u.deg,\n 0.0 * u.deg,\n 0.0 * u.rad,\n ],\n \"period\": 365 * u.day,\n}\n\nsun_geo = {\n \"body\": Sun,\n \"tof\": 200 * u.day,\n \"raan\": 8.7 * u.deg,\n \"argp\": -5.5 * u.deg,\n \"inc\": 5.5e-3 * u.deg,\n \"orbit\": [\n 42164.0 * u.km,\n 0.0001 * u.one,\n 1 * u.deg,\n 0.0 * u.deg,\n 0.0 * u.deg,\n 0.0 * u.rad,\n ],\n \"period\": 365 * u.day,\n}\n\n\[email protected]\[email protected](\n \"test_params\",\n [\n moon_heo,\n moon_geo,\n moon_leo,\n sun_heo,\n sun_geo,\n pytest.param(\n sun_leo,\n marks=pytest.mark.skip(\n reason=\"here agreement required rtol=1e-10, too long for 200 days\"\n ),\n ),\n ],\n)\ndef 
test_3rd_body_Curtis(test_params):\n # based on example 12.11 from Howard Curtis\n body = test_params[\"body\"]\n with solar_system_ephemeris.set(\"builtin\"):\n j_date = 2454283.0 * u.day\n tof = (test_params[\"tof\"]).to(u.s).value\n body_r = build_ephem_interpolant(\n body,\n test_params[\"period\"],\n (j_date, j_date + test_params[\"tof\"]),\n rtol=1e-2,\n )\n\n epoch = Time(j_date, format=\"jd\", scale=\"tdb\")\n initial = Orbit.from_classical(Earth, *test_params[\"orbit\"], epoch=epoch)\n rr, vv = cowell(\n Earth.k,\n initial.r,\n initial.v,\n np.linspace(0, tof, 400) * u.s,\n rtol=1e-10,\n ad=third_body,\n k_third=body.k.to(u.km ** 3 / u.s ** 2).value,\n perturbation_body=body_r,\n )\n\n incs, raans, argps = [], [], []\n for ri, vi in zip(rr.to(u.km).value, vv.to(u.km / u.s).value):\n angles = Angle(\n rv2coe(Earth.k.to(u.km ** 3 / u.s ** 2).value, ri, vi)[2:5] * u.rad\n ) # inc, raan, argp\n angles = angles.wrap_at(180 * u.deg)\n incs.append(angles[0].value)\n raans.append(angles[1].value)\n argps.append(angles[2].value)\n\n # averaging over 5 last values in the way Curtis does\n inc_f, raan_f, argp_f = (\n np.mean(incs[-5:]),\n np.mean(raans[-5:]),\n np.mean(argps[-5:]),\n )\n\n assert_quantity_allclose(\n [\n (raan_f * u.rad).to(u.deg) - test_params[\"orbit\"][3],\n (inc_f * u.rad).to(u.deg) - test_params[\"orbit\"][2],\n (argp_f * u.rad).to(u.deg) - test_params[\"orbit\"][4],\n ],\n [test_params[\"raan\"], test_params[\"inc\"], test_params[\"argp\"]],\n rtol=1e-1,\n )\n\n\[email protected](scope=\"module\")\ndef sun_r():\n j_date = 2_438_400.5 * u.day\n tof = 600 * u.day\n return build_ephem_interpolant(Sun, 365 * u.day, (j_date, j_date + tof), rtol=1e-2)\n\n\ndef normalize_to_Curtis(t0, sun_r):\n r = sun_r(t0)\n return 149600000 * r / norm(r)\n\n\[email protected]\[email protected](\n \"t_days,deltas_expected\",\n [\n (200, [3e-3, -8e-3, -0.035, -80.0]),\n (400, [-1.3e-3, 0.01, -0.07, 8.0]),\n (600, [7e-3, 0.03, -0.10, -80.0]),\n # (800, [-7.5e-3, 0.02, 
-0.13, 1.7]),\n # (1000, [6e-3, 0.065, -0.165, -70.0]),\n # (1095, [0.0, 0.06, -0.165, -10.0]),\n ],\n)\ndef test_solar_pressure(t_days, deltas_expected, sun_r):\n # based on example 12.9 from Howard Curtis\n with solar_system_ephemeris.set(\"builtin\"):\n j_date = 2_438_400.5 * u.day\n tof = 600 * u.day\n epoch = Time(j_date, format=\"jd\", scale=\"tdb\")\n\n initial = Orbit.from_classical(\n Earth,\n 10085.44 * u.km,\n 0.025422 * u.one,\n 88.3924 * u.deg,\n 45.38124 * u.deg,\n 227.493 * u.deg,\n 343.4268 * u.deg,\n epoch=epoch,\n )\n # in Curtis, the mean distance to Sun is used. In order to validate against it, we have to do the same thing\n sun_normalized = functools.partial(normalize_to_Curtis, sun_r=sun_r)\n\n rr, vv = cowell(\n Earth.k,\n initial.r,\n initial.v,\n np.linspace(0, (tof).to(u.s).value, 4000) * u.s,\n rtol=1e-8,\n ad=radiation_pressure,\n R=Earth.R.to(u.km).value,\n C_R=2.0,\n A_over_m=2e-4 / 100,\n Wdivc_s=Wdivc_sun.value,\n star=sun_normalized,\n )\n\n delta_eccs, delta_incs, delta_raans, delta_argps = [], [], [], []\n for ri, vi in zip(rr.to(u.km).value, vv.to(u.km / u.s).value):\n orbit_params = rv2coe(Earth.k.to(u.km ** 3 / u.s ** 2).value, ri, vi)\n delta_eccs.append(orbit_params[1] - initial.ecc.value)\n delta_incs.append(\n (orbit_params[2] * u.rad).to(u.deg).value - initial.inc.value\n )\n delta_raans.append(\n (orbit_params[3] * u.rad).to(u.deg).value - initial.raan.value\n )\n delta_argps.append(\n (orbit_params[4] * u.rad).to(u.deg).value - initial.argp.value\n )\n\n # averaging over 5 last values in the way Curtis does\n index = int(\n 1.0 * t_days / tof.to(u.day).value * 4000 # type: ignore\n )\n delta_ecc, delta_inc, delta_raan, delta_argp = (\n np.mean(delta_eccs[index - 5 : index]),\n np.mean(delta_incs[index - 5 : index]),\n np.mean(delta_raans[index - 5 : index]),\n np.mean(delta_argps[index - 5 : index]),\n )\n assert_quantity_allclose(\n [delta_ecc, delta_inc, delta_raan, delta_argp],\n deltas_expected,\n rtol=1e0, # TODO: 
Excessively low, rewrite test?\n atol=1e-4,\n )\n",
"import matplotlib.pyplot as plt\nimport pytest\nfrom astropy import units as u\nfrom astropy.coordinates import CartesianDifferential, CartesianRepresentation\nfrom astropy.time import Time\n\nfrom poliastro.bodies import Earth, Jupiter, Mars, Sun\nfrom poliastro.constants import J2000\nfrom poliastro.ephem import Ephem\nfrom poliastro.examples import churi, iss, molniya\nfrom poliastro.frames import Planes\nfrom poliastro.plotting.static import StaticOrbitPlotter\nfrom poliastro.twobody import Orbit\nfrom poliastro.util import time_range\n\n\ndef test_axes_labels_and_title():\n ax = plt.gca()\n op = StaticOrbitPlotter(ax)\n ss = iss\n op.plot(ss)\n\n assert ax.get_xlabel() == \"$x$ (km)\"\n assert ax.get_ylabel() == \"$y$ (km)\"\n\n\ndef test_number_of_lines_for_osculating_orbit():\n op1 = StaticOrbitPlotter()\n ss = iss\n\n l1 = op1.plot(ss)\n\n assert len(l1) == 2\n\n\ndef test_legend():\n op = StaticOrbitPlotter()\n ss = iss\n op.plot(ss, label=\"ISS\")\n legend = plt.gca().get_legend()\n\n ss.epoch.out_subfmt = \"date_hm\"\n label = f\"{ss.epoch.iso} (ISS)\"\n\n assert legend.get_texts()[0].get_text() == label\n\n\ndef test_color():\n op = StaticOrbitPlotter()\n ss = iss\n c = \"#FF0000\"\n op.plot(ss, label=\"ISS\", color=c)\n ax = plt.gca()\n\n assert ax.get_legend().get_lines()[0].get_c() == c\n for element in ax.get_lines():\n assert element.get_c() == c\n\n\ndef test_plot_trajectory_sets_label():\n expected_label = \"67P\"\n\n op = StaticOrbitPlotter()\n trajectory = churi.sample()\n op.plot_body_orbit(Mars, J2000, label=\"Mars\")\n\n op.plot_trajectory(trajectory, label=expected_label)\n\n legend = plt.gca().get_legend()\n assert legend.get_texts()[1].get_text() == expected_label\n\n\[email protected](\n \"dark, expected_color\", [(True, (0.0, 0.0, 0.0, 1.0)), (False, (1.0, 1.0, 1.0, 1))]\n)\ndef test_dark_mode_plots_dark_plot(dark, expected_color):\n op = StaticOrbitPlotter(dark=dark)\n assert op._ax.get_facecolor() == expected_color\n\n\ndef 
test_redraw_makes_attractor_none():\n # TODO: Review\n op = StaticOrbitPlotter()\n op._redraw()\n assert op._attractor_radius is not None\n\n\ndef test_set_frame_plots_same_colors():\n # TODO: Review\n op = StaticOrbitPlotter()\n op.plot_body_orbit(Jupiter, J2000)\n colors1 = [orb[2] for orb in op.trajectories]\n op.set_body_frame(Jupiter)\n colors2 = [orb[2] for orb in op.trajectories]\n assert colors1 == colors2\n\n\ndef test_redraw_keeps_trajectories():\n # See https://github.com/poliastro/poliastro/issues/518\n op = StaticOrbitPlotter()\n trajectory = churi.sample()\n op.plot_body_orbit(Mars, J2000, label=\"Mars\")\n op.plot_trajectory(trajectory, label=\"67P\")\n\n assert len(op.trajectories) == 2\n\n op.set_body_frame(Mars)\n\n assert len(op.trajectories) == 2\n\n\ndef test_plot_ephem_different_plane_raises_error():\n unused_epochs = Time.now().reshape(-1)\n unused_coordinates = CartesianRepresentation(\n [(1, 0, 0)] * u.au,\n xyz_axis=1,\n differentials=CartesianDifferential([(0, 1, 0)] * (u.au / u.day), xyz_axis=1),\n )\n\n op = StaticOrbitPlotter(plane=Planes.EARTH_ECLIPTIC)\n op.set_attractor(Sun)\n op.set_body_frame(Earth)\n with pytest.raises(ValueError) as excinfo:\n op.plot_ephem(Ephem(unused_epochs, unused_coordinates, Planes.EARTH_EQUATOR))\n\n assert (\n \"sample the ephemerides using a different plane or create a new plotter\"\n in excinfo.exconly()\n )\n\n\[email protected]_image_compare\ndef test_basic_plotting():\n fig, ax = plt.subplots()\n plotter = StaticOrbitPlotter(ax=ax)\n plotter.plot(iss)\n\n return fig\n\n\[email protected]_image_compare\ndef test_basic_trajectory_plotting():\n fig, ax = plt.subplots()\n plotter = StaticOrbitPlotter(ax=ax)\n plotter.set_attractor(Earth)\n plotter.set_orbit_frame(iss)\n plotter.plot_trajectory(iss.sample())\n\n return fig\n\n\[email protected]_image_compare\ndef test_basic_orbit_and_trajectory_plotting():\n fig, ax = plt.subplots()\n plotter = StaticOrbitPlotter(ax=ax)\n plotter.plot(iss)\n 
plotter.plot_trajectory(molniya.sample(), label=\"Molniya\")\n\n return fig\n\n\[email protected]_image_compare\ndef test_trail_plotting():\n fig, ax = plt.subplots()\n plotter = StaticOrbitPlotter(ax=ax)\n plotter.plot(iss, trail=True)\n\n return fig\n\n\[email protected]_image_compare\ndef test_plot_different_planes():\n fig, ax = plt.subplots()\n plotter = StaticOrbitPlotter(ax=ax)\n plotter.plot(iss)\n plotter.plot(molniya.change_plane(Planes.EARTH_ECLIPTIC))\n\n return fig\n\n\[email protected]_image_compare\ndef test_body_plotting(earth_perihelion):\n Earth.plot(earth_perihelion)\n\n return plt.gcf()\n\n\[email protected]_image_compare\ndef test_plot_ephem_epoch():\n epoch = Time(\"2020-02-14 00:00:00\")\n ephem = Ephem.from_horizons(\n \"2020 CD3\",\n time_range(Time(\"2020-02-13 12:00:00\"), end=Time(\"2020-02-14 12:00:00\")),\n attractor=Earth,\n )\n\n fig, ax = plt.subplots()\n plotter = StaticOrbitPlotter(ax=ax)\n plotter.set_attractor(Earth)\n plotter.set_orbit_frame(Orbit.from_ephem(Earth, ephem, epoch))\n\n plotter.plot_ephem(ephem, epoch, label=\"2020 CD3 Minimoon\", color=\"k\")\n\n return fig\n\n\[email protected]_image_compare\ndef test_plot_ephem_no_epoch():\n epoch = Time(\"2020-02-14 00:00:00\")\n ephem = Ephem.from_horizons(\n \"2020 CD3\",\n time_range(Time(\"2020-02-13 12:00:00\"), end=Time(\"2020-02-14 12:00:00\")),\n attractor=Earth,\n )\n\n fig, ax = plt.subplots()\n plotter = StaticOrbitPlotter(ax=ax)\n plotter.set_attractor(Earth)\n plotter.set_orbit_frame(Orbit.from_ephem(Earth, ephem, epoch))\n\n plotter.plot_ephem(ephem, label=\"2020 CD3 Minimoon\", color=\"k\")\n\n return fig\n"
] | [
[
"numpy.sin"
],
[
"numpy.abs",
"numpy.linspace",
"numpy.linalg.norm",
"numpy.mean",
"numpy.array"
],
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.gcf"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jfc43/robust-attribution-regularization | [
"fad85f40d4b1c2efcd851c32216b4549e7122421",
"fad85f40d4b1c2efcd851c32216b4549e7122421"
] | [
"GTSRB/eval_attribution_attack.py",
"GTSRB/gtsrb_input.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport numpy as np\nimport shutil\nimport json\nimport math\nimport os\nimport sys\nimport time\n\nimport tensorflow as tf\nimport gtsrb_input\nfrom model import Model\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\n# Global constants\nwith open('config.json') as config_file:\n config = json.load(config_file)\n\nnum_eval_examples = config['num_eval_examples']\nepsilon = config['epsilon']\nrandom_seed = config['np_random_seed']\nmodel_dir = config['model_dir']\nnum_IG_steps = config['num_IG_steps']\nk_top = config['k_top']\neval_k_top = config['eval_k_top']\nsaliency_type = config['saliency_type']\nattribution_attack_method = config['attribution_attack_method']\nattribution_attack_measure = config['attribution_attack_measure']\nattribution_attack_step_size = config['attribution_attack_step_size']\nattribution_attack_steps = config['attribution_attack_steps']\nattribution_attack_times = config['attribution_attack_times']\ndata_path = config['data_path']\n\nif saliency_type == 'ig':\n from ig_attack import IntegratedGradientsAttack as SaliencyAttack\nelif saliency_type == 'simple_gradient':\n from simple_gradient_attack import SimpleGradientAttack as SaliencyAttack\nelse:\n assert False, ('Unknown saliency type.')\n\nnp.random.seed(random_seed)\n\n# Set upd the data, hyperparameters, and the model\ngtsrb = gtsrb_input.GTSRBData(data_path)\n\nreference_image = np.zeros((32,32,3))\n\nmodel = Model(mode='eval', create_saliency_op=saliency_type)\n\nsaver = tf.train.Saver()\n\nglobal_step = tf.contrib.framework.get_or_create_global_step()\n\ncheckpoint = tf.train.latest_checkpoint(model_dir)\n\ntf_config = tf.ConfigProto()\ntf_config.gpu_options.allow_growth = True\n\nwith tf.Session(config = tf_config) as sess:\n # Restore the checkpoint\n saver.restore(sess, checkpoint)\n\n test_images = gtsrb.eval_data.xs\n test_labels = 
gtsrb.eval_data.ys\n \n min_intersections = []\n min_spearmans = []\n min_kendalls = []\n \n correct_cnt = 0\n\n for i in range(num_eval_examples):\n test_image = test_images[i]\n original_label = test_labels[i]\n\n module = SaliencyAttack(sess = sess, test_image = test_image, original_label = original_label, NET = model,\n attack_method = attribution_attack_method, epsilon = epsilon,\n k_top = k_top, eval_k_top = eval_k_top, num_steps = num_IG_steps,\n attack_iters = attribution_attack_steps,\n attack_times = attribution_attack_times,\n alpha = attribution_attack_step_size, attack_measure = attribution_attack_measure,\n reference_image = reference_image, same_label = True)\n\n if module.status == 1:\n \n correct_cnt += 1\n \n intersections, spearmans, kendalls = module.iterative_attack()\n \n idx = np.argmin(kendalls)\n min_intersections.append(intersections[idx])\n min_spearmans.append(spearmans[idx])\n min_kendalls.append(kendalls[idx])\n \n res_str = '{} {} '.format(i, 1)\n\n for k in range(attribution_attack_times):\n res_str += '{:.6f} {:.6f} {:.6f} '.format(intersections[k], spearmans[k], kendalls[k])\n \n\n print('progress: {}/{}, {}'.format(i + 1, num_eval_examples, res_str))\n else:\n res_str = '{} {} '.format(i, 0)\n\n for k in range(attribution_attack_times):\n res_str += '{:.6f} {:.6f} {:.6f} '.format(0, 0, 0)\n\n print('progress: {}/{}, prediction incorrect!'.format(i + 1, num_eval_examples))\n \navg_intersection = np.mean(min_intersections)\navg_spearman = np.mean(min_spearmans)\navg_kendall = np.mean(min_kendalls)\n\nprint('process {} examples'.format(num_eval_examples))\nprint('accuracy {}'.format(float(correct_cnt)/num_eval_examples))\nprint('Average top-k intersection: {:.4f}'.format(avg_intersection))\nprint('Average spearman rank correlation: {:.4f}'.format(avg_spearman))\nprint('Average kendall rank correlation: {:.4f}'.format(avg_kendall))\n\n\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport pickle\nimport sys\nimport tensorflow as tf\nimport csv\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nversion = sys.version_info\n\nimport numpy as np\nimport skimage.data\nimport scipy.io as sio\nimport cv2\n\ndef image_brightness_normalisation(image):\n image[:,:,0] = cv2.equalizeHist(image[:,:,0])\n image[:,:,1] = cv2.equalizeHist(image[:,:,1])\n image[:,:,2] = cv2.equalizeHist(image[:,:,2])\n return image\n\ndef preprocess_data(X):\n \n for i in range(len(X)):\n X[i,:,:,:] = image_brightness_normalisation(X[i,:,:,:])\n \n return X\n\nclass GTSRBData(object):\n def __init__(self, path):\n loaded = np.load(os.path.join(path, 'train.npz'))\n train_images = loaded['images']\n train_images = preprocess_data(train_images)\n train_labels = loaded['labels']\n \n loaded = np.load(os.path.join(path,'test.npz'))\n eval_images = loaded['images']\n eval_images = preprocess_data(eval_images)\n eval_labels = loaded['labels']\n \n self.train_data = DataSubset(train_images, train_labels)\n self.eval_data = DataSubset(eval_images, eval_labels)\n\nclass DataSubset(object):\n def __init__(self, xs, ys):\n self.xs = xs\n self.n = xs.shape[0]\n self.ys = ys\n self.batch_start = 0\n self.cur_order = np.random.permutation(self.n)\n\n def get_next_batch(self, batch_size, multiple_passes=False, reshuffle_after_pass=True):\n if self.n < batch_size:\n raise ValueError('Batch size can be at most the dataset size')\n if not multiple_passes:\n actual_batch_size = min(batch_size, self.n - self.batch_start)\n if actual_batch_size <= 0:\n raise ValueError('Pass through the dataset is complete.')\n batch_end = self.batch_start + actual_batch_size\n batch_xs = self.xs[self.cur_order[self.batch_start : batch_end], ...]\n batch_ys = self.ys[self.cur_order[self.batch_start : batch_end], ...]\n self.batch_start += actual_batch_size\n return batch_xs, 
batch_ys\n actual_batch_size = min(batch_size, self.n - self.batch_start)\n if actual_batch_size < batch_size:\n if reshuffle_after_pass:\n self.cur_order = np.random.permutation(self.n)\n self.batch_start = 0\n batch_end = self.batch_start + batch_size\n batch_xs = self.xs[self.cur_order[self.batch_start : batch_end], ...]\n batch_ys = self.ys[self.cur_order[self.batch_start : batch_end], ...]\n self.batch_start += actual_batch_size\n return batch_xs, batch_ys\n\n"
] | [
[
"tensorflow.train.latest_checkpoint",
"numpy.random.seed",
"tensorflow.ConfigProto",
"numpy.mean",
"numpy.argmin",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.contrib.framework.get_or_create_global_step",
"numpy.zeros"
],
[
"numpy.random.permutation"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
weixiong-zheng-berkeley/BART-lite | [
"ead39f757acab936af815352262080318163debb"
] | [
"mesh.py"
] | [
"import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\n\nimport numpy as np\n\nclass Mesh(object):\n def __init__(self, mesh_cells, domain_upper, mat_map):\n assert type(mesh_cells) == int, \"mesh_cells must be an int\"\n self._mesh_params = {'x_cell': mesh_cells,\n 'cell_length': float(domain_upper)/float(mesh_cells)}\n\n self._mat_map = mat_map\n\n i = np.repeat(np.arange(mesh_cells), mesh_cells)\n j = np.tile(np.arange(mesh_cells), mesh_cells)\n idxs = zip(i,j)\n \n self._cells = []\n\n for idx in idxs:\n self._cells.append(Cell(idx, self._mesh_params, mat_map))\n\n # Save parameters\n self._n_cell = mesh_cells**2\n assert self._n_cell == len(self._cells),\\\n \"Cell array incorrect length\"\n\n self._x_cell = mesh_cells\n self._y_cell = mesh_cells\n self._x_node = mesh_cells + 1\n self._y_node = mesh_cells + 1\n self._n_node = self._x_node * self._y_node\n self._cell_length = self._mesh_params['cell_length']\n\n def soln_plot(self, solution, plot = True): # pragma: no cover\n # Plot a given solution\n return self.__plot__(solution, plot)\n \n def test_plot(self, plot = False):\n # Plot a test solution of length n_cells\n solution = np.zeros(self._n_node)\n \n for cell in self._cells:\n for idx in cell.global_idx():\n solution[idx] += 0.5\n \n return self.__plot__(solution, plot)\n \n def __plot__(self, solution, plot):\n xs = []\n ys = []\n zs = []\n for i,s in enumerate(solution):\n x, y = self.__idx_to_xy__(i)\n xs.append(x)\n ys.append(y)\n zs.append(s)\n if plot: # pragma: no cover\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n X = np.reshape(xs, (self._x_node, self._x_node))\n Y = np.reshape(ys, (self._x_node, self._x_node))\n Z = np.reshape(zs, (self._x_node, self._x_node))\n rstride = int(self._x_node/50) + 1\n cstride = int(self._x_node/50) + 1\n surf = ax.plot_surface(X,Y,Z, cmap=cm.coolwarm, rstride=rstride,\n cstride=cstride, linewidth=0, antialiased=False)\n fig.colorbar(surf)\n 
plt.show()\n return fig\n else:\n return xs, ys, zs\n\n def __idx_to_xy__(self, idx):\n y = self._cell_length*int(idx/self._x_node)\n x = self._cell_length*int(idx % self._y_node)\n return (x,y)\n \n def cell_length(self):\n return self._cell_length\n \n def cells(self):\n return self._cells\n \n def n_cell(self):\n return self._n_cell\n \n def n_node(self):\n return self._n_node\n \n def x_cell(self):\n return self._x_cell\n \n def x_node(self):\n return self._x_node\n \n def y_cell(self):\n return self._y_cell\n \n def y_node(self):\n return self._y_node\n\nclass Cell(object):\n \"\"\" A single cell in the mesh, holds location and material data \"\"\"\n\n def __init__(self, index, mesh_params, mat_map=None):\n \"\"\" Cell constructor, give index in a tuple (i,j) \"\"\"\n \n # Constructor validations\n assert isinstance(index, tuple), \"Index must be a tuple\"\n assert len(index) == 2, \"Index must be a length 2 tuple\"\n \n try:\n assert mesh_params['cell_length'] > 0, \"cell_length must be greater than 0\"\n except KeyError:\n raise KeyError(\"Missing 'cell_length' parameter in mesh_params\")\n\n self._index = index\n\n try:\n self._length = float(mesh_params['cell_length'])\n except ValueError:\n raise TypeError(\"cell_length parameter must be a number\")\n\n # Calculate global_idx\n x_node = mesh_params['x_cell'] + 1\n i,j = index[0], index[1]\n self._global_idx = [x_node*i + j,\n x_node*i + j + 1,\n x_node*(i + 1) + j,\n x_node*(i + 1) + j + 1]\n \n # Determine if on a boundary\n self._bounds = {}\n x_cell = mesh_params['x_cell']\n try:\n y_cell = mesh_params['y_cell']\n except KeyError:\n y_cell = x_cell\n \n # Verify cell is in the mesh\n assert i < x_cell, \"Cell i exceeds num of x nodes\"\n assert j < y_cell, \"Cell j exceeds num of y nodes\"\n \n if index[0] == 0:\n self._bounds.update({'x_min': None})\n if index[0] == y_cell - 1:\n self._bounds.update({'x_max': None})\n if index[1] == 0:\n self._bounds.update({'y_min': None})\n if index[1] == x_cell - 
1:\n self._bounds.update({'y_max': None})\n\n # Get material properties\n if mat_map:\n assert (mat_map.dx * mat_map.n) == (x_cell * self._length),\\\n \"Material map and cells must have the same total x length\"\n assert (mat_map.dy * mat_map.n) == (y_cell * self._length),\\\n \"Material map and cells must have the same total y length\"\n \n self._mat_map = mat_map\n\n\n # UTILITY FUNCTIONS ================================================\n\n # MATERIAL PROPERTIES ==============================================\n def get(self, prop):\n try:\n x = self._length*(self._index[0] + 0.5)\n y = self._length*(self._index[1] + 0.5)\n \n return self._mat_map.get(prop, loc=(x,y))\n except AttributeError:\n raise AttributeError(\"This cell has no material map assigned\")\n\n \n # ATTRIBUTES =======================================================\n \n def bounds(self, bound=None, value=None):\n if bound and bound in self._bounds:\n if value:\n self._bounds[bound] = value\n else:\n return self._bounds[bound]\n elif bound and not bound in self._bounds:\n raise KeyError(\"Cell does not have bound \" + str(bound))\n else:\n return self._bounds \n \n def global_idx(self):\n \"\"\" Returns global index, a list of the node indices \"\"\"\n return self._global_idx\n\n def index(self):\n return self._index\n \n def length(self):\n return self._length\n"
] | [
[
"numpy.reshape",
"numpy.arange",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PaulWang1905/tensorflow | [
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0",
"ebf12d22b4801fb8dab5034cc94562bf7cc33fa0"
] | [
"tensorflow/python/training/experimental/loss_scale.py",
"tensorflow/contrib/sparsemax/python/ops/sparsemax.py",
"tensorflow/contrib/image/python/ops/dense_image_warp.py",
"tensorflow/python/compiler/tensorrt/test/batch_matmul_test.py",
"tensorflow/python/kernel_tests/accumulate_n_test.py",
"tensorflow/contrib/timeseries/python/timeseries/state_space_models/test_utils.py",
"tensorflow/python/ops/dequantize_op_test.py",
"tensorflow/contrib/slim/python/slim/data/dataset_data_provider_test.py",
"tensorflow/python/distribute/model_combinations.py",
"tensorflow/python/keras/layers/kernelized.py",
"tensorflow/python/data/experimental/benchmarks/snapshot_dataset_benchmark.py",
"tensorflow/python/ops/init_ops.py",
"tensorflow/contrib/distributions/python/kernel_tests/bijectors/reshape_test.py",
"tensorflow/python/keras/layers/convolutional_recurrent.py",
"tensorflow/contrib/gan/python/features/python/virtual_batchnorm_impl.py",
"tensorflow/python/kernel_tests/boosted_trees/quantile_ops_test.py",
"tensorflow/python/ops/signal/dct_ops.py",
"tensorflow/contrib/predictor/saved_model_predictor.py",
"tensorflow/contrib/seq2seq/python/ops/helper.py",
"tensorflow/python/data/experimental/kernel_tests/serialization/dataset_serialization_test_base.py",
"tensorflow/python/data/kernel_tests/list_files_test.py",
"tensorflow/contrib/opt/python/training/variable_clipping_optimizer_test.py",
"tensorflow/contrib/distributions/python/kernel_tests/relaxed_onehot_categorical_test.py",
"tensorflow/contrib/learn/python/learn/ops/ops_test.py",
"tensorflow/python/eager/function_argument_naming_test.py",
"tensorflow/examples/get_started/regression/test.py",
"tensorflow/contrib/opt/python/training/matrix_functions.py",
"tensorflow/contrib/eager/python/examples/rnn_colorbot/rnn_colorbot.py",
"tensorflow/contrib/eager/python/examples/l2hmc/l2hmc_test.py",
"tensorflow/contrib/slim/python/slim/nets/resnet_v1_test.py",
"tensorflow/contrib/distribute/python/examples/keras_mnist.py",
"tensorflow/python/training/session_manager_test.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains LossScale classes.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\nimport six\n\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.util.tf_export import tf_export\n\n\[email protected]_metaclass(abc.ABCMeta)\n@tf_export('train.experimental.LossScale')\nclass LossScale(trackable.Trackable):\n \"\"\"Loss scale base class.\n\n Loss scaling is a process that multiplies the loss by a multiplier called the\n loss scale, and divides each gradient by the same multiplier. 
The pseudocode\n for this process is:\n\n ```\n loss = ...\n loss *= loss_scale\n grads = gradients(loss, vars)\n grads /= loss_scale\n ```\n\n Mathematically, loss scaling has no effect, but can help avoid numerical\n underflow in intermediate gradients when float16 tensors are used for mixed\n precision training. By multiplying the loss, each intermediate gradient will\n have the same multiplier applied.\n\n Instances of this class represent a loss scale. Calling instances of this\n class returns the loss scale as a scalar float32 tensor, while method\n `update()` updates the loss scale depending on the values of the gradients.\n Optimizers use instances of this class to scale loss and gradients.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initializes the loss scale class.\"\"\"\n self._weights = {}\n\n @abc.abstractmethod\n def __call__(self):\n \"\"\"Returns the current loss scale as a scalar `float32` tensor.\"\"\"\n pass\n\n @abc.abstractmethod\n def update(self, grads):\n \"\"\"Updates the value of the loss scale.\n\n The loss scale will be potentially updated, based on the value of `grads`.\n The tensor returned by calling this class is only updated when this function\n is evaluated.\n\n In eager mode, this directly updates the loss scale, so that calling\n `__call__` will return the newly updated loss scale. In graph mode,\n this returns an op that, when evaluated, updates the loss scale.\n\n This function also returns a `should_apply_gradients` bool. If False,\n gradients should not be applied to the variables that step, as nonfinite\n gradients were found, and the loss scale has been be updated to reduce the\n chance of finding nonfinite gradients in the next step. 
Some loss scale\n classes will always return True, as they cannot adjust themselves in\n response to nonfinite gradients.\n\n When a DistributionStrategy is used, this function may only be called in a\n cross-replica context.\n\n Args:\n grads: A list of unscaled gradients, each which is the gradient of the\n loss with respect to a weight. The gradients should have already been\n divided by the loss scale being before passed to this function. 'None'\n gradients are accepted, and are ignored.\n\n Returns:\n update_op: In eager mode, None. In graph mode, an op to update the loss\n scale.\n should_apply_gradients: Either a bool or a scalar boolean tensor. If\n False, the caller should skip applying `grads` to the variables this\n step.\n \"\"\"\n pass\n\n def _add_weight(self, name, initial_value, dtype=None):\n \"\"\"Adds a weight to this loss scale.\n\n Args:\n name: Variable name.\n initial_value: The variable's initial value.\n dtype: The type of the variable.\n\n Returns:\n A variable.\n\n Raises:\n RuntimeError: If a weight with `name` has already been added.\n \"\"\"\n variable = variable_scope.variable(\n initial_value=initial_value,\n name=name,\n dtype=dtype,\n trainable=False,\n use_resource=True,\n synchronization=variables.VariableSynchronization.AUTO,\n # Set aggregation to NONE, as loss scaling variables should never be\n # aggregated.\n aggregation=variables.VariableAggregation.NONE)\n if context.executing_eagerly():\n graph_key = None\n else:\n graph = ops.get_default_graph()\n graph_key = graph._graph_key # pylint: disable=protected-access\n\n key = (name, graph_key)\n if self._weights.get(key, None) is not None:\n raise RuntimeError('Duplicate variables detected. {}'.format(key))\n self._weights[key] = variable\n self._handle_deferred_dependencies(name=name, trackable=variable)\n return variable\n\n @property\n def _checkpoint_dependencies(self):\n \"\"\"From Trackable. 
Gather graph-specific weights to save.\"\"\"\n if context.executing_eagerly():\n graph_key = None\n else:\n graph = ops.get_default_graph()\n graph_key = graph._graph_key # pylint: disable=protected-access\n weights = []\n for (name, g), v in sorted(self._weights.items(), key=lambda i: i[0][0]):\n if g == graph_key:\n weights.append(trackable.TrackableReference(name=name, ref=v))\n return super(LossScale, self)._checkpoint_dependencies + weights\n\n def _lookup_dependency(self, name):\n \"\"\"From Trackable. Find a weight in the current graph.\"\"\"\n unconditional = super(LossScale, self)._lookup_dependency(name)\n if unconditional is not None:\n return unconditional\n if context.executing_eagerly():\n graph_key = None\n else:\n graph = ops.get_default_graph()\n graph_key = graph._graph_key # pylint: disable=protected-access\n return self._weights.get((name, graph_key), None)\n\n @abc.abstractmethod\n def get_config(self):\n \"\"\"Returns the config of this loss scale.\"\"\"\n pass\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Creates the LossScale from its config.\"\"\"\n return cls(**config)\n\n\ndef get_loss_scale_weights(loss_scale):\n return loss_scale._weights.values() # pylint: disable=protected-access\n\n\n@tf_export('train.experimental.FixedLossScale')\nclass FixedLossScale(LossScale):\n \"\"\"Loss scale with a fixed value.\n\n The loss scale is not updated for the lifetime of instances of this class.\n A given instance of this class always returns the same number when called.\n \"\"\"\n\n def __init__(self, loss_scale_value):\n \"\"\"Creates the fixed loss scale.\n\n Args:\n loss_scale_value: A Python float. Its ideal value varies depending on\n models to run. Choosing a too small loss_scale might affect model\n quality; a too big loss_scale might cause inf or nan. There is no single\n right loss_scale to apply. 
There is no harm choosing a relatively big\n number as long as no nan or inf is encountered in training.\n\n Raises:\n ValueError: If loss_scale is less than 1.\n \"\"\"\n super(FixedLossScale, self).__init__()\n if not isinstance(loss_scale_value, six.integer_types + (float,)):\n raise ValueError('loss_scale_value must be a Python int or float.')\n if loss_scale_value < 1:\n raise ValueError('loss_scale_value must be at least 1.')\n # It's important we do not create tensors in the constructor, as such\n # tensors might be on a different device or tf.function vs when the tensor\n # is used. This would hurt performance. Therefore, we do not create a tensor\n # from loss_scale_value, but instead leave it as a Python float.\n # TODO(reedwm): Also do not create tensors in the DynamicLossScale\n # constructor.\n self._loss_scale_value = float(loss_scale_value)\n\n def __call__(self):\n return ops.convert_to_tensor(self._loss_scale_value)\n\n def update(self, grads):\n del grads\n return control_flow_ops.no_op(), True\n\n def get_config(self):\n return {'loss_scale_value': self._loss_scale_value}\n\n\ndef _is_all_finite(grads):\n \"\"\"Returns a scalar boolean tensor indicating if all gradients are finite.\"\"\"\n is_finite_per_grad = [\n math_ops.reduce_all(math_ops.is_finite(g)) for g in grads if g is not None\n ]\n return math_ops.reduce_all(is_finite_per_grad)\n\n\ndef _op_in_graph_mode(tensor):\n \"\"\"Returns the tensor's op in graph mode, or the tensor in eager mode.\n\n This is useful because sometimes an op is needed in graph mode instead of a\n tensor. In eager mode, there are no ops.\n\n Args:\n tensor: A tensor.\n\n Returns:\n The tensor's op in graph mode. 
The tensor in eager mode.\n \"\"\"\n if context.executing_eagerly():\n return tensor\n return tensor.op\n\n\ndef _assign_if_finite(var, value):\n \"\"\"Assigns a value to a variable if the value is finite.\"\"\"\n return control_flow_ops.cond(\n math_ops.is_finite(value), lambda: _op_in_graph_mode(var.assign(value)),\n control_flow_ops.no_op)\n\n\n@tf_export('train.experimental.DynamicLossScale')\nclass DynamicLossScale(LossScale):\n \"\"\"Loss scale that dynamically adjusts itself.\n\n Dynamic loss scaling works by adjusting the loss scale as training progresses.\n The goal is to keep the loss scale as high as possible without overflowing the\n gradients. As long as the gradients do not overflow, raising the loss scale\n never hurts.\n\n The algorithm starts by setting the loss scale to an initial value. Every N\n steps that the gradients are finite, the loss scale is increased by some\n factor. However, if a NaN or Inf gradient is found, the gradients for that\n step are not applied, and the loss scale is decreased by the factor. This\n process tends to keep the loss scale as high as possible without gradients\n overflowing.\n \"\"\"\n\n def __init__(self,\n initial_loss_scale=2 ** 15, # See docstring for why this is big.\n increment_period=2000,\n multiplier=2.):\n \"\"\"Creates the dynamic loss scale.\n\n Args:\n initial_loss_scale: A Python float. The loss scale to use at the\n beginning. It's better to start this at a very high number, because a\n loss scale that is too high gets lowered far more quickly than a loss\n scale that is to low gets raised. The default is 2 ** 15, which is\n approximately half the maximum float16 value.\n increment_period: Increases loss scale every `increment_period`\n consecutive steps that finite gradients are encountered. 
If a nonfinite\n gradient is encountered, the count is reset back to zero.\n multiplier: The multiplier to use when increasing or decreasing the loss\n scale.\n \"\"\"\n super(DynamicLossScale, self).__init__()\n self._initial_loss_scale = float(initial_loss_scale)\n self._increment_period = int(increment_period)\n self._multiplier = float(multiplier)\n\n self._current_loss_scale = self._add_weight(\n name='current_loss_scale',\n dtype=dtypes.float32,\n initial_value=self._initial_loss_scale)\n # The number of consecutive steps with finite gradients since the last\n # nonfinite gradient or change in loss scale.\n self._num_good_steps = self._add_weight(\n name='good_steps', dtype=dtypes.int64, initial_value=0)\n\n @property\n def initial_loss_scale(self):\n return self._initial_loss_scale\n\n @property\n def increment_period(self):\n return self._increment_period\n\n @property\n def multiplier(self):\n return self._multiplier\n\n def __call__(self):\n return self._current_loss_scale\n\n def update(self, grads):\n \"\"\"Updates loss scale based on if gradients are finite in current step.\"\"\"\n if distribution_strategy_context.has_strategy():\n distribution = distribution_strategy_context.get_cross_replica_context()\n\n def get_is_finite(grads):\n is_finite = _is_all_finite(grads)\n # We cast to float, because we cannot reduce booleans with\n # DistributionStrategy.\n return math_ops.cast(is_finite, dtypes.float32)\n\n is_finite_float = distribution.extended.call_for_each_replica(\n get_is_finite, args=(grads,))\n reduced_is_finite_float = distribution.reduce(reduce_util.ReduceOp.SUM,\n is_finite_float, axis=None)\n is_finite = math_ops.equal(reduced_is_finite_float,\n distribution.num_replicas_in_sync)\n else:\n is_finite = _is_all_finite(grads)\n\n def update_if_finite_grads():\n \"\"\"Update assuming the gradients are finite.\"\"\"\n\n def incr_loss_scale():\n new_loss_scale = self._current_loss_scale * self._multiplier\n return control_flow_ops.group(\n 
_assign_if_finite(self._current_loss_scale, new_loss_scale),\n self._num_good_steps.assign(0))\n\n return control_flow_ops.cond(\n self._num_good_steps + 1 >= self._increment_period,\n incr_loss_scale, lambda: _op_in_graph_mode(\n self._num_good_steps.assign_add(1)))\n\n def update_if_not_finite_grads():\n \"\"\"Update assuming the gradients are nonfinite.\"\"\"\n\n new_loss_scale = math_ops.maximum(\n self._current_loss_scale / self._multiplier, 1)\n return control_flow_ops.group(\n self._num_good_steps.assign(0),\n self._current_loss_scale.assign(new_loss_scale))\n\n update_op = control_flow_ops.cond(is_finite, update_if_finite_grads,\n update_if_not_finite_grads)\n should_apply_gradients = is_finite\n return update_op, should_apply_gradients\n\n def get_config(self):\n return {\n 'initial_loss_scale': self.initial_loss_scale,\n 'increment_period': self.increment_period,\n 'multiplier': self.multiplier,\n }\n\n\ndef get(identifier):\n \"\"\"Get a loss scale object.\"\"\"\n if isinstance(identifier, six.integer_types + (float,)):\n return FixedLossScale(identifier)\n if identifier == 'dynamic':\n return DynamicLossScale()\n if isinstance(identifier, LossScale):\n return identifier\n elif identifier is None:\n return None\n else:\n raise ValueError('Could not interpret loss scale identifier: %s' %\n identifier)\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Sparsemax op.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\n\n__all__ = [\"sparsemax\"]\n\n\ndef sparsemax(logits, name=None):\n \"\"\"Computes sparsemax activations [1].\n\n For each batch `i` and class `j` we have\n $$sparsemax[i, j] = max(logits[i, j] - tau(logits[i, :]), 0)$$\n\n [1]: https://arxiv.org/abs/1602.02068\n\n Args:\n logits: A `Tensor`. Must be one of the following types: `half`, `float32`,\n `float64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `logits`.\n \"\"\"\n\n with ops.name_scope(name, \"sparsemax\", [logits]) as name:\n logits = ops.convert_to_tensor(logits, name=\"logits\")\n obs = array_ops.shape(logits)[0]\n dims = array_ops.shape(logits)[1]\n\n # In the paper, they call the logits z.\n # The mean(logits) can be substracted from logits to make the algorithm\n # more numerically stable. the instability in this algorithm comes mostly\n # from the z_cumsum. 
Substacting the mean will cause z_cumsum to be close\n # to zero. However, in practise the numerical instability issues are very\n # minor and substacting the mean causes extra issues with inf and nan\n # input.\n z = logits\n\n # sort z\n z_sorted, _ = nn.top_k(z, k=dims)\n\n # calculate k(z)\n z_cumsum = math_ops.cumsum(z_sorted, axis=1)\n k = math_ops.range(\n 1, math_ops.cast(dims, logits.dtype) + 1, dtype=logits.dtype)\n z_check = 1 + k * z_sorted > z_cumsum\n # because the z_check vector is always [1,1,...1,0,0,...0] finding the\n # (index + 1) of the last `1` is the same as just summing the number of 1.\n k_z = math_ops.reduce_sum(math_ops.cast(z_check, dtypes.int32), axis=1)\n\n # calculate tau(z)\n # If there are inf values or all values are -inf, the k_z will be zero,\n # this is mathematically invalid and will also cause the gather_nd to fail.\n # Prevent this issue for now by setting k_z = 1 if k_z = 0, this is then\n # fixed later (see p_safe) by returning p = nan. This results in the same\n # behavior as softmax.\n k_z_safe = math_ops.maximum(k_z, 1)\n indices = array_ops.stack([math_ops.range(0, obs), k_z_safe - 1], axis=1)\n tau_sum = array_ops.gather_nd(z_cumsum, indices)\n tau_z = (tau_sum - 1) / math_ops.cast(k_z, logits.dtype)\n\n # calculate p\n p = math_ops.maximum(\n math_ops.cast(0, logits.dtype), z - tau_z[:, array_ops.newaxis])\n # If k_z = 0 or if z = nan, then the input is invalid\n p_safe = array_ops.where(\n math_ops.logical_or(\n math_ops.equal(k_z, 0), math_ops.is_nan(z_cumsum[:, -1])),\n array_ops.fill([obs, dims], math_ops.cast(float(\"nan\"), logits.dtype)),\n p)\n\n return p_safe\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Image warping using per-pixel flow vectors.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\n\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import math_ops\n\n\ndef _interpolate_bilinear(grid,\n query_points,\n name='interpolate_bilinear',\n indexing='ij'):\n \"\"\"Similar to Matlab's interp2 function.\n\n Finds values for query points on a grid using bilinear interpolation.\n\n Args:\n grid: a 4-D float `Tensor` of shape `[batch, height, width, channels]`.\n query_points: a 3-D float `Tensor` of N points with shape `[batch, N, 2]`.\n name: a name for the operation (optional).\n indexing: whether the query points are specified as row and column (ij),\n or Cartesian coordinates (xy).\n\n Returns:\n values: a 3-D `Tensor` with shape `[batch, N, channels]`\n\n Raises:\n ValueError: if the indexing mode is invalid, or if the shape of the inputs\n invalid.\n \"\"\"\n if indexing != 'ij' and indexing != 'xy':\n raise ValueError('Indexing mode must be \\'ij\\' or 
\\'xy\\'')\n\n with ops.name_scope(name):\n grid = ops.convert_to_tensor(grid)\n query_points = ops.convert_to_tensor(query_points)\n shape = grid.get_shape().as_list()\n if len(shape) != 4:\n msg = 'Grid must be 4 dimensional. Received size: '\n raise ValueError(msg + str(grid.get_shape()))\n\n batch_size, height, width, channels = (array_ops.shape(grid)[0],\n array_ops.shape(grid)[1],\n array_ops.shape(grid)[2],\n array_ops.shape(grid)[3])\n\n shape = [batch_size, height, width, channels]\n query_type = query_points.dtype\n grid_type = grid.dtype\n\n with ops.control_dependencies([\n check_ops.assert_equal(\n len(query_points.get_shape()),\n 3,\n message='Query points must be 3 dimensional.'),\n check_ops.assert_equal(\n array_ops.shape(query_points)[2],\n 2,\n message='Query points must be size 2 in dim 2.')\n ]):\n num_queries = array_ops.shape(query_points)[1]\n\n with ops.control_dependencies([\n check_ops.assert_greater_equal(\n height, 2, message='Grid height must be at least 2.'),\n check_ops.assert_greater_equal(\n width, 2, message='Grid width must be at least 2.')\n ]):\n alphas = []\n floors = []\n ceils = []\n index_order = [0, 1] if indexing == 'ij' else [1, 0]\n unstacked_query_points = array_ops.unstack(query_points, axis=2)\n\n for dim in index_order:\n with ops.name_scope('dim-' + str(dim)):\n queries = unstacked_query_points[dim]\n\n size_in_indexing_dimension = shape[dim + 1]\n\n # max_floor is size_in_indexing_dimension - 2 so that max_floor + 1\n # is still a valid index into the grid.\n max_floor = math_ops.cast(size_in_indexing_dimension - 2, query_type)\n min_floor = constant_op.constant(0.0, dtype=query_type)\n floor = math_ops.minimum(\n math_ops.maximum(min_floor, math_ops.floor(queries)), max_floor)\n int_floor = math_ops.cast(floor, dtypes.int32)\n floors.append(int_floor)\n ceil = int_floor + 1\n ceils.append(ceil)\n\n # alpha has the same type as the grid, as we will directly use alpha\n # when taking linear combinations of pixel 
values from the image.\n alpha = math_ops.cast(queries - floor, grid_type)\n min_alpha = constant_op.constant(0.0, dtype=grid_type)\n max_alpha = constant_op.constant(1.0, dtype=grid_type)\n alpha = math_ops.minimum(math_ops.maximum(min_alpha, alpha), max_alpha)\n\n # Expand alpha to [b, n, 1] so we can use broadcasting\n # (since the alpha values don't depend on the channel).\n alpha = array_ops.expand_dims(alpha, 2)\n alphas.append(alpha)\n\n with ops.control_dependencies([\n check_ops.assert_less_equal(\n math_ops.cast(batch_size * height * width, dtype=dtypes.float32),\n np.iinfo(np.int32).max / 8,\n message=\"\"\"The image size or batch size is sufficiently large\n that the linearized addresses used by array_ops.gather\n may exceed the int32 limit.\"\"\")\n ]):\n flattened_grid = array_ops.reshape(\n grid, [batch_size * height * width, channels])\n batch_offsets = array_ops.reshape(\n math_ops.range(batch_size) * height * width, [batch_size, 1])\n\n # This wraps array_ops.gather. We reshape the image data such that the\n # batch, y, and x coordinates are pulled into the first dimension.\n # Then we gather. Finally, we reshape the output back. 
It's possible this\n # code would be made simpler by using array_ops.gather_nd.\n def gather(y_coords, x_coords, name):\n with ops.name_scope('gather-' + name):\n linear_coordinates = batch_offsets + y_coords * width + x_coords\n gathered_values = array_ops.gather(flattened_grid, linear_coordinates)\n return array_ops.reshape(gathered_values,\n [batch_size, num_queries, channels])\n\n # grab the pixel values in the 4 corners around each query point\n top_left = gather(floors[0], floors[1], 'top_left')\n top_right = gather(floors[0], ceils[1], 'top_right')\n bottom_left = gather(ceils[0], floors[1], 'bottom_left')\n bottom_right = gather(ceils[0], ceils[1], 'bottom_right')\n\n # now, do the actual interpolation\n with ops.name_scope('interpolate'):\n interp_top = alphas[1] * (top_right - top_left) + top_left\n interp_bottom = alphas[1] * (bottom_right - bottom_left) + bottom_left\n interp = alphas[0] * (interp_bottom - interp_top) + interp_top\n\n return interp\n\n\ndef dense_image_warp(image, flow, name='dense_image_warp'):\n \"\"\"Image warping using per-pixel flow vectors.\n\n Apply a non-linear warp to the image, where the warp is specified by a dense\n flow field of offset vectors that define the correspondences of pixel values\n in the output image back to locations in the source image. Specifically, the\n pixel value at output[b, j, i, c] is\n images[b, j - flow[b, j, i, 0], i - flow[b, j, i, 1], c].\n\n The locations specified by this formula do not necessarily map to an int\n index. Therefore, the pixel value is obtained by bilinear\n interpolation of the 4 nearest pixels around\n (b, j - flow[b, j, i, 0], i - flow[b, j, i, 1]). 
For locations outside\n of the image, we use the nearest pixel values at the image boundary.\n\n\n Args:\n image: 4-D float `Tensor` with shape `[batch, height, width, channels]`.\n flow: A 4-D float `Tensor` with shape `[batch, height, width, 2]`.\n name: A name for the operation (optional).\n\n Note that image and flow can be of type tf.half, tf.float32, or tf.float64,\n and do not necessarily have to be the same type.\n\n Returns:\n A 4-D float `Tensor` with shape`[batch, height, width, channels]`\n and same type as input image.\n\n Raises:\n ValueError: if height < 2 or width < 2 or the inputs have the wrong number\n of dimensions.\n \"\"\"\n with ops.name_scope(name):\n batch_size, height, width, channels = (array_ops.shape(image)[0],\n array_ops.shape(image)[1],\n array_ops.shape(image)[2],\n array_ops.shape(image)[3])\n\n # The flow is defined on the image grid. Turn the flow into a list of query\n # points in the grid space.\n grid_x, grid_y = array_ops.meshgrid(\n math_ops.range(width), math_ops.range(height))\n stacked_grid = math_ops.cast(\n array_ops.stack([grid_y, grid_x], axis=2), flow.dtype)\n batched_grid = array_ops.expand_dims(stacked_grid, axis=0)\n query_points_on_grid = batched_grid - flow\n query_points_flattened = array_ops.reshape(query_points_on_grid,\n [batch_size, height * width, 2])\n # Compute values at the query points, then reshape the result back to the\n # image grid.\n interpolated = _interpolate_bilinear(image, query_points_flattened)\n interpolated = array_ops.reshape(interpolated,\n [batch_size, height, width, channels])\n return interpolated\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Model script to test TF-TensorRT integration.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nclass BatchMatMulTest(trt_test.TfTrtIntegrationTestBase):\n\n def GetParams(self):\n \"\"\"Testing conversion of BatchMatMul in TF-TRT conversion.\"\"\"\n dtype = dtypes.float32\n input_name = \"input\"\n input_dims = [12, 5, 8, 12]\n output_name = \"output\"\n w1_name = \"matmul_w1\"\n w1_dims = [12, 5, 12, 7]\n w2_name = \"matmul_w2\"\n w2_dims = [12, 12, 7]\n g = ops.Graph()\n with g.as_default():\n inp = array_ops.placeholder(\n dtype=dtype, shape=[None] + input_dims[1:], name=input_name)\n w1 = array_ops.placeholder(dtype=dtype, shape=w1_dims, name=w1_name)\n w2 = array_ops.placeholder(dtype=dtype, shape=w2_dims, name=w2_name)\n with g.device(\"/GPU:0\"):\n 
b = constant_op.constant(np.random.randn(12, 5, 12, 7), dtype=dtype)\n x1 = math_ops.matmul(inp, b)\n c = constant_op.constant(np.random.randn(5, 1, 1), dtype=dtype)\n x1 = x1 + c\n\n x2 = math_ops.matmul(inp, w1)\n d = constant_op.constant(np.random.randn(5, 1, 1), dtype=dtype)\n x2 = x2 * d\n\n e = self.trt_incompatible_op(inp)\n e = gen_array_ops.reshape(e, [12, 40, 12])\n x3 = math_ops.matmul(e, w2)\n f = constant_op.constant(np.random.randn(40, 1), dtype=dtype)\n x3 = x3 + f\n x3 = gen_array_ops.reshape(x3, [12, 5, 8, 7])\n x3 = self.trt_incompatible_op(x3)\n\n out = x1 + x2 + x3\n array_ops.squeeze(out, name=output_name)\n return trt_test.TfTrtIntegrationTestParams(\n gdef=g.as_graph_def(add_shapes=True),\n input_names=[input_name, w1_name, w2_name],\n input_dims=[[input_dims, w1_dims, w2_dims]],\n output_names=[output_name],\n expected_output_dims=[[[12, 5, 8, 7]]])\n\n def ExpectedEnginesToBuild(self, run_params):\n \"\"\"Return the expected engines to build.\"\"\"\n return [\"TRTEngineOp_0\", \"TRTEngineOp_1\", \"TRTEngineOp_2\"]\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for new version of accumulate_n op.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import dtypes as dtypes_lib\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradients\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.ops.control_flow_ops import while_loop as while_loop_v1\nfrom tensorflow.python.platform import googletest\n\n\nclass AccumulateNV2Test(test_util.TensorFlowTestCase):\n \"\"\"Tests of the new, differentiable version of accumulate_n.\"\"\"\n\n @test_util.run_deprecated_v1\n def testFloat(self):\n np.random.seed(12345)\n x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]\n tf_x = ops.convert_n_to_tensor(x)\n with self.session(use_gpu=True):\n self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x).eval())\n self.assertAllClose(x[0] * 5,\n math_ops.accumulate_n([tf_x[0]] * 5).eval())\n\n @test_util.run_deprecated_v1\n def testInt(self):\n np.random.seed(54321)\n x = [np.random.randint(-128, 128, (5, 4, 3, 2, 1)) for _ in 
range(6)]\n tf_x = ops.convert_n_to_tensor(x)\n with self.session(use_gpu=True):\n self.assertAllEqual(sum(x), math_ops.accumulate_n(tf_x).eval())\n self.assertAllEqual(x[0] * 6,\n math_ops.accumulate_n([tf_x[0]] * 6).eval())\n\n @test_util.run_deprecated_v1\n def testUnknownShape(self):\n with self.session(use_gpu=True):\n x0 = array_ops.placeholder(dtype=dtypes_lib.int32, shape=[None])\n acc = math_ops.accumulate_n([x0, x0], shape=[None])\n self.assertAllEqual([2, 4], acc.eval(feed_dict={x0: [1, 2]}))\n\n @test_util.run_deprecated_v1\n def testGrad(self):\n np.random.seed(42)\n for num_inputs in range(1, 10):\n with self.cached_session(use_gpu=True) as sess:\n input_vars = [\n variables.Variable(10.0 * np.random.random())\n for _ in range(0, num_inputs)\n ]\n accum_n = math_ops.accumulate_n(input_vars)\n self.evaluate(variables.global_variables_initializer())\n accum_n_grad = gradients.gradients(accum_n, input_vars)\n self.assertAllEqual(\n np.repeat(1.0, num_inputs), # d/dx (x + y + ...) 
= 1\n [g.eval() for g in accum_n_grad])\n\n # The tests below used to be in a separate class under cwise_ops_test.py,\n # which did not run in the default test target.\n # Putting them here so that everything that exercises AccumulateNV2 is in\n # one place and the default build runs all unit tests.\n def testSimple(self):\n with self.cached_session():\n random_arrays = [\n np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)\n ]\n random_tensors = [\n ops.convert_to_tensor(x, dtype=dtypes_lib.float32)\n for x in random_arrays\n ]\n tf_val = math_ops.accumulate_n(random_tensors)\n np_val = random_arrays[0]\n for random_array in random_arrays[1:]:\n np_val += random_array\n self.assertAllClose(np_val, self.evaluate(tf_val))\n\n # Test that AccumulateNV2 rewrite correctly add edges necessary to propagate\n # while loop execution frame to all nodes.\n def testAccumulateInsideWhileLoop(self):\n with self.cached_session():\n random_arrays = [\n np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)\n ]\n random_tensors = [\n ops.convert_to_tensor(x, dtype=dtypes_lib.float32)\n for x in random_arrays\n ]\n\n def cond_fn(i, acc, tensors):\n del acc, tensors # unused\n return i < 1 # do just one iteration\n\n def body_fn(i, acc, tensors):\n return i + 1, acc + math_ops.accumulate_n(tensors), tensors\n\n zeros = np.zeros((16, 16, 16, 16)).astype(np.float32)\n _, tf_val, _ = while_loop_v1(cond_fn, body_fn, (0, zeros, random_tensors))\n np_val = random_arrays[0]\n for random_array in random_arrays[1:]:\n np_val += random_array\n self.assertAllClose(np_val, self.evaluate(tf_val))\n\n def testZeroArgs(self):\n with self.cached_session():\n with self.assertRaises(ValueError):\n tf_val = math_ops.accumulate_n([])\n self.evaluate(tf_val)\n\n def testWrongShape(self):\n with self.cached_session():\n with self.assertRaises(ValueError):\n a = variables.Variable(0.2)\n b = variables.Variable(0.1)\n math_ops.accumulate_n([a, b], shape=[2, 2]) # Should be 
shape=[]\n\n def testIncompatibleShapes(self):\n with self.cached_session():\n with self.assertRaises(ValueError):\n a = variables.Variable(np.array([0.1, 0.2]))\n b = variables.Variable(np.array([[0.3], [0.4]]))\n math_ops.accumulate_n([a, b])\n\n def testWrongType(self):\n with self.cached_session():\n with self.assertRaises(TypeError):\n a = variables.Variable(0.2, dtype=np.float32)\n b = variables.Variable(0.1, dtype=np.float32)\n math_ops.accumulate_n([a, b], tensor_dtype=np.int32)\n\n def testWrongTypeOneInput(self):\n # Scenario that used to trigger a bug, even when testWrongType() worked\n with self.cached_session():\n with self.assertRaises(TypeError):\n a = variables.Variable(0.2, dtype=np.float32)\n math_ops.accumulate_n([a], tensor_dtype=np.int32)\n\n\nif __name__ == \"__main__\":\n googletest.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities for testing state space models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy\n\nfrom tensorflow.contrib.timeseries.python.timeseries import math_utils\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import linalg_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\n\n\ndef transition_power_test_template(test_case, model, num_steps):\n \"\"\"Tests the transition_to_powers function of a state space model.\"\"\"\n transition_matrix = ops.convert_to_tensor(\n model.get_state_transition(), dtype=model.dtype)\n step_number = array_ops.placeholder(shape=[], dtype=dtypes.int64)\n state_dimension = tensor_shape.dimension_value(transition_matrix.shape[0])\n previous_matrix = array_ops.placeholder(\n shape=[state_dimension, state_dimension], dtype=transition_matrix.dtype)\n true_single_step_update = math_ops.matmul(previous_matrix,\n transition_matrix)\n model_output_tensor = model.transition_to_powers(powers=array_ops.stack(\n [step_number, step_number]))\n with 
test_case.test_session():\n starting_matrix = linalg_ops.eye(\n state_dimension, batch_shape=array_ops.shape(num_steps)).eval()\n evaled_current_matrix = starting_matrix\n for iteration_number in range(num_steps):\n model_output = model_output_tensor.eval(\n feed_dict={step_number: iteration_number})\n test_case.assertAllClose(\n evaled_current_matrix,\n model_output[0],\n rtol=1e-8 if evaled_current_matrix.dtype == numpy.float64 else 1e-4)\n evaled_current_matrix = true_single_step_update.eval(\n feed_dict={previous_matrix: evaled_current_matrix})\n\n\ndef noise_accumulator_test_template(test_case, model, num_steps):\n \"\"\"Tests `model`'s transition_power_noise_accumulator.\"\"\"\n transition_matrix = ops.convert_to_tensor(\n model.get_state_transition(), dtype=model.dtype)\n noise_transform = ops.convert_to_tensor(\n model.get_noise_transform(), dtype=model.dtype)\n state_dimension = tensor_shape.dimension_value(transition_matrix.shape[0])\n state_noise_dimension = tensor_shape.dimension_value(noise_transform.shape[1])\n gen_noise_addition = math_utils.sign_magnitude_positive_definite(\n raw=random_ops.random_normal(\n shape=[state_noise_dimension, state_noise_dimension],\n dtype=model.dtype))\n gen_starting_noise = math_utils.sign_magnitude_positive_definite(\n random_ops.random_normal(\n shape=[state_dimension, state_dimension], dtype=model.dtype))\n starting_noise = array_ops.placeholder(\n shape=[state_dimension, state_dimension], dtype=model.dtype)\n step_number = array_ops.placeholder(shape=[], dtype=dtypes.int64)\n starting_transitioned = math_ops.matmul(\n math_ops.matmul(transition_matrix, starting_noise),\n transition_matrix,\n adjoint_b=True)\n with test_case.test_session():\n evaled_starting_noise = gen_starting_noise.eval()\n current_starting_noise_transitioned = evaled_starting_noise\n current_noise = evaled_starting_noise\n evaled_noise_addition = gen_noise_addition.eval()\n evaled_noise_addition_transformed = math_ops.matmul(\n 
math_ops.matmul(noise_transform, evaled_noise_addition),\n noise_transform,\n adjoint_b=True).eval()\n model.state_transition_noise_covariance = evaled_noise_addition\n model._window_initializer( # pylint: disable=protected-access\n times=math_ops.range(num_steps + 1)[..., None], state=(None, None, 0))\n model_update = model.transition_power_noise_accumulator(\n num_steps=step_number)\n for iteration_number in range(num_steps):\n model_new_noise = model_update.eval(\n feed_dict={step_number: iteration_number})\n test_case.assertAllClose(\n current_noise,\n model_new_noise + current_starting_noise_transitioned,\n rtol=1e-8 if current_noise.dtype == numpy.float64 else 1e-3)\n current_starting_noise_transitioned = starting_transitioned.eval(\n feed_dict={starting_noise: current_starting_noise_transitioned})\n current_noise = (\n starting_transitioned.eval(\n feed_dict={starting_noise: current_noise})\n + evaled_noise_addition_transformed)\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Dequantize Operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import test\n\n\nclass DequantizeOpTest(test.TestCase):\n\n def __init__(self, method_name=\"runTest\"):\n super(DequantizeOpTest, self).__init__(method_name)\n\n def _testDequantizeOp(self, inputs, min_range, max_range, dtype):\n with self.cached_session():\n input_op = constant_op.constant(inputs, shape=[len(inputs)], dtype=dtype)\n dequantized = array_ops.dequantize(input_op, min_range, max_range)\n tf_ans = self.evaluate(dequantized)\n\n # TODO(vrv): Add support for DT_QINT32 quantization if needed.\n type_dict = {\n dtypes.quint8: np.uint8,\n dtypes.qint8: np.int8,\n dtypes.quint16: np.uint16,\n dtypes.qint16: np.int16\n }\n self.assertTrue(dtype in type_dict.keys())\n v_max = np.iinfo(type_dict[dtype]).max\n v_min = np.iinfo(type_dict[dtype]).min\n self.assertTrue(min_range >= v_min)\n self.assertTrue(max_range <= v_max)\n type_range = v_max - v_min\n if v_min < 0:\n half_range = (type_range + 1) / 2\n else:\n 
half_range = 0.0\n\n np_ans = ((inputs.astype(np.float32) + half_range) *\n (max_range - min_range) / type_range) + min_range\n self.assertAllClose(tf_ans, np_ans, rtol=1e-5, atol=1e-5)\n\n def testBasicQuint8(self):\n self._testDequantizeOp(np.array([0, 128, 255]), 0.0, 6.0, dtypes.quint8)\n self._testDequantizeOp(np.array([0, 128, 255]), 0.0, 123.456, dtypes.quint8)\n self._testDequantizeOp(\n np.array([0, 4, 42, 108, 243]), 5.0, 200.2, dtypes.quint8)\n\n def testBasicQint8(self):\n self._testDequantizeOp(np.array([-128, 0, 127]), -1.0, 2.0, dtypes.qint8)\n self._testDequantizeOp(np.array([-2, 4, -17]), -5.0, -3.0, dtypes.qint8)\n self._testDequantizeOp(np.array([0, -4, 42, -108]), 5.0, 40.0, dtypes.qint8)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for slim.data.dataset_data_provider.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\n\nfrom tensorflow.contrib.slim.python.slim import queues\nfrom tensorflow.contrib.slim.python.slim.data import dataset\nfrom tensorflow.contrib.slim.python.slim.data import dataset_data_provider\nfrom tensorflow.contrib.slim.python.slim.data import test_utils\nfrom tensorflow.contrib.slim.python.slim.data import tfexample_decoder\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import image_ops\nfrom tensorflow.python.ops import io_ops\nfrom tensorflow.python.ops import parsing_ops\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import test\n\n\ndef _resize_image(image, height, width):\n image = array_ops.expand_dims(image, 0)\n image = image_ops.resize_bilinear(image, [height, width])\n return array_ops.squeeze(image, [0])\n\n\ndef _create_tfrecord_dataset(tmpdir):\n if not gfile.Exists(tmpdir):\n gfile.MakeDirs(tmpdir)\n\n data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1)\n\n keys_to_features = {\n 
'image/encoded':\n parsing_ops.FixedLenFeature(\n shape=(), dtype=dtypes.string, default_value=''),\n 'image/format':\n parsing_ops.FixedLenFeature(\n shape=(), dtype=dtypes.string, default_value='jpeg'),\n 'image/class/label':\n parsing_ops.FixedLenFeature(\n shape=[1],\n dtype=dtypes.int64,\n default_value=array_ops.zeros(\n [1], dtype=dtypes.int64))\n }\n\n items_to_handlers = {\n 'image': tfexample_decoder.Image(),\n 'label': tfexample_decoder.Tensor('image/class/label'),\n }\n\n decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,\n items_to_handlers)\n\n return dataset.Dataset(\n data_sources=data_sources,\n reader=io_ops.TFRecordReader,\n decoder=decoder,\n num_samples=100,\n items_to_descriptions=None)\n\n\nclass DatasetDataProviderTest(test.TestCase):\n\n def testTFRecordDataset(self):\n dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),\n 'tfrecord_dataset'))\n\n height = 300\n width = 280\n\n with self.cached_session():\n test_dataset = _create_tfrecord_dataset(dataset_dir)\n provider = dataset_data_provider.DatasetDataProvider(test_dataset)\n key, image, label = provider.get(['record_key', 'image', 'label'])\n image = _resize_image(image, height, width)\n\n with session.Session('') as sess:\n with queues.QueueRunners(sess):\n key, image, label = sess.run([key, image, label])\n split_key = key.decode('utf-8').split(':')\n self.assertEqual(2, len(split_key))\n self.assertEqual(test_dataset.data_sources[0], split_key[0])\n self.assertTrue(split_key[1].isdigit())\n self.assertListEqual([height, width, 3], list(image.shape))\n self.assertListEqual([1], list(label.shape))\n\n def testTFRecordSeparateGetDataset(self):\n dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),\n 'tfrecord_separate_get'))\n\n height = 300\n width = 280\n\n with self.cached_session():\n provider = dataset_data_provider.DatasetDataProvider(\n _create_tfrecord_dataset(dataset_dir))\n [image] = provider.get(['image'])\n [label] = 
provider.get(['label'])\n image = _resize_image(image, height, width)\n\n with session.Session('') as sess:\n with queues.QueueRunners(sess):\n image, label = sess.run([image, label])\n self.assertListEqual([height, width, 3], list(image.shape))\n self.assertListEqual([1], list(label.shape))\n\n def testConflictingRecordKeyItem(self):\n dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),\n 'tfrecord_dataset'))\n\n with self.cached_session():\n with self.assertRaises(ValueError):\n dataset_data_provider.DatasetDataProvider(\n _create_tfrecord_dataset(dataset_dir), record_key='image')\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Strategy and optimizer combinations for combinations.combine().\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute.model_collection import simple_models\n\nsimple_functional_model = combinations.NamedObject(\n \"SimpleFunctionalModel\", simple_models.SimpleFunctionalModel())\n\nsimple_sequential_model = combinations.NamedObject(\n \"SimpleSequentialModel\", simple_models.SimpleSequentialModel())\n\nsimple_subclass_model = combinations.NamedObject(\n \"SimpleSubclassModel\", simple_models.SimpleSubclassModel())\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras layers that implement explicit (approximate) kernel feature maps.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport six\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras.engine import base_layer\nfrom tensorflow.python.keras.engine import input_spec\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import nn\n\n_SUPPORTED_RBF_KERNEL_TYPES = ['gaussian', 'laplacian']\n\n\nclass RandomFourierFeatures(base_layer.Layer):\n r\"\"\"Layer that maps its inputs using random Fourier features.\n\n This layer implements a feature map \\\\(\\phi: \\mathbb{R}^d \\rightarrow\n \\mathbb{R}^D\\\\) which approximates shift-invariant kernels. A kernel function\n K(x, y) defined over \\\\(\\mathbb{R}^d x \\mathbb{R}^d\\\\) is shift-invariant if\n K(x, y) = k(x-y) for some function defined over \\\\(\\mathbb{R}^d\\\\). 
Many\n popular Radial Basis Functions (in short RBF), including gaussian and\n laplacian kernels are shift-invariant.\n\n The layer approximates a (shift invariant) kernel K in the following sense:\n up to a scaling factor, for all inputs \\\\(x, y \\in \\mathbb{R}^d\\\\)\n \\\\(\\phi(x)^T \\cdot \\phi(y) \\approx K(x, y)\\\\)\n\n The implementation of this layer is based on the following paper:\n \"Random Features for Large-Scale Kernel Machines\" by Ali Rahimi and Ben Recht.\n (link: https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf)\n\n The distribution from which the parameters of the random features map (layer)\n are sampled, determines which shift-invariant kernel the layer approximates\n (see paper for more details). The users can use the distribution of their\n choice. Due to their popularity, the layer supports the out-of-the-box\n approximation of the following RBF kernels:\n - Gaussian: \\\\(K(x, y) = e^{-\\frac{\\|x-y\\|_2^2}{2 \\cdot scale^2}}\\\\)\n - Laplacian: \\\\(K(x, y) = e^{-\\frac{\\|x-y\\|_1}{scale}}\\\\)\n\n NOTE: Unlike the map described in the paper and the scikit-learn\n implementation, the output of this layer does not apply the sqrt(2/D)\n normalization factor.\n\n Usage for ML: Typically, this layer is used to \"kernelize\" linear models by\n applying a non-linear transformation (this layer) to the input features and\n then training a linear model on top of the transformed features. 
Depending on\n the loss function of the linear model, the composition of this layer and the\n linear model results to models that are equivalent (up to approximation) to\n kernel SVMs (for hinge loss), kernel logistic regression (for logistic loss),\n kernel linear regression (for squared loss) etc.\n\n Example of building a kernel multinomial logistic regression model with\n Gaussian kernel in keras:\n ```python\n random_features_layer = RandomFourierFeatures(\n output_dim=500,\n kernel_initializer='gaussian',\n scale=5.0,\n ...)\n\n model = tf.keras.models.Sequential()\n model.add(random_features_layer)\n model.add(tf.keras.layers.Dense(units=num_classes, activation='softmax')\n\n model.compile(\n loss=tf.keras.losses.categorical_crossentropy, optimizer=..., metrics=...)\n ```\n\n To use another kernel, replace the layer creation command with:\n ```python\n random_features_layer = RandomFourierFeatures(\n output_dim=500,\n kernel_initializer=<my_initializer>,\n scale=...,\n ...)\n ```\n\n Arguments:\n output_dim: Positive integer, the dimension of the layer's output, i.e., the\n number of random features used to approximate the kernel.\n kernel_initializer: Determines the distribution of the parameters of the\n random features map (and therefore the kernel approximated by the layer).\n It can be either a string or an instance of TensorFlow's Initializer\n class. Currently only 'gaussian' and 'laplacian' are supported as string\n initializers (case insensitive). Note that these parameters are not\n trainable.\n scale: For gaussian and laplacian kernels, this corresponds to a scaling\n factor of the corresponding kernel approximated by the layer (see concrete\n definitions above). When provided, it should be a positive float. If None,\n the implementation chooses a default value (1.0 typically). Both the\n approximation error of the kernel and the classification quality are\n sensitive to this parameter. 
If trainable is set to True, this paramater\n is learned end-to-end during training and the provided value serves as an\n initialization value.\n NOTE: When this layer is used to map the initial features and then the\n transformed features are fed to a linear model, by making `scale`\n trainable, the resulting optimization problem is no longer convex (even\n if the loss function used by the linear model is convex).\n trainable: Whether the scaling parameter of th layer is trainable. Defaults\n to False.\n name: name for the RandomFourierFeatures layer.\n\n Raises:\n ValueError: if output_dim or stddev are not positive or if the provided\n kernel_initializer is not supported.\n \"\"\"\n\n def __init__(self,\n output_dim,\n kernel_initializer='gaussian',\n scale=None,\n trainable=False,\n name=None,\n **kwargs):\n if output_dim <= 0:\n raise ValueError(\n '`output_dim` should be a positive integer. Given: {}.'.format(\n output_dim))\n if isinstance(kernel_initializer, six.string_types):\n if kernel_initializer.lower() not in _SUPPORTED_RBF_KERNEL_TYPES:\n raise ValueError(\n 'Unsupported kernel type: \\'{}\\'. Supported kernel types: {}.'\n .format(kernel_initializer, _SUPPORTED_RBF_KERNEL_TYPES))\n if scale is not None and scale <= 0.0:\n raise ValueError('When provided, `scale` should be a positive float. '\n 'Given: {}.'.format(scale))\n super(RandomFourierFeatures, self).__init__(\n trainable=trainable, name=name, **kwargs)\n self.output_dim = output_dim\n self.kernel_initializer = kernel_initializer\n self.scale = scale\n\n def build(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape)\n # TODO(sibyl-vie3Poto): Allow higher dimension inputs. Currently the input is expected\n # to have shape [batch_size, dimension].\n if input_shape.rank != 2:\n raise ValueError(\n 'The rank of the input tensor should be 2. 
Got {} instead.'.format(\n input_shape.ndims))\n if input_shape.dims[1].value is None:\n raise ValueError(\n 'The last dimension of the inputs to `RandomFourierFeatures` '\n 'should be defined. Found `None`.')\n self.input_spec = input_spec.InputSpec(\n ndim=2, axes={1: input_shape.dims[1].value})\n input_dim = input_shape.dims[1].value\n\n kernel_initializer = _get_random_features_initializer(\n self.kernel_initializer, shape=(input_dim, self.output_dim))\n\n unscaled_kernel = self.add_weight(\n name='unscaled_random_features',\n shape=(input_dim, self.output_dim),\n dtype=dtypes.float32,\n initializer=kernel_initializer,\n trainable=False)\n\n self.bias = self.add_weight(\n name='random_features_bias',\n shape=(self.output_dim,),\n dtype=dtypes.float32,\n initializer=init_ops.random_uniform_initializer(\n minval=0.0, maxval=2 * np.pi, dtype=dtypes.float32),\n trainable=False)\n\n if self.scale is None:\n self.scale = _get_default_scale(self.kernel_initializer, input_dim)\n scale = self.add_weight(\n name='random_features_scale',\n shape=(1,),\n dtype=dtypes.float32,\n initializer=init_ops.constant_initializer(self.scale),\n trainable=True,\n constraint='NonNeg')\n self.kernel = (1.0 / scale) * unscaled_kernel\n super(RandomFourierFeatures, self).build(input_shape)\n\n def call(self, inputs):\n inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)\n inputs = gen_math_ops.cast(inputs, dtypes.float32)\n outputs = gen_math_ops.mat_mul(inputs, self.kernel)\n outputs = nn.bias_add(outputs, self.bias)\n return gen_math_ops.cos(outputs)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape)\n input_shape = input_shape.with_rank(2)\n if input_shape.dims[-1].value is None:\n raise ValueError(\n 'The innermost dimension of input shape must be defined. 
Given: %s' %\n input_shape)\n return input_shape[:-1].concatenate(self.output_dim)\n\n def get_config(self):\n kernel_initializer = self.kernel_initializer\n if isinstance(self.kernel_initializer, init_ops.Initializer):\n kernel_initializer = initializers.serialize(self.kernel_initializer)\n config = {\n 'output_dim': self.output_dim,\n 'kernel_initializer': kernel_initializer,\n 'scale': self.scale,\n }\n base_config = super(RandomFourierFeatures, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\ndef _get_random_features_initializer(initializer, shape):\n \"\"\"Returns Initializer object for random features.\"\"\"\n\n def _get_cauchy_samples(loc, scale, shape):\n probs = np.random.uniform(low=0., high=1., size=shape)\n return loc + scale * np.tan(np.pi * (probs - 0.5))\n\n random_features_initializer = initializer\n if isinstance(initializer, six.string_types):\n if initializer.lower() == 'gaussian':\n random_features_initializer = init_ops.random_normal_initializer(\n stddev=1.0)\n elif initializer.lower() == 'laplacian':\n random_features_initializer = init_ops.constant_initializer(\n _get_cauchy_samples(loc=0.0, scale=1.0, shape=shape))\n\n else:\n raise ValueError(\n 'Unsupported kernel type: \\'{}\\'. Supported kernel types: {}.'.format(\n random_features_initializer, _SUPPORTED_RBF_KERNEL_TYPES))\n return random_features_initializer\n\n\ndef _get_default_scale(initializer, input_dim):\n if (isinstance(initializer, six.string_types) and\n initializer.lower() == 'gaussian'):\n return np.sqrt(input_dim / 2.0)\n return 1.0\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Benchmarks for `tf.data.experimental.snapshot()`.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\n\nfrom tensorflow.python.client import session\nfrom tensorflow.python.data.benchmarks import benchmark_base\nfrom tensorflow.python.data.experimental.ops import snapshot\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import errors_impl as errors\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.platform import test\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass SnapshotDatasetBenchmark(benchmark_base.DatasetBenchmarkBase):\n \"\"\"Benchmarks for `tf.data.experimental.snapshot()`.\"\"\"\n\n def _makeSnapshotDirectory(self):\n tmp_dir = test.get_temp_dir()\n tmp_dir = os.path.join(tmp_dir, \"snapshot\")\n if os.path.exists(tmp_dir):\n shutil.rmtree(tmp_dir)\n os.mkdir(tmp_dir)\n return tmp_dir\n\n def _createSimpleDataset(self, num_elems, tmp_dir=None):\n if not tmp_dir:\n tmp_dir = self._makeSnapshotDirectory()\n\n dataset = dataset_ops.Dataset.from_tensor_slices([1.0])\n dataset = dataset.map(\n lambda x: gen_array_ops.broadcast_to(x, [50, 50, 3]))\n 
dataset = dataset.repeat(num_elems)\n dataset = dataset.apply(snapshot.snapshot(tmp_dir))\n\n return dataset\n\n def _consumeDataset(self, dataset, num_elems):\n dataset = dataset.skip(num_elems)\n next_element = dataset_ops.make_one_shot_iterator(dataset).get_next()\n with session.Session() as sess:\n try:\n sess.run(next_element)\n except errors.OutOfRangeError:\n pass\n\n def benchmarkWriteSnapshotSimple(self):\n num_elems = 500000\n dataset = self._createSimpleDataset(num_elems)\n\n # We only run one iteration here because running multiple iterations will\n # cause the later iterations to simply read from the already written\n # snapshot rather than write a new one.\n self.run_and_report_benchmark(dataset, num_elems, \"write_simple\",\n warmup=False, iters=1)\n\n def benchmarkPassthroughSnapshotSimple(self):\n num_elems = 100000\n tmp_dir = self._makeSnapshotDirectory()\n dataset = self._createSimpleDataset(num_elems, tmp_dir)\n\n # Consume only 1 element, thus making sure we don't finalize.\n self._consumeDataset(dataset, 1)\n\n self.run_and_report_benchmark(dataset, num_elems, \"passthrough_simple\")\n\n def benchmarkReadSnapshotSimple(self):\n num_elems = 100000\n tmp_dir = self._makeSnapshotDirectory()\n dataset = self._createSimpleDataset(num_elems, tmp_dir)\n\n # consume all the elements to let snapshot write things to disk\n self._consumeDataset(dataset, num_elems)\n\n self.run_and_report_benchmark(dataset, num_elems, \"read_simple\")\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Operations often used for initializing tensors.\n\nAll variable initializers returned by functions in this file should have the\nfollowing signature:\n\ndef _initializer(shape, dtype=dtypes.float32, partition_info=None):\n Args:\n shape: List of `int` representing the shape of the output `Tensor`. Some\n initializers may also be able to accept a `Tensor`.\n dtype: (Optional) Type of the output `Tensor`.\n partition_info: (Optional) variable_scope._PartitionInfo object holding\n additional information about how the variable is partitioned. 
May be\n `None` if the variable is not partitioned.\n\n Returns:\n A `Tensor` of type `dtype` and `shape`.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_linalg_ops\nfrom tensorflow.python.ops import linalg_ops_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util.deprecation import deprecated\nfrom tensorflow.python.util.deprecation import deprecated_arg_values\nfrom tensorflow.python.util.deprecation import deprecated_args\nfrom tensorflow.python.util.tf_export import tf_export\n\n\nclass Initializer(object):\n \"\"\"Initializer base class: all initializers inherit from this class.\"\"\"\n\n def __call__(self, shape, dtype=None, partition_info=None):\n \"\"\"Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. If not provided use the initializer\n dtype.\n partition_info: Optional information about the possible partitioning of a\n tensor.\n \"\"\"\n raise NotImplementedError\n\n def get_config(self):\n \"\"\"Returns the configuration of the initializer as a JSON-serializable dict.\n\n Returns:\n A JSON-serializable Python dict.\n \"\"\"\n return {}\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Instantiates an initializer from a configuration dictionary.\n\n Example:\n\n ```python\n initializer = RandomUniform(-1, 1)\n config = initializer.get_config()\n initializer = RandomUniform.from_config(config)\n ```\n\n Args:\n config: A Python dictionary. 
It will typically be the output of\n `get_config`.\n\n Returns:\n An Initializer instance.\n \"\"\"\n return cls(**config)\n\n\n@tf_export(v1=[\"initializers.zeros\", \"zeros_initializer\"])\[email protected]_endpoints(\"initializers.zeros\")\nclass Zeros(Initializer):\n \"\"\"Initializer that generates tensors initialized to 0.\"\"\"\n\n @deprecated_args(None,\n \"Call initializer instance with the dtype argument instead \"\n \"of passing it to the constructor\", \"dtype\")\n def __init__(self, dtype=dtypes.float32):\n self.dtype = dtypes.as_dtype(dtype)\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n return array_ops.zeros(shape, dtype)\n\n def get_config(self):\n return {\"dtype\": self.dtype.name}\n\n\n@tf_export(v1=[\"initializers.ones\", \"ones_initializer\"])\[email protected]_endpoints(\"initializers.ones\", \"ones_initializer\")\nclass Ones(Initializer):\n \"\"\"Initializer that generates tensors initialized to 1.\"\"\"\n\n @deprecated_args(None,\n \"Call initializer instance with the dtype argument instead \"\n \"of passing it to the constructor\", \"dtype\")\n def __init__(self, dtype=dtypes.float32):\n self.dtype = dtypes.as_dtype(dtype)\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n return array_ops.ones(shape, dtype)\n\n def get_config(self):\n return {\"dtype\": self.dtype.name}\n\n\n@tf_export(v1=[\"initializers.constant\", \"constant_initializer\"])\[email protected]_endpoints(\"constant_initializer\")\nclass Constant(Initializer):\n \"\"\"Initializer that generates tensors with constant values.\n\n The resulting tensor is populated with values of type `dtype`, as\n specified by arguments `value` following the desired `shape` of the\n new tensor (see examples below).\n\n The argument `value` can be a constant value, or a list of values of type\n `dtype`. 
If `value` is a list, then the length of the list must be less\n than or equal to the number of elements implied by the desired shape of the\n tensor. In the case where the total number of elements in `value` is less\n than the number of elements required by the tensor shape, the last element\n in `value` will be used to fill the remaining entries. If the total number of\n elements in `value` is greater than the number of elements required by the\n tensor shape, the initializer will raise a `ValueError`.\n\n Args:\n value: A Python scalar, list or tuple of values, or a N-dimensional numpy\n array. All elements of the initialized variable will be set to the\n corresponding value in the `value` argument.\n dtype: Default data type, used if no `dtype` argument is provided when\n calling the initializer.\n verify_shape: Boolean that enables verification of the shape of `value`. If\n `True`, the initializer will throw an error if the shape of `value` is not\n compatible with the shape of the initialized tensor.\n\n Raises:\n TypeError: If the input `value` is not one of the expected types.\n\n Examples:\n The following example can be rewritten using a numpy.ndarray instead\n of the `value` list, even reshaped, as shown in the two commented lines\n below the `value` list initialization.\n\n ```python\n >>> import numpy as np\n >>> import tensorflow as tf\n\n >>> value = [0, 1, 2, 3, 4, 5, 6, 7]\n >>> # value = np.array(value)\n >>> # value = value.reshape([2, 4])\n >>> init = tf.compat.v1.constant_initializer(value)\n\n >>> print('fitting shape:')\n >>> with tf.compat.v1.Session():\n >>> x = tf.compat.v1.get_variable('x', shape=[2, 4], initializer=init)\n >>> x.initializer.run()\n >>> print(x.eval())\n\n fitting shape:\n [[ 0. 1. 2. 3.]\n [ 4. 5. 6. 7.]]\n\n >>> print('larger shape:')\n >>> with tf.compat.v1.Session():\n >>> x = tf.compat.v1.get_variable('x', shape=[3, 4], initializer=init)\n >>> x.initializer.run()\n >>> print(x.eval())\n\n larger shape:\n [[ 0. 1. 2. 
3.]\n [ 4. 5. 6. 7.]\n [ 7. 7. 7. 7.]]\n\n >>> print('smaller shape:')\n >>> with tf.compat.v1.Session():\n >>> x = tf.compat.v1.get_variable('x', shape=[2, 3], initializer=init)\n\n ValueError: Too many elements provided. Needed at most 6, but received 8\n\n >>> print('shape verification:')\n >>> init_verify = tf.compat.v1.constant_initializer(value,\n verify_shape=True)\n >>> with tf.compat.v1.Session():\n >>> x = tf.compat.v1.get_variable('x', shape=[3, 4],\n initializer=init_verify)\n\n TypeError: Expected Tensor's shape: (3, 4), got (8,).\n ```\n \"\"\"\n\n @deprecated_args(None,\n \"Call initializer instance with the dtype argument instead \"\n \"of passing it to the constructor\", \"dtype\")\n @deprecated_args(None, \"Objects must now be the required shape or no shape \"\n \"can be specified\", \"verify_shape\")\n def __init__(self, value=0, dtype=dtypes.float32, verify_shape=False):\n if not (np.isscalar(value) or isinstance(value, (list, tuple, np.ndarray))):\n raise TypeError(\n \"Invalid type for initial value: %s (expected Python scalar, list or \"\n \"tuple of values, or numpy.ndarray).\" % type(value))\n\n self.value = value\n self.dtype = dtypes.as_dtype(dtype)\n self._verify_shape = verify_shape\n\n def __call__(self, shape, dtype=None, partition_info=None, verify_shape=None):\n if dtype is None:\n dtype = self.dtype\n if verify_shape is None:\n verify_shape = self._verify_shape\n return constant_op.constant_v1(\n self.value, dtype=dtype, shape=shape, verify_shape=verify_shape)\n\n def get_config(self):\n # We don't include `verify_shape` for compatibility with Keras.\n # `verify_shape` should be passed as an argument to `__call__` rather\n # than as a constructor argument: conceptually it isn't a property\n # of the initializer.\n return {\"value\": self.value, \"dtype\": self.dtype.name}\n\n\n@tf_export(v1=[\"initializers.random_uniform\", \"random_uniform_initializer\"])\[email protected]_endpoints(\"initializers.random_uniform\")\nclass 
RandomUniform(Initializer):\n \"\"\"Initializer that generates tensors with a uniform distribution.\n\n Args:\n minval: A python scalar or a scalar tensor. Lower bound of the range of\n random values to generate.\n maxval: A python scalar or a scalar tensor. Upper bound of the range of\n random values to generate. Defaults to 1 for float types.\n seed: A Python integer. Used to create random seeds. See\n `tf.compat.v1.set_random_seed` for behavior.\n dtype: Default data type, used if no `dtype` argument is provided when\n calling the initializer.\n \"\"\"\n\n @deprecated_args(None,\n \"Call initializer instance with the dtype argument instead \"\n \"of passing it to the constructor\", \"dtype\")\n def __init__(self, minval=0, maxval=None, seed=None, dtype=dtypes.float32):\n self.minval = minval\n self.maxval = maxval\n self.seed = seed\n self.dtype = dtypes.as_dtype(dtype)\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n return random_ops.random_uniform(\n shape, self.minval, self.maxval, dtype, seed=self.seed)\n\n def get_config(self):\n return {\n \"minval\": self.minval,\n \"maxval\": self.maxval,\n \"seed\": self.seed,\n \"dtype\": self.dtype.name\n }\n\n\n@tf_export(v1=[\"initializers.random_normal\", \"random_normal_initializer\"])\[email protected]_endpoints(\"initializers.random_normal\")\nclass RandomNormal(Initializer):\n \"\"\"Initializer that generates tensors with a normal distribution.\n\n Args:\n mean: a python scalar or a scalar tensor. Mean of the random values to\n generate.\n stddev: a python scalar or a scalar tensor. Standard deviation of the random\n values to generate.\n seed: A Python integer. Used to create random seeds. See\n `tf.compat.v1.set_random_seed` for behavior.\n dtype: Default data type, used if no `dtype` argument is provided when\n calling the initializer. 
Only floating point types are supported.\n \"\"\"\n\n @deprecated_args(None,\n \"Call initializer instance with the dtype argument instead \"\n \"of passing it to the constructor\", \"dtype\")\n def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32):\n self.mean = mean\n self.stddev = stddev\n self.seed = seed\n self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n return random_ops.random_normal(\n shape, self.mean, self.stddev, dtype, seed=self.seed)\n\n def get_config(self):\n return {\n \"mean\": self.mean,\n \"stddev\": self.stddev,\n \"seed\": self.seed,\n \"dtype\": self.dtype.name\n }\n\n\n@tf_export(v1=[\"initializers.truncated_normal\", \"truncated_normal_initializer\"])\[email protected]_endpoints(\"initializers.truncated_normal\",\n \"truncated_normal_initializer\")\nclass TruncatedNormal(Initializer):\n \"\"\"Initializer that generates a truncated normal distribution.\n\n These values are similar to values from a `random_normal_initializer`\n except that values more than two standard deviations from the mean\n are discarded and re-drawn. This is the recommended initializer for\n neural network weights and filters.\n\n Args:\n mean: a python scalar or a scalar tensor. Mean of the random values to\n generate.\n stddev: a python scalar or a scalar tensor. Standard deviation of the random\n values to generate.\n seed: A Python integer. Used to create random seeds. See\n `tf.compat.v1.set_random_seed` for behavior.\n dtype: Default data type, used if no `dtype` argument is provided when\n calling the initializer. 
Only floating point types are supported.\n \"\"\"\n\n @deprecated_args(None,\n \"Call initializer instance with the dtype argument instead \"\n \"of passing it to the constructor\", \"dtype\")\n def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32):\n self.mean = mean\n self.stddev = stddev\n self.seed = seed\n self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n return random_ops.truncated_normal(\n shape, self.mean, self.stddev, dtype, seed=self.seed)\n\n def get_config(self):\n return {\n \"mean\": self.mean,\n \"stddev\": self.stddev,\n \"seed\": self.seed,\n \"dtype\": self.dtype.name\n }\n\n\n@tf_export(v1=[\n \"initializers.uniform_unit_scaling\", \"uniform_unit_scaling_initializer\"\n])\[email protected]_endpoints(\"uniform_unit_scaling_initializer\",\n \"initializers.uniform_unit_scaling\")\nclass UniformUnitScaling(Initializer):\n \"\"\"Initializer that generates tensors without scaling variance.\n\n When initializing a deep network, it is in principle advantageous to keep\n the scale of the input variance constant, so it does not explode or diminish\n by reaching the final layer. If the input is `x` and the operation `x * W`,\n and we want to initialize `W` uniformly at random, we need to pick `W` from\n\n [-sqrt(3) / sqrt(dim), sqrt(3) / sqrt(dim)]\n\n to keep the scale intact, where `dim = W.shape[0]` (the size of the input).\n A similar calculation for convolutional networks gives an analogous result\n with `dim` equal to the product of the first 3 dimensions. When\n nonlinearities are present, we need to multiply this by a constant `factor`.\n See (Sussillo et al., 2014) for deeper motivation, experiments\n and the calculation of constants. In section 2.3 there, the constants were\n numerically computed: for a linear layer it's 1.0, relu: ~1.43, tanh: ~1.15.\n\n Args:\n factor: Float. 
A multiplicative factor by which the values will be scaled.\n seed: A Python integer. Used to create random seeds. See\n `tf.compat.v1.set_random_seed` for behavior.\n dtype: Default data type, used if no `dtype` argument is provided when\n calling the initializer. Only floating point types are supported.\n References:\n [Sussillo et al., 2014](https://arxiv.org/abs/1412.6558)\n ([pdf](http://arxiv.org/pdf/1412.6558.pdf))\n \"\"\"\n\n @deprecated_args(None,\n \"Call initializer instance with the dtype argument instead \"\n \"of passing it to the constructor\", \"dtype\")\n @deprecated(None,\n \"Use tf.initializers.variance_scaling instead with distribution=\"\n \"uniform to get equivalent behavior.\")\n def __init__(self, factor=1.0, seed=None, dtype=dtypes.float32):\n self.factor = factor\n self.seed = seed\n self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n scale_shape = shape\n if partition_info is not None:\n scale_shape = partition_info.full_shape\n\n input_size = 1.0\n # Estimating input size is not possible to do perfectly, but we try.\n # The estimate, obtained by multiplying all dimensions but the last one,\n # is the right thing for matrix multiply and convolutions (see above).\n for dim in scale_shape[:-1]:\n input_size *= float(dim)\n # Avoid errors when initializing zero-size tensors.\n input_size = max(input_size, 1.0)\n max_val = math.sqrt(3 / input_size) * self.factor\n return random_ops.random_uniform(\n shape, -max_val, max_val, dtype, seed=self.seed)\n\n def get_config(self):\n return {\"factor\": self.factor, \"seed\": self.seed, \"dtype\": self.dtype.name}\n\n\n@tf_export(v1=[\"initializers.variance_scaling\", \"variance_scaling_initializer\"])\[email protected]_endpoints(\"initializers.variance_scaling\",\n \"variance_scaling_initializer\")\nclass VarianceScaling(Initializer):\n \"\"\"Initializer capable of adapting its scale to 
the shape of weights tensors.\n\n With `distribution=\"truncated_normal\" or \"untruncated_normal\"`,\n samples are drawn from a truncated/untruncated normal\n distribution with a mean of zero and a standard deviation (after truncation,\n if used) `stddev = sqrt(scale / n)`\n where n is:\n - number of input units in the weight tensor, if mode = \"fan_in\"\n - number of output units, if mode = \"fan_out\"\n - average of the numbers of input and output units, if mode = \"fan_avg\"\n\n With `distribution=\"uniform\"`, samples are drawn from a uniform distribution\n within [-limit, limit], with `limit = sqrt(3 * scale / n)`.\n\n Args:\n scale: Scaling factor (positive float).\n mode: One of \"fan_in\", \"fan_out\", \"fan_avg\".\n distribution: Random distribution to use. One of \"normal\", \"uniform\".\n seed: A Python integer. Used to create random seeds. See\n `tf.compat.v1.set_random_seed` for behavior.\n dtype: Default data type, used if no `dtype` argument is provided when\n calling the initializer. 
Only floating point types are supported.\n\n Raises:\n ValueError: In case of an invalid value for the \"scale\", mode\" or\n \"distribution\" arguments.\n \"\"\"\n\n @deprecated_args(None,\n \"Call initializer instance with the dtype argument instead \"\n \"of passing it to the constructor\", \"dtype\")\n @deprecated_arg_values(\n None,\n \"`normal` is a deprecated alias for `truncated_normal`\",\n distribution=\"normal\")\n def __init__(self,\n scale=1.0,\n mode=\"fan_in\",\n distribution=\"truncated_normal\",\n seed=None,\n dtype=dtypes.float32):\n if scale <= 0.:\n raise ValueError(\"`scale` must be positive float.\")\n if mode not in {\"fan_in\", \"fan_out\", \"fan_avg\"}:\n raise ValueError(\"Invalid `mode` argument:\", mode)\n distribution = distribution.lower()\n if distribution not in {\n \"normal\", \"uniform\", \"truncated_normal\", \"untruncated_normal\"\n }:\n raise ValueError(\"Invalid `distribution` argument:\", distribution)\n self.scale = scale\n self.mode = mode\n self.distribution = distribution\n self.seed = seed\n self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n scale = self.scale\n scale_shape = shape\n if partition_info is not None:\n scale_shape = partition_info.full_shape\n fan_in, fan_out = _compute_fans(scale_shape)\n if self.mode == \"fan_in\":\n scale /= max(1., fan_in)\n elif self.mode == \"fan_out\":\n scale /= max(1., fan_out)\n else:\n scale /= max(1., (fan_in + fan_out) / 2.)\n if self.distribution == \"normal\" or self.distribution == \"truncated_normal\":\n # constant taken from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)\n stddev = math.sqrt(scale) / .87962566103423978\n return random_ops.truncated_normal(\n shape, 0.0, stddev, dtype, seed=self.seed)\n elif self.distribution == \"untruncated_normal\":\n stddev = math.sqrt(scale)\n return random_ops.random_normal(shape, 0.0, stddev, dtype, 
seed=self.seed)\n else:\n limit = math.sqrt(3.0 * scale)\n return random_ops.random_uniform(\n shape, -limit, limit, dtype, seed=self.seed)\n\n def get_config(self):\n return {\n \"scale\": self.scale,\n \"mode\": self.mode,\n \"distribution\": self.distribution,\n \"seed\": self.seed,\n \"dtype\": self.dtype.name\n }\n\n\n@tf_export(v1=[\"initializers.orthogonal\", \"orthogonal_initializer\"])\[email protected]_endpoints(\"initializers.orthogonal\",\n \"orthogonal_initializer\")\nclass Orthogonal(Initializer):\n \"\"\"Initializer that generates an orthogonal matrix.\n\n If the shape of the tensor to initialize is two-dimensional, it is initialized\n with an orthogonal matrix obtained from the QR decomposition of a matrix of\n random numbers drawn from a normal distribution.\n If the matrix has fewer rows than columns then the output will have orthogonal\n rows. Otherwise, the output will have orthogonal columns.\n\n If the shape of the tensor to initialize is more than two-dimensional,\n a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`\n is initialized, where `n` is the length of the shape vector.\n The matrix is subsequently reshaped to give a tensor of the desired shape.\n\n Args:\n gain: multiplicative factor to apply to the orthogonal matrix\n seed: A Python integer. Used to create random seeds. See\n `tf.compat.v1.set_random_seed` for behavior.\n dtype: Default data type, used if no `dtype` argument is provided when\n calling the initializer. 
Only floating point types are supported.\n References:\n [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)\n ([pdf](https://arxiv.org/pdf/1312.6120.pdf))\n \"\"\"\n\n @deprecated_args(None,\n \"Call initializer instance with the dtype argument instead \"\n \"of passing it to the constructor\", \"dtype\")\n def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32):\n self.gain = gain\n self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))\n self.seed = seed\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n # Check the shape\n if len(shape) < 2:\n raise ValueError(\"The tensor to initialize must be \"\n \"at least two-dimensional\")\n # Flatten the input shape with the last dimension remaining\n # its original shape so it works for conv2d\n num_rows = 1\n for dim in shape[:-1]:\n num_rows *= dim\n num_rows = int(num_rows)\n num_cols = int(shape[-1])\n if num_rows < num_cols:\n flat_shape = (num_cols, num_rows)\n else:\n flat_shape = (num_rows, num_cols)\n\n # Generate a random matrix\n a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)\n # Compute the qr factorization\n q, r = gen_linalg_ops.qr(a, full_matrices=False)\n # Make Q uniform\n d = array_ops.diag_part(r)\n q *= math_ops.sign(d)\n if num_rows < num_cols:\n q = array_ops.matrix_transpose(q)\n return self.gain * array_ops.reshape(q, shape)\n\n def get_config(self):\n return {\"gain\": self.gain, \"seed\": self.seed, \"dtype\": self.dtype.name}\n\n\n# Note these haven't been ported to TF2.0. They are not currently visible and\n# the tests are non trivial to port\nclass ConvolutionDeltaOrthogonal(Initializer):\n \"\"\"Initializer that generates a delta orthogonal kernel for ConvNets.\n\n The shape of the tensor must have length 3, 4 or 5. The number of input\n filters must not exceed the number of output filters. The center pixels of the\n tensor form an orthogonal matrix. Other pixels are set to be zero. 
See\n algorithm 2 in (Xiao et al., 2018).\n\n\n Args:\n gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1.\n The 2-norm of an input is multiplied by a factor of `gain` after applying\n this convolution.\n seed: A Python integer. Used to create random seeds. See\n `tf.compat.v1.set_random_seed` for behavior.\n dtype: Default data type, used if no `dtype` argument is provided when\n calling the initializer. Only floating point types are supported.\n References:\n [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html)\n ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))\n \"\"\"\n\n def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32):\n self.gain = gain\n self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))\n self.seed = seed\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n # Check the shape\n if len(shape) < 3 or len(shape) > 5:\n raise ValueError(\"The tensor to initialize must be at least \"\n \"three-dimensional and at most five-dimensional\")\n\n if shape[-2] > shape[-1]:\n raise ValueError(\"In_filters cannot be greater than out_filters.\")\n\n # Generate a random matrix\n a = random_ops.random_normal([shape[-1], shape[-1]],\n dtype=dtype,\n seed=self.seed)\n # Compute the qr factorization\n q, r = gen_linalg_ops.qr(a, full_matrices=False)\n # Make Q uniform\n d = array_ops.diag_part(r)\n q *= math_ops.sign(d)\n q = q[:shape[-2], :]\n q *= math_ops.cast(self.gain, dtype=dtype)\n if len(shape) == 3:\n weight = array_ops.scatter_nd([[(shape[0] - 1) // 2]],\n array_ops.expand_dims(q, 0), shape)\n elif len(shape) == 4:\n weight = array_ops.scatter_nd([[(shape[0] - 1) // 2,\n (shape[1] - 1) // 2]],\n array_ops.expand_dims(q, 0), shape)\n else:\n weight = array_ops.scatter_nd([[(shape[0] - 1) // 2, (shape[1] - 1) // 2,\n (shape[2] - 1) // 2]],\n array_ops.expand_dims(q, 0), shape)\n return weight\n\n def get_config(self):\n return {\"gain\": 
self.gain, \"seed\": self.seed, \"dtype\": self.dtype.name}\n\n\nclass ConvolutionOrthogonal(Initializer):\n \"\"\"Initializer that generates orthogonal kernel for ConvNets.\n\n Base class used to construct 1D, 2D and 3D orthogonal kernels for convolution.\n\n Args:\n gain: multiplicative factor to apply to the orthogonal matrix. Default is 1.\n The 2-norm of an input is multiplied by a factor of `gain` after applying\n this convolution.\n seed: A Python integer. Used to create random seeds. See\n `tf.compat.v1.set_random_seed` for behavior.\n dtype: Default data type, used if no `dtype` argument is provided when\n calling the initializer. Only floating point types are supported.\n References:\n [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html)\n ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))\n \"\"\"\n\n def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32):\n self.gain = gain\n self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))\n self.seed = seed\n\n def __call__(self, shape, dtype=None, partition_info=None):\n raise NotImplementedError\n\n def get_config(self):\n return {\"gain\": self.gain, \"seed\": self.seed, \"dtype\": self.dtype.name}\n\n # Helper functions.\n def _orthogonal_matrix(self, n):\n \"\"\"Construct an n x n orthogonal matrix.\n\n Args:\n n: Dimension.\n\n Returns:\n A n x n orthogonal matrix.\n \"\"\"\n a = random_ops.random_normal([n, n], dtype=self.dtype, seed=self.seed)\n if self.seed:\n self.seed += 1\n q, r = gen_linalg_ops.qr(a)\n d = array_ops.diag_part(r)\n # make q uniform\n q *= math_ops.sign(d)\n return q\n\n def _symmetric_projection(self, n):\n \"\"\"Compute a n x n symmetric projection matrix.\n\n Args:\n n: Dimension.\n\n Returns:\n A n x n symmetric projection matrix, i.e. a matrix P s.t. 
P=P*P, P=P^T.\n \"\"\"\n q = self._orthogonal_matrix(n)\n # randomly zeroing out some columns\n mask = math_ops.cast(\n random_ops.random_normal([n], seed=self.seed) > 0, self.dtype)\n if self.seed:\n self.seed += 1\n c = math_ops.multiply(q, mask)\n return math_ops.matmul(c, array_ops.matrix_transpose(c))\n\n\nclass ConvolutionOrthogonal2D(ConvolutionOrthogonal):\n \"\"\"Initializer that generates a 2D orthogonal kernel for ConvNets.\n\n The shape of the tensor must have length 4. The number of input\n filters must not exceed the number of output filters.\n The orthogonality(==isometry) is exact when the inputs are circular padded.\n There are finite-width effects with non-circular padding (e.g. zero padding).\n See algorithm 1 in (Xiao et al., 2018).\n\n Args:\n gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1.\n This has the effect of scaling the output 2-norm by a factor of `gain`.\n seed: A Python integer. Used to create random seeds. See\n `tf.compat.v1.set_random_seed` for behavior.\n dtype: Default data type, used if no `dtype` argument is provided when\n calling the initializer. 
Only floating point types are supported.\n References:\n [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html)\n ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))\n \"\"\"\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n if len(shape) != 4:\n raise ValueError(\"The tensor to initialize must be four-dimensional\")\n\n if shape[-2] > shape[-1]:\n raise ValueError(\"In_filters cannot be greater than out_filters.\")\n\n if shape[0] != shape[1]:\n raise ValueError(\"Kernel sizes must be equal.\")\n\n kernel = self._orthogonal_kernel(shape[0], shape[2], shape[3])\n kernel *= math_ops.cast(self.gain, dtype=dtype)\n return kernel\n\n def _dict_to_tensor(self, x, k1, k2):\n \"\"\"Convert a dictionary to a tensor.\n\n Args:\n x: A k1 * k2 dictionary.\n k1: First dimension of x.\n k2: Second dimension of x.\n\n Returns:\n A k1 * k2 tensor.\n \"\"\"\n\n return array_ops.stack([array_ops.stack([x[i, j] for j in range(k2)])\n for i in range(k1)])\n\n def _block_orth(self, p1, p2):\n \"\"\"Construct a 2 x 2 kernel.\n\n Used to construct orthgonal kernel.\n\n Args:\n p1: A symmetric projection matrix.\n p2: A symmetric projection matrix.\n\n Returns:\n A 2 x 2 kernel [[p1p2, p1(1-p2)],\n [(1-p1)p2, (1-p1)(1-p2)]].\n Raises:\n ValueError: If the dimensions of p1 and p2 are different.\n \"\"\"\n if p1.shape.as_list() != p2.shape.as_list():\n raise ValueError(\"The dimension of the matrices must be the same.\")\n n = p1.shape.as_list()[0]\n kernel2x2 = {}\n eye = linalg_ops_impl.eye(n, dtype=self.dtype)\n kernel2x2[0, 0] = math_ops.matmul(p1, p2)\n kernel2x2[0, 1] = math_ops.matmul(p1, (eye - p2))\n kernel2x2[1, 0] = math_ops.matmul((eye - p1), p2)\n kernel2x2[1, 1] = math_ops.matmul((eye - p1), (eye - p2))\n\n return kernel2x2\n\n def _matrix_conv(self, m1, m2):\n \"\"\"Matrix convolution.\n\n Args:\n m1: A k x k dictionary, each element is a n x n matrix.\n m2: A l x l dictionary, each element is a n 
x n matrix.\n\n Returns:\n (k + l - 1) * (k + l - 1) dictionary each element is a n x n matrix.\n Raises:\n ValueError: if the entries of m1 and m2 are of different dimensions.\n \"\"\"\n\n n = (m1[0, 0]).shape.as_list()[0]\n if n != (m2[0, 0]).shape.as_list()[0]:\n raise ValueError(\"The entries in matrices m1 and m2 \"\n \"must have the same dimensions!\")\n k = int(np.sqrt(len(m1)))\n l = int(np.sqrt(len(m2)))\n result = {}\n size = k + l - 1\n # Compute matrix convolution between m1 and m2.\n for i in range(size):\n for j in range(size):\n result[i, j] = array_ops.zeros([n, n], self.dtype)\n for index1 in range(min(k, i + 1)):\n for index2 in range(min(k, j + 1)):\n if (i - index1) < l and (j - index2) < l:\n result[i, j] += math_ops.matmul(m1[index1, index2],\n m2[i - index1, j - index2])\n return result\n\n def _orthogonal_kernel(self, ksize, cin, cout):\n \"\"\"Construct orthogonal kernel for convolution.\n\n Args:\n ksize: Kernel size.\n cin: Number of input channels.\n cout: Number of output channels.\n\n Returns:\n An [ksize, ksize, cin, cout] orthogonal kernel.\n Raises:\n ValueError: If cin > cout.\n \"\"\"\n if cin > cout:\n raise ValueError(\"The number of input channels cannot exceed \"\n \"the number of output channels.\")\n orth = self._orthogonal_matrix(cout)[0:cin, :]\n if ksize == 1:\n return array_ops.expand_dims(array_ops.expand_dims(orth, 0), 0)\n\n p = self._block_orth(\n self._symmetric_projection(cout), self._symmetric_projection(cout))\n for _ in range(ksize - 2):\n temp = self._block_orth(\n self._symmetric_projection(cout), self._symmetric_projection(cout))\n p = self._matrix_conv(p, temp)\n for i in range(ksize):\n for j in range(ksize):\n p[i, j] = math_ops.matmul(orth, p[i, j])\n\n return self._dict_to_tensor(p, ksize, ksize)\n\n\nclass ConvolutionOrthogonal1D(ConvolutionOrthogonal):\n \"\"\"Initializer that generates a 1D orthogonal kernel for ConvNets.\n\n The shape of the tensor must have length 3. 
The number of input\n filters must not exceed the number of output filters.\n The orthogonality(==isometry) is exact when the inputs are circular padded.\n There are finite-width effects with non-circular padding (e.g. zero padding).\n See algorithm 1 in (Xiao et al., 2018).\n\n Args:\n gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1.\n The 2-norm of an input is multiplied by a factor of `gain` after applying\n this convolution.\n seed: A Python integer. Used to create random seeds. See\n `tf.compat.v1.set_random_seed` for behavior.\n dtype: Default data type, used if no `dtype` argument is provided when\n calling the initializer. Only floating point types are supported.\n References:\n [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html)\n ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))\n \"\"\"\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n if len(shape) != 3:\n raise ValueError(\"The tensor to initialize must be three-dimensional\")\n\n if shape[-2] > shape[-1]:\n raise ValueError(\"In_filters cannot be greater than out_filters.\")\n\n kernel = self._orthogonal_kernel(shape[0], shape[-2], shape[-1])\n kernel *= math_ops.cast(self.gain, dtype=dtype)\n return kernel\n\n def _dict_to_tensor(self, x, k):\n \"\"\"Convert a dictionary to a tensor.\n\n Args:\n x: A dictionary of length k.\n k: Dimension of x.\n\n Returns:\n A tensor with the same dimension.\n \"\"\"\n\n return array_ops.stack([x[i] for i in range(k)])\n\n def _block_orth(self, projection_matrix):\n \"\"\"Construct a kernel.\n\n Used to construct orthgonal kernel.\n\n Args:\n projection_matrix: A symmetric projection matrix of size n x n.\n\n Returns:\n [projection_matrix, (1 - projection_matrix)].\n \"\"\"\n n = projection_matrix.shape.as_list()[0]\n kernel = {}\n eye = linalg_ops_impl.eye(n, dtype=self.dtype)\n kernel[0] = projection_matrix\n kernel[1] = eye - projection_matrix\n return 
kernel\n\n def _matrix_conv(self, m1, m2):\n \"\"\"Matrix convolution.\n\n Args:\n m1: A dictionary of length k, each element is a n x n matrix.\n m2: A dictionary of length l, each element is a n x n matrix.\n\n Returns:\n (k + l - 1) dictionary each element is a n x n matrix.\n Raises:\n ValueError: Ff the entries of m1 and m2 are of different dimensions.\n \"\"\"\n\n n = (m1[0]).shape.as_list()[0]\n if n != (m2[0]).shape.as_list()[0]:\n raise ValueError(\"The entries in matrices m1 and m2 \"\n \"must have the same dimensions!\")\n k = len(m1)\n l = len(m2)\n result = {}\n size = k + l - 1\n # Compute matrix convolution between m1 and m2.\n for i in range(size):\n result[i] = array_ops.zeros([n, n], self.dtype)\n for index in range(min(k, i + 1)):\n if (i - index) < l:\n result[i] += math_ops.matmul(m1[index], m2[i - index])\n return result\n\n def _orthogonal_kernel(self, ksize, cin, cout):\n \"\"\"Construct orthogonal kernel for convolution.\n\n Args:\n ksize: Kernel size.\n cin: Number of input channels.\n cout: Number of output channels.\n\n Returns:\n An [ksize, ksize, cin, cout] orthogonal kernel.\n Raises:\n ValueError: If cin > cout.\n \"\"\"\n if cin > cout:\n raise ValueError(\"The number of input channels cannot exceed \"\n \"the number of output channels.\")\n orth = self._orthogonal_matrix(cout)[0:cin, :]\n if ksize == 1:\n return array_ops.expand_dims(orth, 0)\n\n p = self._block_orth(self._symmetric_projection(cout))\n for _ in range(ksize - 2):\n temp = self._block_orth(self._symmetric_projection(cout))\n p = self._matrix_conv(p, temp)\n for i in range(ksize):\n p[i] = math_ops.matmul(orth, p[i])\n\n return self._dict_to_tensor(p, ksize)\n\n\nclass ConvolutionOrthogonal3D(ConvolutionOrthogonal):\n \"\"\"Initializer that generates a 3D orthogonal kernel for ConvNets.\n\n The shape of the tensor must have length 5. 
The number of input\n filters must not exceed the number of output filters.\n The orthogonality(==isometry) is exact when the inputs are circular padded.\n There are finite-width effects with non-circular padding (e.g. zero padding).\n See algorithm 1 (Xiao et al., 2018).\n\n Args:\n gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1.\n The 2-norm of an input is multiplied by a factor of `gain` after applying\n this convolution.\n seed: A Python integer. Used to create random seeds. See\n `tf.compat.v1.set_random_seed` for behavior.\n dtype: Default data type, used if no `dtype` argument is provided when\n calling the initializer. Only floating point types are supported.\n References:\n [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html)\n ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))\n \"\"\"\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n if len(shape) != 5:\n raise ValueError(\"The tensor to initialize must be five-dimensional\")\n\n if shape[-2] > shape[-1]:\n raise ValueError(\"In_filters cannot be greater than out_filters.\")\n\n if shape[0] != shape[1] or shape[0] != shape[2]:\n raise ValueError(\"Kernel sizes must be equal.\")\n\n kernel = self._orthogonal_kernel(shape[0], shape[-2], shape[-1])\n kernel *= math_ops.cast(self.gain, dtype=dtype)\n return kernel\n\n def _dict_to_tensor(self, x, k1, k2, k3):\n \"\"\"Convert a dictionary to a tensor.\n\n Args:\n x: A k1 * k2 dictionary.\n k1: First dimension of x.\n k2: Second dimension of x.\n k3: Third dimension of x.\n\n Returns:\n A k1 * k2 * k3 tensor.\n \"\"\"\n\n return array_ops.stack([array_ops.stack(\n [array_ops.stack([x[i, j, k] for k in range(k3)])\n for j in range(k2)]) for i in range(k1)])\n\n def _block_orth(self, p1, p2, p3):\n \"\"\"Construct a 3 x 3 kernel.\n\n Used to construct orthgonal kernel.\n\n Args:\n p1: A symmetric projection matrix.\n p2: A symmetric projection matrix.\n 
p3: A symmetric projection matrix.\n\n Returns:\n A 2 x 2 x 2 kernel.\n Raises:\n ValueError: If the dimensions of p1, p2 and p3 are different.\n \"\"\"\n p1_shape = p1.shape.as_list()\n if p1_shape != p2.shape.as_list() or p1_shape != p3.shape.as_list():\n raise ValueError(\"The dimension of the matrices must be the same.\")\n n = p1_shape[0]\n eye = linalg_ops_impl.eye(n, dtype=self.dtype)\n kernel2x2x2 = {}\n\n def matmul(p1, p2, p3):\n return math_ops.matmul(math_ops.matmul(p1, p2), p3)\n\n def cast(i, p):\n \"\"\"Return p or (1-p).\"\"\"\n return i * p + (1 - i) * (eye - p)\n\n for i in [0, 1]:\n for j in [0, 1]:\n for k in [0, 1]:\n kernel2x2x2[i, j, k] = matmul(cast(i, p1), cast(j, p2), cast(k, p3))\n return kernel2x2x2\n\n def _matrix_conv(self, m1, m2):\n \"\"\"Matrix convolution.\n\n Args:\n m1: is a k x k x k dictionary, each element is a n x n matrix.\n m2: is a l x l x l dictionary, each element is a n x n matrix.\n\n Returns:\n (k + l - 1) x (k + l - 1) x (k + l - 1) dictionary each\n element is a n x n matrix.\n Raises:\n ValueError: if the entries of m1 and m2 are of different dimensions.\n \"\"\"\n\n n = (m1[0, 0, 0]).shape.as_list()[0]\n if n != (m2[0, 0, 0]).shape.as_list()[0]:\n raise ValueError(\"The entries in matrices m1 and m2 \"\n \"must have the same dimensions!\")\n k = int(np.cbrt(len(m1)))\n l = int(np.cbrt(len(m2)))\n result = {}\n size = k + l - 1\n # Compute matrix convolution between m1 and m2.\n for i in range(size):\n for j in range(size):\n for r in range(size):\n result[i, j, r] = array_ops.zeros([n, n], self.dtype)\n for index1 in range(min(k, i + 1)):\n for index2 in range(min(k, j + 1)):\n for index3 in range(min(k, r + 1)):\n if (i - index1) < l and (j - index2) < l and (r - index3) < l:\n result[i, j, r] += math_ops.matmul(\n m1[index1, index2, index3],\n m2[i - index1, j - index2, r - index3])\n return result\n\n def _orthogonal_kernel(self, ksize, cin, cout):\n \"\"\"Construct orthogonal kernel for convolution.\n\n 
Args:\n ksize: Kernel size.\n cin: Number of input channels.\n cout: Number of output channels.\n\n Returns:\n An [ksize, ksize, ksize, cin, cout] orthogonal kernel.\n Raises:\n ValueError: If cin > cout.\n \"\"\"\n if cin > cout:\n raise ValueError(\"The number of input channels cannot exceed \"\n \"the number of output channels.\")\n orth = self._orthogonal_matrix(cout)[0:cin, :]\n if ksize == 1:\n return array_ops.expand_dims(\n array_ops.expand_dims(array_ops.expand_dims(orth, 0), 0), 0)\n\n p = self._block_orth(\n self._symmetric_projection(cout), self._symmetric_projection(cout),\n self._symmetric_projection(cout))\n for _ in range(ksize - 2):\n temp = self._block_orth(\n self._symmetric_projection(cout), self._symmetric_projection(cout),\n self._symmetric_projection(cout))\n p = self._matrix_conv(p, temp)\n for i in range(ksize):\n for j in range(ksize):\n for k in range(ksize):\n p[i, j, k] = math_ops.matmul(orth, p[i, j, k])\n\n return self._dict_to_tensor(p, ksize, ksize, ksize)\n\n\n@tf_export(v1=[\"initializers.identity\"])\[email protected]_endpoints(\"initializers.identity\")\nclass Identity(Initializer):\n \"\"\"Initializer that generates the identity matrix.\n\n Only use for 2D matrices.\n\n Args:\n gain: Multiplicative factor to apply to the identity matrix.\n dtype: Default data type, used if no `dtype` argument is provided when\n calling the initializer. 
Only floating point types are supported.\n \"\"\"\n\n @deprecated_args(None,\n \"Call initializer instance with the dtype argument instead \"\n \"of passing it to the constructor\", \"dtype\")\n def __init__(self, gain=1.0, dtype=dtypes.float32):\n self.gain = gain\n self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))\n\n def __call__(self, shape, dtype=None, partition_info=None):\n full_shape = shape if partition_info is None else partition_info.full_shape\n if len(full_shape) != 2:\n raise ValueError(\n \"Identity matrix initializer can only be used for 2D matrices.\")\n if dtype is None:\n dtype = self.dtype\n if isinstance(full_shape, tensor_shape.TensorShape):\n full_shape = full_shape.as_list()\n initializer = linalg_ops_impl.eye(*full_shape, dtype=dtype)\n if partition_info is not None:\n initializer = array_ops.slice(initializer, partition_info.var_offset,\n shape)\n return self.gain * initializer\n\n def get_config(self):\n return {\"gain\": self.gain, \"dtype\": self.dtype.name}\n\n\n@tf_export(v1=[\"glorot_uniform_initializer\", \"initializers.glorot_uniform\"])\[email protected]_endpoints(\"glorot_uniform_initializer\",\n \"initializers.glorot_uniform\")\nclass GlorotUniform(VarianceScaling):\n \"\"\"The Glorot uniform initializer, also called Xavier uniform initializer.\n\n It draws samples from a uniform distribution within [-limit, limit]\n where `limit` is `sqrt(6 / (fan_in + fan_out))`\n where `fan_in` is the number of input units in the weight tensor\n and `fan_out` is the number of output units in the weight tensor.\n\n Args:\n seed: A Python integer. Used to create random seeds. See\n `tf.compat.v1.set_random_seed` for behavior.\n dtype: Default data type, used if no `dtype` argument is provided when\n calling the initializer. 
Only floating point types are supported.\n References:\n [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)\n ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))\n \"\"\"\n\n @deprecated_args(None,\n \"Call initializer instance with the dtype argument instead \"\n \"of passing it to the constructor\", \"dtype\")\n def __init__(self, seed=None, dtype=dtypes.float32):\n super(GlorotUniform, self).__init__(\n scale=1.0,\n mode=\"fan_avg\",\n distribution=\"uniform\",\n seed=seed,\n dtype=dtype)\n\n def get_config(self):\n return {\"seed\": self.seed, \"dtype\": self.dtype.name}\n\n\n@tf_export(v1=[\"glorot_normal_initializer\", \"initializers.glorot_normal\"])\[email protected]_endpoints(\"glorot_normal_initializer\",\n \"initializers.glorot_normal\")\nclass GlorotNormal(VarianceScaling):\n \"\"\"The Glorot normal initializer, also called Xavier normal initializer.\n\n It draws samples from a truncated normal distribution centered on 0\n with standard deviation (after truncation) given by\n `stddev = sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number\n of input units in the weight tensor and `fan_out` is the number of\n output units in the weight tensor.\n\n Args:\n seed: A Python integer. Used to create random seeds. See\n `tf.compat.v1.set_random_seed` for behavior.\n dtype: Default data type, used if no `dtype` argument is provided when\n calling the initializer. 
Only floating point types are supported.\n References:\n [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)\n ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))\n \"\"\"\n\n @deprecated_args(None,\n \"Call initializer instance with the dtype argument instead \"\n \"of passing it to the constructor\", \"dtype\")\n def __init__(self, seed=None, dtype=dtypes.float32):\n super(GlorotNormal, self).__init__(\n scale=1.0,\n mode=\"fan_avg\",\n distribution=\"truncated_normal\",\n seed=seed,\n dtype=dtype)\n\n def get_config(self):\n return {\"seed\": self.seed, \"dtype\": self.dtype.name}\n\n\n# Aliases.\n\n# pylint: disable=invalid-name\nzeros_initializer = Zeros\nones_initializer = Ones\nconstant_initializer = Constant\nrandom_uniform_initializer = RandomUniform\nrandom_normal_initializer = RandomNormal\ntruncated_normal_initializer = TruncatedNormal\nuniform_unit_scaling_initializer = UniformUnitScaling\nvariance_scaling_initializer = VarianceScaling\nglorot_uniform_initializer = GlorotUniform\nglorot_normal_initializer = GlorotNormal\northogonal_initializer = Orthogonal\nidentity_initializer = Identity\nconvolutional_delta_orthogonal = ConvolutionDeltaOrthogonal\nconvolutional_orthogonal_1d = ConvolutionOrthogonal1D\nconvolutional_orthogonal_2d = ConvolutionOrthogonal2D\nconvolutional_orthogonal_3d = ConvolutionOrthogonal3D\n# pylint: enable=invalid-name\n\n\n@tf_export(v1=[\"initializers.lecun_normal\"])\ndef lecun_normal(seed=None):\n \"\"\"LeCun normal initializer.\n\n It draws samples from a truncated normal distribution centered on 0\n with standard deviation (after truncation) given by\n `stddev = sqrt(1 / fan_in)` where `fan_in` is the number of\n input units in the weight tensor.\n\n Arguments:\n seed: A Python integer. 
Used to seed the random generator.\n\n Returns:\n An initializer.\n\n References:\n - Self-Normalizing Neural Networks,\n [Klambauer et al.,\n 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)\n # pylint: disable=line-too-long\n ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))\n - Efficient Backprop,\n [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)\n \"\"\"\n return VarianceScaling(\n scale=1., mode=\"fan_in\", distribution=\"truncated_normal\", seed=seed)\n\n\n@tf_export(v1=[\"initializers.lecun_uniform\"])\ndef lecun_uniform(seed=None):\n \"\"\"LeCun uniform initializer.\n\n It draws samples from a uniform distribution within [-limit, limit]\n where `limit` is `sqrt(3 / fan_in)`\n where `fan_in` is the number of input units in the weight tensor.\n\n Arguments:\n seed: A Python integer. Used to seed the random generator.\n\n Returns:\n An initializer.\n\n References:\n - Self-Normalizing Neural Networks,\n [Klambauer et al.,\n 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)\n # pylint: disable=line-too-long\n ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))\n - Efficient Backprop,\n [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)\n \"\"\"\n return VarianceScaling(\n scale=1., mode=\"fan_in\", distribution=\"uniform\", seed=seed)\n\n\n@tf_export(v1=[\"initializers.he_normal\"])\ndef he_normal(seed=None):\n \"\"\"He normal initializer.\n\n It draws samples from a truncated normal distribution centered on 0\n with standard deviation (after truncation) given by\n `stddev = sqrt(2 / fan_in)` where `fan_in` is the number of\n input units in the weight tensor.\n\n Arguments:\n seed: A Python integer. 
Used to seed the random generator.\n\n Returns:\n An initializer.\n\n References:\n [He et al., 2015]\n (https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)\n # pylint: disable=line-too-long\n ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))\n \"\"\"\n return VarianceScaling(\n scale=2., mode=\"fan_in\", distribution=\"truncated_normal\", seed=seed)\n\n\n@tf_export(v1=[\"initializers.he_uniform\"])\ndef he_uniform(seed=None):\n \"\"\"He uniform variance scaling initializer.\n\n It draws samples from a uniform distribution within [-limit, limit]\n where `limit` is `sqrt(6 / fan_in)`\n where `fan_in` is the number of input units in the weight tensor.\n\n Arguments:\n seed: A Python integer. Used to seed the random generator.\n\n Returns:\n An initializer.\n\n References:\n [He et al., 2015]\n (https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)\n # pylint: disable=line-too-long\n ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))\n \"\"\"\n return VarianceScaling(\n scale=2., mode=\"fan_in\", distribution=\"uniform\", seed=seed)\n\n\n# Utility functions.\n\n\ndef _compute_fans(shape):\n \"\"\"Computes the number of input and output units for a weight shape.\n\n Args:\n shape: Integer shape tuple or TF tensor shape.\n\n Returns:\n A tuple of integer scalars (fan_in, fan_out).\n \"\"\"\n if len(shape) < 1: # Just to avoid errors for constants.\n fan_in = fan_out = 1\n elif len(shape) == 1:\n fan_in = fan_out = shape[0]\n elif len(shape) == 2:\n fan_in = shape[0]\n fan_out = shape[1]\n else:\n # Assuming convolution kernels (2D, 3D, or more).\n # kernel shape: (..., input_depth, depth)\n receptive_field_size = 1\n for dim in shape[:-2]:\n receptive_field_size *= dim\n fan_in = shape[-2] * receptive_field_size\n fan_out = shape[-1] * 
receptive_field_size\n return int(fan_in), int(fan_out)\n\n\ndef _assert_float_dtype(dtype):\n \"\"\"Validate and return floating point type based on `dtype`.\n\n `dtype` must be a floating point type.\n\n Args:\n dtype: The data type to validate.\n\n Returns:\n Validated type.\n\n Raises:\n ValueError: if `dtype` is not a floating point type.\n \"\"\"\n if not dtype.is_floating:\n raise ValueError(\"Expected floating point type, got %s.\" % dtype)\n return dtype\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Reshape Bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.distributions.python.ops.bijectors.reshape import Reshape\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite\nfrom tensorflow.python.platform import test\n\n\nclass _ReshapeBijectorTest(object):\n \"\"\"Base class for testing the reshape transformation.\n\n Methods defined in this class call a method self.build_shapes() that\n is implemented by subclasses defined below, returning respectively\n ReshapeBijectorTestStatic: static shapes,\n ReshapeBijectorTestDynamic: shape placeholders of known ndims, and\n ReshapeBijectorTestDynamicNdims: shape placeholders of unspecified ndims,\n so that each test in this base class is automatically run over all\n three cases. 
The subclasses also implement assertRaisesError to test\n for either Python exceptions (in the case of static shapes) or\n TensorFlow op errors (dynamic shapes).\n \"\"\"\n\n def setUp(self):\n self._rng = np.random.RandomState(42)\n\n def testBijector(self):\n \"\"\"Do a basic sanity check of forward, inverse, jacobian.\"\"\"\n expected_x = np.random.randn(4, 3, 2)\n expected_y = np.reshape(expected_x, [4, 6])\n\n with self.cached_session() as sess:\n shape_in, shape_out, feed_dict = self.build_shapes([3, 2], [6,])\n bijector = Reshape(\n event_shape_out=shape_out,\n event_shape_in=shape_in,\n validate_args=True)\n (x_,\n y_,\n fldj_,\n ildj_) = sess.run((\n bijector.inverse(expected_y),\n bijector.forward(expected_x),\n bijector.forward_log_det_jacobian(expected_x, event_ndims=2),\n bijector.inverse_log_det_jacobian(expected_y, event_ndims=2),\n ), feed_dict=feed_dict)\n self.assertEqual(\"reshape\", bijector.name)\n self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)\n self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)\n self.assertAllClose(0., fldj_, rtol=1e-6, atol=0)\n self.assertAllClose(0., ildj_, rtol=1e-6, atol=0)\n\n def testEventShapeTensor(self):\n \"\"\"Test event_shape_tensor methods when even ndims may be dynamic.\"\"\"\n\n shape_in_static = [2, 3]\n shape_out_static = [6,]\n shape_in, shape_out, feed_dict = self.build_shapes(shape_in_static,\n shape_out_static)\n bijector = Reshape(\n event_shape_out=shape_out,\n event_shape_in=shape_in, validate_args=True)\n\n # using the _tensor methods, we should always get a fully-specified\n # result since these are evaluated at graph runtime.\n with self.cached_session() as sess:\n (shape_out_,\n shape_in_) = sess.run((\n bijector.forward_event_shape_tensor(shape_in),\n bijector.inverse_event_shape_tensor(shape_out),\n ), feed_dict=feed_dict)\n self.assertAllEqual(shape_out_static, shape_out_)\n self.assertAllEqual(shape_in_static, shape_in_)\n\n def testScalarReshape(self):\n \"\"\"Test reshaping to 
and from a scalar shape ().\"\"\"\n\n expected_x = np.random.randn(4, 3, 1)\n expected_y = np.reshape(expected_x, [4, 3])\n\n expected_x_scalar = np.random.randn(1,)\n expected_y_scalar = expected_x_scalar[0]\n\n shape_in, shape_out, feed_dict = self.build_shapes([], [1,])\n with self.cached_session() as sess:\n bijector = Reshape(\n event_shape_out=shape_in,\n event_shape_in=shape_out, validate_args=True)\n (x_,\n y_,\n x_scalar_,\n y_scalar_\n ) = sess.run((\n bijector.inverse(expected_y),\n bijector.forward(expected_x),\n bijector.inverse(expected_y_scalar),\n bijector.forward(expected_x_scalar),\n ), feed_dict=feed_dict)\n self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)\n self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)\n self.assertAllClose(expected_y_scalar, y_scalar_, rtol=1e-6, atol=0)\n self.assertAllClose(expected_x_scalar, x_scalar_, rtol=1e-6, atol=0)\n\n def testMultipleUnspecifiedDimensionsOpError(self):\n\n with self.cached_session() as sess:\n shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [4, -1, -1,])\n bijector = Reshape(\n event_shape_out=shape_out,\n event_shape_in=shape_in,\n validate_args=True)\n\n with self.assertRaisesError(\n \"elements must have at most one `-1`.\"):\n sess.run(bijector.forward_event_shape_tensor(shape_in),\n feed_dict=feed_dict)\n\n # pylint: disable=invalid-name\n def _testInvalidDimensionsOpError(self, expected_error_message):\n\n with self.cached_session() as sess:\n\n shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [1, 2, -2,])\n bijector = Reshape(\n event_shape_out=shape_out,\n event_shape_in=shape_in,\n validate_args=True)\n\n with self.assertRaisesError(expected_error_message):\n sess.run(bijector.forward_event_shape_tensor(shape_in),\n feed_dict=feed_dict)\n # pylint: enable=invalid-name\n\n def testValidButNonMatchingInputOpError(self):\n x = np.random.randn(4, 3, 2)\n\n with self.cached_session() as sess:\n shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [1, 6, 
1,])\n bijector = Reshape(\n event_shape_out=shape_out,\n event_shape_in=shape_in,\n validate_args=True)\n\n # Here we pass in a tensor (x) whose shape is compatible with\n # the output shape, so tf.reshape will throw no error, but\n # doesn't match the expected input shape.\n with self.assertRaisesError(\n \"Input `event_shape` does not match `event_shape_in`.\"):\n sess.run(bijector.forward(x),\n feed_dict=feed_dict)\n\n def testValidButNonMatchingInputPartiallySpecifiedOpError(self):\n x = np.random.randn(4, 3, 2)\n\n with self.cached_session() as sess:\n shape_in, shape_out, feed_dict = self.build_shapes([2, -1], [1, 6, 1,])\n bijector = Reshape(\n event_shape_out=shape_out,\n event_shape_in=shape_in,\n validate_args=True)\n\n with self.assertRaisesError(\n \"Input `event_shape` does not match `event_shape_in`.\"):\n sess.run(bijector.forward(x),\n feed_dict=feed_dict)\n\n # pylint: disable=invalid-name\n def _testInputOutputMismatchOpError(self, expected_error_message):\n x1 = np.random.randn(4, 2, 3)\n x2 = np.random.randn(4, 1, 1, 5)\n\n with self.cached_session() as sess:\n shape_in, shape_out, fd_mismatched = self.build_shapes([2, 3],\n [1, 1, 5])\n bijector = Reshape(\n event_shape_out=shape_out,\n event_shape_in=shape_in,\n validate_args=True)\n\n with self.assertRaisesError(expected_error_message):\n sess.run(bijector.forward(x1), feed_dict=fd_mismatched)\n with self.assertRaisesError(expected_error_message):\n sess.run(bijector.inverse(x2), feed_dict=fd_mismatched)\n # pylint: enable=invalid-name\n\n def testOneShapePartiallySpecified(self):\n expected_x = np.random.randn(4, 6)\n expected_y = np.reshape(expected_x, [4, 2, 3])\n\n with self.cached_session() as sess:\n # one of input/output shapes is partially specified\n shape_in, shape_out, feed_dict = self.build_shapes([-1,], [2, 3])\n bijector = Reshape(\n event_shape_out=shape_out,\n event_shape_in=shape_in,\n validate_args=True)\n (x_,\n y_,\n ) = sess.run((\n bijector.inverse(expected_y),\n 
bijector.forward(expected_x),\n ), feed_dict=feed_dict)\n self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)\n self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)\n\n def testBothShapesPartiallySpecified(self):\n expected_x = np.random.randn(4, 2, 3)\n expected_y = np.reshape(expected_x, [4, 3, 2])\n with self.cached_session() as sess:\n shape_in, shape_out, feed_dict = self.build_shapes([-1, 3], [-1, 2])\n bijector = Reshape(\n event_shape_out=shape_out,\n event_shape_in=shape_in,\n validate_args=True)\n (x_,\n y_,\n ) = sess.run((\n bijector.inverse(expected_y),\n bijector.forward(expected_x),\n ), feed_dict=feed_dict)\n self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)\n self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)\n\n def testDefaultVectorShape(self):\n expected_x = np.random.randn(4, 4)\n expected_y = np.reshape(expected_x, [4, 2, 2])\n with self.cached_session() as sess:\n _, shape_out, feed_dict = self.build_shapes([-1,], [-1, 2])\n bijector = Reshape(shape_out,\n validate_args=True)\n (x_,\n y_,\n ) = sess.run((\n bijector.inverse(expected_y),\n bijector.forward(expected_x),\n ), feed_dict=feed_dict)\n self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)\n self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)\n\n def build_shapes(self, *args, **kwargs):\n raise NotImplementedError(\"Subclass failed to implement `build_shapes`.\")\n\n\nclass ReshapeBijectorTestStatic(test.TestCase, _ReshapeBijectorTest):\n\n def build_shapes(self, shape_in, shape_out):\n shape_in_static = shape_in\n shape_out_static = shape_out\n feed_dict = {}\n return shape_in_static, shape_out_static, feed_dict\n\n def assertRaisesError(self, msg):\n return self.assertRaisesRegexp(Exception, msg)\n\n def testEventShape(self):\n shape_in_static = tensor_shape.TensorShape([2, 3])\n shape_out_static = tensor_shape.TensorShape([6,])\n bijector = Reshape(\n event_shape_out=shape_out_static,\n event_shape_in=shape_in_static, validate_args=True)\n\n # test that forward_ 
and inverse_event_shape do sensible things\n # when shapes are statically known.\n self.assertEqual(\n bijector.forward_event_shape(shape_in_static),\n shape_out_static)\n self.assertEqual(\n bijector.inverse_event_shape(shape_out_static),\n shape_in_static)\n\n def testBijectiveAndFinite(self):\n x = np.random.randn(4, 2, 3)\n y = np.reshape(x, [4, 1, 2, 3])\n with self.cached_session():\n bijector = Reshape(\n event_shape_in=[2, 3],\n event_shape_out=[1, 2, 3],\n validate_args=True)\n assert_bijective_and_finite(\n bijector, x, y, event_ndims=2, rtol=1e-6, atol=0)\n\n def testInvalidDimensionsOpError(self):\n self._testInvalidDimensionsOpError(\n \"Invalid value in tensor used for shape: -2\")\n\n def testInputOutputMismatchOpError(self):\n self._testInputOutputMismatchOpError(\"Cannot reshape a tensor with\")\n\n\nclass ReshapeBijectorTestDynamic(test.TestCase, _ReshapeBijectorTest):\n\n def build_shapes(self, shape_in, shape_out):\n shape_in_ph = array_ops.placeholder(shape=(len(shape_in),),\n dtype=dtypes.int32)\n shape_out_ph = array_ops.placeholder(shape=(len(shape_out),),\n dtype=dtypes.int32)\n feed_dict = {shape_in_ph: shape_in, shape_out_ph: shape_out}\n return shape_in_ph, shape_out_ph, feed_dict\n\n def assertRaisesError(self, msg):\n return self.assertRaisesOpError(msg)\n\n def testInvalidDimensionsOpError(self):\n self._testInvalidDimensionsOpError(\n \"elements must be either positive integers or `-1`.\")\n\n def testInputOutputMismatchOpError(self):\n self._testInputOutputMismatchOpError(\"Input to reshape is a tensor with\")\n\n\nclass ReshapeBijectorTestDynamicNdims(test.TestCase, _ReshapeBijectorTest):\n\n def build_shapes(self, shape_in, shape_out):\n shape_in_ph = array_ops.placeholder(shape=None, dtype=dtypes.int32)\n shape_out_ph = array_ops.placeholder(shape=None, dtype=dtypes.int32)\n feed_dict = {shape_in_ph: shape_in, shape_out_ph: shape_out}\n return shape_in_ph, shape_out_ph, feed_dict\n\n def assertRaisesError(self, msg):\n return 
self.assertRaisesOpError(msg)\n\n def testInvalidDimensionsOpError(self):\n self._testInvalidDimensionsOpError(\n \"elements must be either positive integers or `-1`.\")\n\n def testInputOutputMismatchOpError(self):\n self._testInputOutputMismatchOpError(\"Input to reshape is a tensor with\")\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"Convolutional-recurrent layers.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.keras import activations\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras import constraints\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras.engine.base_layer import Layer\nfrom tensorflow.python.keras.engine.input_spec import InputSpec\nfrom tensorflow.python.keras.layers.recurrent import _standardize_args\nfrom tensorflow.python.keras.layers.recurrent import DropoutRNNCellMixin\nfrom tensorflow.python.keras.layers.recurrent import RNN\nfrom tensorflow.python.keras.utils import conv_utils\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.util.tf_export import keras_export\n\n\nclass ConvRNN2D(RNN):\n \"\"\"Base class for convolutional-recurrent layers.\n\n Arguments:\n cell: A RNN cell instance. 
A RNN cell is a class that has:\n - a `call(input_at_t, states_at_t)` method, returning\n `(output_at_t, states_at_t_plus_1)`. The call method of the\n cell can also take the optional argument `constants`, see\n section \"Note on passing external constants\" below.\n - a `state_size` attribute. This can be a single integer\n (single state) in which case it is\n the number of channels of the recurrent state\n (which should be the same as the number of channels of the cell\n output). This can also be a list/tuple of integers\n (one size per state). In this case, the first entry\n (`state_size[0]`) should be the same as\n the size of the cell output.\n return_sequences: Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n input_shape: Use this argument to specify the shape of the\n input when this layer is the first one in a model.\n\n Call arguments:\n inputs: A 5D tensor.\n mask: Binary tensor of shape `(samples, timesteps)` indicating whether\n a given timestep should be masked.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the cell\n when calling it. 
This is for use with cells that use dropout.\n initial_state: List of initial state tensors to be passed to the first\n call of the cell.\n constants: List of constant tensors to be passed to the cell at each\n timestep.\n\n Input shape:\n 5D tensor with shape:\n `(samples, timesteps, channels, rows, cols)`\n if data_format='channels_first' or 5D tensor with shape:\n `(samples, timesteps, rows, cols, channels)`\n if data_format='channels_last'.\n\n Output shape:\n - If `return_state`: a list of tensors. The first tensor is\n the output. The remaining tensors are the last states,\n each 4D tensor with shape:\n `(samples, filters, new_rows, new_cols)`\n if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, new_rows, new_cols, filters)`\n if data_format='channels_last'.\n `rows` and `cols` values might have changed due to padding.\n - If `return_sequences`: 5D tensor with shape:\n `(samples, timesteps, filters, new_rows, new_cols)`\n if data_format='channels_first'\n or 5D tensor with shape:\n `(samples, timesteps, new_rows, new_cols, filters)`\n if data_format='channels_last'.\n - Else, 4D tensor with shape:\n `(samples, filters, new_rows, new_cols)`\n if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, new_rows, new_cols, filters)`\n if data_format='channels_last'.\n\n Masking:\n This layer supports masking for input data with a variable number\n of timesteps.\n\n Note on using statefulness in RNNs:\n You can set RNN layers to be 'stateful', which means that the states\n computed for the samples in one batch will be reused as initial states\n for the samples in the next batch. 
This assumes a one-to-one mapping\n between samples in different successive batches.\n To enable statefulness:\n - Specify `stateful=True` in the layer constructor.\n - Specify a fixed batch size for your model, by passing\n - If sequential model:\n `batch_input_shape=(...)` to the first layer in your model.\n - If functional model with 1 or more Input layers:\n `batch_shape=(...)` to all the first layers in your model.\n This is the expected shape of your inputs\n *including the batch size*.\n It should be a tuple of integers,\n e.g. `(32, 10, 100, 100, 32)`.\n Note that the number of rows and columns should be specified\n too.\n - Specify `shuffle=False` when calling fit().\n To reset the states of your model, call `.reset_states()` on either\n a specific layer, or on your entire model.\n\n Note on specifying the initial state of RNNs:\n You can specify the initial state of RNN layers symbolically by\n calling them with the keyword argument `initial_state`. The value of\n `initial_state` should be a tensor or list of tensors representing\n the initial state of the RNN layer.\n You can specify the initial state of RNN layers numerically by\n calling `reset_states` with the keyword argument `states`. The value of\n `states` should be a numpy array or list of numpy arrays representing\n the initial state of the RNN layer.\n\n Note on passing external constants to RNNs:\n You can pass \"external\" constants to the cell using the `constants`\n keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This\n requires that the `cell.call` method accepts the same keyword argument\n `constants`. Such constants can be used to condition the cell\n transformation on additional static inputs (not changing over time),\n a.k.a. 
an attention mechanism.\n \"\"\"\n\n def __init__(self,\n cell,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs):\n if unroll:\n raise TypeError('Unrolling isn\\'t possible with '\n 'convolutional RNNs.')\n if isinstance(cell, (list, tuple)):\n # The StackedConvRNN2DCells isn't implemented yet.\n raise TypeError('It is not possible at the moment to'\n 'stack convolutional cells.')\n super(ConvRNN2D, self).__init__(cell,\n return_sequences,\n return_state,\n go_backwards,\n stateful,\n unroll,\n **kwargs)\n self.input_spec = [InputSpec(ndim=5)]\n self.states = None\n self._num_constants = None\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n if isinstance(input_shape, list):\n input_shape = input_shape[0]\n\n cell = self.cell\n if cell.data_format == 'channels_first':\n rows = input_shape[3]\n cols = input_shape[4]\n elif cell.data_format == 'channels_last':\n rows = input_shape[2]\n cols = input_shape[3]\n rows = conv_utils.conv_output_length(rows,\n cell.kernel_size[0],\n padding=cell.padding,\n stride=cell.strides[0],\n dilation=cell.dilation_rate[0])\n cols = conv_utils.conv_output_length(cols,\n cell.kernel_size[1],\n padding=cell.padding,\n stride=cell.strides[1],\n dilation=cell.dilation_rate[1])\n\n if cell.data_format == 'channels_first':\n output_shape = input_shape[:2] + (cell.filters, rows, cols)\n elif cell.data_format == 'channels_last':\n output_shape = input_shape[:2] + (rows, cols, cell.filters)\n\n if not self.return_sequences:\n output_shape = output_shape[:1] + output_shape[2:]\n\n if self.return_state:\n output_shape = [output_shape]\n if cell.data_format == 'channels_first':\n output_shape += [(input_shape[0], cell.filters, rows, cols)\n for _ in range(2)]\n elif cell.data_format == 'channels_last':\n output_shape += [(input_shape[0], rows, cols, cell.filters)\n for _ in range(2)]\n return output_shape\n\n @tf_utils.shape_type_conversion\n def 
build(self, input_shape):\n # Note input_shape will be list of shapes of initial states and\n # constants if these are passed in __call__.\n if self._num_constants is not None:\n constants_shape = input_shape[-self._num_constants:] # pylint: disable=E1130\n else:\n constants_shape = None\n\n if isinstance(input_shape, list):\n input_shape = input_shape[0]\n\n batch_size = input_shape[0] if self.stateful else None\n self.input_spec[0] = InputSpec(shape=(batch_size, None) + input_shape[2:5])\n\n # allow cell (if layer) to build before we set or validate state_spec\n if isinstance(self.cell, Layer):\n step_input_shape = (input_shape[0],) + input_shape[2:]\n if constants_shape is not None:\n self.cell.build([step_input_shape] + constants_shape)\n else:\n self.cell.build(step_input_shape)\n\n # set or validate state_spec\n if hasattr(self.cell.state_size, '__len__'):\n state_size = list(self.cell.state_size)\n else:\n state_size = [self.cell.state_size]\n\n if self.state_spec is not None:\n # initial_state was passed in call, check compatibility\n if self.cell.data_format == 'channels_first':\n ch_dim = 1\n elif self.cell.data_format == 'channels_last':\n ch_dim = 3\n if [spec.shape[ch_dim] for spec in self.state_spec] != state_size:\n raise ValueError(\n 'An initial_state was passed that is not compatible with '\n '`cell.state_size`. 
Received `state_spec`={}; '\n 'However `cell.state_size` is '\n '{}'.format([spec.shape for spec in self.state_spec],\n self.cell.state_size))\n else:\n if self.cell.data_format == 'channels_first':\n self.state_spec = [InputSpec(shape=(None, dim, None, None))\n for dim in state_size]\n elif self.cell.data_format == 'channels_last':\n self.state_spec = [InputSpec(shape=(None, None, None, dim))\n for dim in state_size]\n if self.stateful:\n self.reset_states()\n self.built = True\n\n def get_initial_state(self, inputs):\n # (samples, timesteps, rows, cols, filters)\n initial_state = K.zeros_like(inputs)\n # (samples, rows, cols, filters)\n initial_state = K.sum(initial_state, axis=1)\n shape = list(self.cell.kernel_shape)\n shape[-1] = self.cell.filters\n initial_state = self.cell.input_conv(initial_state,\n array_ops.zeros(tuple(shape)),\n padding=self.cell.padding)\n\n if hasattr(self.cell.state_size, '__len__'):\n return [initial_state for _ in self.cell.state_size]\n else:\n return [initial_state]\n\n def __call__(self, inputs, initial_state=None, constants=None, **kwargs):\n inputs, initial_state, constants = _standardize_args(\n inputs, initial_state, constants, self._num_constants)\n\n if initial_state is None and constants is None:\n return super(ConvRNN2D, self).__call__(inputs, **kwargs)\n\n # If any of `initial_state` or `constants` are specified and are Keras\n # tensors, then add them to the inputs and temporarily modify the\n # input_spec to include them.\n\n additional_inputs = []\n additional_specs = []\n if initial_state is not None:\n kwargs['initial_state'] = initial_state\n additional_inputs += initial_state\n self.state_spec = []\n for state in initial_state:\n shape = K.int_shape(state)\n self.state_spec.append(InputSpec(shape=shape))\n\n additional_specs += self.state_spec\n if constants is not None:\n kwargs['constants'] = constants\n additional_inputs += constants\n self.constants_spec = [InputSpec(shape=K.int_shape(constant))\n for constant 
in constants]\n self._num_constants = len(constants)\n additional_specs += self.constants_spec\n # at this point additional_inputs cannot be empty\n for tensor in additional_inputs:\n if K.is_keras_tensor(tensor) != K.is_keras_tensor(additional_inputs[0]):\n raise ValueError('The initial state or constants of an RNN'\n ' layer cannot be specified with a mix of'\n ' Keras tensors and non-Keras tensors')\n\n if K.is_keras_tensor(additional_inputs[0]):\n # Compute the full input spec, including state and constants\n full_input = [inputs] + additional_inputs\n full_input_spec = self.input_spec + additional_specs\n # Perform the call with temporarily replaced input_spec\n original_input_spec = self.input_spec\n self.input_spec = full_input_spec\n output = super(ConvRNN2D, self).__call__(full_input, **kwargs)\n self.input_spec = original_input_spec\n return output\n else:\n return super(ConvRNN2D, self).__call__(inputs, **kwargs)\n\n def call(self,\n inputs,\n mask=None,\n training=None,\n initial_state=None,\n constants=None):\n # note that the .build() method of subclasses MUST define\n # self.input_spec and self.state_spec with complete input shapes.\n if isinstance(inputs, list):\n inputs = inputs[0]\n if initial_state is not None:\n pass\n elif self.stateful:\n initial_state = self.states\n else:\n initial_state = self.get_initial_state(inputs)\n\n if isinstance(mask, list):\n mask = mask[0]\n\n if len(initial_state) != len(self.states):\n raise ValueError('Layer has ' + str(len(self.states)) +\n ' states but was passed ' +\n str(len(initial_state)) +\n ' initial states.')\n timesteps = K.int_shape(inputs)[1]\n\n kwargs = {}\n if generic_utils.has_arg(self.cell.call, 'training'):\n kwargs['training'] = training\n\n if constants:\n if not generic_utils.has_arg(self.cell.call, 'constants'):\n raise ValueError('RNN cell does not support constants')\n\n def step(inputs, states):\n constants = states[-self._num_constants:]\n states = states[:-self._num_constants]\n 
return self.cell.call(inputs, states, constants=constants,\n **kwargs)\n else:\n def step(inputs, states):\n return self.cell.call(inputs, states, **kwargs)\n\n last_output, outputs, states = K.rnn(step,\n inputs,\n initial_state,\n constants=constants,\n go_backwards=self.go_backwards,\n mask=mask,\n input_length=timesteps)\n if self.stateful:\n updates = []\n for i in range(len(states)):\n updates.append(K.update(self.states[i], states[i]))\n self.add_update(updates, inputs=True)\n\n if self.return_sequences:\n output = outputs\n else:\n output = last_output\n\n if self.return_state:\n if not isinstance(states, (list, tuple)):\n states = [states]\n else:\n states = list(states)\n return [output] + states\n else:\n return output\n\n def reset_states(self, states=None):\n if not self.stateful:\n raise AttributeError('Layer must be stateful.')\n input_shape = self.input_spec[0].shape\n state_shape = self.compute_output_shape(input_shape)\n if self.return_state:\n state_shape = state_shape[0]\n if self.return_sequences:\n state_shape = state_shape[:1].concatenate(state_shape[2:])\n if None in state_shape:\n raise ValueError('If a RNN is stateful, it needs to know '\n 'its batch size. 
Specify the batch size '\n 'of your input tensors: \\n'\n '- If using a Sequential model, '\n 'specify the batch size by passing '\n 'a `batch_input_shape` '\n 'argument to your first layer.\\n'\n '- If using the functional API, specify '\n 'the time dimension by passing a '\n '`batch_shape` argument to your Input layer.\\n'\n 'The same thing goes for the number of rows and '\n 'columns.')\n\n # helper function\n def get_tuple_shape(nb_channels):\n result = list(state_shape)\n if self.cell.data_format == 'channels_first':\n result[1] = nb_channels\n elif self.cell.data_format == 'channels_last':\n result[3] = nb_channels\n else:\n raise KeyError\n return tuple(result)\n\n # initialize state if None\n if self.states[0] is None:\n if hasattr(self.cell.state_size, '__len__'):\n self.states = [K.zeros(get_tuple_shape(dim))\n for dim in self.cell.state_size]\n else:\n self.states = [K.zeros(get_tuple_shape(self.cell.state_size))]\n elif states is None:\n if hasattr(self.cell.state_size, '__len__'):\n for state, dim in zip(self.states, self.cell.state_size):\n K.set_value(state, np.zeros(get_tuple_shape(dim)))\n else:\n K.set_value(self.states[0],\n np.zeros(get_tuple_shape(self.cell.state_size)))\n else:\n if not isinstance(states, (list, tuple)):\n states = [states]\n if len(states) != len(self.states):\n raise ValueError('Layer ' + self.name + ' expects ' +\n str(len(self.states)) + ' states, ' +\n 'but it received ' + str(len(states)) +\n ' state values. 
Input received: ' + str(states))\n for index, (value, state) in enumerate(zip(states, self.states)):\n if hasattr(self.cell.state_size, '__len__'):\n dim = self.cell.state_size[index]\n else:\n dim = self.cell.state_size\n if value.shape != get_tuple_shape(dim):\n raise ValueError('State ' + str(index) +\n ' is incompatible with layer ' +\n self.name + ': expected shape=' +\n str(get_tuple_shape(dim)) +\n ', found shape=' + str(value.shape))\n # TODO(anjalisridhar): consider batch calls to `set_value`.\n K.set_value(state, value)\n\n\nclass ConvLSTM2DCell(DropoutRNNCellMixin, Layer):\n \"\"\"Cell class for the ConvLSTM2D layer.\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of n integers, specifying the\n dimensions of the convolution window.\n strides: An integer or tuple/list of n integers,\n specifying the strides of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n dilation_rate: An integer or tuple/list of n integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step.\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n unit_forget_bias: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Use in combination with `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al.]\n (http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n\n Call arguments:\n inputs: A 4D tensor.\n states: List of state tensors corresponding to the previous timestep.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. 
Only relevant when `dropout` or\n `recurrent_dropout` is used.\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1),\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n **kwargs):\n super(ConvLSTM2DCell, self).__init__(**kwargs)\n self.filters = filters\n self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')\n self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')\n self.padding = conv_utils.normalize_padding(padding)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,\n 'dilation_rate')\n self.activation = activations.get(activation)\n self.recurrent_activation = activations.get(recurrent_activation)\n self.use_bias = use_bias\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.unit_forget_bias = unit_forget_bias\n\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.dropout = min(1., max(0., dropout))\n self.recurrent_dropout = min(1., max(0., recurrent_dropout))\n self.state_size = (self.filters, self.filters)\n\n def 
build(self, input_shape):\n\n if self.data_format == 'channels_first':\n channel_axis = 1\n else:\n channel_axis = -1\n if input_shape[channel_axis] is None:\n raise ValueError('The channel dimension of the inputs '\n 'should be defined. Found `None`.')\n input_dim = input_shape[channel_axis]\n kernel_shape = self.kernel_size + (input_dim, self.filters * 4)\n self.kernel_shape = kernel_shape\n recurrent_kernel_shape = self.kernel_size + (self.filters, self.filters * 4)\n\n self.kernel = self.add_weight(shape=kernel_shape,\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n self.recurrent_kernel = self.add_weight(\n shape=recurrent_kernel_shape,\n initializer=self.recurrent_initializer,\n name='recurrent_kernel',\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint)\n\n if self.use_bias:\n if self.unit_forget_bias:\n\n def bias_initializer(_, *args, **kwargs):\n return K.concatenate([\n self.bias_initializer((self.filters,), *args, **kwargs),\n initializers.Ones()((self.filters,), *args, **kwargs),\n self.bias_initializer((self.filters * 2,), *args, **kwargs),\n ])\n else:\n bias_initializer = self.bias_initializer\n self.bias = self.add_weight(\n shape=(self.filters * 4,),\n name='bias',\n initializer=bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.built = True\n\n def call(self, inputs, states, training=None):\n h_tm1 = states[0] # previous memory state\n c_tm1 = states[1] # previous carry state\n\n # dropout matrices for input units\n dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)\n # dropout matrices for recurrent units\n rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(\n h_tm1, training, count=4)\n\n if 0 < self.dropout < 1.:\n inputs_i = inputs * dp_mask[0]\n inputs_f = inputs * dp_mask[1]\n inputs_c = inputs * dp_mask[2]\n inputs_o = inputs * 
dp_mask[3]\n else:\n inputs_i = inputs\n inputs_f = inputs\n inputs_c = inputs\n inputs_o = inputs\n\n if 0 < self.recurrent_dropout < 1.:\n h_tm1_i = h_tm1 * rec_dp_mask[0]\n h_tm1_f = h_tm1 * rec_dp_mask[1]\n h_tm1_c = h_tm1 * rec_dp_mask[2]\n h_tm1_o = h_tm1 * rec_dp_mask[3]\n else:\n h_tm1_i = h_tm1\n h_tm1_f = h_tm1\n h_tm1_c = h_tm1\n h_tm1_o = h_tm1\n\n (kernel_i, kernel_f,\n kernel_c, kernel_o) = array_ops.split(self.kernel, 4, axis=3)\n (recurrent_kernel_i,\n recurrent_kernel_f,\n recurrent_kernel_c,\n recurrent_kernel_o) = array_ops.split(self.recurrent_kernel, 4, axis=3)\n\n if self.use_bias:\n bias_i, bias_f, bias_c, bias_o = array_ops.split(self.bias, 4)\n else:\n bias_i, bias_f, bias_c, bias_o = None, None, None, None\n\n x_i = self.input_conv(inputs_i, kernel_i, bias_i, padding=self.padding)\n x_f = self.input_conv(inputs_f, kernel_f, bias_f, padding=self.padding)\n x_c = self.input_conv(inputs_c, kernel_c, bias_c, padding=self.padding)\n x_o = self.input_conv(inputs_o, kernel_o, bias_o, padding=self.padding)\n h_i = self.recurrent_conv(h_tm1_i, recurrent_kernel_i)\n h_f = self.recurrent_conv(h_tm1_f, recurrent_kernel_f)\n h_c = self.recurrent_conv(h_tm1_c, recurrent_kernel_c)\n h_o = self.recurrent_conv(h_tm1_o, recurrent_kernel_o)\n\n i = self.recurrent_activation(x_i + h_i)\n f = self.recurrent_activation(x_f + h_f)\n c = f * c_tm1 + i * self.activation(x_c + h_c)\n o = self.recurrent_activation(x_o + h_o)\n h = o * self.activation(c)\n return h, [h, c]\n\n def input_conv(self, x, w, b=None, padding='valid'):\n conv_out = K.conv2d(x, w, strides=self.strides,\n padding=padding,\n data_format=self.data_format,\n dilation_rate=self.dilation_rate)\n if b is not None:\n conv_out = K.bias_add(conv_out, b,\n data_format=self.data_format)\n return conv_out\n\n def recurrent_conv(self, x, w):\n conv_out = K.conv2d(x, w, strides=(1, 1),\n padding='same',\n data_format=self.data_format)\n return conv_out\n\n def get_config(self):\n config = {'filters': 
self.filters,\n 'kernel_size': self.kernel_size,\n 'strides': self.strides,\n 'padding': self.padding,\n 'data_format': self.data_format,\n 'dilation_rate': self.dilation_rate,\n 'activation': activations.serialize(self.activation),\n 'recurrent_activation': activations.serialize(\n self.recurrent_activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(\n self.kernel_initializer),\n 'recurrent_initializer': initializers.serialize(\n self.recurrent_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'unit_forget_bias': self.unit_forget_bias,\n 'kernel_regularizer': regularizers.serialize(\n self.kernel_regularizer),\n 'recurrent_regularizer': regularizers.serialize(\n self.recurrent_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'kernel_constraint': constraints.serialize(\n self.kernel_constraint),\n 'recurrent_constraint': constraints.serialize(\n self.recurrent_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'dropout': self.dropout,\n 'recurrent_dropout': self.recurrent_dropout}\n base_config = super(ConvLSTM2DCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.ConvLSTM2D')\nclass ConvLSTM2D(ConvRNN2D):\n \"\"\"Convolutional LSTM.\n\n It is similar to an LSTM layer, but the input transformations\n and recurrent transformations are both convolutional.\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. 
the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of n integers, specifying the\n dimensions of the convolution window.\n strides: An integer or tuple/list of n integers,\n specifying the strides of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, time, ..., channels)`\n while `channels_first` corresponds to\n inputs with shape `(batch, time, channels, ...)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n dilation_rate: An integer or tuple/list of n integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step.\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n unit_forget_bias: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Use in combination with `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al.]\n (http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to.\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n return_sequences: Boolean. Whether to return the last output\n in the output sequence, or the full sequence.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n\n Call arguments:\n inputs: A 5D tensor.\n mask: Binary tensor of shape `(samples, timesteps)` indicating whether\n a given timestep should be masked.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the cell\n when calling it. This is only relevant if `dropout` or `recurrent_dropout`\n are set.\n initial_state: List of initial state tensors to be passed to the first\n call of the cell.\n\n Input shape:\n - If data_format='channels_first'\n 5D tensor with shape:\n `(samples, time, channels, rows, cols)`\n - If data_format='channels_last'\n 5D tensor with shape:\n `(samples, time, rows, cols, channels)`\n\n Output shape:\n - If `return_sequences`\n - If data_format='channels_first'\n 5D tensor with shape:\n `(samples, time, filters, output_row, output_col)`\n - If data_format='channels_last'\n 5D tensor with shape:\n `(samples, time, output_row, output_col, filters)`\n - Else\n - If data_format ='channels_first'\n 4D tensor with shape:\n `(samples, filters, output_row, output_col)`\n - If data_format='channels_last'\n 4D tensor with shape:\n `(samples, output_row, output_col, filters)`\n where `o_row` and `o_col` depend on the shape of the filter and\n the padding\n\n Raises:\n ValueError: in case of invalid constructor arguments.\n\n References:\n - [Convolutional LSTM Network: A Machine Learning Approach for\n Precipitation Nowcasting](http://arxiv.org/abs/1506.04214v1)\n The current implementation does not include the feedback loop on the\n cells output.\n \"\"\"\n\n def __init__(self,\n 
filters,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1),\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n return_sequences=False,\n go_backwards=False,\n stateful=False,\n dropout=0.,\n recurrent_dropout=0.,\n **kwargs):\n cell = ConvLSTM2DCell(filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activation,\n recurrent_activation=recurrent_activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n recurrent_initializer=recurrent_initializer,\n bias_initializer=bias_initializer,\n unit_forget_bias=unit_forget_bias,\n kernel_regularizer=kernel_regularizer,\n recurrent_regularizer=recurrent_regularizer,\n bias_regularizer=bias_regularizer,\n kernel_constraint=kernel_constraint,\n recurrent_constraint=recurrent_constraint,\n bias_constraint=bias_constraint,\n dropout=dropout,\n recurrent_dropout=recurrent_dropout)\n super(ConvLSTM2D, self).__init__(cell,\n return_sequences=return_sequences,\n go_backwards=go_backwards,\n stateful=stateful,\n **kwargs)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n\n def call(self, inputs, mask=None, training=None, initial_state=None):\n self.cell.reset_dropout_mask()\n self.cell.reset_recurrent_dropout_mask()\n return super(ConvLSTM2D, self).call(inputs,\n mask=mask,\n training=training,\n initial_state=initial_state)\n\n @property\n def filters(self):\n return self.cell.filters\n\n @property\n def kernel_size(self):\n return self.cell.kernel_size\n\n @property\n def strides(self):\n return 
self.cell.strides\n\n @property\n def padding(self):\n return self.cell.padding\n\n @property\n def data_format(self):\n return self.cell.data_format\n\n @property\n def dilation_rate(self):\n return self.cell.dilation_rate\n\n @property\n def activation(self):\n return self.cell.activation\n\n @property\n def recurrent_activation(self):\n return self.cell.recurrent_activation\n\n @property\n def use_bias(self):\n return self.cell.use_bias\n\n @property\n def kernel_initializer(self):\n return self.cell.kernel_initializer\n\n @property\n def recurrent_initializer(self):\n return self.cell.recurrent_initializer\n\n @property\n def bias_initializer(self):\n return self.cell.bias_initializer\n\n @property\n def unit_forget_bias(self):\n return self.cell.unit_forget_bias\n\n @property\n def kernel_regularizer(self):\n return self.cell.kernel_regularizer\n\n @property\n def recurrent_regularizer(self):\n return self.cell.recurrent_regularizer\n\n @property\n def bias_regularizer(self):\n return self.cell.bias_regularizer\n\n @property\n def kernel_constraint(self):\n return self.cell.kernel_constraint\n\n @property\n def recurrent_constraint(self):\n return self.cell.recurrent_constraint\n\n @property\n def bias_constraint(self):\n return self.cell.bias_constraint\n\n @property\n def dropout(self):\n return self.cell.dropout\n\n @property\n def recurrent_dropout(self):\n return self.cell.recurrent_dropout\n\n def get_config(self):\n config = {'filters': self.filters,\n 'kernel_size': self.kernel_size,\n 'strides': self.strides,\n 'padding': self.padding,\n 'data_format': self.data_format,\n 'dilation_rate': self.dilation_rate,\n 'activation': activations.serialize(self.activation),\n 'recurrent_activation': activations.serialize(\n self.recurrent_activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(\n self.kernel_initializer),\n 'recurrent_initializer': initializers.serialize(\n self.recurrent_initializer),\n 'bias_initializer': 
initializers.serialize(self.bias_initializer),\n 'unit_forget_bias': self.unit_forget_bias,\n 'kernel_regularizer': regularizers.serialize(\n self.kernel_regularizer),\n 'recurrent_regularizer': regularizers.serialize(\n self.recurrent_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': regularizers.serialize(\n self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(\n self.kernel_constraint),\n 'recurrent_constraint': constraints.serialize(\n self.recurrent_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'dropout': self.dropout,\n 'recurrent_dropout': self.recurrent_dropout}\n base_config = super(ConvLSTM2D, self).get_config()\n del base_config['cell']\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Virtual batch normalization.\n\nThis technique was first introduced in `Improved Techniques for Training GANs`\n(Salimans et al, https://arxiv.org/abs/1606.03498). Instead of using batch\nnormalization on a minibatch, it fixes a reference subset of the data to use for\ncalculating normalization statistics.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import variable_scope\n\n__all__ = [\n 'VBN',\n]\n\n\ndef _static_or_dynamic_batch_size(tensor, batch_axis):\n \"\"\"Returns the static or dynamic batch size.\"\"\"\n batch_size = array_ops.shape(tensor)[batch_axis]\n static_batch_size = tensor_util.constant_value(batch_size)\n return static_batch_size or batch_size\n\n\ndef _statistics(x, axes):\n \"\"\"Calculate the mean and mean square of `x`.\n\n Modified from the implementation of `tf.nn.moments`.\n\n 
Args:\n x: A `Tensor`.\n axes: Array of ints. Axes along which to compute mean and variance.\n\n Returns:\n Two `Tensor` objects: `mean` and `square mean`.\n \"\"\"\n # The dynamic range of fp16 is too limited to support the collection of\n # sufficient statistics. As a workaround we simply perform the operations\n # on 32-bit floats before converting the mean and variance back to fp16\n y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x\n\n # Compute true mean while keeping the dims for proper broadcasting.\n shift = array_ops.stop_gradient(math_ops.reduce_mean(y, axes, keepdims=True))\n\n shifted_mean = math_ops.reduce_mean(y - shift, axes, keepdims=True)\n mean = shifted_mean + shift\n mean_squared = math_ops.reduce_mean(math_ops.square(y), axes, keepdims=True)\n\n mean = array_ops.squeeze(mean, axes)\n mean_squared = array_ops.squeeze(mean_squared, axes)\n if x.dtype == dtypes.float16:\n return (math_ops.cast(mean, dtypes.float16),\n math_ops.cast(mean_squared, dtypes.float16))\n else:\n return (mean, mean_squared)\n\n\ndef _validate_init_input_and_get_axis(reference_batch, axis):\n \"\"\"Validate input and return the used axis value.\"\"\"\n if reference_batch.shape.ndims is None:\n raise ValueError('`reference_batch` has unknown dimensions.')\n\n ndims = reference_batch.shape.ndims\n if axis < 0:\n used_axis = ndims + axis\n else:\n used_axis = axis\n if used_axis < 0 or used_axis >= ndims:\n raise ValueError('Value of `axis` argument ' + str(used_axis) +\n ' is out of range for input with rank ' + str(ndims))\n return used_axis\n\n\ndef _validate_call_input(tensor_list, batch_dim):\n \"\"\"Verifies that tensor shapes are compatible, except for `batch_dim`.\"\"\"\n\n def _get_shape(tensor):\n shape = tensor.shape.as_list()\n del shape[batch_dim]\n return shape\n\n base_shape = tensor_shape.TensorShape(_get_shape(tensor_list[0]))\n for tensor in tensor_list:\n base_shape.assert_is_compatible_with(_get_shape(tensor))\n\n\nclass 
VBN(object):\n \"\"\"A class to perform virtual batch normalization.\n\n This technique was first introduced in `Improved Techniques for Training GANs`\n (Salimans et al, https://arxiv.org/abs/1606.03498). Instead of using batch\n normalization on a minibatch, it fixes a reference subset of the data to use\n for calculating normalization statistics.\n\n To do this, we calculate the reference batch mean and mean square, and modify\n those statistics for each example. We use mean square instead of variance,\n since it is linear.\n\n Note that if `center` or `scale` variables are created, they are shared\n between all calls to this object.\n\n The `__init__` API is intended to mimic\n `tf.compat.v1.layers.batch_normalization` as\n closely as possible.\n \"\"\"\n\n def __init__(self,\n reference_batch,\n axis=-1,\n epsilon=1e-3,\n center=True,\n scale=True,\n beta_initializer=init_ops.zeros_initializer(),\n gamma_initializer=init_ops.ones_initializer(),\n beta_regularizer=None,\n gamma_regularizer=None,\n trainable=True,\n name=None,\n batch_axis=0):\n \"\"\"Initialize virtual batch normalization object.\n\n We precompute the 'mean' and 'mean squared' of the reference batch, so that\n `__call__` is efficient. This means that the axis must be supplied when the\n object is created, not when it is called.\n\n We precompute 'square mean' instead of 'variance', because the square mean\n can be easily adjusted on a per-example basis.\n\n Args:\n reference_batch: A minibatch tensors. This will form the reference data\n from which the normalization statistics are calculated. See\n https://arxiv.org/abs/1606.03498 for more details.\n axis: Integer, the axis that should be normalized (typically the features\n axis). For instance, after a `Convolution2D` layer with\n `data_format=\"channels_first\"`, set `axis=1` in `BatchNormalization`.\n epsilon: Small float added to variance to avoid dividing by zero.\n center: If True, add offset of `beta` to normalized tensor. 
If False,\n `beta` is ignored.\n scale: If True, multiply by `gamma`. If False, `gamma` is not used. When\n the next layer is linear (also e.g. `nn.relu`), this can be disabled\n since the scaling can be done by the next layer.\n beta_initializer: Initializer for the beta weight.\n gamma_initializer: Initializer for the gamma weight.\n beta_regularizer: Optional regularizer for the beta weight.\n gamma_regularizer: Optional regularizer for the gamma weight.\n trainable: Boolean, if `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).\n name: String, the name of the ops.\n batch_axis: The axis of the batch dimension. This dimension is treated\n differently in `virtual batch normalization` vs `batch normalization`.\n\n Raises:\n ValueError: If `reference_batch` has unknown dimensions at graph\n construction.\n ValueError: If `batch_axis` is the same as `axis`.\n \"\"\"\n axis = _validate_init_input_and_get_axis(reference_batch, axis)\n self._epsilon = epsilon\n self._beta = 0\n self._gamma = 1\n self._batch_axis = _validate_init_input_and_get_axis(\n reference_batch, batch_axis)\n\n if axis == self._batch_axis:\n raise ValueError('`axis` and `batch_axis` cannot be the same.')\n\n with variable_scope.variable_scope(\n name, 'VBN', values=[reference_batch]) as self._vs:\n self._reference_batch = reference_batch\n\n # Calculate important shapes:\n # 1) Reduction axes for the reference batch\n # 2) Broadcast shape, if necessary\n # 3) Reduction axes for the virtual batchnormed batch\n # 4) Shape for optional parameters\n input_shape = self._reference_batch.shape\n ndims = input_shape.ndims\n reduction_axes = list(range(ndims))\n del reduction_axes[axis]\n\n self._broadcast_shape = [1] * len(input_shape)\n self._broadcast_shape[axis] = input_shape.dims[axis]\n\n self._example_reduction_axes = list(range(ndims))\n del self._example_reduction_axes[max(axis, self._batch_axis)]\n del self._example_reduction_axes[min(axis, 
self._batch_axis)]\n\n params_shape = self._reference_batch.shape[axis]\n\n # Determines whether broadcasting is needed. This is slightly different\n # than in the `nn.batch_normalization` case, due to `batch_dim`.\n self._needs_broadcasting = (\n sorted(self._example_reduction_axes) != list(range(ndims))[:-2])\n\n # Calculate the sufficient statistics for the reference batch in a way\n # that can be easily modified by additional examples.\n self._ref_mean, self._ref_mean_squares = _statistics(\n self._reference_batch, reduction_axes)\n self._ref_variance = (\n self._ref_mean_squares - math_ops.square(self._ref_mean))\n\n # Virtual batch normalization uses a weighted average between example\n # statistics and the reference batch statistics.\n ref_batch_size = _static_or_dynamic_batch_size(self._reference_batch,\n self._batch_axis)\n self._example_weight = 1. / (\n math_ops.cast(ref_batch_size, dtypes.float32) + 1.)\n self._ref_weight = 1. - self._example_weight\n\n # Make the variables, if necessary.\n if center:\n self._beta = variable_scope.get_variable(\n name='beta',\n shape=(params_shape,),\n initializer=beta_initializer,\n regularizer=beta_regularizer,\n trainable=trainable)\n if scale:\n self._gamma = variable_scope.get_variable(\n name='gamma',\n shape=(params_shape,),\n initializer=gamma_initializer,\n regularizer=gamma_regularizer,\n trainable=trainable)\n\n def _virtual_statistics(self, inputs, reduction_axes):\n \"\"\"Compute the statistics needed for virtual batch normalization.\"\"\"\n cur_mean, cur_mean_sq = _statistics(inputs, reduction_axes)\n vb_mean = (\n self._example_weight * cur_mean + self._ref_weight * self._ref_mean)\n vb_mean_sq = (\n self._example_weight * cur_mean_sq +\n self._ref_weight * self._ref_mean_squares)\n return (vb_mean, vb_mean_sq)\n\n def _broadcast(self, v, broadcast_shape=None):\n # The exact broadcast shape depends on the current batch, not the reference\n # batch, unless we're calculating the batch normalization of the 
reference\n # batch.\n b_shape = broadcast_shape or self._broadcast_shape\n if self._needs_broadcasting and v is not None:\n return array_ops.reshape(v, b_shape)\n return v\n\n def reference_batch_normalization(self):\n \"\"\"Return the reference batch, but batch normalized.\"\"\"\n with ops.name_scope(self._vs.name):\n return nn.batch_normalization(self._reference_batch,\n self._broadcast(self._ref_mean),\n self._broadcast(self._ref_variance),\n self._broadcast(self._beta),\n self._broadcast(self._gamma), self._epsilon)\n\n def __call__(self, inputs):\n \"\"\"Run virtual batch normalization on inputs.\n\n Args:\n inputs: Tensor input.\n\n Returns:\n A virtual batch normalized version of `inputs`.\n\n Raises:\n ValueError: If `inputs` shape isn't compatible with the reference batch.\n \"\"\"\n _validate_call_input([inputs, self._reference_batch], self._batch_axis)\n\n with ops.name_scope(self._vs.name, values=[inputs, self._reference_batch]):\n # Calculate the statistics on the current input on a per-example basis.\n vb_mean, vb_mean_sq = self._virtual_statistics(\n inputs, self._example_reduction_axes)\n vb_variance = vb_mean_sq - math_ops.square(vb_mean)\n\n # The exact broadcast shape of the input statistic Tensors depends on the\n # current batch, not the reference batch. The parameter broadcast shape\n # is independent of the shape of the input statistic Tensor dimensions.\n b_shape = self._broadcast_shape[:] # deep copy\n b_shape[self._batch_axis] = _static_or_dynamic_batch_size(\n inputs, self._batch_axis)\n return nn.batch_normalization(\n inputs, self._broadcast(vb_mean, b_shape),\n self._broadcast(vb_variance, b_shape),\n self._broadcast(self._beta, self._broadcast_shape),\n self._broadcast(self._gamma, self._broadcast_shape), self._epsilon)\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test for checking quantile related ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import boosted_trees_ops\nfrom tensorflow.python.ops import resources\nfrom tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_handle_op as resource_handle_op\nfrom tensorflow.python.ops.gen_boosted_trees_ops import is_boosted_trees_quantile_stream_resource_initialized as resource_initialized\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.training import saver\n\n\n@test_util.run_deprecated_v1\nclass QuantileOpsTest(test_util.TensorFlowTestCase):\n\n def create_resource(self, name, eps, max_elements, num_streams=1):\n quantile_accumulator_handle = resource_handle_op(\n container=\"\", shared_name=name, name=name)\n create_op = boosted_trees_ops.create_quantile_stream_resource(\n quantile_accumulator_handle,\n epsilon=eps,\n max_elements=max_elements,\n 
num_streams=num_streams)\n is_initialized_op = resource_initialized(quantile_accumulator_handle)\n resources.register_resource(quantile_accumulator_handle, create_op,\n is_initialized_op)\n return quantile_accumulator_handle\n\n def setUp(self):\n \"\"\"Sets up the quantile ops test as follows.\n\n Create a batch of 6 examples having 2 features\n The data looks like this\n | Instance | instance weights | Feature 0 | Feature 1\n | 0 | 10 | 1.2 | 2.3\n | 1 | 1 | 12.1 | 1.2\n | 2 | 1 | 0.3 | 1.1\n | 3 | 1 | 0.5 | 2.6\n | 4 | 1 | 0.6 | 3.2\n | 5 | 1 | 2.2 | 0.8\n \"\"\"\n\n self._feature_0 = constant_op.constant([1.2, 12.1, 0.3, 0.5, 0.6, 2.2],\n dtype=dtypes.float32)\n self._feature_1 = constant_op.constant([2.3, 1.2, 1.1, 2.6, 3.2, 0.8],\n dtype=dtypes.float32)\n self._feature_0_boundaries = np.array([0.3, 0.6, 1.2, 12.1])\n self._feature_1_boundaries = np.array([0.8, 1.2, 2.3, 3.2])\n self._feature_0_quantiles = constant_op.constant([2, 3, 0, 1, 1, 3],\n dtype=dtypes.int32)\n self._feature_1_quantiles = constant_op.constant([2, 1, 1, 3, 3, 0],\n dtype=dtypes.int32)\n\n self._example_weights = constant_op.constant(\n [10, 1, 1, 1, 1, 1], dtype=dtypes.float32)\n\n self.eps = 0.01\n self.max_elements = 1 << 16\n self.num_quantiles = constant_op.constant(3, dtype=dtypes.int64)\n\n def testBasicQuantileBucketsSingleResource(self):\n with self.cached_session() as sess:\n quantile_accumulator_handle = self.create_resource(\"floats\", self.eps,\n self.max_elements, 2)\n resources.initialize_resources(resources.shared_resources()).run()\n summaries = boosted_trees_ops.make_quantile_summaries(\n [self._feature_0, self._feature_1], self._example_weights,\n epsilon=self.eps)\n summary_op = boosted_trees_ops.quantile_add_summaries(\n quantile_accumulator_handle, summaries)\n flush_op = boosted_trees_ops.quantile_flush(\n quantile_accumulator_handle, self.num_quantiles)\n buckets = boosted_trees_ops.get_bucket_boundaries(\n quantile_accumulator_handle, num_features=2)\n quantiles 
= boosted_trees_ops.boosted_trees_bucketize(\n [self._feature_0, self._feature_1], buckets)\n self.evaluate(summary_op)\n self.evaluate(flush_op)\n self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())\n self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())\n\n self.assertAllClose(self._feature_0_quantiles, quantiles[0].eval())\n self.assertAllClose(self._feature_1_quantiles, quantiles[1].eval())\n\n def testBasicQuantileBucketsMultipleResources(self):\n with self.cached_session() as sess:\n quantile_accumulator_handle_0 = self.create_resource(\"float_0\", self.eps,\n self.max_elements)\n quantile_accumulator_handle_1 = self.create_resource(\"float_1\", self.eps,\n self.max_elements)\n resources.initialize_resources(resources.shared_resources()).run()\n summaries = boosted_trees_ops.make_quantile_summaries(\n [self._feature_0, self._feature_1], self._example_weights,\n epsilon=self.eps)\n summary_op_0 = boosted_trees_ops.quantile_add_summaries(\n quantile_accumulator_handle_0,\n [summaries[0]])\n summary_op_1 = boosted_trees_ops.quantile_add_summaries(\n quantile_accumulator_handle_1,\n [summaries[1]])\n flush_op_0 = boosted_trees_ops.quantile_flush(\n quantile_accumulator_handle_0, self.num_quantiles)\n flush_op_1 = boosted_trees_ops.quantile_flush(\n quantile_accumulator_handle_1, self.num_quantiles)\n bucket_0 = boosted_trees_ops.get_bucket_boundaries(\n quantile_accumulator_handle_0, num_features=1)\n bucket_1 = boosted_trees_ops.get_bucket_boundaries(\n quantile_accumulator_handle_1, num_features=1)\n quantiles = boosted_trees_ops.boosted_trees_bucketize(\n [self._feature_0, self._feature_1], bucket_0 + bucket_1)\n self.evaluate([summary_op_0, summary_op_1])\n self.evaluate([flush_op_0, flush_op_1])\n self.assertAllClose(self._feature_0_boundaries, bucket_0[0].eval())\n self.assertAllClose(self._feature_1_boundaries, bucket_1[0].eval())\n\n self.assertAllClose(self._feature_0_quantiles, quantiles[0].eval())\n 
self.assertAllClose(self._feature_1_quantiles, quantiles[1].eval())\n\n def testSaveRestoreAfterFlush(self):\n save_dir = os.path.join(self.get_temp_dir(), \"save_restore\")\n save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), \"hash\")\n\n with self.cached_session() as sess:\n accumulator = boosted_trees_ops.QuantileAccumulator(\n num_streams=2, num_quantiles=3, epsilon=self.eps, name=\"q0\")\n\n save = saver.Saver()\n resources.initialize_resources(resources.shared_resources()).run()\n\n buckets = accumulator.get_bucket_boundaries()\n self.assertAllClose([], buckets[0].eval())\n self.assertAllClose([], buckets[1].eval())\n summaries = accumulator.add_summaries([self._feature_0, self._feature_1],\n self._example_weights)\n with ops.control_dependencies([summaries]):\n flush = accumulator.flush()\n self.evaluate(flush)\n self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())\n self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())\n save.save(sess, save_path)\n\n with self.session(graph=ops.Graph()) as sess:\n accumulator = boosted_trees_ops.QuantileAccumulator(\n num_streams=2, num_quantiles=3, epsilon=self.eps, name=\"q0\")\n save = saver.Saver()\n save.restore(sess, save_path)\n buckets = accumulator.get_bucket_boundaries()\n self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())\n self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())\n\n def testSaveRestoreBeforeFlush(self):\n save_dir = os.path.join(self.get_temp_dir(), \"save_restore\")\n save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), \"hash\")\n\n with self.cached_session() as sess:\n accumulator = boosted_trees_ops.QuantileAccumulator(\n num_streams=2, num_quantiles=3, epsilon=self.eps, name=\"q0\")\n\n save = saver.Saver()\n resources.initialize_resources(resources.shared_resources()).run()\n\n summaries = accumulator.add_summaries([self._feature_0, self._feature_1],\n self._example_weights)\n self.evaluate(summaries)\n buckets = 
accumulator.get_bucket_boundaries()\n self.assertAllClose([], buckets[0].eval())\n self.assertAllClose([], buckets[1].eval())\n save.save(sess, save_path)\n self.evaluate(accumulator.flush())\n self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())\n self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())\n\n with self.session(graph=ops.Graph()) as sess:\n accumulator = boosted_trees_ops.QuantileAccumulator(\n num_streams=2, num_quantiles=3, epsilon=self.eps, name=\"q0\")\n save = saver.Saver()\n save.restore(sess, save_path)\n buckets = accumulator.get_bucket_boundaries()\n self.assertAllClose([], buckets[0].eval())\n self.assertAllClose([], buckets[1].eval())\n\n\nif __name__ == \"__main__\":\n googletest.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Discrete Cosine Transform ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math as _math\n\nfrom tensorflow.python.framework import dtypes as _dtypes\nfrom tensorflow.python.framework import ops as _ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops as _array_ops\nfrom tensorflow.python.ops import math_ops as _math_ops\nfrom tensorflow.python.ops.signal import fft_ops\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef _validate_dct_arguments(input_tensor, dct_type, n, axis, norm):\n \"\"\"Checks that DCT/IDCT arguments are compatible and well formed.\"\"\"\n if n is not None:\n raise NotImplementedError(\"The DCT length argument is not implemented.\")\n if axis != -1:\n raise NotImplementedError(\"axis must be -1. 
Got: %s\" % axis)\n if dct_type not in (1, 2, 3):\n raise ValueError(\"Only Types I, II and III (I)DCT are supported.\")\n if dct_type == 1:\n if norm == \"ortho\":\n raise ValueError(\"Normalization is not supported for the Type-I DCT.\")\n if input_tensor.shape[-1] is not None and input_tensor.shape[-1] < 2:\n raise ValueError(\n \"Type-I DCT requires the dimension to be greater than one.\")\n\n if norm not in (None, \"ortho\"):\n raise ValueError(\n \"Unknown normalization. Expected None or 'ortho', got: %s\" % norm)\n\n\n# TODO(rjryan): Implement `n` and `axis` parameters.\n@tf_export(\"signal.dct\", v1=[\"signal.dct\", \"spectral.dct\"])\ndef dct(input, type=2, n=None, axis=-1, norm=None, name=None): # pylint: disable=redefined-builtin\n \"\"\"Computes the 1D [Discrete Cosine Transform (DCT)][dct] of `input`.\n\n Currently only Types I, II and III are supported.\n Type I is implemented using a length `2N` padded `tf.signal.rfft`.\n Type II is implemented using a length `2N` padded `tf.signal.rfft`, as\n described here: [Type 2 DCT using 2N FFT padded (Makhoul)](https://dsp.stackexchange.com/a/10606).\n Type III is a fairly straightforward inverse of Type II\n (i.e. using a length `2N` padded `tf.signal.irfft`).\n\n @compatibility(scipy)\n Equivalent to [scipy.fftpack.dct](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html)\n for Type-I, Type-II and Type-III DCT.\n @end_compatibility\n\n Args:\n input: A `[..., samples]` `float32` `Tensor` containing the signals to\n take the DCT of.\n type: The DCT type to perform. Must be 1, 2 or 3.\n n: For future expansion. The length of the transform. Must be `None`.\n axis: For future expansion. The axis to compute the DCT along. Must be `-1`.\n norm: The normalization to apply. 
`None` for no normalization or `'ortho'`\n for orthonormal normalization.\n name: An optional name for the operation.\n\n Returns:\n A `[..., samples]` `float32` `Tensor` containing the DCT of `input`.\n\n Raises:\n ValueError: If `type` is not `1`, `2` or `3`, `n` is not `None, `axis` is\n not `-1`, or `norm` is not `None` or `'ortho'`.\n ValueError: If `type` is `1` and `norm` is `ortho`.\n\n [dct]: https://en.wikipedia.org/wiki/Discrete_cosine_transform\n \"\"\"\n _validate_dct_arguments(input, type, n, axis, norm)\n with _ops.name_scope(name, \"dct\", [input]):\n # We use the RFFT to compute the DCT and TensorFlow only supports float32\n # for FFTs at the moment.\n input = _ops.convert_to_tensor(input, dtype=_dtypes.float32)\n\n axis_dim = (tensor_shape.dimension_value(input.shape[-1])\n or _array_ops.shape(input)[-1])\n axis_dim_float = _math_ops.cast(axis_dim, _dtypes.float32)\n\n if type == 1:\n dct1_input = _array_ops.concat([input, input[..., -2:0:-1]], axis=-1)\n dct1 = _math_ops.real(fft_ops.rfft(dct1_input))\n return dct1\n\n if type == 2:\n scale = 2.0 * _math_ops.exp(\n _math_ops.complex(\n 0.0, -_math_ops.range(axis_dim_float) * _math.pi * 0.5 /\n axis_dim_float))\n\n # TODO(rjryan): Benchmark performance and memory usage of the various\n # approaches to computing a DCT via the RFFT.\n dct2 = _math_ops.real(\n fft_ops.rfft(\n input, fft_length=[2 * axis_dim])[..., :axis_dim] * scale)\n\n if norm == \"ortho\":\n n1 = 0.5 * _math_ops.rsqrt(axis_dim_float)\n n2 = n1 * _math_ops.sqrt(2.0)\n # Use tf.pad to make a vector of [n1, n2, n2, n2, ...].\n weights = _array_ops.pad(\n _array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],\n constant_values=n2)\n dct2 *= weights\n\n return dct2\n\n elif type == 3:\n if norm == \"ortho\":\n n1 = _math_ops.sqrt(axis_dim_float)\n n2 = n1 * _math_ops.sqrt(0.5)\n # Use tf.pad to make a vector of [n1, n2, n2, n2, ...].\n weights = _array_ops.pad(\n _array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],\n constant_values=n2)\n 
input *= weights\n else:\n input *= axis_dim_float\n scale = 2.0 * _math_ops.exp(\n _math_ops.complex(\n 0.0,\n _math_ops.range(axis_dim_float) * _math.pi * 0.5 /\n axis_dim_float))\n dct3 = _math_ops.real(\n fft_ops.irfft(\n scale * _math_ops.complex(input, 0.0),\n fft_length=[2 * axis_dim]))[..., :axis_dim]\n\n return dct3\n\n\n# TODO(rjryan): Implement `n` and `axis` parameters.\n@tf_export(\"signal.idct\", v1=[\"signal.idct\", \"spectral.idct\"])\ndef idct(input, type=2, n=None, axis=-1, norm=None, name=None): # pylint: disable=redefined-builtin\n \"\"\"Computes the 1D [Inverse Discrete Cosine Transform (DCT)][idct] of `input`.\n\n Currently only Types I, II and III are supported. Type III is the inverse of\n Type II, and vice versa.\n\n Note that you must re-normalize by 1/(2n) to obtain an inverse if `norm` is\n not `'ortho'`. That is:\n `signal == idct(dct(signal)) * 0.5 / signal.shape[-1]`.\n When `norm='ortho'`, we have:\n `signal == idct(dct(signal, norm='ortho'), norm='ortho')`.\n\n @compatibility(scipy)\n Equivalent to [scipy.fftpack.idct](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.idct.html)\n for Type-I, Type-II and Type-III DCT.\n @end_compatibility\n\n Args:\n input: A `[..., samples]` `float32` `Tensor` containing the signals to take\n the DCT of.\n type: The IDCT type to perform. Must be 1, 2 or 3.\n n: For future expansion. The length of the transform. Must be `None`.\n axis: For future expansion. The axis to compute the DCT along. Must be `-1`.\n norm: The normalization to apply. 
`None` for no normalization or `'ortho'`\n for orthonormal normalization.\n name: An optional name for the operation.\n\n Returns:\n A `[..., samples]` `float32` `Tensor` containing the IDCT of `input`.\n\n Raises:\n ValueError: If `type` is not `1`, `2` or `3`, `n` is not `None, `axis` is\n not `-1`, or `norm` is not `None` or `'ortho'`.\n\n [idct]:\n https://en.wikipedia.org/wiki/Discrete_cosine_transform#Inverse_transforms\n \"\"\"\n _validate_dct_arguments(input, type, n, axis, norm)\n inverse_type = {1: 1, 2: 3, 3: 2}[type]\n return dct(input, type=inverse_type, n=n, axis=axis, norm=norm, name=name)\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A `Predictor` constructed from a `SavedModel`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\n\nfrom tensorflow.contrib.predictor import predictor\nfrom tensorflow.contrib.saved_model.python.saved_model import reader\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.saved_model import loader\nfrom tensorflow.python.saved_model import signature_constants\n\n\nDEFAULT_TAGS = 'serve'\n\n_DEFAULT_INPUT_ALTERNATIVE_FORMAT = 'default_input_alternative:{}'\n\n\ndef get_meta_graph_def(saved_model_dir, tags):\n \"\"\"Gets `MetaGraphDef` from a directory containing a `SavedModel`.\n\n Returns the `MetaGraphDef` for the given tag-set and SavedModel directory.\n\n Args:\n saved_model_dir: Directory containing the SavedModel.\n tags: Comma separated list of tags used to identify the correct\n `MetaGraphDef`.\n\n Raises:\n ValueError: An error when the given tags cannot be found.\n\n Returns:\n A `MetaGraphDef` corresponding to the given tags.\n \"\"\"\n saved_model = reader.read_saved_model(saved_model_dir)\n set_of_tags = set([tag.strip() for tag in tags.split(',')])\n for meta_graph_def in saved_model.meta_graphs:\n if 
set(meta_graph_def.meta_info_def.tags) == set_of_tags:\n return meta_graph_def\n raise ValueError('Could not find MetaGraphDef with tags {}'.format(tags))\n\n\ndef _get_signature_def(signature_def_key, export_dir, tags):\n \"\"\"Construct a `SignatureDef` proto.\"\"\"\n signature_def_key = (\n signature_def_key or\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)\n\n metagraph_def = get_meta_graph_def(export_dir, tags)\n\n try:\n signature_def = metagraph_def.signature_def[signature_def_key]\n except KeyError as e:\n formatted_key = _DEFAULT_INPUT_ALTERNATIVE_FORMAT.format(\n signature_def_key)\n try:\n signature_def = metagraph_def.signature_def[formatted_key]\n except KeyError:\n raise ValueError(\n 'Got signature_def_key \"{}\". Available signatures are {}. '\n 'Original error:\\n{}'.format(\n signature_def_key, list(metagraph_def.signature_def), e))\n logging.warning('Could not find signature def \"%s\". '\n 'Using \"%s\" instead', signature_def_key, formatted_key)\n return signature_def\n\n\ndef _check_signature_arguments(signature_def_key,\n signature_def,\n input_names,\n output_names):\n \"\"\"Validates signature arguments for `SavedModelPredictor`.\"\"\"\n signature_def_key_specified = signature_def_key is not None\n signature_def_specified = signature_def is not None\n input_names_specified = input_names is not None\n output_names_specified = output_names is not None\n if input_names_specified != output_names_specified:\n raise ValueError(\n 'input_names and output_names must both be specified or both be '\n 'unspecified.'\n )\n\n if (signature_def_key_specified + signature_def_specified +\n input_names_specified > 1):\n raise ValueError(\n 'You must specify at most one of signature_def_key OR signature_def OR'\n '(input_names AND output_names).'\n )\n\n\nclass SavedModelPredictor(predictor.Predictor):\n \"\"\"A `Predictor` constructed from a `SavedModel`.\"\"\"\n\n def __init__(self,\n export_dir,\n signature_def_key=None,\n signature_def=None,\n 
input_names=None,\n output_names=None,\n tags=None,\n graph=None,\n config=None):\n \"\"\"Initialize a `CoreEstimatorPredictor`.\n\n Args:\n export_dir: a path to a directory containing a `SavedModel`.\n signature_def_key: Optional string specifying the signature to use. If\n `None`, then `DEFAULT_SERVING_SIGNATURE_DEF_KEY` is used. Only one of\n `signature_def_key` and `signature_def` should be specified.\n signature_def: A `SignatureDef` proto specifying the inputs and outputs\n for prediction. Only one of `signature_def_key` and `signature_def`\n should be specified.\n input_names: A dictionary mapping strings to `Tensor`s in the `SavedModel`\n that represent the input. The keys can be any string of the user's\n choosing.\n output_names: A dictionary mapping strings to `Tensor`s in the\n `SavedModel` that represent the output. The keys can be any string of\n the user's choosing.\n tags: Optional. Comma separated list of tags that will be used to retrieve\n the correct `SignatureDef`. Defaults to `DEFAULT_TAGS`.\n graph: Optional. 
The Tensorflow `graph` in which prediction should be\n done.\n config: `ConfigProto` proto used to configure the session.\n Raises:\n ValueError: If more than one of signature_def_key OR signature_def OR\n (input_names AND output_names) is specified.\n \"\"\"\n _check_signature_arguments(\n signature_def_key, signature_def, input_names, output_names)\n tags = tags or DEFAULT_TAGS\n self._graph = graph or ops.Graph()\n\n with self._graph.as_default():\n self._session = session.Session(config=config)\n loader.load(self._session, tags.split(','), export_dir)\n\n if input_names is None:\n if signature_def is None:\n signature_def = _get_signature_def(signature_def_key, export_dir, tags)\n input_names = {k: v.name for k, v in signature_def.inputs.items()}\n output_names = {k: v.name for k, v in signature_def.outputs.items()}\n\n self._feed_tensors = {k: self._graph.get_tensor_by_name(v)\n for k, v in input_names.items()}\n self._fetch_tensors = {k: self._graph.get_tensor_by_name(v)\n for k, v in output_names.items()}\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A library of helpers for use with SamplingDecoders.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\nimport six\n\nfrom tensorflow.contrib.seq2seq.python.ops import decoder\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.util import nest\n\n__all__ = [\n \"Helper\",\n \"TrainingHelper\",\n \"GreedyEmbeddingHelper\",\n \"SampleEmbeddingHelper\",\n \"CustomHelper\",\n \"ScheduledEmbeddingTrainingHelper\",\n \"ScheduledOutputTrainingHelper\",\n \"InferenceHelper\",\n]\n\n_transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access\n\n\n# The following sample functions (_call_sampler, bernoulli_sample,\n# categorical_sample) mimic TensorFlow Probability distribution semantics.\n\n\ndef 
_call_sampler(sample_n_fn, sample_shape, name=None):\n \"\"\"Reshapes vector of samples.\"\"\"\n with ops.name_scope(name, \"call_sampler\", values=[sample_shape]):\n sample_shape = ops.convert_to_tensor(\n sample_shape, dtype=dtypes.int32, name=\"sample_shape\")\n # Ensure sample_shape is a vector (vs just a scalar).\n pad = math_ops.cast(math_ops.equal(array_ops.rank(sample_shape), 0),\n dtypes.int32)\n sample_shape = array_ops.reshape(\n sample_shape,\n array_ops.pad(array_ops.shape(sample_shape),\n paddings=[[pad, 0]],\n constant_values=1))\n samples = sample_n_fn(math_ops.reduce_prod(sample_shape))\n batch_event_shape = array_ops.shape(samples)[1:]\n final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)\n return array_ops.reshape(samples, final_shape)\n\n\ndef bernoulli_sample(probs=None, logits=None, dtype=dtypes.int32,\n sample_shape=(), seed=None):\n \"\"\"Samples from Bernoulli distribution.\"\"\"\n if probs is None:\n probs = math_ops.sigmoid(logits, name=\"probs\")\n else:\n probs = ops.convert_to_tensor(probs, name=\"probs\")\n batch_shape_tensor = array_ops.shape(probs)\n def _sample_n(n):\n \"\"\"Sample vector of Bernoullis.\"\"\"\n new_shape = array_ops.concat([[n], batch_shape_tensor], 0)\n uniform = random_ops.random_uniform(\n new_shape, seed=seed, dtype=probs.dtype)\n return math_ops.cast(math_ops.less(uniform, probs), dtype)\n return _call_sampler(_sample_n, sample_shape)\n\n\ndef categorical_sample(logits, dtype=dtypes.int32,\n sample_shape=(), seed=None):\n \"\"\"Samples from categorical distribution.\"\"\"\n logits = ops.convert_to_tensor(logits, name=\"logits\")\n event_size = array_ops.shape(logits)[-1]\n batch_shape_tensor = array_ops.shape(logits)[:-1]\n def _sample_n(n):\n \"\"\"Sample vector of categoricals.\"\"\"\n if logits.shape.ndims == 2:\n logits_2d = logits\n else:\n logits_2d = array_ops.reshape(logits, [-1, event_size])\n sample_dtype = dtypes.int64 if logits.dtype.size > 4 else dtypes.int32\n draws = 
random_ops.multinomial(\n logits_2d, n, seed=seed, output_dtype=sample_dtype)\n draws = array_ops.reshape(\n array_ops.transpose(draws),\n array_ops.concat([[n], batch_shape_tensor], 0))\n return math_ops.cast(draws, dtype)\n return _call_sampler(_sample_n, sample_shape)\n\n\ndef _unstack_ta(inp):\n return tensor_array_ops.TensorArray(\n dtype=inp.dtype, size=array_ops.shape(inp)[0],\n element_shape=inp.get_shape()[1:]).unstack(inp)\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Helper(object):\n \"\"\"Interface for implementing sampling in seq2seq decoders.\n\n Helper instances are used by `BasicDecoder`.\n \"\"\"\n\n @abc.abstractproperty\n def batch_size(self):\n \"\"\"Batch size of tensor returned by `sample`.\n\n Returns a scalar int32 tensor.\n \"\"\"\n raise NotImplementedError(\"batch_size has not been implemented\")\n\n @abc.abstractproperty\n def sample_ids_shape(self):\n \"\"\"Shape of tensor returned by `sample`, excluding the batch dimension.\n\n Returns a `TensorShape`.\n \"\"\"\n raise NotImplementedError(\"sample_ids_shape has not been implemented\")\n\n @abc.abstractproperty\n def sample_ids_dtype(self):\n \"\"\"DType of tensor returned by `sample`.\n\n Returns a DType.\n \"\"\"\n raise NotImplementedError(\"sample_ids_dtype has not been implemented\")\n\n @abc.abstractmethod\n def initialize(self, name=None):\n \"\"\"Returns `(initial_finished, initial_inputs)`.\"\"\"\n pass\n\n @abc.abstractmethod\n def sample(self, time, outputs, state, name=None):\n \"\"\"Returns `sample_ids`.\"\"\"\n pass\n\n @abc.abstractmethod\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\n \"\"\"Returns `(finished, next_inputs, next_state)`.\"\"\"\n pass\n\n\nclass CustomHelper(Helper):\n \"\"\"Base abstract class that allows the user to customize sampling.\"\"\"\n\n def __init__(self, initialize_fn, sample_fn, next_inputs_fn,\n sample_ids_shape=None, sample_ids_dtype=None):\n \"\"\"Initializer.\n\n Args:\n initialize_fn: callable that returns 
`(finished, next_inputs)`\n for the first iteration.\n sample_fn: callable that takes `(time, outputs, state)`\n and emits tensor `sample_ids`.\n next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`\n and emits `(finished, next_inputs, next_state)`.\n sample_ids_shape: Either a list of integers, or a 1-D Tensor of type\n `int32`, the shape of each value in the `sample_ids` batch. Defaults to\n a scalar.\n sample_ids_dtype: The dtype of the `sample_ids` tensor. Defaults to int32.\n \"\"\"\n self._initialize_fn = initialize_fn\n self._sample_fn = sample_fn\n self._next_inputs_fn = next_inputs_fn\n self._batch_size = None\n self._sample_ids_shape = tensor_shape.TensorShape(sample_ids_shape or [])\n self._sample_ids_dtype = sample_ids_dtype or dtypes.int32\n\n @property\n def batch_size(self):\n if self._batch_size is None:\n raise ValueError(\"batch_size accessed before initialize was called\")\n return self._batch_size\n\n @property\n def sample_ids_shape(self):\n return self._sample_ids_shape\n\n @property\n def sample_ids_dtype(self):\n return self._sample_ids_dtype\n\n def initialize(self, name=None):\n with ops.name_scope(name, \"%sInitialize\" % type(self).__name__):\n (finished, next_inputs) = self._initialize_fn()\n if self._batch_size is None:\n self._batch_size = array_ops.size(finished)\n return (finished, next_inputs)\n\n def sample(self, time, outputs, state, name=None):\n with ops.name_scope(\n name, \"%sSample\" % type(self).__name__, (time, outputs, state)):\n return self._sample_fn(time=time, outputs=outputs, state=state)\n\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\n with ops.name_scope(\n name, \"%sNextInputs\" % type(self).__name__, (time, outputs, state)):\n return self._next_inputs_fn(\n time=time, outputs=outputs, state=state, sample_ids=sample_ids)\n\n\nclass TrainingHelper(Helper):\n \"\"\"A helper for use during training. 
Only reads inputs.\n\n Returned sample_ids are the argmax of the RNN output logits.\n \"\"\"\n\n def __init__(self, inputs, sequence_length, time_major=False, name=None):\n \"\"\"Initializer.\n\n Args:\n inputs: A (structure of) input tensors.\n sequence_length: An int32 vector tensor.\n time_major: Python bool. Whether the tensors in `inputs` are time major.\n If `False` (default), they are assumed to be batch major.\n name: Name scope for any created operations.\n\n Raises:\n ValueError: if `sequence_length` is not a 1D tensor.\n \"\"\"\n with ops.name_scope(name, \"TrainingHelper\", [inputs, sequence_length]):\n inputs = ops.convert_to_tensor(inputs, name=\"inputs\")\n self._inputs = inputs\n if not time_major:\n inputs = nest.map_structure(_transpose_batch_time, inputs)\n\n self._input_tas = nest.map_structure(_unstack_ta, inputs)\n self._sequence_length = ops.convert_to_tensor(\n sequence_length, name=\"sequence_length\")\n if self._sequence_length.get_shape().ndims != 1:\n raise ValueError(\n \"Expected sequence_length to be a vector, but received shape: %s\" %\n self._sequence_length.get_shape())\n\n self._zero_inputs = nest.map_structure(\n lambda inp: array_ops.zeros_like(inp[0, :]), inputs)\n\n self._batch_size = array_ops.size(sequence_length)\n\n @property\n def inputs(self):\n return self._inputs\n\n @property\n def sequence_length(self):\n return self._sequence_length\n\n @property\n def batch_size(self):\n return self._batch_size\n\n @property\n def sample_ids_shape(self):\n return tensor_shape.TensorShape([])\n\n @property\n def sample_ids_dtype(self):\n return dtypes.int32\n\n def initialize(self, name=None):\n with ops.name_scope(name, \"TrainingHelperInitialize\"):\n finished = math_ops.equal(0, self._sequence_length)\n all_finished = math_ops.reduce_all(finished)\n next_inputs = control_flow_ops.cond(\n all_finished, lambda: self._zero_inputs,\n lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))\n return (finished, 
next_inputs)\n\n def sample(self, time, outputs, name=None, **unused_kwargs):\n with ops.name_scope(name, \"TrainingHelperSample\", [time, outputs]):\n sample_ids = math_ops.cast(\n math_ops.argmax(outputs, axis=-1), dtypes.int32)\n return sample_ids\n\n def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):\n \"\"\"next_inputs_fn for TrainingHelper.\"\"\"\n with ops.name_scope(name, \"TrainingHelperNextInputs\",\n [time, outputs, state]):\n next_time = time + 1\n finished = (next_time >= self._sequence_length)\n all_finished = math_ops.reduce_all(finished)\n def read_from_ta(inp):\n return inp.read(next_time)\n next_inputs = control_flow_ops.cond(\n all_finished, lambda: self._zero_inputs,\n lambda: nest.map_structure(read_from_ta, self._input_tas))\n return (finished, next_inputs, state)\n\n\nclass ScheduledEmbeddingTrainingHelper(TrainingHelper):\n \"\"\"A training helper that adds scheduled sampling.\n\n Returns -1s for sample_ids where no sampling took place; valid sample id\n values elsewhere.\n \"\"\"\n\n def __init__(self, inputs, sequence_length, embedding, sampling_probability,\n time_major=False, seed=None, scheduling_seed=None, name=None):\n \"\"\"Initializer.\n\n Args:\n inputs: A (structure of) input tensors.\n sequence_length: An int32 vector tensor.\n embedding: A callable that takes a vector tensor of `ids` (argmax ids),\n or the `params` argument for `embedding_lookup`.\n sampling_probability: A 0D `float32` tensor: the probability of sampling\n categorically from the output ids instead of reading directly from the\n inputs.\n time_major: Python bool. 
Whether the tensors in `inputs` are time major.\n If `False` (default), they are assumed to be batch major.\n seed: The sampling seed.\n scheduling_seed: The schedule decision rule sampling seed.\n name: Name scope for any created operations.\n\n Raises:\n ValueError: if `sampling_probability` is not a scalar or vector.\n \"\"\"\n with ops.name_scope(name, \"ScheduledEmbeddingSamplingWrapper\",\n [embedding, sampling_probability]):\n if callable(embedding):\n self._embedding_fn = embedding\n else:\n self._embedding_fn = (\n lambda ids: embedding_ops.embedding_lookup(embedding, ids))\n self._sampling_probability = ops.convert_to_tensor(\n sampling_probability, name=\"sampling_probability\")\n if self._sampling_probability.get_shape().ndims not in (0, 1):\n raise ValueError(\n \"sampling_probability must be either a scalar or a vector. \"\n \"saw shape: %s\" % (self._sampling_probability.get_shape()))\n self._seed = seed\n self._scheduling_seed = scheduling_seed\n super(ScheduledEmbeddingTrainingHelper, self).__init__(\n inputs=inputs,\n sequence_length=sequence_length,\n time_major=time_major,\n name=name)\n\n def initialize(self, name=None):\n return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)\n\n def sample(self, time, outputs, state, name=None):\n with ops.name_scope(name, \"ScheduledEmbeddingTrainingHelperSample\",\n [time, outputs, state]):\n # Return -1s where we did not sample, and sample_ids elsewhere\n select_sample = bernoulli_sample(\n probs=self._sampling_probability,\n dtype=dtypes.bool,\n sample_shape=self.batch_size,\n seed=self._scheduling_seed)\n return array_ops.where(\n select_sample,\n categorical_sample(logits=outputs, seed=self._seed),\n gen_array_ops.fill([self.batch_size], -1))\n\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\n with ops.name_scope(name, \"ScheduledEmbeddingTrainingHelperNextInputs\",\n [time, outputs, state, sample_ids]):\n (finished, base_next_inputs, state) = (\n 
super(ScheduledEmbeddingTrainingHelper, self).next_inputs(\n time=time,\n outputs=outputs,\n state=state,\n sample_ids=sample_ids,\n name=name))\n\n def maybe_sample():\n \"\"\"Perform scheduled sampling.\"\"\"\n where_sampling = math_ops.cast(\n array_ops.where(sample_ids > -1), dtypes.int32)\n where_not_sampling = math_ops.cast(\n array_ops.where(sample_ids <= -1), dtypes.int32)\n sample_ids_sampling = array_ops.gather_nd(sample_ids, where_sampling)\n inputs_not_sampling = array_ops.gather_nd(\n base_next_inputs, where_not_sampling)\n sampled_next_inputs = self._embedding_fn(sample_ids_sampling)\n base_shape = array_ops.shape(base_next_inputs)\n return (array_ops.scatter_nd(indices=where_sampling,\n updates=sampled_next_inputs,\n shape=base_shape)\n + array_ops.scatter_nd(indices=where_not_sampling,\n updates=inputs_not_sampling,\n shape=base_shape))\n\n all_finished = math_ops.reduce_all(finished)\n next_inputs = control_flow_ops.cond(\n all_finished, lambda: base_next_inputs, maybe_sample)\n return (finished, next_inputs, state)\n\n\nclass ScheduledOutputTrainingHelper(TrainingHelper):\n \"\"\"A training helper that adds scheduled sampling directly to outputs.\n\n Returns False for sample_ids where no sampling took place; True elsewhere.\n \"\"\"\n\n def __init__(self, inputs, sequence_length, sampling_probability,\n time_major=False, seed=None, next_inputs_fn=None,\n auxiliary_inputs=None, name=None):\n \"\"\"Initializer.\n\n Args:\n inputs: A (structure) of input tensors.\n sequence_length: An int32 vector tensor.\n sampling_probability: A 0D `float32` tensor: the probability of sampling\n from the outputs instead of reading directly from the inputs.\n time_major: Python bool. Whether the tensors in `inputs` are time major.\n If `False` (default), they are assumed to be batch major.\n seed: The sampling seed.\n next_inputs_fn: (Optional) callable to apply to the RNN outputs to create\n the next input when sampling. 
If `None` (default), the RNN outputs will\n be used as the next inputs.\n auxiliary_inputs: An optional (structure of) auxiliary input tensors with\n a shape that matches `inputs` in all but (potentially) the final\n dimension. These tensors will be concatenated to the sampled output or\n the `inputs` when not sampling for use as the next input.\n name: Name scope for any created operations.\n\n Raises:\n ValueError: if `sampling_probability` is not a scalar or vector.\n \"\"\"\n with ops.name_scope(name, \"ScheduledOutputTrainingHelper\",\n [inputs, auxiliary_inputs, sampling_probability]):\n self._sampling_probability = ops.convert_to_tensor(\n sampling_probability, name=\"sampling_probability\")\n if self._sampling_probability.get_shape().ndims not in (0, 1):\n raise ValueError(\n \"sampling_probability must be either a scalar or a vector. \"\n \"saw shape: %s\" % (self._sampling_probability.get_shape()))\n\n if auxiliary_inputs is None:\n maybe_concatenated_inputs = inputs\n else:\n inputs = ops.convert_to_tensor(inputs, name=\"inputs\")\n auxiliary_inputs = ops.convert_to_tensor(\n auxiliary_inputs, name=\"auxiliary_inputs\")\n maybe_concatenated_inputs = nest.map_structure(\n lambda x, y: array_ops.concat((x, y), -1),\n inputs, auxiliary_inputs)\n if not time_major:\n auxiliary_inputs = nest.map_structure(\n _transpose_batch_time, auxiliary_inputs)\n\n self._auxiliary_input_tas = (\n nest.map_structure(_unstack_ta, auxiliary_inputs)\n if auxiliary_inputs is not None else None)\n\n self._seed = seed\n\n self._next_inputs_fn = next_inputs_fn\n\n super(ScheduledOutputTrainingHelper, self).__init__(\n inputs=maybe_concatenated_inputs,\n sequence_length=sequence_length,\n time_major=time_major,\n name=name)\n\n def initialize(self, name=None):\n return super(ScheduledOutputTrainingHelper, self).initialize(name=name)\n\n def sample(self, time, outputs, state, name=None):\n with ops.name_scope(name, \"ScheduledOutputTrainingHelperSample\",\n [time, outputs, 
state]):\n return bernoulli_sample(\n probs=self._sampling_probability,\n sample_shape=self.batch_size,\n seed=self._seed)\n\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\n with ops.name_scope(name, \"ScheduledOutputTrainingHelperNextInputs\",\n [time, outputs, state, sample_ids]):\n (finished, base_next_inputs, state) = (\n super(ScheduledOutputTrainingHelper, self).next_inputs(\n time=time,\n outputs=outputs,\n state=state,\n sample_ids=sample_ids,\n name=name))\n sample_ids = math_ops.cast(sample_ids, dtypes.bool)\n\n def maybe_sample():\n \"\"\"Perform scheduled sampling.\"\"\"\n\n def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):\n \"\"\"Concatenate outputs with auxiliary inputs, if they exist.\"\"\"\n if self._auxiliary_input_tas is None:\n return outputs_\n\n next_time = time + 1\n auxiliary_inputs = nest.map_structure(\n lambda ta: ta.read(next_time), self._auxiliary_input_tas)\n if indices is not None:\n auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)\n return nest.map_structure(\n lambda x, y: array_ops.concat((x, y), -1),\n outputs_, auxiliary_inputs)\n\n if self._next_inputs_fn is None:\n return array_ops.where(\n sample_ids, maybe_concatenate_auxiliary_inputs(outputs),\n base_next_inputs)\n\n where_sampling = math_ops.cast(\n array_ops.where(sample_ids), dtypes.int32)\n where_not_sampling = math_ops.cast(\n array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)\n outputs_sampling = array_ops.gather_nd(outputs, where_sampling)\n inputs_not_sampling = array_ops.gather_nd(base_next_inputs,\n where_not_sampling)\n sampled_next_inputs = maybe_concatenate_auxiliary_inputs(\n self._next_inputs_fn(outputs_sampling), where_sampling)\n\n base_shape = array_ops.shape(base_next_inputs)\n return (array_ops.scatter_nd(indices=where_sampling,\n updates=sampled_next_inputs,\n shape=base_shape)\n + array_ops.scatter_nd(indices=where_not_sampling,\n updates=inputs_not_sampling,\n shape=base_shape))\n\n 
all_finished = math_ops.reduce_all(finished)\n no_samples = math_ops.logical_not(math_ops.reduce_any(sample_ids))\n next_inputs = control_flow_ops.cond(\n math_ops.logical_or(all_finished, no_samples),\n lambda: base_next_inputs, maybe_sample)\n return (finished, next_inputs, state)\n\n\nclass GreedyEmbeddingHelper(Helper):\n \"\"\"A helper for use during inference.\n\n Uses the argmax of the output (treated as logits) and passes the\n result through an embedding layer to get the next input.\n \"\"\"\n\n def __init__(self, embedding, start_tokens, end_token):\n \"\"\"Initializer.\n\n Args:\n embedding: A callable that takes a vector tensor of `ids` (argmax ids),\n or the `params` argument for `embedding_lookup`. The returned tensor\n will be passed to the decoder input.\n start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.\n end_token: `int32` scalar, the token that marks end of decoding.\n\n Raises:\n ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a\n scalar.\n \"\"\"\n if callable(embedding):\n self._embedding_fn = embedding\n else:\n self._embedding_fn = (\n lambda ids: embedding_ops.embedding_lookup(embedding, ids))\n\n self._start_tokens = ops.convert_to_tensor(\n start_tokens, dtype=dtypes.int32, name=\"start_tokens\")\n self._end_token = ops.convert_to_tensor(\n end_token, dtype=dtypes.int32, name=\"end_token\")\n if self._start_tokens.get_shape().ndims != 1:\n raise ValueError(\"start_tokens must be a vector\")\n self._batch_size = array_ops.size(start_tokens)\n if self._end_token.get_shape().ndims != 0:\n raise ValueError(\"end_token must be a scalar\")\n self._start_inputs = self._embedding_fn(self._start_tokens)\n\n @property\n def batch_size(self):\n return self._batch_size\n\n @property\n def sample_ids_shape(self):\n return tensor_shape.TensorShape([])\n\n @property\n def sample_ids_dtype(self):\n return dtypes.int32\n\n def initialize(self, name=None):\n finished = array_ops.tile([False], 
[self._batch_size])\n return (finished, self._start_inputs)\n\n def sample(self, time, outputs, state, name=None):\n \"\"\"sample for GreedyEmbeddingHelper.\"\"\"\n del time, state # unused by sample_fn\n # Outputs are logits, use argmax to get the most probable id\n if not isinstance(outputs, ops.Tensor):\n raise TypeError(\"Expected outputs to be a single Tensor, got: %s\" %\n type(outputs))\n sample_ids = math_ops.argmax(outputs, axis=-1, output_type=dtypes.int32)\n return sample_ids\n\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\n \"\"\"next_inputs_fn for GreedyEmbeddingHelper.\"\"\"\n del time, outputs # unused by next_inputs_fn\n finished = math_ops.equal(sample_ids, self._end_token)\n all_finished = math_ops.reduce_all(finished)\n next_inputs = control_flow_ops.cond(\n all_finished,\n # If we're finished, the next_inputs value doesn't matter\n lambda: self._start_inputs,\n lambda: self._embedding_fn(sample_ids))\n return (finished, next_inputs, state)\n\n\nclass SampleEmbeddingHelper(GreedyEmbeddingHelper):\n \"\"\"A helper for use during inference.\n\n Uses sampling (from a distribution) instead of argmax and passes the\n result through an embedding layer to get the next input.\n \"\"\"\n\n def __init__(self, embedding, start_tokens, end_token,\n softmax_temperature=None, seed=None):\n \"\"\"Initializer.\n\n Args:\n embedding: A callable that takes a vector tensor of `ids` (argmax ids),\n or the `params` argument for `embedding_lookup`. The returned tensor\n will be passed to the decoder input.\n start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.\n end_token: `int32` scalar, the token that marks end of decoding.\n softmax_temperature: (Optional) `float32` scalar, value to divide the\n logits by before computing the softmax. Larger values (above 1.0) result\n in more random samples, while smaller values push the sampling\n distribution towards the argmax. 
Must be strictly greater than 0.\n Defaults to 1.0.\n seed: (Optional) The sampling seed.\n\n Raises:\n ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a\n scalar.\n \"\"\"\n super(SampleEmbeddingHelper, self).__init__(\n embedding, start_tokens, end_token)\n self._softmax_temperature = softmax_temperature\n self._seed = seed\n\n def sample(self, time, outputs, state, name=None):\n \"\"\"sample for SampleEmbeddingHelper.\"\"\"\n del time, state # unused by sample_fn\n # Outputs are logits, we sample instead of argmax (greedy).\n if not isinstance(outputs, ops.Tensor):\n raise TypeError(\"Expected outputs to be a single Tensor, got: %s\" %\n type(outputs))\n if self._softmax_temperature is None:\n logits = outputs\n else:\n logits = outputs / self._softmax_temperature\n\n sample_ids = categorical_sample(logits=logits, seed=self._seed)\n\n return sample_ids\n\n\nclass InferenceHelper(Helper):\n \"\"\"A helper to use during inference with a custom sampling function.\"\"\"\n\n def __init__(self, sample_fn, sample_shape, sample_dtype,\n start_inputs, end_fn, next_inputs_fn=None):\n \"\"\"Initializer.\n\n Args:\n sample_fn: A callable that takes `outputs` and emits tensor `sample_ids`.\n sample_shape: Either a list of integers, or a 1-D Tensor of type `int32`,\n the shape of the each sample in the batch returned by `sample_fn`.\n sample_dtype: the dtype of the sample returned by `sample_fn`.\n start_inputs: The initial batch of inputs.\n end_fn: A callable that takes `sample_ids` and emits a `bool` vector\n shaped `[batch_size]` indicating whether each sample is an end token.\n next_inputs_fn: (Optional) A callable that takes `sample_ids` and returns\n the next batch of inputs. 
If not provided, `sample_ids` is used as the\n next batch of inputs.\n \"\"\"\n self._sample_fn = sample_fn\n self._end_fn = end_fn\n self._sample_shape = tensor_shape.TensorShape(sample_shape)\n self._sample_dtype = sample_dtype\n self._next_inputs_fn = next_inputs_fn\n self._batch_size = array_ops.shape(start_inputs)[0]\n self._start_inputs = ops.convert_to_tensor(\n start_inputs, name=\"start_inputs\")\n\n @property\n def batch_size(self):\n return self._batch_size\n\n @property\n def sample_ids_shape(self):\n return self._sample_shape\n\n @property\n def sample_ids_dtype(self):\n return self._sample_dtype\n\n def initialize(self, name=None):\n finished = array_ops.tile([False], [self._batch_size])\n return (finished, self._start_inputs)\n\n def sample(self, time, outputs, state, name=None):\n del time, state # unused by sample\n return self._sample_fn(outputs)\n\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\n del time, outputs # unused by next_inputs\n if self._next_inputs_fn is None:\n next_inputs = sample_ids\n else:\n next_inputs = self._next_inputs_fn(sample_ids)\n finished = self._end_fn(sample_ids)\n return (finished, next_inputs, state)\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Base class for testing serializable datasets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\n\nfrom tensorflow.python.data.experimental.ops import iterator_ops as contrib_iterator_ops\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import iterator_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import checkpoint_management\nfrom tensorflow.python.training import saver as saver_lib\nfrom tensorflow.python.util import nest\n\n\ndef remove_variants(get_next_op):\n # TODO(b/72408568): Remove this once session.run can get\n # variant tensors.\n \"\"\"Remove variants from a nest structure, so sess.run will execute.\"\"\"\n\n def _remove_variant(x):\n if isinstance(x, ops.Tensor) and x.dtype == dtypes.variant:\n return ()\n else:\n return x\n\n return 
nest.map_structure(_remove_variant, get_next_op)\n\n\nclass DatasetSerializationTestBase(test.TestCase):\n \"\"\"Base class for testing serializable datasets.\"\"\"\n\n def tearDown(self):\n self._delete_ckpt()\n\n # TODO(b/72657739): Remove sparse_tensor argument, which is to test the\n # (deprecated) saveable `SparseTensorSliceDataset`, once the API\n # `from_sparse_tensor_slices()`and related tests are deleted.\n def run_core_tests(self, ds_fn1, ds_fn2, num_outputs, sparse_tensors=False):\n \"\"\"Runs the core tests.\n\n Args:\n ds_fn1: 0-argument function that returns a Dataset.\n ds_fn2: 0-argument function that returns a Dataset different from\n ds_fn1. If None, verify_restore_in_modified_graph test is not run.\n num_outputs: Total number of outputs expected from this Dataset.\n sparse_tensors: Whether dataset is built from SparseTensor(s).\n\n Raises:\n AssertionError if any test fails.\n \"\"\"\n # NOTE: We disable all default optimizations in serialization tests in order\n # to test the actual dataset in question.\n options = dataset_ops.Options()\n options.experimental_optimization.apply_default_optimizations = False\n\n def ds_fn1_no_opt():\n return ds_fn1().with_options(options)\n\n self.verify_unused_iterator(\n ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)\n self.verify_fully_used_iterator(\n ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)\n self.verify_exhausted_iterator(\n ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)\n self.verify_init_before_restore(\n ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)\n self.verify_multiple_breaks(\n ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)\n self.verify_reset_restored_iterator(\n ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)\n self.verify_restore_in_empty_graph(\n ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)\n if ds_fn2:\n\n def ds_fn2_no_opt():\n return ds_fn2().with_options(options)\n\n 
self.verify_restore_in_modified_graph(\n ds_fn1_no_opt,\n ds_fn2_no_opt,\n num_outputs,\n sparse_tensors=sparse_tensors)\n\n def verify_unused_iterator(self,\n ds_fn,\n num_outputs,\n sparse_tensors=False,\n verify_exhausted=True):\n \"\"\"Verifies that saving and restoring an unused iterator works.\n\n Args:\n ds_fn: See `run_core_tests`.\n num_outputs: See `run_core_tests`.\n sparse_tensors: See `run_core_tests`.\n verify_exhausted: See `gen_outputs`.\n\n Raises:\n AssertionError if any test fails.\n \"\"\"\n self.verify_run_with_breaks(\n ds_fn, [0],\n num_outputs,\n sparse_tensors=sparse_tensors,\n verify_exhausted=verify_exhausted)\n\n def verify_fully_used_iterator(self, ds_fn, num_outputs,\n sparse_tensors=False):\n \"\"\"Verifies that saving and restoring a fully used iterator works.\n\n Note that this only checks saving and restoring an iterator from which\n `num_outputs` items have been produced but does not check for an\n exhausted iterator, i.e., one from which an OutOfRange error has been\n returned.\n\n Args:\n ds_fn: See `run_core_tests`.\n num_outputs: See `run_core_tests`.\n sparse_tensors: See `run_core_tests`.\n\n Raises:\n AssertionError if test fails.\n \"\"\"\n self.verify_run_with_breaks(\n ds_fn, [num_outputs], num_outputs, sparse_tensors=sparse_tensors)\n\n def verify_exhausted_iterator(self, ds_fn, num_outputs, sparse_tensors=False):\n \"\"\"Verifies that saving and restoring an exhausted iterator works.\n\n An exhausted iterator is one which has returned an OutOfRange error.\n\n Args:\n ds_fn: See `run_core_tests`.\n num_outputs: See `run_core_tests`.\n sparse_tensors: See `run_core_tests`.\n\n Raises:\n AssertionError if any test fails.\n \"\"\"\n self.gen_outputs(\n ds_fn, [],\n num_outputs,\n verify_exhausted=True,\n sparse_tensors=sparse_tensors)\n actual = self.gen_outputs(\n ds_fn, [],\n 0,\n ckpt_saved=True,\n verify_exhausted=True,\n sparse_tensors=sparse_tensors)\n self.assertEqual(len(actual), 0)\n\n def 
verify_init_before_restore(self,\n ds_fn,\n num_outputs,\n sparse_tensors=False,\n verify_exhausted=True):\n \"\"\"Verifies that restoring into an already initialized iterator works.\n\n Args:\n ds_fn: See `run_core_tests`.\n num_outputs: See `run_core_tests`.\n sparse_tensors: See `run_core_tests`.\n verify_exhausted: See `gen_outputs`.\n\n Raises:\n AssertionError if any test fails.\n \"\"\"\n self.verify_run_with_breaks(\n ds_fn,\n self.gen_break_points(num_outputs),\n num_outputs,\n init_before_restore=True,\n sparse_tensors=sparse_tensors,\n verify_exhausted=verify_exhausted)\n\n def verify_multiple_breaks(self,\n ds_fn,\n num_outputs,\n num_breaks=10,\n sparse_tensors=False,\n verify_exhausted=True):\n \"\"\"Attempts to save/restore at multiple break points.\n\n Args:\n ds_fn: See `run_core_tests`.\n num_outputs: See `run_core_tests`.\n num_breaks: The number of break points. These are uniformly spread in\n [0, num_outputs] both inclusive.\n sparse_tensors: See `run_core_tests`.\n verify_exhausted: See `gen_outputs`.\n\n Raises:\n AssertionError if any test fails.\n \"\"\"\n self.verify_run_with_breaks(\n ds_fn,\n self.gen_break_points(num_outputs, num_breaks),\n num_outputs,\n sparse_tensors=sparse_tensors,\n verify_exhausted=verify_exhausted)\n\n def verify_reset_restored_iterator(self,\n ds_fn,\n num_outputs,\n break_point=None,\n sparse_tensors=False,\n verify_exhausted=True):\n \"\"\"Attempts to re-initialize a restored iterator.\n\n This is useful when restoring a training checkpoint during validation.\n\n Args:\n ds_fn: See `run_core_tests`.\n num_outputs: See `run_core_tests`.\n break_point: Break point. Optional. 
Defaults to num_outputs/2.\n sparse_tensors: See `run_core_tests`.\n verify_exhausted: See `gen_outputs`.\n\n Raises:\n AssertionError if any test fails.\n \"\"\"\n break_point = num_outputs // 2 if not break_point else break_point\n\n # Collect ground truth containing all outputs.\n expected = self.gen_outputs(\n ds_fn, [],\n num_outputs,\n sparse_tensors=sparse_tensors,\n verify_exhausted=verify_exhausted)\n\n # Skip some items and save checkpoint.\n self.gen_outputs(\n ds_fn, [],\n break_point,\n sparse_tensors=sparse_tensors,\n verify_exhausted=False)\n\n actual = []\n # Restore from checkpoint and then run init_op.\n with ops.Graph().as_default() as g:\n saver = self._import_meta_graph()\n init_op, get_next_op = self._get_iterator_ops_from_collection(\n ds_fn, sparse_tensors=sparse_tensors)\n get_next_op = remove_variants(get_next_op)\n with self.session(graph=g) as sess:\n self._restore(saver, sess)\n self._initialize(init_op, sess)\n for _ in range(num_outputs):\n actual.append(sess.run(get_next_op))\n if verify_exhausted:\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next_op)\n self.match(expected, actual)\n\n def verify_restore_in_modified_graph(self,\n ds_fn1,\n ds_fn2,\n num_outputs,\n break_point=None,\n sparse_tensors=False,\n verify_exhausted=True):\n \"\"\"Attempts to restore an iterator in a modified graph.\n\n Builds an input pipeline using ds_fn1, runs it for `break_point` steps\n and saves a checkpoint. Then builds a new graph using ds_fn2, restores\n the checkpoint from ds_fn1 and verifies that the restore is successful.\n\n Args:\n ds_fn1: See `run_core_tests`.\n ds_fn2: See `run_core_tests`.\n num_outputs: See `run_core_tests`.\n break_point: Break point. Optional. 
Defaults to num_outputs/2.\n sparse_tensors: See `run_core_tests`.\n verify_exhausted: See `gen_outputs`.\n\n Raises:\n AssertionError if any test fails.\n \"\"\"\n break_point = num_outputs // 2 if not break_point else break_point\n\n # Skip `break_point` items and store the remaining produced from ds_fn1\n # in `expected`.\n self.gen_outputs(\n ds_fn1, [],\n break_point,\n sparse_tensors=sparse_tensors,\n verify_exhausted=False)\n expected = self.gen_outputs(\n ds_fn1, [],\n num_outputs - break_point,\n ckpt_saved=True,\n sparse_tensors=sparse_tensors,\n verify_exhausted=verify_exhausted)\n\n # Generate `break_point` items from ds_fn1 and save checkpoint.\n self.gen_outputs(\n ds_fn1, [],\n break_point,\n sparse_tensors=sparse_tensors,\n verify_exhausted=False)\n\n actual = []\n # Build graph for ds_fn2 but load checkpoint for ds_fn1.\n with ops.Graph().as_default() as g:\n _, get_next_op, saver = self._build_graph(\n ds_fn2, sparse_tensors=sparse_tensors)\n get_next_op = remove_variants(get_next_op)\n with self.session(graph=g) as sess:\n self._restore(saver, sess)\n for _ in range(num_outputs - break_point):\n actual.append(sess.run(get_next_op))\n if verify_exhausted:\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next_op)\n\n self.match(expected, actual)\n\n def verify_restore_in_empty_graph(self,\n ds_fn,\n num_outputs,\n break_point=None,\n sparse_tensors=False,\n verify_exhausted=True):\n \"\"\"Attempts to restore an iterator in an empty graph.\n\n Builds an input pipeline using ds_fn, runs it for `break_point` steps\n and saves a checkpoint. Then builds a new empty graph, restores\n the checkpoint from ds_fn and verifies that the restore is successful.\n\n Args:\n ds_fn: See `run_core_tests`.\n num_outputs: See `run_core_tests`.\n break_point: Break point. Optional. 
Defaults to num_outputs/2.\n sparse_tensors: See `run_core_tests`.\n verify_exhausted: See `gen_outputs`.\n\n Raises:\n AssertionError if any test fails.\n \"\"\"\n break_point = num_outputs // 2 if not break_point else break_point\n\n # Skip `break_point` items and store the remaining produced from ds_fn\n # in `expected`.\n self.gen_outputs(\n ds_fn, [],\n break_point,\n sparse_tensors=sparse_tensors,\n verify_exhausted=False)\n expected = self.gen_outputs(\n ds_fn, [],\n num_outputs - break_point,\n ckpt_saved=True,\n sparse_tensors=sparse_tensors,\n verify_exhausted=verify_exhausted)\n\n # Generate `break_point` items from ds_fn and save checkpoint.\n self.gen_outputs(\n ds_fn, [],\n break_point,\n sparse_tensors=sparse_tensors,\n verify_exhausted=False)\n\n actual = []\n # Build an empty graph but load checkpoint for ds_fn.\n with ops.Graph().as_default() as g:\n get_next_op, saver = self._build_empty_graph(\n ds_fn, sparse_tensors=sparse_tensors)\n get_next_op = remove_variants(get_next_op)\n with self.session(graph=g) as sess:\n self._restore(saver, sess)\n for _ in range(num_outputs - break_point):\n actual.append(sess.run(get_next_op))\n if verify_exhausted:\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next_op)\n\n self.match(expected, actual)\n\n def verify_error_on_save(self,\n ds_fn,\n num_outputs,\n error,\n break_point=None,\n sparse_tensors=False):\n \"\"\"Attempts to save a non-saveable iterator.\n\n Args:\n ds_fn: See `run_core_tests`.\n num_outputs: See `run_core_tests`.\n error: Declared error when trying to save iterator.\n break_point: Break point. Optional. 
Defaults to num_outputs/2.\n sparse_tensors: See `run_core_tests`.\n\n Raises:\n AssertionError if any test fails.\n \"\"\"\n\n break_point = num_outputs // 2 if not break_point else break_point\n with ops.Graph().as_default() as g:\n init_op, get_next_op, saver = self._build_graph(\n ds_fn, sparse_tensors=sparse_tensors)\n get_next_op = remove_variants(get_next_op)\n with self.session(graph=g) as sess:\n self._initialize(init_op, sess)\n for _ in range(break_point):\n sess.run(get_next_op)\n with self.assertRaises(error):\n self._save(sess, saver)\n\n def verify_run_with_breaks(self,\n ds_fn,\n break_points,\n num_outputs,\n init_before_restore=False,\n sparse_tensors=False,\n verify_exhausted=True):\n \"\"\"Verifies that ds_fn() produces the same outputs with and without breaks.\n\n 1. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it\n *without* stopping at break points.\n 2. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it\n with stopping at break points.\n\n Deep matches outputs from 1 and 2.\n\n Args:\n ds_fn: See `gen_outputs`.\n break_points: See `gen_outputs`.\n num_outputs: See `gen_outputs`.\n init_before_restore: See `gen_outputs`.\n sparse_tensors: See `run_core_tests`.\n verify_exhausted: See `gen_outputs`.\n\n Raises:\n AssertionError if any test fails.\n \"\"\"\n expected = self.gen_outputs(\n ds_fn, [],\n num_outputs,\n init_before_restore=init_before_restore,\n sparse_tensors=sparse_tensors,\n verify_exhausted=verify_exhausted)\n\n actual = self.gen_outputs(\n ds_fn,\n break_points,\n num_outputs,\n init_before_restore=init_before_restore,\n sparse_tensors=sparse_tensors,\n verify_exhausted=verify_exhausted)\n\n self.match(expected, actual)\n\n def gen_outputs(self,\n ds_fn,\n break_points,\n num_outputs,\n ckpt_saved=False,\n init_before_restore=False,\n sparse_tensors=False,\n verify_exhausted=True,\n save_checkpoint_at_end=True):\n \"\"\"Generates elements from input dataset while stopping at break 
points.\n\n Produces `num_outputs` outputs and saves the state of the iterator in the\n Saver checkpoint.\n\n Args:\n ds_fn: 0-argument function that returns the dataset.\n break_points: A list of integers. For each `break_point` in\n `break_points`, we produce outputs till `break_point` number of items\n have been produced and then checkpoint the state. The current graph\n and session are destroyed and a new graph and session are used to\n produce outputs till next checkpoint or till `num_outputs` elements\n have been produced. `break_point` must be <= `num_outputs`.\n num_outputs: The total number of outputs to produce from the iterator.\n ckpt_saved: Whether a checkpoint already exists. If False, we build the\n graph from ds_fn.\n init_before_restore: Whether init should be called before saver.restore.\n This is just so that we can verify that restoring an already initialized\n iterator works.\n sparse_tensors: Whether dataset is built from SparseTensor(s).\n verify_exhausted: Whether to verify that the iterator has been exhausted\n after producing `num_outputs` elements.\n save_checkpoint_at_end: Whether to save a checkpoint after producing all\n outputs. If False, checkpoints are saved each break point but not at the\n end. Note that checkpoints overwrite each other so there is always only\n a single checkpoint available. 
Defaults to True.\n\n Returns:\n A list of `num_outputs` items.\n \"\"\"\n outputs = []\n\n def get_ops():\n if ckpt_saved:\n saver = self._import_meta_graph()\n init_op, get_next_op = self._get_iterator_ops_from_collection(\n ds_fn, sparse_tensors=sparse_tensors)\n else:\n init_op, get_next_op, saver = self._build_graph(\n ds_fn, sparse_tensors=sparse_tensors)\n return init_op, get_next_op, saver\n\n for i in range(len(break_points) + 1):\n with ops.Graph().as_default() as g:\n init_op, get_next_op, saver = get_ops()\n get_next_op = remove_variants(get_next_op)\n with self.session(graph=g) as sess:\n if ckpt_saved:\n if init_before_restore:\n self._initialize(init_op, sess)\n self._restore(saver, sess)\n else:\n self._initialize(init_op, sess)\n start = break_points[i - 1] if i > 0 else 0\n end = break_points[i] if i < len(break_points) else num_outputs\n num_iters = end - start\n for _ in range(num_iters):\n outputs.append(sess.run(get_next_op))\n if i == len(break_points) and verify_exhausted:\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next_op)\n if save_checkpoint_at_end or i < len(break_points):\n self._save(sess, saver)\n ckpt_saved = True\n\n return outputs\n\n def match(self, expected, actual):\n \"\"\"Matches nested structures.\n\n Recursively matches shape and values of `expected` and `actual`.\n Handles scalars, numpy arrays and other python sequence containers\n e.g. 
list, dict.\n\n Args:\n expected: Nested structure 1.\n actual: Nested structure 2.\n\n Raises:\n AssertionError if matching fails.\n \"\"\"\n if isinstance(expected, np.ndarray):\n expected = expected.tolist()\n if isinstance(actual, np.ndarray):\n actual = actual.tolist()\n self.assertEqual(type(expected), type(actual))\n\n if nest.is_sequence(expected):\n self.assertEqual(len(expected), len(actual))\n if isinstance(expected, dict):\n for key1, key2 in zip(sorted(expected), sorted(actual)):\n self.assertEqual(key1, key2)\n self.match(expected[key1], actual[key2])\n else:\n for item1, item2 in zip(expected, actual):\n self.match(item1, item2)\n else:\n self.assertEqual(expected, actual)\n\n def does_not_match(self, expected, actual):\n with self.assertRaises(AssertionError):\n self.match(expected, actual)\n\n def gen_break_points(self, num_outputs, num_samples=10):\n \"\"\"Generates `num_samples` breaks points in [0, num_outputs].\"\"\"\n return np.linspace(0, num_outputs, num_samples, dtype=int)\n\n def _build_graph(self, ds_fn, sparse_tensors=False):\n iterator = dataset_ops.make_initializable_iterator(ds_fn())\n\n saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator)\n ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)\n init_op = iterator.initializer\n if sparse_tensors:\n get_next = sparse_tensor.SparseTensor(*iterator.get_next())\n else:\n get_next = iterator.get_next()\n self._add_iterator_ops_to_collection(init_op, get_next, ds_fn,\n sparse_tensors)\n saver = saver_lib.Saver(allow_empty=True)\n return init_op, get_next, saver\n\n def _build_empty_graph(self, ds_fn, sparse_tensors=False):\n iterator = iterator_ops.Iterator.from_structure(\n self._get_output_types(ds_fn),\n output_shapes=self._get_output_shapes(ds_fn),\n output_classes=self._get_output_classes(ds_fn))\n saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator)\n ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)\n if sparse_tensors:\n 
get_next = sparse_tensor.SparseTensor(*iterator.get_next())\n else:\n get_next = iterator.get_next()\n saver = saver_lib.Saver(allow_empty=True)\n return get_next, saver\n\n def _add_iterator_ops_to_collection(self,\n init_op,\n get_next,\n ds_fn,\n sparse_tensors=False):\n ops.add_to_collection(\"iterator_ops\", init_op)\n # `get_next` may be a tuple e.g. in TensorSliceDataset. Since Collections\n # do not support tuples we flatten the tensors and restore the shape in\n # `_get_iterator_ops_from_collection`.\n if sparse_tensors: # specific for deprecated `from_sparse_tensor_slices`.\n ops.add_to_collection(\"iterator_ops\", get_next.indices)\n ops.add_to_collection(\"iterator_ops\", get_next.values)\n ops.add_to_collection(\"iterator_ops\", get_next.dense_shape)\n return\n\n get_next_list = nest.flatten(get_next)\n for i, output_class in enumerate(\n nest.flatten(self._get_output_classes(ds_fn))):\n if output_class is sparse_tensor.SparseTensor:\n ops.add_to_collection(\"iterator_ops\", get_next_list[i].indices)\n ops.add_to_collection(\"iterator_ops\", get_next_list[i].values)\n ops.add_to_collection(\"iterator_ops\", get_next_list[i].dense_shape)\n else:\n ops.add_to_collection(\"iterator_ops\", get_next_list[i])\n\n def _get_iterator_ops_from_collection(self, ds_fn, sparse_tensors=False):\n all_ops = ops.get_collection(\"iterator_ops\")\n if sparse_tensors: # specific for deprecated `from_sparse_tensor_slices`.\n init_op, indices, values, dense_shape = all_ops\n return init_op, sparse_tensor.SparseTensor(indices, values, dense_shape)\n get_next_list = []\n i = 1\n for output_class in nest.flatten(self._get_output_classes(ds_fn)):\n if output_class is sparse_tensor.SparseTensor:\n indices, values, dense_shape = all_ops[i:i + 3]\n i += 3\n get_next_list.append(\n sparse_tensor.SparseTensor(indices, values, dense_shape))\n else:\n get_next_list.append(all_ops[i])\n i += 1\n return all_ops[0], nest.pack_sequence_as(\n self._get_output_types(ds_fn), 
get_next_list)\n\n def _get_output_types(self, ds_fn):\n with ops.Graph().as_default():\n return dataset_ops.get_legacy_output_types(ds_fn())\n\n def _get_output_shapes(self, ds_fn):\n with ops.Graph().as_default():\n return dataset_ops.get_legacy_output_shapes(ds_fn())\n\n def _get_output_classes(self, ds_fn):\n with ops.Graph().as_default():\n return dataset_ops.get_legacy_output_classes(ds_fn())\n\n def _ckpt_path(self):\n return os.path.join(self.get_temp_dir(), \"iterator\")\n\n def _latest_ckpt(self):\n return checkpoint_management.latest_checkpoint(self.get_temp_dir())\n\n def _save(self, sess, saver):\n saver.save(sess, self._ckpt_path())\n\n def _restore(self, saver, sess):\n sess.run(lookup_ops.tables_initializer())\n saver.restore(sess, self._latest_ckpt())\n\n def _initialize(self, init_op, sess):\n sess.run(variables.global_variables_initializer())\n sess.run(lookup_ops.tables_initializer())\n sess.run(init_op)\n\n def _import_meta_graph(self):\n meta_file_path = self._ckpt_path() + \".meta\"\n return saver_lib.import_meta_graph(meta_file_path)\n\n def _delete_ckpt(self):\n # Remove all checkpoint files.\n prefix = self._ckpt_path()\n pattern = prefix + \"*\"\n files = gfile.Glob(pattern)\n map(gfile.Remove, files)\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tf.data.Dataset.list_files()`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom os import path\nimport shutil\nimport tempfile\n\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import compat\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass ListFilesTest(test_base.DatasetTestBase):\n\n def setUp(self):\n self.tmp_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.tmp_dir, ignore_errors=True)\n\n def _touchTempFiles(self, filenames):\n for filename in filenames:\n open(path.join(self.tmp_dir, filename), 'a').close()\n\n # Note: eager mode fails in assertion error same as initializer in graph mode.\n @test_util.run_deprecated_v1\n def testSkipEagerEmptyDirectory(self):\n dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))\n self.assertDatasetProduces(dataset, expected_output=[])\n\n def testSimpleDirectory(self):\n filenames = ['a', 'b', 'c']\n self._touchTempFiles(filenames)\n\n dataset = 
dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))\n self.assertDatasetProduces(\n dataset,\n expected_output=[\n compat.as_bytes(path.join(self.tmp_dir, filename))\n for filename in filenames\n ],\n assert_items_equal=True)\n\n def testSimpleDirectoryNotShuffled(self):\n filenames = ['b', 'c', 'a']\n self._touchTempFiles(filenames)\n\n dataset = dataset_ops.Dataset.list_files(\n path.join(self.tmp_dir, '*'), shuffle=False)\n self.assertDatasetProduces(\n dataset,\n expected_output=[\n compat.as_bytes(path.join(self.tmp_dir, filename))\n for filename in sorted(filenames)\n ])\n\n def testFixedSeedResultsInRepeatableOrder(self):\n filenames = ['a', 'b', 'c']\n self._touchTempFiles(filenames)\n\n dataset = dataset_ops.Dataset.list_files(\n path.join(self.tmp_dir, '*'), shuffle=True, seed=37)\n\n expected_filenames = [\n compat.as_bytes(path.join(self.tmp_dir, filename))\n for filename in filenames\n ]\n\n all_actual_filenames = []\n for _ in range(3):\n actual_filenames = []\n next_element = self.getNext(dataset, requires_initialization=True)\n try:\n while True:\n actual_filenames.append(self.evaluate(next_element()))\n except errors.OutOfRangeError:\n pass\n all_actual_filenames.append(actual_filenames)\n\n # Each run should produce the same set of filenames, which may be\n # different from the order of `expected_filenames`.\n self.assertItemsEqual(expected_filenames, all_actual_filenames[0])\n # However, the different runs should produce filenames in the same order\n # as each other.\n self.assertEqual(all_actual_filenames[0], all_actual_filenames[1])\n self.assertEqual(all_actual_filenames[0], all_actual_filenames[2])\n\n def tesEmptyDirectoryInitializer(self):\n\n def dataset_fn():\n return dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))\n\n self.assertDatasetProduces(\n dataset_fn(),\n expected_error=(errors.InvalidArgumentError,\n 'No files matched pattern'),\n requires_initialization=True)\n\n def testSimpleDirectoryInitializer(self):\n 
filenames = ['a', 'b', 'c']\n self._touchTempFiles(filenames)\n\n dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))\n self.assertDatasetProduces(\n dataset,\n expected_output=[\n compat.as_bytes(path.join(self.tmp_dir, filename))\n for filename in filenames\n ],\n assert_items_equal=True)\n\n def testFileSuffixes(self):\n filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']\n self._touchTempFiles(filenames)\n\n dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*.py'))\n self.assertDatasetProduces(\n dataset,\n expected_output=[\n compat.as_bytes(path.join(self.tmp_dir, filename))\n for filename in filenames[1:-1]\n ],\n assert_items_equal=True)\n\n def testFileMiddles(self):\n filenames = ['a.txt', 'b.py', 'c.pyc']\n self._touchTempFiles(filenames)\n\n dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*.py*'))\n self.assertDatasetProduces(\n dataset,\n expected_output=[\n compat.as_bytes(path.join(self.tmp_dir, filename))\n for filename in filenames[1:]\n ],\n assert_items_equal=True)\n\n def testNoShuffle(self):\n filenames = ['a', 'b', 'c']\n self._touchTempFiles(filenames)\n\n # Repeat the list twice and ensure that the order is the same each time.\n # NOTE(mrry): This depends on an implementation detail of `list_files()`,\n # which is that the list of files is captured when the iterator is\n # initialized. Otherwise, or if e.g. the iterator were initialized more than\n # once, it's possible that the non-determinism of `tf.matching_files()`\n # would cause this test to fail. 
However, it serves as a useful confirmation\n # that the `shuffle=False` argument is working as intended.\n # TODO(b/73959787): Provide some ordering guarantees so that this test is\n # more meaningful.\n dataset = dataset_ops.Dataset.list_files(\n path.join(self.tmp_dir, '*'), shuffle=False).repeat(2)\n next_element = self.getNext(dataset)\n\n expected_filenames = []\n actual_filenames = []\n for filename in filenames * 2:\n expected_filenames.append(\n compat.as_bytes(path.join(self.tmp_dir, filename)))\n actual_filenames.append(compat.as_bytes(self.evaluate(next_element())))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n self.assertItemsEqual(expected_filenames, actual_filenames)\n self.assertEqual(actual_filenames[:len(filenames)],\n actual_filenames[len(filenames):])\n\n def testMultiplePatternsAsList(self):\n filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']\n self._touchTempFiles(filenames)\n\n patterns = [path.join(self.tmp_dir, pat) for pat in ['*.py', '*.txt']]\n dataset = dataset_ops.Dataset.list_files(patterns)\n self.assertDatasetProduces(\n dataset,\n expected_output=[\n compat.as_bytes(path.join(self.tmp_dir, filename))\n for filename in filenames[:-1]\n ],\n assert_items_equal=True)\n\n def testMultiplePatternsAsTensor(self):\n filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']\n self._touchTempFiles(filenames)\n\n dataset = dataset_ops.Dataset.list_files(\n [path.join(self.tmp_dir, pat) for pat in ['*.py', '*.txt']])\n self.assertDatasetProduces(\n dataset,\n expected_output=[\n compat.as_bytes(path.join(self.tmp_dir, filename))\n for filename in filenames[:-1]\n ],\n assert_items_equal=True)\n\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for VariableClippingOptimizer.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport contextlib\nimport socket\nimport numpy as np\nfrom tensorflow.contrib.opt.python.training import variable_clipping_optimizer\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import gradient_descent\nfrom tensorflow.python.training import server_lib\n\n\nclass VariableClippingOptimizerTest(test.TestCase):\n\n def _setupCluster(self):\n\n def get_open_port():\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except IOError:\n s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\n s.bind((\"\", 0))\n port = s.getsockname()[1]\n s.close()\n return port\n\n port1 = get_open_port()\n port2 = get_open_port()\n cs = server_lib.ClusterSpec({\n \"worker\": [\"localhost:%s\" % port1],\n \"ps\": [\"localhost:%s\" % port2]\n })\n\n worker = server_lib.Server(cs, job_name=\"worker\", start=True)\n ps = server_lib.Server(cs, job_name=\"ps\", 
start=True)\n\n return worker, ps\n\n @contextlib.contextmanager\n def _maybeWithDevice(self, device):\n if device is not None:\n with ops.device(device):\n yield\n else:\n yield\n\n def _setupDense(self, is_distributed, dtype):\n with self._maybeWithDevice(\"/job:ps\" if is_distributed else None):\n var0 = variables.Variable([[0.0, 1.0], [2.0, 3.0]], dtype=dtype)\n var1 = variables.Variable([4.0, 5.0], dtype=dtype)\n with self._maybeWithDevice(\"/job:worker\" if is_distributed else None):\n grads0 = constant_op.constant([[0.1, 0.1], [0.1, 0.1]], dtype=dtype)\n grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)\n sgd = gradient_descent.GradientDescentOptimizer(3.0)\n clip_opt = variable_clipping_optimizer.VariableClippingOptimizer(\n sgd, {var0: [1]}, 2.0)\n\n update_op = clip_opt.apply_gradients(\n list(zip([grads0, grads1], [var0, var1])))\n variables.global_variables_initializer().run()\n return var0, var1, update_op\n\n def _assertDenseCorrect(self, var0, var1, update_op):\n # Fetch params to validate initial values\n self.assertAllCloseAccordingToType([[0.0, 1.0], [2.0, 3.0]], var0.eval())\n self.assertAllCloseAccordingToType([4.0, 5.0], var1.eval())\n\n # Run 1 step of sgd, clipping each var0[i] to max L2-norm 2.0\n update_op.run()\n # Validate updated params\n var0_out = var0.eval()\n # var0[0] has norm < 2.0, so it is not clipped.\n self.assertAllCloseAccordingToType([(0.0 - 3.0 * 0.1), (1.0 - 3.0 * 0.1)],\n var0_out[0])\n # var0[1] has norm > 2.0, so it is clipped.\n expected_unclipped = np.array([(2.0 - 3.0 * 0.1), (3.0 - 3.0 * 0.1)])\n self.assertAllCloseAccordingToType(2.0 * expected_unclipped /\n np.linalg.norm(expected_unclipped),\n var0_out[1])\n # var1 is not in the var list, so it should not be clipped\n self.assertAllCloseAccordingToType([4.0 - 3.0 * 0.01, 5.0 - 3.0 * 0.01],\n var1.eval())\n\n def _setupSparse(self, is_distributed, dtype):\n with self._maybeWithDevice(\"/job:ps\" if is_distributed else None):\n var0 = variables.Variable(\n 
[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]], dtype=dtype)\n var1 = variables.Variable(\n [[0.0, 1.0], [0.0, 3.0], [0.0, 5.0]], dtype=dtype)\n with self._maybeWithDevice(\"/job:worker\" if is_distributed else None):\n grads = ops.IndexedSlices(\n constant_op.constant(\n [[0.1, 0.1], [0.1, 0.1]], dtype=dtype), [0, 2], [3, 2])\n sgd = gradient_descent.GradientDescentOptimizer(3.0)\n clip_opt = variable_clipping_optimizer.VariableClippingOptimizer(\n sgd, {var0: [1],\n var1: [0]}, 2.0)\n update_op = clip_opt.apply_gradients(\n list(zip([grads, grads], [var0, var1])))\n variables.global_variables_initializer().run()\n return var0, var1, update_op\n\n def _assertSparseCorrect(self, var0, var1, update_op):\n # Fetch params to validate initial values\n self.assertAllCloseAccordingToType([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]],\n var0.eval())\n self.assertAllCloseAccordingToType([[0.0, 1.0], [0.0, 3.0], [0.0, 5.0]],\n var1.eval())\n\n # Run 1 step of sgd\n update_op.run()\n\n # var1 is clipped along the sparse dimension, so defaults to using dense\n # calculations. 
There should be a warning logged, but the numerics\n # should still be correct.\n var1_out = var1.eval()\n # var1[:, 0] has norm < 2.0, so it is not clipped.\n self.assertAllCloseAccordingToType(\n [(0.0 - 3.0 * 0.1), 0.0, (0.0 - 3.0 * 0.1)], var1_out[:, 0])\n # var1[:, 1] has norm > 2.0, so it is clipped.\n expected_unclipped = np.array([(1.0 - 3.0 * 0.1), 3.0, (5.0 - 3.0 * 0.1)])\n self.assertAllCloseAccordingToType(2.0 * expected_unclipped /\n np.linalg.norm(expected_unclipped),\n var1_out[:, 1])\n\n # Validate updated params\n var0_out = var0.eval()\n # var0[0] has norm < 2.0, so it is not clipped.\n self.assertAllCloseAccordingToType([(0.0 - 3.0 * 0.1), (1.0 - 3.0 * 0.1)],\n var0_out[0])\n # var0[1] has no gradients, so it should remain unchanged.\n self.assertAllCloseAccordingToType([2.0, 3.0], var0_out[1])\n # var0[2] has norm > 2.0, so it is clipped.\n expected_unclipped = np.array([(4.0 - 3.0 * 0.1), (5.0 - 3.0 * 0.1)])\n self.assertAllCloseAccordingToType(2.0 * expected_unclipped /\n np.linalg.norm(expected_unclipped),\n var0_out[2])\n\n def testDenseLocal(self):\n for dtype in [dtypes.float32, dtypes.float64, dtypes.half]:\n with self.cached_session():\n var0, var1, update_op = self._setupDense(False, dtype)\n self._assertDenseCorrect(var0, var1, update_op)\n\n def testDenseDistributed(self):\n worker, unused_ps = self._setupCluster()\n for dtype in [dtypes.float64, dtypes.half, dtypes.float32]:\n with session.Session(worker.target):\n var0, var1, update_op = self._setupDense(True, dtype)\n self._assertDenseCorrect(var0, var1, update_op)\n\n def testSparseLocal(self):\n for dtype in [dtypes.float64, dtypes.float32, dtypes.half]:\n with self.cached_session():\n var0, var1, update_op = self._setupSparse(False, dtype)\n self._assertSparseCorrect(var0, var1, update_op)\n\n def testSparseDistributed(self):\n worker, unused_ps = self._setupCluster()\n for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:\n with session.Session(worker.target):\n var0, 
var1, update_op = self._setupSparse(True, dtype)\n self._assertSparseCorrect(var0, var1, update_op)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Relaxed One-Hot Categorical distribution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom scipy.special import gamma\n\nfrom tensorflow.contrib.distributions.python.ops import relaxed_onehot_categorical\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.platform import test\n\n\ndef make_relaxed_categorical(batch_shape, num_classes, dtype=dtypes.float32):\n logits = random_ops.random_uniform(\n list(batch_shape) + [num_classes], -10, 10, dtype=dtype) - 50.\n temperatures = random_ops.random_uniform(\n list(batch_shape), 0.1, 10, dtype=dtypes.float32)\n return relaxed_onehot_categorical.RelaxedOneHotCategorical(\n temperatures, logits, dtype=dtype)\n\n\nclass ExpRelaxedOneHotCategoricalTest(test.TestCase):\n\n def testP(self):\n temperature = 1.0\n logits = [2.0, 3.0, -4.0]\n dist = relaxed_onehot_categorical.ExpRelaxedOneHotCategorical(temperature,\n logits)\n expected_p = np.exp(logits)/np.sum(np.exp(logits))\n with self.cached_session():\n self.assertAllClose(expected_p, dist.probs.eval())\n 
self.assertAllEqual([3], dist.probs.get_shape())\n\n def testPdf(self):\n temperature = .4\n logits = [.3, .1, .4]\n k = len(logits)\n p = np.exp(logits)/np.sum(np.exp(logits))\n dist = relaxed_onehot_categorical.ExpRelaxedOneHotCategorical(temperature,\n logits)\n with self.cached_session():\n x = dist.sample().eval()\n # analytical ExpConcrete density presented in Maddison et al. 2016\n prod_term = p*np.exp(-temperature * x)\n expected_pdf = (gamma(k) * np.power(temperature, k-1) *\n np.prod(prod_term/np.sum(prod_term)))\n pdf = dist.prob(x).eval()\n self.assertAllClose(expected_pdf, pdf)\n\n\nclass RelaxedOneHotCategoricalTest(test.TestCase):\n\n def testLogits(self):\n temperature = 1.0\n logits = [2.0, 3.0, -4.0]\n dist = relaxed_onehot_categorical.RelaxedOneHotCategorical(temperature,\n logits)\n with self.cached_session():\n # check p for ExpRelaxed base distribution\n self.assertAllClose(logits, dist._distribution.logits.eval())\n self.assertAllEqual([3], dist._distribution.logits.get_shape())\n\n def testSample(self):\n temperature = 1.4\n with self.cached_session():\n # single logit\n logits = [.3, .1, .4]\n dist = relaxed_onehot_categorical.RelaxedOneHotCategorical(temperature,\n logits)\n self.assertAllEqual([3], dist.sample().eval().shape)\n self.assertAllEqual([5, 3], dist.sample(5).eval().shape)\n # multiple distributions\n logits = [[2.0, 3.0, -4.0], [.3, .1, .4]]\n dist = relaxed_onehot_categorical.RelaxedOneHotCategorical(temperature,\n logits)\n self.assertAllEqual([2, 3], dist.sample().eval().shape)\n self.assertAllEqual([5, 2, 3], dist.sample(5).eval().shape)\n # multiple distributions\n logits = np.random.uniform(size=(4, 1, 3)).astype(np.float32)\n dist = relaxed_onehot_categorical.RelaxedOneHotCategorical(temperature,\n logits)\n self.assertAllEqual([4, 1, 3], dist.sample().eval().shape)\n self.assertAllEqual([5, 4, 1, 3], dist.sample(5).eval().shape)\n\n def testPdf(self):\n def analytical_pdf(x, temperature, logits):\n # analytical density 
of RelaxedOneHotCategorical\n temperature = np.reshape(temperature, (-1, 1))\n if len(x.shape) == 1:\n x = np.expand_dims(x, 0)\n k = logits.shape[1]\n p = np.exp(logits)/np.sum(np.exp(logits), axis=1, keepdims=True)\n term1 = gamma(k)*np.power(temperature, k-1)\n term2 = np.sum(p/(np.power(x, temperature)), axis=1, keepdims=True)\n term3 = np.prod(p/(np.power(x, temperature+1)), axis=1, keepdims=True)\n expected_pdf = term1*np.power(term2, -k)*term3\n return expected_pdf\n\n with self.cached_session():\n temperature = .4\n logits = np.array([[.3, .1, .4]]).astype(np.float32)\n dist = relaxed_onehot_categorical.RelaxedOneHotCategorical(temperature,\n logits)\n x = dist.sample().eval()\n pdf = dist.prob(x).eval()\n expected_pdf = analytical_pdf(x, temperature, logits)\n self.assertAllClose(expected_pdf.flatten(), pdf, rtol=1e-4)\n\n # variable batch size\n logits = np.array([[.3, .1, .4], [.6, -.1, 2.]]).astype(np.float32)\n temperatures = np.array([0.4, 2.3]).astype(np.float32)\n dist = relaxed_onehot_categorical.RelaxedOneHotCategorical(temperatures,\n logits)\n x = dist.sample().eval()\n pdf = dist.prob(x).eval()\n expected_pdf = analytical_pdf(x, temperatures, logits)\n self.assertAllClose(expected_pdf.flatten(), pdf, rtol=1e-4)\n\n def testShapes(self):\n with self.cached_session():\n for batch_shape in ([], [1], [2, 3, 4]):\n dist = make_relaxed_categorical(batch_shape, 10)\n self.assertAllEqual(batch_shape, dist.batch_shape.as_list())\n self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())\n self.assertAllEqual([10], dist.event_shape_tensor().eval())\n self.assertAllEqual([10], dist.event_shape_tensor().eval())\n\n for batch_shape in ([], [1], [2, 3, 4]):\n dist = make_relaxed_categorical(\n batch_shape, constant_op.constant(10, dtype=dtypes.int32))\n self.assertAllEqual(len(batch_shape), dist.batch_shape.ndims)\n self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())\n self.assertAllEqual([10], dist.event_shape_tensor().eval())\n 
self.assertAllEqual([10], dist.event_shape_tensor().eval())\n\n def testUnknownShape(self):\n with self.cached_session():\n logits_pl = array_ops.placeholder(dtypes.float32)\n temperature = 1.0\n dist = relaxed_onehot_categorical.ExpRelaxedOneHotCategorical(temperature,\n logits_pl)\n with self.cached_session():\n feed_dict = {logits_pl: [.3, .1, .4]}\n self.assertAllEqual([3], dist.sample().eval(feed_dict=feed_dict).shape)\n self.assertAllEqual([5, 3],\n dist.sample(5).eval(feed_dict=feed_dict).shape)\n\n def testDTypes(self):\n # check that sampling and log_prob work for a range of dtypes\n with self.cached_session():\n for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):\n logits = random_ops.random_uniform(shape=[3, 3], dtype=dtype)\n dist = relaxed_onehot_categorical.RelaxedOneHotCategorical(\n temperature=0.5, logits=logits)\n dist.log_prob(dist.sample())\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Ops tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.learn.python.learn import ops\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import test\n\n\nclass OpsTest(test.TestCase):\n \"\"\"Ops tests.\"\"\"\n\n def test_softmax_classifier(self):\n with self.cached_session() as session:\n features = array_ops.placeholder(dtypes.float32, [None, 3])\n labels = array_ops.placeholder(dtypes.float32, [None, 2])\n weights = constant_op.constant([[0.1, 0.1], [0.1, 0.1], [0.1, 0.1]])\n biases = constant_op.constant([0.2, 0.3])\n class_weight = constant_op.constant([0.1, 0.9])\n prediction, loss = ops.softmax_classifier(features, labels, weights,\n biases, class_weight)\n self.assertEqual(prediction.get_shape()[1], 2)\n self.assertEqual(loss.get_shape(), [])\n value = session.run(loss, {features: [[0.2, 0.3, 0.2]], labels: [[0, 1]]})\n self.assertAllClose(value, 0.55180627)\n\n def test_embedding_lookup(self):\n d_embed = 5\n n_embed 
= 10\n ids_shape = (2, 3, 4)\n embeds = np.random.randn(n_embed, d_embed)\n ids = np.random.randint(0, n_embed, ids_shape)\n with self.cached_session():\n embed_np = embeds[ids]\n embed_tf = ops.embedding_lookup(embeds, ids).eval()\n self.assertEqual(embed_np.shape, embed_tf.shape)\n self.assertAllClose(embed_np, embed_tf)\n\n def test_categorical_variable(self):\n random_seed.set_random_seed(42)\n with self.cached_session() as sess:\n cat_var_idx = array_ops.placeholder(dtypes.int64, [2, 2])\n embeddings = ops.categorical_variable(\n cat_var_idx, n_classes=5, embedding_size=10, name=\"my_cat_var\")\n sess.run(variables.global_variables_initializer())\n emb1 = sess.run(embeddings,\n feed_dict={cat_var_idx.name: [[0, 1], [2, 3]]})\n emb2 = sess.run(embeddings,\n feed_dict={cat_var_idx.name: [[0, 2], [1, 3]]})\n self.assertEqual(emb1.shape, emb2.shape)\n self.assertAllEqual(np.transpose(emb2, axes=[1, 0, 2]), emb1)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\[email protected]_parameters(\n dict(testcase_name='Defun', function_decorator=function.defun),\n dict(testcase_name='DefFunction', function_decorator=def_function.function))\nclass ArgumentNamingTests(test.TestCase, parameterized.TestCase):\n \"\"\"Tests for recognizable export signatures from concrete functions.\"\"\"\n\n def testBasic(self, function_decorator):\n @function_decorator\n def fn(a, b):\n return a + b, a * b\n # Call the function to make def_function happy\n fn(array_ops.ones([]), array_ops.ones([]))\n\n fn_op = fn.get_concrete_function(\n tensor_spec.TensorSpec(shape=(None,), 
dtype=dtypes.float32),\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32))\n self.assertEqual(\n ['a', 'b'],\n [inp.op.name for inp in fn_op.inputs])\n self.assertEqual(\n [b'a', b'b'],\n [inp.op.get_attr('_user_specified_name') for inp in fn_op.inputs])\n self.assertEqual(2, len(fn_op.graph.structured_outputs))\n self.assertAllClose(\n [3., 2.],\n fn_op(constant_op.constant(1.), constant_op.constant(2.)))\n self.assertAllClose(\n [3., 2.],\n fn_op(a=constant_op.constant(1.), b=constant_op.constant(2.)))\n\n def testVariable(self, function_decorator):\n @function_decorator\n def fn(a, b):\n return a + b, a * b\n # Call the function to make def_function happy\n fn(array_ops.ones([]), array_ops.ones([]))\n\n fn_op = fn.get_concrete_function(\n tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32),\n variables.Variable(1.))\n self.assertEqual(\n ['a', 'b'],\n [inp.op.name for inp in fn_op.inputs])\n self.assertEqual(\n [b'a', b'b'],\n [inp.op.get_attr('_user_specified_name') for inp in fn_op.inputs])\n self.assertEqual(2, len(fn_op.graph.structured_outputs))\n\n def testDictReturned(self, function_decorator):\n @function_decorator\n def fn(x, z=(1., 2.), y=3.):\n z1, z2 = z\n return {'alpha': x + y + z1, 'beta': x * y + z2}\n # Call the function to make def_function happy\n fn(array_ops.ones([]))\n\n fn_op = fn.get_concrete_function(\n x=tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32),\n y=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32))\n self.assertEqual(\n ['x', 'y'],\n [inp.op.name for inp in fn_op.inputs])\n self.assertEqual(\n [b'x', b'y'],\n [inp.op.get_attr('_user_specified_name') for inp in fn_op.inputs])\n self.assertEqual({'alpha', 'beta'},\n set(fn_op.graph.structured_outputs.keys()))\n\n with self.assertRaisesRegexp(ValueError, \"two arguments named 'z'\"):\n fn.get_concrete_function(\n z=(tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32),\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32)),\n 
y=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32,\n name='custom'),\n x=4.)\n fn_op2 = fn.get_concrete_function(\n z=(tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32,\n name='z_first'),\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32,\n name='z_second')),\n y=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='custom'),\n x=4.)\n self.assertEqual(\n ['z_first', 'z_second', 'custom'],\n [inp.op.name for inp in fn_op2.inputs])\n self.assertEqual(\n [b'z_first', b'z_second', b'custom'],\n [inp.op.get_attr('_user_specified_name') for inp in fn_op2.inputs])\n\n fn_op3 = fn.get_concrete_function(\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='custom'),\n z=(tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32,\n name='z1'),\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='z2')),\n y=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32))\n self.assertEqual(\n ['custom', 'z1', 'z2', 'y'],\n [inp.op.name for inp in fn_op3.inputs])\n self.assertEqual(\n [b'custom', b'z1', b'z2', b'y'],\n [inp.op.get_attr('_user_specified_name') for inp in fn_op3.inputs])\n\n def testMethod(self, function_decorator):\n class HasMethod(object):\n\n @function_decorator\n def method(self, x):\n return x\n\n has_method = HasMethod()\n # Call the function to make def_function happy\n HasMethod.method(has_method, array_ops.ones([]))\n class_op = HasMethod.method.get_concrete_function(\n has_method, tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32))\n self.assertEqual(\n ['x'],\n [inp.op.name for inp in class_op.inputs])\n self.assertEqual(\n [b'x'],\n [inp.op.get_attr('_user_specified_name') for inp in class_op.inputs])\n # Call the function to make def_function happy\n has_method.method(array_ops.ones([]))\n method_op = has_method.method.get_concrete_function(\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32))\n self.assertEqual(\n ['x'],\n [inp.op.name for inp in method_op.inputs])\n self.assertEqual(\n [b'x'],\n 
[inp.op.get_attr('_user_specified_name') for inp in method_op.inputs])\n # TODO(allenl): It should be possible to override names when exporting. Do\n # TensorSpec names need to go in cache keys? Or maybe get_concrete_function\n # should always retrace?\n self.skipTest('Not working')\n method_op = has_method.method.get_concrete_function(\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='y'))\n self.assertEqual(\n ['y'],\n [inp.op.name for inp in method_op.inputs])\n self.assertEqual(\n [b'y'],\n [inp.op.get_attr('_user_specified_name') for inp in method_op.inputs])\n\n def testMethodSignature(self, function_decorator):\n\n class HasMethod(object):\n\n @function_decorator(\n input_signature=(tensor_spec.TensorSpec(\n shape=None, dtype=dtypes.float64, name='y'),))\n def method(self, x):\n hash(self) # No weak proxies passed as `self`\n return x\n\n has_method = HasMethod()\n # Call the function to make def_function happy\n has_method.method(array_ops.ones([], dtype=dtypes.float64))\n method_op = has_method.method.get_concrete_function()\n self.assertEqual(\n ['y'],\n [inp.op.name for inp in method_op.inputs])\n self.assertEqual(\n [b'y'],\n [inp.op.get_attr('_user_specified_name') for inp in method_op.inputs])\n method_op2 = has_method.method.get_concrete_function()\n self.assertEqual(\n ['y'],\n [inp.op.name for inp in method_op2.inputs])\n self.assertEqual(\n [b'y'],\n [inp.op.get_attr('_user_specified_name') for inp in method_op2.inputs])\n\n def testVariadic(self, function_decorator):\n @function_decorator\n def variadic_fn(x, *args, **kwargs):\n return x + math_ops.add_n(list(args) + list(kwargs.values()))\n\n # Call the function to make def_function happy\n variadic_fn(array_ops.ones([]), array_ops.ones([]))\n variadic_op = variadic_fn.get_concrete_function(\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),\n tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32, name='y'),\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),\n 
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32,\n name='second_variadic'),\n z=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),\n zz=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='cust'))\n self.assertEqual(\n ['x', 'y', 'args_1', 'second_variadic', 'z', 'cust'],\n [inp.op.name for inp in variadic_op.inputs])\n self.assertEqual(\n [b'x', b'y', b'args_1', b'second_variadic', b'z', b'cust'],\n [inp.op.get_attr('_user_specified_name')\n for inp in variadic_op.inputs])\n\n def testVariadicInputSignature(self, function_decorator):\n @function_decorator(\n input_signature=(\n tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32),\n tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32, name='y'),\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='z'),\n ))\n def variadic_fn(x, *args):\n return x + math_ops.add_n(list(args))\n\n # Call the function to make def_function happy\n variadic_fn(array_ops.ones([]), array_ops.ones([]),\n array_ops.ones([]), array_ops.ones([]))\n variadic_op = variadic_fn.get_concrete_function()\n self.assertIn(b'variadic_fn', variadic_op.name)\n self.assertEqual(\n ['x', 'y', 'args_1', 'z'],\n [inp.op.name for inp in variadic_op.inputs])\n self.assertEqual(\n [b'x', b'y', b'args_1', b'z'],\n [inp.op.get_attr('_user_specified_name')\n for inp in variadic_op.inputs])\n\n\nif __name__ == '__main__':\n ops.enable_eager_execution(\n config=config_pb2.ConfigProto(device_count={'CPU': 4}))\n test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A simple smoke test that runs these examples for 1 training iteration.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\nimport pandas as pd\n\nfrom six.moves import StringIO\n\nimport tensorflow.examples.get_started.regression.imports85 as imports85\n\nsys.modules[\"imports85\"] = imports85\n\n# pylint: disable=g-bad-import-order,g-import-not-at-top\nimport tensorflow.data as data\n\nimport tensorflow.examples.get_started.regression.dnn_regression as dnn_regression\nimport tensorflow.examples.get_started.regression.linear_regression_categorical as linear_regression_categorical\nimport tensorflow.examples.get_started.regression.custom_regression as custom_regression\n\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.platform import test\n# pylint: disable=g-bad-import-order,g-import-not-at-top\n\n\n# pylint: disable=line-too-long\nFOUR_LINES = \"\\n\".join([\n \"1,?,alfa-romero,gas,std,two,hatchback,rwd,front,94.50,171.20,65.50,52.40,2823,ohcv,six,152,mpfi,2.68,3.47,9.00,154,5000,19,26,16500\",\n \"2,164,audi,gas,std,four,sedan,fwd,front,99.80,176.60,66.20,54.30,2337,ohc,four,109,mpfi,3.19,3.40,10.00,102,5500,24,30,13950\",\n 
\"2,164,audi,gas,std,four,sedan,4wd,front,99.40,176.60,66.40,54.30,2824,ohc,five,136,mpfi,3.19,3.40,8.00,115,5500,18,22,17450\",\n \"2,?,audi,gas,std,two,sedan,fwd,front,99.80,177.30,66.30,53.10,2507,ohc,five,136,mpfi,3.19,3.40,8.50,110,5500,19,25,15250\",\n])\n\n# pylint: enable=line-too-long\n\n\ndef four_lines_dataframe():\n text = StringIO(FOUR_LINES)\n\n return pd.read_csv(\n text, names=imports85.types.keys(), dtype=imports85.types, na_values=\"?\")\n\n\ndef four_lines_dataset(*args, **kwargs):\n del args, kwargs\n return data.Dataset.from_tensor_slices(FOUR_LINES.split(\"\\n\"))\n\n\nclass RegressionTest(googletest.TestCase):\n \"\"\"Test the regression examples in this directory.\"\"\"\n\n @test.mock.patch.dict(data.__dict__, {\"TextLineDataset\": four_lines_dataset})\n @test.mock.patch.dict(imports85.__dict__, {\"_get_imports85\": (lambda: None)})\n @test.mock.patch.dict(linear_regression_categorical.__dict__, {\"STEPS\": 1})\n def test_linear_regression_categorical(self):\n linear_regression_categorical.main([\"\"])\n\n @test.mock.patch.dict(data.__dict__, {\"TextLineDataset\": four_lines_dataset})\n @test.mock.patch.dict(imports85.__dict__, {\"_get_imports85\": (lambda: None)})\n @test.mock.patch.dict(dnn_regression.__dict__, {\"STEPS\": 1})\n def test_dnn_regression(self):\n dnn_regression.main([\"\"])\n\n @test.mock.patch.dict(data.__dict__, {\"TextLineDataset\": four_lines_dataset})\n @test.mock.patch.dict(imports85.__dict__, {\"_get_imports85\": (lambda: None)})\n @test.mock.patch.dict(custom_regression.__dict__, {\"STEPS\": 1})\n def test_custom_regression(self):\n custom_regression.main([\"\"])\n\n\nif __name__ == \"__main__\":\n googletest.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Matrix functions contains iterative methods for M^p.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import linalg_ops\nfrom tensorflow.python.ops import math_ops\n\n\ndef matrix_square_root(mat_a, mat_a_size, iter_count=100, ridge_epsilon=1e-4):\n \"\"\"Iterative method to get matrix square root.\n\n Stable iterations for the matrix square root, Nicholas J. 
Higham\n\n Page 231, Eq 2.6b\n http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.6.8799&rep=rep1&type=pdf\n\n Args:\n mat_a: the symmetric PSD matrix whose matrix square root be computed\n mat_a_size: size of mat_a.\n iter_count: Maximum number of iterations.\n ridge_epsilon: Ridge epsilon added to make the matrix positive definite.\n\n Returns:\n mat_a^0.5\n \"\"\"\n\n def _iter_condition(i, unused_mat_y, unused_old_mat_y, unused_mat_z,\n unused_old_mat_z, err, old_err):\n # This method require that we check for divergence every step.\n return math_ops.logical_and(i < iter_count, err < old_err)\n\n def _iter_body(i, mat_y, unused_old_mat_y, mat_z, unused_old_mat_z, err,\n unused_old_err):\n current_iterate = 0.5 * (3.0 * identity - math_ops.matmul(mat_z, mat_y))\n current_mat_y = math_ops.matmul(mat_y, current_iterate)\n current_mat_z = math_ops.matmul(current_iterate, mat_z)\n # Compute the error in approximation.\n mat_sqrt_a = current_mat_y * math_ops.sqrt(norm)\n mat_a_approx = math_ops.matmul(mat_sqrt_a, mat_sqrt_a)\n residual = mat_a - mat_a_approx\n current_err = math_ops.sqrt(math_ops.reduce_sum(residual * residual)) / norm\n return i + 1, current_mat_y, mat_y, current_mat_z, mat_z, current_err, err\n\n identity = linalg_ops.eye(math_ops.cast(mat_a_size, dtypes.int32))\n mat_a = mat_a + ridge_epsilon * identity\n norm = math_ops.sqrt(math_ops.reduce_sum(mat_a * mat_a))\n mat_init_y = mat_a / norm\n mat_init_z = identity\n init_err = norm\n\n _, _, prev_mat_y, _, _, _, _ = control_flow_ops.while_loop(\n _iter_condition, _iter_body, [\n 0, mat_init_y, mat_init_y, mat_init_z, mat_init_z, init_err,\n init_err + 1.0\n ])\n return prev_mat_y * math_ops.sqrt(norm)\n\n\ndef matrix_inverse_pth_root(mat_g,\n mat_g_size,\n alpha,\n iter_count=100,\n epsilon=1e-6,\n ridge_epsilon=1e-6):\n \"\"\"Computes mat_g^alpha, where alpha = -1/p, p a positive integer.\n\n We use an iterative Schur-Newton method from equation 3.2 on page 9 of:\n\n A Schur-Newton Method for 
the Matrix p-th Root and its Inverse\n by Chun-Hua Guo and Nicholas J. Higham\n SIAM Journal on Matrix Analysis and Applications,\n 2006, Vol. 28, No. 3 : pp. 788-804\n https://pdfs.semanticscholar.org/0abe/7f77433cf5908bfe2b79aa91af881da83858.pdf\n\n Args:\n mat_g: the symmetric PSD matrix whose power it to be computed\n mat_g_size: size of mat_g.\n alpha: exponent, must be -1/p for p a positive integer.\n iter_count: Maximum number of iterations.\n epsilon: accuracy indicator, useful for early termination.\n ridge_epsilon: Ridge epsilon added to make the matrix positive definite.\n\n Returns:\n mat_g^alpha\n \"\"\"\n\n identity = linalg_ops.eye(math_ops.cast(mat_g_size, dtypes.int32))\n\n def mat_power(mat_m, p):\n \"\"\"Computes mat_m^p, for p a positive integer.\n\n Power p is known at graph compile time, so no need for loop and cond.\n Args:\n mat_m: a square matrix\n p: a positive integer\n\n Returns:\n mat_m^p\n \"\"\"\n assert p == int(p) and p > 0\n power = None\n while p > 0:\n if p % 2 == 1:\n power = math_ops.matmul(mat_m, power) if power is not None else mat_m\n p //= 2\n mat_m = math_ops.matmul(mat_m, mat_m)\n return power\n\n def _iter_condition(i, mat_m, _):\n return math_ops.logical_and(\n i < iter_count,\n math_ops.reduce_max(math_ops.abs(mat_m - identity)) > epsilon)\n\n def _iter_body(i, mat_m, mat_x):\n mat_m_i = (1 - alpha) * identity + alpha * mat_m\n return (i + 1, math_ops.matmul(mat_power(mat_m_i, -1.0 / alpha), mat_m),\n math_ops.matmul(mat_x, mat_m_i))\n\n if mat_g_size == 1:\n mat_h = math_ops.pow(mat_g + ridge_epsilon, alpha)\n else:\n damped_mat_g = mat_g + ridge_epsilon * identity\n z = (1 - 1 / alpha) / (2 * linalg_ops.norm(damped_mat_g))\n # The best value for z is\n # (1 - 1/alpha) * (c_max^{-alpha} - c_min^{-alpha}) /\n # (c_max^{1-alpha} - c_min^{1-alpha})\n # where c_max and c_min are the largest and smallest singular values of\n # damped_mat_g.\n # The above estimate assumes that c_max > c_min * 2^p. 
(p = -1/alpha)\n # Can replace above line by the one below, but it is less accurate,\n # hence needs more iterations to converge.\n # z = (1 - 1/alpha) / math_ops.trace(damped_mat_g)\n # If we want the method to always converge, use z = 1 / norm(damped_mat_g)\n # or z = 1 / math_ops.trace(damped_mat_g), but these can result in many\n # extra iterations.\n _, _, mat_h = control_flow_ops.while_loop(\n _iter_condition, _iter_body,\n [0, damped_mat_g * z, identity * math_ops.pow(z, -alpha)])\n return mat_h\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"TensorFlow Eager Execution Example: RNN Colorbot.\n\nThis example builds, trains, and evaluates a multi-layer RNN that can be\nrun with eager execution enabled. The RNN is trained to map color names to\ntheir RGB values: it takes as input a one-hot encoded character sequence and\noutputs a three-tuple (R, G, B) (scaled by 1/255).\n\nFor example, say we'd like the RNN Colorbot to generate the RGB values for the\ncolor white. To represent our query in a form that the Colorbot could\nunderstand, we would create a sequence of five 256-long vectors encoding the\nASCII values of the characters in \"white\". The first vector in our sequence\nwould be 0 everywhere except for the ord(\"w\")-th position, where it would be\n1, the second vector would be 0 everywhere except for the\nord(\"h\")-th position, where it would be 1, and similarly for the remaining three\nvectors. We refer to such indicator vectors as \"one-hot encodings\" of\ncharacters. 
After consuming these vectors, a well-trained Colorbot would output\nthe three tuple (1, 1, 1), since the RGB values for white are (255, 255, 255).\nWe are of course free to ask the colorbot to generate colors for any string we'd\nlike, such as \"steel gray,\" \"tensorflow orange,\" or \"green apple,\" though\nyour mileage may vary as your queries increase in creativity.\n\nThis example shows how to:\n 1. read, process, (one-hot) encode, and pad text data via the\n Datasets API;\n 2. build a trainable model;\n 3. implement a multi-layer RNN using Python control flow\n constructs (e.g., a for loop);\n 4. train a model using an iterative gradient-based method; and\n\nThe data used in this example is licensed under the Creative Commons\nAttribution-ShareAlike License and is available at\n https://en.wikipedia.org/wiki/List_of_colors:_A-F\n https://en.wikipedia.org/wiki/List_of_colors:_G-M\n https://en.wikipedia.org/wiki/List_of_colors:_N-Z\n\nThis example was adapted from\n https://github.com/random-forests/tensorflow-workshop/tree/master/extras/colorbot\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport functools\nimport os\nimport sys\nimport time\nimport urllib\n\nimport six\nimport tensorflow as tf\n\nfrom tensorflow.contrib.eager.python import tfe\n\ntry:\n import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top\n HAS_MATPLOTLIB = True\nexcept ImportError:\n HAS_MATPLOTLIB = False\n\nlayers = tf.keras.layers\n\n\ndef parse(line):\n \"\"\"Parse a line from the colors dataset.\"\"\"\n\n # Each line of the dataset is comma-separated and formatted as\n # color_name, r, g, b\n # so `items` is a list [color_name, r, g, b].\n items = tf.string_split([line], \",\").values\n rgb = tf.string_to_number(items[1:], out_type=tf.float32) / 255.\n # Represent the color name as a one-hot encoded character sequence.\n color_name = items[0]\n chars = 
tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256)\n # The sequence length is needed by our RNN.\n length = tf.cast(tf.shape(chars)[0], dtype=tf.int64)\n return rgb, chars, length\n\n\ndef maybe_download(filename, work_directory, source_url):\n \"\"\"Download the data from source url, unless it's already here.\n\n Args:\n filename: string, name of the file in the directory.\n work_directory: string, path to working directory.\n source_url: url to download from if file doesn't exist.\n\n Returns:\n Path to resulting file.\n \"\"\"\n if not tf.gfile.Exists(work_directory):\n tf.gfile.MakeDirs(work_directory)\n filepath = os.path.join(work_directory, filename)\n if not tf.gfile.Exists(filepath):\n temp_file_name, _ = urllib.request.urlretrieve(source_url)\n tf.gfile.Copy(temp_file_name, filepath)\n with tf.gfile.GFile(filepath) as f:\n size = f.size()\n print(\"Successfully downloaded\", filename, size, \"bytes.\")\n return filepath\n\n\ndef load_dataset(data_dir, url, batch_size):\n \"\"\"Loads the colors data at path into a PaddedDataset.\"\"\"\n\n # Downloads data at url into data_dir/basename(url). The dataset has a header\n # row (color_name, r, g, b) followed by comma-separated lines.\n path = maybe_download(os.path.basename(url), data_dir, url)\n\n # This chain of commands loads our data by:\n # 1. skipping the header; (.skip(1))\n # 2. parsing the subsequent lines; (.map(parse))\n # 3. shuffling the data; (.shuffle(...))\n # 3. 
grouping the data into padded batches (.padded_batch(...)).\n dataset = tf.data.TextLineDataset(path).skip(1).map(parse).shuffle(\n buffer_size=10000).padded_batch(\n batch_size, padded_shapes=([None], [None, None], []))\n return dataset\n\n\n# pylint: disable=not-callable\nclass RNNColorbot(tf.keras.Model):\n \"\"\"Multi-layer (LSTM) RNN that regresses on real-valued vector labels.\n \"\"\"\n\n def __init__(self, rnn_cell_sizes, label_dimension, keep_prob):\n \"\"\"Constructs an RNNColorbot.\n\n Args:\n rnn_cell_sizes: list of integers denoting the size of each LSTM cell in\n the RNN; rnn_cell_sizes[i] is the size of the i-th layer cell\n label_dimension: the length of the labels on which to regress\n keep_prob: (1 - dropout probability); dropout is applied to the outputs of\n each LSTM layer\n \"\"\"\n super(RNNColorbot, self).__init__(name=\"\")\n self.label_dimension = label_dimension\n self.keep_prob = keep_prob\n\n self.cells = tf.contrib.checkpoint.List(\n [tf.nn.rnn_cell.BasicLSTMCell(size) for size in rnn_cell_sizes])\n self.relu = layers.Dense(\n label_dimension, activation=tf.nn.relu, name=\"relu\")\n\n def call(self, inputs, training=False):\n \"\"\"Implements the RNN logic and prediction generation.\n\n Args:\n inputs: A tuple (chars, sequence_length), where chars is a batch of\n one-hot encoded color names represented as a Tensor with dimensions\n [batch_size, time_steps, 256] and sequence_length holds the length\n of each character sequence (color name) as a Tensor with dimension\n [batch_size].\n training: whether the invocation is happening during training\n\n Returns:\n A tensor of dimension [batch_size, label_dimension] that is produced by\n passing chars through a multi-layer RNN and applying a ReLU to the final\n hidden state.\n \"\"\"\n (chars, sequence_length) = inputs\n # Transpose the first and second dimensions so that chars is of shape\n # [time_steps, batch_size, dimension].\n chars = tf.transpose(chars, [1, 0, 2])\n # The outer loop 
cycles through the layers of the RNN; the inner loop\n # executes the time steps for a particular layer.\n batch_size = int(chars.shape[1])\n for l in range(len(self.cells)):\n cell = self.cells[l]\n outputs = []\n state = cell.zero_state(batch_size, tf.float32)\n # Unstack the inputs to obtain a list of batches, one for each time step.\n chars = tf.unstack(chars, axis=0)\n for ch in chars:\n output, state = cell(ch, state)\n outputs.append(output)\n # The outputs of this layer are the inputs of the subsequent layer.\n chars = tf.stack(outputs, axis=0)\n if training:\n chars = tf.nn.dropout(chars, self.keep_prob)\n # Extract the correct output (i.e., hidden state) for each example. All the\n # character sequences in this batch were padded to the same fixed length so\n # that they could be easily fed through the above RNN loop. The\n # `sequence_length` vector tells us the true lengths of the character\n # sequences, letting us obtain for each sequence the hidden state that was\n # generated by its non-padding characters.\n batch_range = [i for i in range(batch_size)]\n indices = tf.stack([sequence_length - 1, batch_range], axis=1)\n hidden_states = tf.gather_nd(chars, indices)\n return self.relu(hidden_states)\n\n\ndef loss(labels, predictions):\n \"\"\"Computes mean squared loss.\"\"\"\n return tf.reduce_mean(tf.squared_difference(predictions, labels))\n\n\ndef test(model, eval_data):\n \"\"\"Computes the average loss on eval_data, which should be a Dataset.\"\"\"\n avg_loss = tfe.metrics.Mean(\"loss\")\n for (labels, chars, sequence_length) in tfe.Iterator(eval_data):\n predictions = model((chars, sequence_length), training=False)\n avg_loss(loss(labels, predictions))\n print(\"eval/loss: %.6f\\n\" % avg_loss.result())\n with tf.contrib.summary.always_record_summaries():\n tf.contrib.summary.scalar(\"loss\", avg_loss.result())\n\n\ndef train_one_epoch(model, optimizer, train_data, log_interval=10):\n \"\"\"Trains model on train_data using optimizer.\"\"\"\n\n 
tf.train.get_or_create_global_step()\n\n def model_loss(labels, chars, sequence_length):\n predictions = model((chars, sequence_length), training=True)\n loss_value = loss(labels, predictions)\n tf.contrib.summary.scalar(\"loss\", loss_value)\n return loss_value\n\n for (batch, (labels, chars, sequence_length)) in enumerate(\n tfe.Iterator(train_data)):\n with tf.contrib.summary.record_summaries_every_n_global_steps(log_interval):\n batch_model_loss = functools.partial(model_loss, labels, chars,\n sequence_length)\n optimizer.minimize(\n batch_model_loss, global_step=tf.train.get_global_step())\n if log_interval and batch % log_interval == 0:\n print(\"train/batch #%d\\tloss: %.6f\" % (batch, batch_model_loss()))\n\n\nSOURCE_TRAIN_URL = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/train.csv\"\nSOURCE_TEST_URL = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/test.csv\"\n\n\ndef main(_):\n data_dir = os.path.join(FLAGS.dir, \"data\")\n train_data = load_dataset(\n data_dir=data_dir, url=SOURCE_TRAIN_URL, batch_size=FLAGS.batch_size)\n eval_data = load_dataset(\n data_dir=data_dir, url=SOURCE_TEST_URL, batch_size=FLAGS.batch_size)\n\n model = RNNColorbot(\n rnn_cell_sizes=FLAGS.rnn_cell_sizes,\n label_dimension=3,\n keep_prob=FLAGS.keep_probability)\n optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)\n\n if FLAGS.no_gpu or tfe.num_gpus() <= 0:\n print(tfe.num_gpus())\n device = \"/cpu:0\"\n else:\n device = \"/gpu:0\"\n print(\"Using device %s.\" % device)\n\n log_dir = os.path.join(FLAGS.dir, \"summaries\")\n tf.gfile.MakeDirs(log_dir)\n train_summary_writer = tf.contrib.summary.create_file_writer(\n os.path.join(log_dir, \"train\"), flush_millis=10000)\n test_summary_writer = tf.contrib.summary.create_file_writer(\n os.path.join(log_dir, \"eval\"), flush_millis=10000, name=\"eval\")\n\n with tf.device(device):\n for epoch in 
range(FLAGS.num_epochs):\n start = time.time()\n with train_summary_writer.as_default():\n train_one_epoch(model, optimizer, train_data, FLAGS.log_interval)\n end = time.time()\n print(\"train/time for epoch #%d: %.2f\" % (epoch, end - start))\n with test_summary_writer.as_default():\n test(model, eval_data)\n\n print(\"Colorbot is ready to generate colors!\")\n while True:\n try:\n color_name = six.moves.input(\n \"Give me a color name (or press enter to exit): \")\n except EOFError:\n return\n\n if not color_name:\n return\n\n _, chars, length = parse(color_name)\n with tf.device(device):\n (chars, length) = (tf.identity(chars), tf.identity(length))\n chars = tf.expand_dims(chars, 0)\n length = tf.expand_dims(length, 0)\n preds = tf.unstack(model((chars, length), training=False)[0])\n\n # Predictions cannot be negative, as they are generated by a ReLU layer;\n # they may, however, be greater than 1.\n clipped_preds = tuple(min(float(p), 1.0) for p in preds)\n rgb = tuple(int(p * 255) for p in clipped_preds)\n print(\"rgb:\", rgb)\n data = [[clipped_preds]]\n if HAS_MATPLOTLIB:\n plt.imshow(data)\n plt.title(color_name)\n plt.show()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--dir\",\n type=str,\n default=\"/tmp/rnn_colorbot/\",\n help=\"Directory to download data files and save logs.\")\n parser.add_argument(\n \"--log_interval\",\n type=int,\n default=10,\n metavar=\"N\",\n help=\"Log training loss every log_interval batches.\")\n parser.add_argument(\n \"--num_epochs\", type=int, default=20, help=\"Number of epochs to train.\")\n parser.add_argument(\n \"--rnn_cell_sizes\",\n type=int,\n nargs=\"+\",\n default=[256, 128],\n help=\"List of sizes for each layer of the RNN.\")\n parser.add_argument(\n \"--batch_size\",\n type=int,\n default=64,\n help=\"Batch size for training and eval.\")\n parser.add_argument(\n \"--keep_probability\",\n type=float,\n default=0.5,\n help=\"Keep probability for dropout 
between layers.\")\n parser.add_argument(\n \"--learning_rate\",\n type=float,\n default=0.01,\n help=\"Learning rate to be used during training.\")\n parser.add_argument(\n \"--no_gpu\",\n action=\"store_true\",\n default=False,\n help=\"Disables GPU usage even if a GPU is available.\")\n\n FLAGS, unparsed = parser.parse_known_args()\n tfe.run(main=main, argv=[sys.argv[0]] + unparsed)\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests l2hmc fit to 2D strongly correlated Gaussian executed eagerly.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nimport numpy.random as npr\nimport tensorflow as tf\nimport tensorflow.contrib.eager as tfe\nfrom tensorflow.contrib.eager.python.examples.l2hmc import l2hmc\n\n\ndef get_default_hparams():\n return tf.contrib.training.HParams(\n x_dim=2,\n n_samples=200,\n n_steps=10,\n eps=.1,\n n_iters=10,\n learning_rate=.0003,\n n_warmup_iters=3)\n\n\ndef step(dynamics, optimizer, samples):\n loss, grads, samples, _ = l2hmc.loss_and_grads(\n dynamics, samples, loss_fn=l2hmc.compute_loss)\n optimizer.apply_gradients(zip(grads, dynamics.variables))\n\n return loss, samples\n\n\n# To be defunnable, the function cannot return an Operation, so the above\n# function is used for defun or eager, and this function is used in graph to be\n# able to run the gradient updates.\ndef graph_step(dynamics, optimizer, samples):\n loss, grads, samples, _ = l2hmc.loss_and_grads(\n dynamics, samples, loss_fn=l2hmc.compute_loss)\n train_op = optimizer.apply_gradients(zip(grads, dynamics.variables))\n\n return train_op, loss, samples\n\n\ndef warmup(dynamics,\n optimizer,\n n_iters=1,\n 
n_samples=200,\n step_fn=step):\n \"\"\"Warmup optimization to reduce overhead.\"\"\"\n\n samples = tf.random_normal(\n shape=[n_samples, dynamics.x_dim], dtype=tf.float32)\n\n for _ in range(n_iters):\n _, samples = step_fn(dynamics, optimizer, samples)\n\n\ndef fit(dynamics,\n samples,\n optimizer,\n step_fn=step,\n n_iters=5000,\n verbose=True,\n logdir=None):\n \"\"\"Fit L2HMC sampler with given log-likelihood function.\"\"\"\n\n if logdir:\n summary_writer = tf.contrib.summary.create_file_writer(logdir)\n\n for i in range(n_iters):\n loss, samples = step_fn(dynamics, optimizer, samples)\n if verbose:\n print(\"Iteration %d: loss %.4f\" % (i, loss))\n\n if logdir:\n with summary_writer.as_default():\n with tf.contrib.summary.always_record_summaries():\n tf.contrib.summary.scalar(\"loss\", loss)\n\n\nclass L2hmcTest(tf.test.TestCase):\n \"\"\"Unit tests for l2hmc in both eager and graph mode.\"\"\"\n\n def test_apply_transition(self):\n \"\"\"Testing function `Dynamics.apply_transition` in graph and eager mode.\"\"\"\n\n # Eager mode testing\n hparams = get_default_hparams()\n energy_fn, _, _ = l2hmc.get_scg_energy_fn()\n dynamics = l2hmc.Dynamics(\n x_dim=hparams.x_dim,\n minus_loglikelihood_fn=energy_fn,\n n_steps=hparams.n_steps,\n eps=hparams.eps)\n samples = tf.random_normal(shape=[hparams.n_samples, hparams.x_dim])\n x_, v_, x_accept_prob, x_out = dynamics.apply_transition(samples)\n\n self.assertEqual(x_.shape, v_.shape)\n self.assertEqual(x_out.shape, samples.shape)\n self.assertEqual(x_.shape, x_out.shape)\n self.assertEqual(x_accept_prob.shape, (hparams.n_samples,))\n\n # Graph mode testing\n with tf.Graph().as_default():\n energy_fn, _, _ = l2hmc.get_scg_energy_fn()\n dynamics = l2hmc.Dynamics(\n x_dim=hparams.x_dim,\n minus_loglikelihood_fn=energy_fn,\n n_steps=hparams.n_steps,\n eps=hparams.eps)\n x = tf.placeholder(tf.float32, shape=[None, hparams.x_dim])\n x_, v_, x_accept_prob, x_out = dynamics.apply_transition(x)\n samples = 
npr.normal(size=[hparams.n_samples, hparams.x_dim])\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n np_x_, np_v_, np_x_accept_prob, np_x_out = sess.run(\n [x_, v_, x_accept_prob, x_out], feed_dict={x: samples})\n\n self.assertEqual(np_x_.shape, np_v_.shape)\n self.assertEqual(samples.shape, np_x_out.shape)\n self.assertEqual(np_x_.shape, np_x_out.shape)\n self.assertEqual(np_x_accept_prob.shape, (hparams.n_samples,))\n\n\nclass L2hmcBenchmark(tf.test.Benchmark):\n \"\"\"Eager and graph benchmarks for l2hmc.\"\"\"\n\n def benchmark_graph(self):\n \"\"\"Benchmark Graph performance.\"\"\"\n\n hparams = get_default_hparams()\n tf.enable_resource_variables()\n for sample_size in [10, 25, 50, 100, 200]:\n hparams.n_samples = sample_size\n tf.reset_default_graph()\n with tf.Graph().as_default():\n energy_fn, _, _ = l2hmc.get_scg_energy_fn()\n x = tf.random_normal([hparams.n_samples, hparams.x_dim],\n dtype=tf.float32)\n dynamics = l2hmc.Dynamics(\n x_dim=hparams.x_dim,\n minus_loglikelihood_fn=energy_fn,\n n_steps=hparams.n_steps,\n eps=hparams.eps)\n loss, _, _ = l2hmc.compute_loss(dynamics, x)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate)\n train_op, loss, _ = graph_step(dynamics, optimizer, x)\n\n # Single thread; fairer comparison against eager\n session_conf = tf.ConfigProto(inter_op_parallelism_threads=1)\n\n with tf.Session(config=session_conf) as sess:\n sess.run(tf.global_variables_initializer())\n\n # Warmup to reduce initialization effect when timing\n for _ in range(hparams.n_warmup_iters):\n _, _ = sess.run([train_op, loss])\n\n # Training\n start_time = time.time()\n for i in range(hparams.n_iters):\n _, loss_np = sess.run([train_op, loss])\n print(\"Iteration %d: loss %.4f\" % (i, loss_np))\n wall_time = (time.time() - start_time) / hparams.n_iters\n examples_per_sec = hparams.n_samples / wall_time\n\n self.report_benchmark(\n name=\"graph_train_%s_%d\" %\n (\"gpu\" if tf.test.is_gpu_available() 
else \"cpu\", sample_size),\n iters=hparams.n_iters,\n extras={\"examples_per_sec\": examples_per_sec},\n wall_time=wall_time)\n\n def benchmark_eager(self):\n self._benchmark_eager()\n\n def benchmark_eager_defun(self):\n self._benchmark_eager(defun=True)\n\n def _benchmark_eager(self, defun=False):\n \"\"\"Benchmark Eager performance.\"\"\"\n\n hparams = get_default_hparams()\n for sample_size in [10, 25, 50, 100, 200]:\n hparams.n_samples = sample_size\n energy_fn, _, _ = l2hmc.get_scg_energy_fn()\n dynamics = l2hmc.Dynamics(\n x_dim=hparams.x_dim,\n minus_loglikelihood_fn=energy_fn,\n n_steps=hparams.n_steps,\n eps=hparams.eps)\n optimizer = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate)\n step_fn = tfe.defun(step) if defun else step\n\n # Warmup to reduce initialization effect when timing\n warmup(\n dynamics,\n optimizer,\n n_iters=hparams.n_warmup_iters,\n n_samples=hparams.n_samples,\n step_fn=step_fn)\n\n # Training\n samples = tf.random_normal(\n shape=[hparams.n_samples, hparams.x_dim], dtype=tf.float32)\n start_time = time.time()\n fit(dynamics,\n samples,\n optimizer,\n step_fn=step_fn,\n n_iters=hparams.n_iters)\n wall_time = (time.time() - start_time) / hparams.n_iters\n examples_per_sec = hparams.n_samples / wall_time\n\n self.report_benchmark(\n name=\"eager_train_%s%s_%d\" %\n (\"gpu\" if tf.test.is_gpu_available() else \"cpu\",\n \"_defun\" if defun else \"\", sample_size),\n iters=hparams.n_iters,\n extras={\"examples_per_sec\": examples_per_sec},\n wall_time=wall_time)\n\n del dynamics\n\n\nif __name__ == \"__main__\":\n tf.enable_eager_execution()\n tf.test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for slim.nets.resnet_v1.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.framework.python.ops import arg_scope\nfrom tensorflow.contrib.layers.python.layers import utils\nfrom tensorflow.contrib.slim.python.slim.nets import resnet_utils\nfrom tensorflow.contrib.slim.python.slim.nets import resnet_v1\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\ndef create_test_input(batch_size, height, width, channels):\n \"\"\"Create test input tensor.\n\n Args:\n batch_size: The number of images per batch or `None` if unknown.\n height: The height of each image or `None` if unknown.\n width: The width of each image or `None` if unknown.\n channels: The number of channels per image or `None` if unknown.\n\n Returns:\n Either a placeholder 
`Tensor` of dimension\n [batch_size, height, width, channels] if any of the inputs are `None` or a\n constant `Tensor` with the mesh grid values along the spatial dimensions.\n \"\"\"\n if None in [batch_size, height, width, channels]:\n return array_ops.placeholder(dtypes.float32,\n (batch_size, height, width, channels))\n else:\n return math_ops.cast(\n np.tile(\n np.reshape(\n np.reshape(np.arange(height), [height, 1]) + np.reshape(\n np.arange(width), [1, width]), [1, height, width, 1]),\n [batch_size, 1, 1, channels]), dtypes.float32)\n\n\nclass ResnetUtilsTest(test.TestCase):\n\n def testSubsampleThreeByThree(self):\n x = array_ops.reshape(math_ops.cast(math_ops.range(9), dtypes.float32),\n [1, 3, 3, 1])\n x = resnet_utils.subsample(x, 2)\n expected = array_ops.reshape(\n constant_op.constant([0, 2, 6, 8]), [1, 2, 2, 1])\n with self.cached_session():\n self.assertAllClose(x.eval(), expected.eval())\n\n def testSubsampleFourByFour(self):\n x = array_ops.reshape(math_ops.cast(math_ops.range(16), dtypes.float32),\n [1, 4, 4, 1])\n x = resnet_utils.subsample(x, 2)\n expected = array_ops.reshape(\n constant_op.constant([0, 2, 8, 10]), [1, 2, 2, 1])\n with self.cached_session():\n self.assertAllClose(x.eval(), expected.eval())\n\n def testConv2DSameEven(self):\n n, n2 = 4, 2\n\n # Input image.\n x = create_test_input(1, n, n, 1)\n\n # Convolution kernel.\n w = create_test_input(1, 3, 3, 1)\n w = array_ops.reshape(w, [3, 3, 1, 1])\n\n variable_scope.get_variable('Conv/weights', initializer=w)\n variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))\n variable_scope.get_variable_scope().reuse_variables()\n\n y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')\n y1_expected = math_ops.cast([[14, 28, 43, 26], [28, 48, 66, 37],\n [43, 66, 84, 46], [26, 37, 46, 22]],\n dtypes.float32)\n y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])\n\n y2 = resnet_utils.subsample(y1, 2)\n y2_expected = math_ops.cast([[14, 43], [43, 84]], 
dtypes.float32)\n y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])\n\n y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')\n y3_expected = y2_expected\n\n y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')\n y4_expected = math_ops.cast([[48, 37], [37, 22]], dtypes.float32)\n y4_expected = array_ops.reshape(y4_expected, [1, n2, n2, 1])\n\n with self.cached_session() as sess:\n sess.run(variables.global_variables_initializer())\n self.assertAllClose(y1.eval(), y1_expected.eval())\n self.assertAllClose(y2.eval(), y2_expected.eval())\n self.assertAllClose(y3.eval(), y3_expected.eval())\n self.assertAllClose(y4.eval(), y4_expected.eval())\n\n def testConv2DSameOdd(self):\n n, n2 = 5, 3\n\n # Input image.\n x = create_test_input(1, n, n, 1)\n\n # Convolution kernel.\n w = create_test_input(1, 3, 3, 1)\n w = array_ops.reshape(w, [3, 3, 1, 1])\n\n variable_scope.get_variable('Conv/weights', initializer=w)\n variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))\n variable_scope.get_variable_scope().reuse_variables()\n\n y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')\n y1_expected = math_ops.cast([[14, 28, 43, 58, 34],\n [28, 48, 66, 84, 46],\n [43, 66, 84, 102, 55],\n [58, 84, 102, 120, 64],\n [34, 46, 55, 64, 30]],\n dtypes.float32)\n y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])\n\n y2 = resnet_utils.subsample(y1, 2)\n y2_expected = math_ops.cast([[14, 43, 34],\n [43, 84, 55],\n [34, 55, 30]],\n dtypes.float32)\n y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])\n\n y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')\n y3_expected = y2_expected\n\n y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')\n y4_expected = y2_expected\n\n with self.cached_session() as sess:\n sess.run(variables.global_variables_initializer())\n self.assertAllClose(y1.eval(), y1_expected.eval())\n self.assertAllClose(y2.eval(), y2_expected.eval())\n self.assertAllClose(y3.eval(), 
y3_expected.eval())\n self.assertAllClose(y4.eval(), y4_expected.eval())\n\n def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):\n \"\"\"A plain ResNet without extra layers before or after the ResNet blocks.\"\"\"\n with variable_scope.variable_scope(scope, values=[inputs]):\n with arg_scope([layers.conv2d], outputs_collections='end_points'):\n net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)\n end_points = utils.convert_collection_to_dict('end_points')\n return net, end_points\n\n def testEndPointsV1(self):\n \"\"\"Test the end points of a tiny v1 bottleneck network.\"\"\"\n blocks = [\n resnet_v1.resnet_v1_block(\n 'block1', base_depth=1, num_units=2, stride=2),\n resnet_v1.resnet_v1_block(\n 'block2', base_depth=2, num_units=2, stride=1),\n ]\n inputs = create_test_input(2, 32, 16, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')\n expected = [\n 'tiny/block1/unit_1/bottleneck_v1/shortcut',\n 'tiny/block1/unit_1/bottleneck_v1/conv1',\n 'tiny/block1/unit_1/bottleneck_v1/conv2',\n 'tiny/block1/unit_1/bottleneck_v1/conv3',\n 'tiny/block1/unit_2/bottleneck_v1/conv1',\n 'tiny/block1/unit_2/bottleneck_v1/conv2',\n 'tiny/block1/unit_2/bottleneck_v1/conv3',\n 'tiny/block2/unit_1/bottleneck_v1/shortcut',\n 'tiny/block2/unit_1/bottleneck_v1/conv1',\n 'tiny/block2/unit_1/bottleneck_v1/conv2',\n 'tiny/block2/unit_1/bottleneck_v1/conv3',\n 'tiny/block2/unit_2/bottleneck_v1/conv1',\n 'tiny/block2/unit_2/bottleneck_v1/conv2',\n 'tiny/block2/unit_2/bottleneck_v1/conv3']\n self.assertItemsEqual(expected, end_points)\n\n def _stack_blocks_nondense(self, net, blocks):\n \"\"\"A simplified ResNet Block stacker without output stride control.\"\"\"\n for block in blocks:\n with variable_scope.variable_scope(block.scope, 'block', [net]):\n for i, unit in enumerate(block.args):\n with variable_scope.variable_scope('unit_%d' % (i + 1), values=[net]):\n net = 
block.unit_fn(net, rate=1, **unit)\n return net\n\n def testAtrousValuesBottleneck(self):\n \"\"\"Verify the values of dense feature extraction by atrous convolution.\n\n Make sure that dense feature extraction by stack_blocks_dense() followed by\n subsampling gives identical results to feature extraction at the nominal\n network output stride using the simple self._stack_blocks_nondense() above.\n \"\"\"\n block = resnet_v1.resnet_v1_block\n blocks = [\n block('block1', base_depth=1, num_units=2, stride=2),\n block('block2', base_depth=2, num_units=2, stride=2),\n block('block3', base_depth=4, num_units=2, stride=2),\n block('block4', base_depth=8, num_units=2, stride=1),\n ]\n nominal_stride = 8\n\n # Test both odd and even input dimensions.\n height = 30\n width = 31\n with arg_scope(resnet_utils.resnet_arg_scope()):\n with arg_scope([layers.batch_norm], is_training=False):\n for output_stride in [1, 2, 4, 8, None]:\n with ops.Graph().as_default():\n with self.cached_session() as sess:\n random_seed.set_random_seed(0)\n inputs = create_test_input(1, height, width, 3)\n # Dense feature extraction followed by subsampling.\n output = resnet_utils.stack_blocks_dense(inputs, blocks,\n output_stride)\n if output_stride is None:\n factor = 1\n else:\n factor = nominal_stride // output_stride\n\n output = resnet_utils.subsample(output, factor)\n # Make the two networks use the same weights.\n variable_scope.get_variable_scope().reuse_variables()\n # Feature extraction at the nominal network rate.\n expected = self._stack_blocks_nondense(inputs, blocks)\n sess.run(variables.global_variables_initializer())\n output, expected = sess.run([output, expected])\n self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)\n\n\nclass ResnetCompleteNetworkTest(test.TestCase):\n \"\"\"Tests with complete small ResNet v1 networks.\"\"\"\n\n def _resnet_small(self,\n inputs,\n num_classes=None,\n is_training=True,\n global_pool=True,\n output_stride=None,\n 
include_root_block=True,\n reuse=None,\n scope='resnet_v1_small'):\n \"\"\"A shallow and thin ResNet v1 for faster tests.\"\"\"\n block = resnet_v1.resnet_v1_block\n blocks = [\n block('block1', base_depth=1, num_units=3, stride=2),\n block('block2', base_depth=2, num_units=3, stride=2),\n block('block3', base_depth=4, num_units=3, stride=2),\n block('block4', base_depth=8, num_units=2, stride=1),\n ]\n return resnet_v1.resnet_v1(inputs, blocks, num_classes, is_training,\n global_pool, output_stride, include_root_block,\n reuse, scope)\n\n def testClassificationEndPoints(self):\n global_pool = True\n num_classes = 10\n inputs = create_test_input(2, 224, 224, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n logits, end_points = self._resnet_small(\n inputs, num_classes, global_pool=global_pool, scope='resnet')\n self.assertTrue(logits.op.name.startswith('resnet/logits'))\n self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])\n self.assertTrue('predictions' in end_points)\n self.assertListEqual(end_points['predictions'].get_shape().as_list(),\n [2, 1, 1, num_classes])\n\n def testClassificationShapes(self):\n global_pool = True\n num_classes = 10\n inputs = create_test_input(2, 224, 224, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n _, end_points = self._resnet_small(\n inputs, num_classes, global_pool=global_pool, scope='resnet')\n endpoint_to_shape = {\n 'resnet/block1': [2, 28, 28, 4],\n 'resnet/block2': [2, 14, 14, 8],\n 'resnet/block3': [2, 7, 7, 16],\n 'resnet/block4': [2, 7, 7, 32]\n }\n for endpoint in endpoint_to_shape:\n shape = endpoint_to_shape[endpoint]\n self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)\n\n def testFullyConvolutionalEndpointShapes(self):\n global_pool = False\n num_classes = 10\n inputs = create_test_input(2, 321, 321, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n _, end_points = self._resnet_small(\n inputs, num_classes, global_pool=global_pool, scope='resnet')\n 
endpoint_to_shape = {\n 'resnet/block1': [2, 41, 41, 4],\n 'resnet/block2': [2, 21, 21, 8],\n 'resnet/block3': [2, 11, 11, 16],\n 'resnet/block4': [2, 11, 11, 32]\n }\n for endpoint in endpoint_to_shape:\n shape = endpoint_to_shape[endpoint]\n self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)\n\n def testRootlessFullyConvolutionalEndpointShapes(self):\n global_pool = False\n num_classes = 10\n inputs = create_test_input(2, 128, 128, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n _, end_points = self._resnet_small(\n inputs,\n num_classes,\n global_pool=global_pool,\n include_root_block=False,\n scope='resnet')\n endpoint_to_shape = {\n 'resnet/block1': [2, 64, 64, 4],\n 'resnet/block2': [2, 32, 32, 8],\n 'resnet/block3': [2, 16, 16, 16],\n 'resnet/block4': [2, 16, 16, 32]\n }\n for endpoint in endpoint_to_shape:\n shape = endpoint_to_shape[endpoint]\n self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)\n\n def testAtrousFullyConvolutionalEndpointShapes(self):\n global_pool = False\n num_classes = 10\n output_stride = 8\n inputs = create_test_input(2, 321, 321, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n _, end_points = self._resnet_small(\n inputs,\n num_classes,\n global_pool=global_pool,\n output_stride=output_stride,\n scope='resnet')\n endpoint_to_shape = {\n 'resnet/block1': [2, 41, 41, 4],\n 'resnet/block2': [2, 41, 41, 8],\n 'resnet/block3': [2, 41, 41, 16],\n 'resnet/block4': [2, 41, 41, 32]\n }\n for endpoint in endpoint_to_shape:\n shape = endpoint_to_shape[endpoint]\n self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)\n\n def testAtrousFullyConvolutionalValues(self):\n \"\"\"Verify dense feature extraction with atrous convolution.\"\"\"\n nominal_stride = 32\n for output_stride in [4, 8, 16, 32, None]:\n with arg_scope(resnet_utils.resnet_arg_scope()):\n with ops.Graph().as_default():\n with self.cached_session() as sess:\n random_seed.set_random_seed(0)\n inputs = 
create_test_input(2, 81, 81, 3)\n # Dense feature extraction followed by subsampling.\n output, _ = self._resnet_small(\n inputs,\n None,\n is_training=False,\n global_pool=False,\n output_stride=output_stride)\n if output_stride is None:\n factor = 1\n else:\n factor = nominal_stride // output_stride\n output = resnet_utils.subsample(output, factor)\n # Make the two networks use the same weights.\n variable_scope.get_variable_scope().reuse_variables()\n # Feature extraction at the nominal network rate.\n expected, _ = self._resnet_small(\n inputs, None, is_training=False, global_pool=False)\n sess.run(variables.global_variables_initializer())\n self.assertAllClose(\n output.eval(), expected.eval(), atol=2e-4, rtol=1e-4)\n\n def testUnknownBatchSize(self):\n batch = 2\n height, width = 65, 65\n global_pool = True\n num_classes = 10\n inputs = create_test_input(None, height, width, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n logits, _ = self._resnet_small(\n inputs, num_classes, global_pool=global_pool, scope='resnet')\n self.assertTrue(logits.op.name.startswith('resnet/logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [None, 1, 1, num_classes])\n images = create_test_input(batch, height, width, 3)\n with self.cached_session() as sess:\n sess.run(variables.global_variables_initializer())\n output = sess.run(logits, {inputs: images.eval()})\n self.assertEqual(output.shape, (batch, 1, 1, num_classes))\n\n def testFullyConvolutionalUnknownHeightWidth(self):\n batch = 2\n height, width = 65, 65\n global_pool = False\n inputs = create_test_input(batch, None, None, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n output, _ = self._resnet_small(inputs, None, global_pool=global_pool)\n self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])\n images = create_test_input(batch, height, width, 3)\n with self.cached_session() as sess:\n sess.run(variables.global_variables_initializer())\n output = sess.run(output, {inputs: 
images.eval()})\n self.assertEqual(output.shape, (batch, 3, 3, 32))\n\n def testAtrousFullyConvolutionalUnknownHeightWidth(self):\n batch = 2\n height, width = 65, 65\n global_pool = False\n output_stride = 8\n inputs = create_test_input(batch, None, None, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n output, _ = self._resnet_small(\n inputs, None, global_pool=global_pool, output_stride=output_stride)\n self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])\n images = create_test_input(batch, height, width, 3)\n with self.cached_session() as sess:\n sess.run(variables.global_variables_initializer())\n output = sess.run(output, {inputs: images.eval()})\n self.assertEqual(output.shape, (batch, 9, 9, 32))\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"An example training a Keras Model using MirroredStrategy and native APIs.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\nfrom tensorflow.python.distribute import mirrored_strategy\nfrom tensorflow.python.keras.optimizer_v2 import rmsprop\n\n\nNUM_CLASSES = 10\n\n\ndef get_input_datasets(use_bfloat16=False):\n \"\"\"Downloads the MNIST dataset and creates train and eval dataset objects.\n\n Args:\n use_bfloat16: Boolean to determine if input should be cast to bfloat16\n\n Returns:\n Train dataset, eval dataset and input shape.\n\n \"\"\"\n # input image dimensions\n img_rows, img_cols = 28, 28\n cast_dtype = tf.bfloat16 if use_bfloat16 else tf.float32\n\n # the data, split between train and test sets\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n\n if tf.keras.backend.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = 
(img_rows, img_cols, 1)\n\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n # convert class vectors to binary class matrices\n y_train = tf.keras.utils.to_categorical(y_train, NUM_CLASSES)\n y_test = tf.keras.utils.to_categorical(y_test, NUM_CLASSES)\n\n # train dataset\n train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))\n train_ds = train_ds.repeat()\n train_ds = train_ds.map(lambda x, y: (tf.cast(x, cast_dtype), y))\n train_ds = train_ds.batch(64, drop_remainder=True)\n\n # eval dataset\n eval_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))\n eval_ds = eval_ds.repeat()\n eval_ds = eval_ds.map(lambda x, y: (tf.cast(x, cast_dtype), y))\n eval_ds = eval_ds.batch(64, drop_remainder=True)\n\n return train_ds, eval_ds, input_shape\n\n\ndef get_model(input_shape):\n \"\"\"Builds a Sequential CNN model to recognize MNIST digits.\n\n Args:\n input_shape: Shape of the input depending on the `image_data_format`.\n\n Returns:\n a Keras model\n\n \"\"\"\n # Define a CNN model to recognize MNIST digits.\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape))\n model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(tf.keras.layers.Dropout(0.25))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(128, activation='relu'))\n model.add(tf.keras.layers.Dropout(0.5))\n model.add(tf.keras.layers.Dense(NUM_CLASSES, activation='softmax'))\n return model\n\n\ndef main(_):\n # Build the train and eval datasets from the MNIST data. Also return the\n # input shape which is constructed based on the `image_data_format`\n # i.e channels_first or channels_last.\n tf.enable_eager_execution()\n\n train_ds, eval_ds, input_shape = get_input_datasets()\n\n # Instantiate the MirroredStrategy object. 
If we don't specify `num_gpus` or\n # the `devices` argument then all the GPUs available on the machine are used.\n # TODO(priyag): Use `tf.distribute.MirroredStrategy` once available.\n strategy = mirrored_strategy.MirroredStrategy(['/gpu:0', '/cpu:0'])\n\n # Create and compile the model under Distribution strategy scope.\n # `fit`, `evaluate` and `predict` will be distributed based on the strategy\n # model was compiled with.\n with strategy.scope():\n model = get_model(input_shape)\n optimizer = rmsprop.RMSProp(learning_rate=0.001)\n model.compile(loss=tf.keras.losses.categorical_crossentropy,\n optimizer=optimizer,\n metrics=['accuracy'])\n\n # Train the model with the train dataset.\n model.fit(x=train_ds, epochs=20, steps_per_epoch=468)\n\n # Evaluate the model with the eval dataset.\n score = model.evaluate(eval_ds, steps=10, verbose=0)\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for SessionManager.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom tensorflow.python.client import session as session_lib\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import checkpoint_management\nfrom tensorflow.python.training import saver as saver_lib\nfrom tensorflow.python.training import server_lib\nfrom tensorflow.python.training import session_manager\n\n\nclass SessionManagerTest(test.TestCase):\n\n def testPrepareSessionSucceeds(self):\n with ops.Graph().as_default():\n v = variables.VariableV1([1.0, 2.0, 3.0], name=\"v\")\n sm = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables())\n sess = sm.prepare_session(\n \"\", init_op=variables.global_variables_initializer())\n 
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))\n\n def testPrepareSessionSucceedsWithInitFeedDict(self):\n with ops.Graph().as_default():\n p = array_ops.placeholder(dtypes.float32, shape=(3,))\n v = variables.VariableV1(p, name=\"v\")\n sm = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables())\n sess = sm.prepare_session(\n \"\",\n init_op=variables.global_variables_initializer(),\n init_feed_dict={p: [1.0, 2.0, 3.0]})\n self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))\n\n def testPrepareSessionSucceedsWithInitFn(self):\n with ops.Graph().as_default():\n v = variables.VariableV1([125], name=\"v\")\n sm = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables())\n sess = sm.prepare_session(\n \"\", init_fn=lambda sess: sess.run(v.initializer))\n self.assertAllClose([125], sess.run(v))\n\n @test_util.run_v1_only(\"b/120545219\")\n def testPrepareSessionFails(self):\n checkpoint_dir = os.path.join(self.get_temp_dir(), \"prepare_session\")\n checkpoint_dir2 = os.path.join(self.get_temp_dir(), \"prepare_session2\")\n try:\n gfile.DeleteRecursively(checkpoint_dir)\n gfile.DeleteRecursively(checkpoint_dir2)\n except errors.OpError:\n pass # Ignore\n gfile.MakeDirs(checkpoint_dir)\n\n with ops.Graph().as_default():\n v = variables.VariableV1([1.0, 2.0, 3.0], name=\"v\")\n sm = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables())\n saver = saver_lib.Saver({\"v\": v})\n sess = sm.prepare_session(\n \"\",\n init_op=variables.global_variables_initializer(),\n saver=saver,\n checkpoint_dir=checkpoint_dir)\n self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))\n checkpoint_filename = os.path.join(checkpoint_dir,\n \"prepare_session_checkpoint\")\n saver.save(sess, checkpoint_filename)\n # Create a new Graph and SessionManager and recover.\n with ops.Graph().as_default():\n # Renames the checkpoint directory.\n os.rename(checkpoint_dir, checkpoint_dir2)\n 
gfile.MakeDirs(checkpoint_dir)\n v = variables.VariableV1([6.0, 7.0, 8.0], name=\"v\")\n with self.cached_session():\n self.assertEqual(False, variables.is_variable_initialized(v).eval())\n session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables())\n saver = saver_lib.Saver({\"v\": v})\n # This should fail as there's no checkpoint within 2 seconds.\n with self.assertRaisesRegexp(\n RuntimeError, \"no init_op or init_fn or local_init_op was given\"):\n sess = sm.prepare_session(\n \"\",\n init_op=None,\n saver=saver,\n checkpoint_dir=checkpoint_dir,\n wait_for_checkpoint=True,\n max_wait_secs=2)\n # Rename the checkpoint directory back.\n gfile.DeleteRecursively(checkpoint_dir)\n os.rename(checkpoint_dir2, checkpoint_dir)\n # This should succeed as there's checkpoint.\n sess = sm.prepare_session(\n \"\",\n init_op=None,\n saver=saver,\n checkpoint_dir=checkpoint_dir,\n wait_for_checkpoint=True,\n max_wait_secs=2)\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"v:0\")).eval(session=sess))\n\n def _test_recovered_variable(self,\n checkpoint_dir=None,\n checkpoint_filename_with_path=None):\n # Create a new Graph and SessionManager and recover from a checkpoint.\n with ops.Graph().as_default():\n v = variables.VariableV1(2, name=\"v\")\n with session_lib.Session():\n self.assertEqual(False, variables.is_variable_initialized(v).eval())\n sm2 = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables())\n saver = saver_lib.Saver({\"v\": v})\n sess, initialized = sm2.recover_session(\n \"\",\n saver=saver,\n checkpoint_dir=checkpoint_dir,\n checkpoint_filename_with_path=checkpoint_filename_with_path)\n self.assertTrue(initialized)\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"v:0\")).eval(session=sess))\n self.assertEquals(1, sess.run(v))\n\n @test_util.run_v1_only(\"b/120545219\")\n def testRecoverSession(self):\n # 
Create a checkpoint.\n checkpoint_dir = os.path.join(self.get_temp_dir(), \"recover_session\")\n try:\n gfile.DeleteRecursively(checkpoint_dir)\n except errors.OpError:\n pass # Ignore\n gfile.MakeDirs(checkpoint_dir)\n\n with ops.Graph().as_default():\n v = variables.VariableV1(1, name=\"v\")\n sm = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables())\n saver = saver_lib.Saver({\"v\": v})\n sess, initialized = sm.recover_session(\n \"\", saver=saver, checkpoint_dir=checkpoint_dir)\n self.assertFalse(initialized)\n sess.run(v.initializer)\n self.assertEquals(1, sess.run(v))\n saver.save(sess, os.path.join(checkpoint_dir,\n \"recover_session_checkpoint\"))\n self._test_recovered_variable(checkpoint_dir=checkpoint_dir)\n self._test_recovered_variable(\n checkpoint_filename_with_path=checkpoint_management.latest_checkpoint(\n checkpoint_dir))\n # Cannot set both checkpoint_dir and checkpoint_filename_with_path.\n with self.assertRaises(ValueError):\n self._test_recovered_variable(\n checkpoint_dir=checkpoint_dir,\n checkpoint_filename_with_path=checkpoint_management.latest_checkpoint(\n checkpoint_dir))\n\n @test_util.run_v1_only(\"b/120545219\")\n def testWaitForSessionReturnsNoneAfterTimeout(self):\n with ops.Graph().as_default():\n variables.VariableV1(1, name=\"v\")\n sm = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables(),\n recovery_wait_secs=1)\n\n # Set max_wait_secs to allow us to try a few times.\n with self.assertRaises(errors.DeadlineExceededError):\n sm.wait_for_session(master=\"\", max_wait_secs=3)\n\n def testInitWithNoneLocalInitOpError(self):\n # Creating a SessionManager with a None local_init_op but\n # non-None ready_for_local_init_op raises ValueError\n with self.assertRaisesRegexp(\n ValueError, \"If you pass a ready_for_local_init_op \"\n \"you must also pass a local_init_op \"):\n session_manager.SessionManager(\n 
ready_for_local_init_op=variables.report_uninitialized_variables(\n variables.global_variables()),\n local_init_op=None)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testRecoverSessionWithReadyForLocalInitOp(self):\n # Create a checkpoint.\n checkpoint_dir = os.path.join(self.get_temp_dir(),\n \"recover_session_ready_for_local_init\")\n try:\n gfile.DeleteRecursively(checkpoint_dir)\n except errors.OpError:\n pass # Ignore\n gfile.MakeDirs(checkpoint_dir)\n\n with ops.Graph().as_default():\n v = variables.VariableV1(1, name=\"v\")\n sm = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables())\n saver = saver_lib.Saver({\"v\": v})\n sess, initialized = sm.recover_session(\n \"\", saver=saver, checkpoint_dir=checkpoint_dir)\n self.assertFalse(initialized)\n sess.run(v.initializer)\n self.assertEquals(1, sess.run(v))\n saver.save(sess, os.path.join(checkpoint_dir,\n \"recover_session_checkpoint\"))\n # Create a new Graph and SessionManager and recover.\n with ops.Graph().as_default():\n v = variables.VariableV1(2, name=\"v\")\n w = variables.VariableV1(\n v,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name=\"w\")\n with self.cached_session():\n self.assertEqual(False, variables.is_variable_initialized(v).eval())\n self.assertEqual(False, variables.is_variable_initialized(w).eval())\n sm2 = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables(),\n ready_for_local_init_op=variables.report_uninitialized_variables(\n variables.global_variables()),\n local_init_op=w.initializer)\n saver = saver_lib.Saver({\"v\": v})\n sess, initialized = sm2.recover_session(\n \"\", saver=saver, checkpoint_dir=checkpoint_dir)\n self.assertTrue(initialized)\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"v:0\")).eval(session=sess))\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n 
sess.graph.get_tensor_by_name(\"w:0\")).eval(session=sess))\n self.assertEquals(1, sess.run(v))\n self.assertEquals(1, sess.run(w))\n\n @test_util.run_v1_only(\"b/120545219\")\n def testRecoverSessionWithReadyForLocalInitOpFailsToReadyLocal(self):\n # We use ready_for_local_init_op=report_uninitialized_variables(),\n # which causes recover_session to not run local_init_op, and to return\n # initialized=False\n\n # Create a checkpoint.\n checkpoint_dir = os.path.join(\n self.get_temp_dir(),\n \"recover_session_ready_for_local_init_fails_to_ready_local\")\n try:\n gfile.DeleteRecursively(checkpoint_dir)\n except errors.OpError:\n pass # Ignore\n gfile.MakeDirs(checkpoint_dir)\n\n with ops.Graph().as_default():\n v = variables.VariableV1(1, name=\"v\")\n sm = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables())\n saver = saver_lib.Saver({\"v\": v})\n sess, initialized = sm.recover_session(\n \"\", saver=saver, checkpoint_dir=checkpoint_dir)\n self.assertFalse(initialized)\n sess.run(v.initializer)\n self.assertEquals(1, sess.run(v))\n saver.save(sess, os.path.join(checkpoint_dir,\n \"recover_session_checkpoint\"))\n # Create a new Graph and SessionManager and recover.\n with ops.Graph().as_default():\n v = variables.VariableV1(2, name=\"v\")\n w = variables.VariableV1(\n v,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name=\"w\")\n with self.cached_session():\n self.assertEqual(False, variables.is_variable_initialized(v).eval())\n self.assertEqual(False, variables.is_variable_initialized(w).eval())\n sm2 = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables(),\n ready_for_local_init_op=variables.report_uninitialized_variables(),\n local_init_op=w.initializer)\n saver = saver_lib.Saver({\"v\": v})\n sess, initialized = sm2.recover_session(\n \"\", saver=saver, checkpoint_dir=checkpoint_dir)\n self.assertFalse(initialized)\n self.assertEqual(\n True,\n 
variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"v:0\")).eval(session=sess))\n self.assertEqual(\n False,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"w:0\")).eval(session=sess))\n self.assertEquals(1, sess.run(v))\n\n @test_util.run_v1_only(\"b/120545219\")\n def testRecoverSessionNoChkptStillRunsLocalInitOp(self):\n # This test checks for backwards compatibility.\n # In particular, we continue to ensure that recover_session will execute\n # local_init_op exactly once, regardless of whether the session was\n # successfully recovered.\n with ops.Graph().as_default():\n w = variables.VariableV1(\n 1,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name=\"w\")\n with self.cached_session():\n self.assertEqual(False, variables.is_variable_initialized(w).eval())\n sm2 = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables(),\n ready_for_local_init_op=None,\n local_init_op=w.initializer)\n # Try to recover session from None\n sess, initialized = sm2.recover_session(\n \"\", saver=None, checkpoint_dir=None)\n # Succeeds because recover_session still run local_init_op\n self.assertFalse(initialized)\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"w:0\")).eval(session=sess))\n self.assertEquals(1, sess.run(w))\n\n @test_util.run_v1_only(\"b/120545219\")\n def testRecoverSessionFailsStillRunsLocalInitOp(self):\n # Create a checkpoint.\n checkpoint_dir = os.path.join(\n self.get_temp_dir(),\n \"recover_session_ready_for_local_init_fails_stil_run\")\n try:\n gfile.DeleteRecursively(checkpoint_dir)\n except errors.OpError:\n pass # Ignore\n gfile.MakeDirs(checkpoint_dir)\n\n # Create a new Graph and SessionManager and recover.\n with ops.Graph().as_default():\n v = variables.VariableV1(2, name=\"v\")\n w = variables.VariableV1(\n 1,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name=\"w\")\n with 
self.cached_session():\n self.assertEqual(False, variables.is_variable_initialized(v).eval())\n self.assertEqual(False, variables.is_variable_initialized(w).eval())\n sm2 = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables(),\n ready_for_local_init_op=None,\n local_init_op=w.initializer)\n saver = saver_lib.Saver({\"v\": v})\n sess, initialized = sm2.recover_session(\n \"\",\n saver=saver,\n checkpoint_dir=checkpoint_dir,\n wait_for_checkpoint=False)\n self.assertFalse(initialized)\n self.assertEqual(\n False,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"v:0\")).eval(session=sess))\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"w:0\")).eval(session=sess))\n self.assertEquals(1, sess.run(w))\n\n @test_util.run_v1_only(\"b/120545219\")\n def testWaitForSessionLocalInit(self):\n server = server_lib.Server.create_local_server()\n with ops.Graph().as_default() as graph:\n v = variables.VariableV1(1, name=\"v\")\n w = variables.VariableV1(\n v,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name=\"w\")\n sm = session_manager.SessionManager(\n graph=graph,\n ready_op=variables.report_uninitialized_variables(),\n ready_for_local_init_op=variables.report_uninitialized_variables(\n variables.global_variables()),\n local_init_op=w.initializer)\n\n # Initialize v but not w\n s = session_lib.Session(server.target, graph=graph)\n s.run(v.initializer)\n\n sess = sm.wait_for_session(server.target, max_wait_secs=3)\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"v:0\")).eval(session=sess))\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"w:0\")).eval(session=sess))\n self.assertEquals(1, sess.run(v))\n self.assertEquals(1, sess.run(w))\n\n def testWaitForSessionWithReadyForLocalInitOpFailsToReadyLocal(self):\n with ops.Graph().as_default() as graph:\n 
v = variables.VariableV1(1, name=\"v\")\n w = variables.VariableV1(\n v,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name=\"w\")\n sm = session_manager.SessionManager(\n graph=graph,\n ready_op=variables.report_uninitialized_variables(),\n ready_for_local_init_op=variables.report_uninitialized_variables(),\n local_init_op=w.initializer)\n\n with self.assertRaises(errors_impl.DeadlineExceededError):\n # Time-out because w fails to be initialized,\n # because of overly restrictive ready_for_local_init_op\n sm.wait_for_session(\"\", max_wait_secs=3)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testWaitForSessionInsufficientReadyForLocalInitCheck(self):\n with ops.Graph().as_default() as graph:\n v = variables.VariableV1(1, name=\"v\")\n w = variables.VariableV1(\n v,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name=\"w\")\n sm = session_manager.SessionManager(\n graph=graph,\n ready_op=variables.report_uninitialized_variables(),\n ready_for_local_init_op=None,\n local_init_op=w.initializer)\n with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,\n \"Session was not ready after waiting.*\"):\n sm.wait_for_session(\"\", max_wait_secs=3)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testPrepareSessionWithReadyForLocalInitOp(self):\n with ops.Graph().as_default():\n v = variables.VariableV1(1, name=\"v\")\n w = variables.VariableV1(\n v,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name=\"w\")\n x = variables.VariableV1(\n 3 * v,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name=\"x\")\n with self.cached_session():\n self.assertEqual(False, variables.is_variable_initialized(v).eval())\n self.assertEqual(False, variables.is_variable_initialized(w).eval())\n self.assertEqual(False, variables.is_variable_initialized(x).eval())\n sm2 = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables(),\n 
ready_for_local_init_op=variables.report_uninitialized_variables(\n variables.global_variables()),\n local_init_op=[w.initializer, x.initializer])\n sess = sm2.prepare_session(\"\", init_op=v.initializer)\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"v:0\")).eval(session=sess))\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"w:0\")).eval(session=sess))\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"x:0\")).eval(session=sess))\n self.assertEquals(1, sess.run(v))\n self.assertEquals(1, sess.run(w))\n self.assertEquals(3, sess.run(x))\n\n @test_util.run_v1_only(\"b/120545219\")\n def testPrepareSessionWithPartialInitOp(self):\n with ops.Graph().as_default():\n v = variables.VariableV1(1, name=\"v\")\n w = variables.VariableV1(\n v,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name=\"w\")\n x = variables.VariableV1(\n 3 * v,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name=\"x\")\n # TODO(b/70206927): Use ResourceVariables once they are handled properly.\n v_res = variables.VariableV1(1, name=\"v_res\")\n w_res = variables.VariableV1(\n v_res,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name=\"w_res\")\n x_res = variables.VariableV1(\n 3 * v_res,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name=\"x_res\")\n\n with self.cached_session():\n self.assertEqual(False, variables.is_variable_initialized(v).eval())\n self.assertEqual(False, variables.is_variable_initialized(w).eval())\n self.assertEqual(False, variables.is_variable_initialized(x).eval())\n self.assertEqual(False, variables.is_variable_initialized(v_res).eval())\n self.assertEqual(False, variables.is_variable_initialized(w_res).eval())\n self.assertEqual(False, variables.is_variable_initialized(x_res).eval())\n sm2 = session_manager.SessionManager(local_init_op=[\n 
w.initializer, x.initializer, w_res.initializer, x_res.initializer\n ])\n sess = sm2.prepare_session(\"\", init_op=None)\n self.assertEqual(\n False,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"v:0\")).eval(session=sess))\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"w:0\")).eval(session=sess))\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"x:0\")).eval(session=sess))\n self.assertEquals(1, sess.run(w))\n self.assertEquals(3, sess.run(x))\n self.assertEqual(\n False,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"v_res:0\")).eval(session=sess))\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"w_res:0\")).eval(session=sess))\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"x_res:0\")).eval(session=sess))\n self.assertEquals(1, sess.run(w_res))\n self.assertEquals(3, sess.run(x_res))\n\n @test_util.run_v1_only(\"b/120545219\")\n def testPrepareSessionWithCyclicInitializer(self):\n # Regression test. 
Previously Variable._build_initializer_expr would enter\n # into an infinite recursion when the variable's initial_value involved\n # cyclic dependencies.\n with ops.Graph().as_default():\n i = control_flow_ops.while_loop(lambda i: i < 1, lambda i: i + 1, [0])\n v = variables.VariableV1(array_ops.identity(i), name=\"v\")\n with self.cached_session():\n self.assertEqual(False, variables.is_variable_initialized(v).eval())\n sm = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables())\n sess = sm.prepare_session(\"\", init_op=v.initializer)\n self.assertEqual(1, sess.run(v))\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"v:0\")).eval(session=sess))\n\n def testPrepareSessionDidNotInitLocalVariable(self):\n with ops.Graph().as_default():\n v = variables.VariableV1(1, name=\"v\")\n w = variables.VariableV1(\n v,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name=\"w\")\n with self.cached_session():\n self.assertEqual(False, variables.is_variable_initialized(v).eval())\n self.assertEqual(False, variables.is_variable_initialized(w).eval())\n sm2 = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables())\n with self.assertRaisesRegexp(\n RuntimeError, \"Init operations did not make model ready.*\"):\n sm2.prepare_session(\"\", init_op=v.initializer)\n\n def testPrepareSessionDidNotInitLocalVariableList(self):\n with ops.Graph().as_default():\n v = variables.VariableV1(1, name=\"v\")\n w = variables.VariableV1(\n v,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name=\"w\")\n with self.cached_session():\n self.assertEqual(False, variables.is_variable_initialized(v).eval())\n self.assertEqual(False, variables.is_variable_initialized(w).eval())\n sm2 = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables())\n with self.assertRaisesRegexp(RuntimeError,\n \"Init operations did not make model 
ready\"):\n sm2.prepare_session(\"\", init_op=[v.initializer])\n\n def testPrepareSessionWithReadyNotReadyForLocal(self):\n with ops.Graph().as_default():\n v = variables.VariableV1(1, name=\"v\")\n w = variables.VariableV1(\n v,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name=\"w\")\n with self.cached_session():\n self.assertEqual(False, variables.is_variable_initialized(v).eval())\n self.assertEqual(False, variables.is_variable_initialized(w).eval())\n sm2 = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables(),\n ready_for_local_init_op=variables.report_uninitialized_variables(\n variables.global_variables()),\n local_init_op=w.initializer)\n with self.assertRaisesRegexp(\n RuntimeError,\n \"Init operations did not make model ready for local_init\"):\n sm2.prepare_session(\"\", init_op=None)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testPrepareSessionWithInsufficientReadyForLocalInitCheck(self):\n with ops.Graph().as_default():\n v = variables.VariableV1(1, name=\"v\")\n w = variables.VariableV1(\n v,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name=\"w\")\n with self.cached_session():\n self.assertEqual(False, variables.is_variable_initialized(v).eval())\n self.assertEqual(False, variables.is_variable_initialized(w).eval())\n sm2 = session_manager.SessionManager(\n ready_op=variables.report_uninitialized_variables(),\n ready_for_local_init_op=None,\n local_init_op=w.initializer)\n with self.assertRaisesRegexp(RuntimeError,\n \"Init operations did not make model ready.*\"):\n sm2.prepare_session(\"\", init_op=None)\n\n\nclass ObsoleteSessionManagerTest(test.TestCase):\n\n def testPrepareSessionSucceeds(self):\n with ops.Graph().as_default():\n v = variables.VariableV1([1.0, 2.0, 3.0], name=\"v\")\n sm = session_manager.SessionManager(\n ready_op=variables.assert_variables_initialized())\n sess = sm.prepare_session(\n \"\", init_op=variables.global_variables_initializer())\n 
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))\n\n def testPrepareSessionSucceedsWithInitFeedDict(self):\n with ops.Graph().as_default():\n p = array_ops.placeholder(dtypes.float32, shape=(3,))\n v = variables.VariableV1(p, name=\"v\")\n sm = session_manager.SessionManager(\n ready_op=variables.assert_variables_initialized())\n sess = sm.prepare_session(\n \"\",\n init_op=variables.global_variables_initializer(),\n init_feed_dict={p: [1.0, 2.0, 3.0]})\n self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))\n\n def testPrepareSessionSucceedsWithInitFn(self):\n with ops.Graph().as_default():\n v = variables.VariableV1([125], name=\"v\")\n sm = session_manager.SessionManager(\n ready_op=variables.assert_variables_initialized())\n sess = sm.prepare_session(\n \"\", init_fn=lambda sess: sess.run(v.initializer))\n self.assertAllClose([125], sess.run(v))\n\n @test_util.run_v1_only(\"b/120545219\")\n def testPrepareSessionFails(self):\n checkpoint_dir = os.path.join(self.get_temp_dir(), \"prepare_session\")\n checkpoint_dir2 = os.path.join(self.get_temp_dir(), \"prepare_session2\")\n try:\n gfile.DeleteRecursively(checkpoint_dir)\n gfile.DeleteRecursively(checkpoint_dir2)\n except errors.OpError:\n pass # Ignore\n gfile.MakeDirs(checkpoint_dir)\n\n with ops.Graph().as_default():\n v = variables.VariableV1([1.0, 2.0, 3.0], name=\"v\")\n sm = session_manager.SessionManager(\n ready_op=variables.assert_variables_initialized())\n saver = saver_lib.Saver({\"v\": v})\n sess = sm.prepare_session(\n \"\",\n init_op=variables.global_variables_initializer(),\n saver=saver,\n checkpoint_dir=checkpoint_dir)\n self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))\n checkpoint_filename = os.path.join(checkpoint_dir,\n \"prepare_session_checkpoint\")\n saver.save(sess, checkpoint_filename)\n # Create a new Graph and SessionManager and recover.\n with ops.Graph().as_default():\n # Renames the checkpoint directory.\n os.rename(checkpoint_dir, checkpoint_dir2)\n gfile.MakeDirs(checkpoint_dir)\n 
v = variables.VariableV1([6.0, 7.0, 8.0], name=\"v\")\n with self.cached_session():\n self.assertEqual(False, variables.is_variable_initialized(v).eval())\n session_manager.SessionManager(\n ready_op=variables.assert_variables_initialized())\n saver = saver_lib.Saver({\"v\": v})\n # This should fail as there's no checkpoint within 2 seconds.\n with self.assertRaisesRegexp(\n RuntimeError, \"no init_op or init_fn or local_init_op was given\"):\n sess = sm.prepare_session(\n \"\",\n init_op=None,\n saver=saver,\n checkpoint_dir=checkpoint_dir,\n wait_for_checkpoint=True,\n max_wait_secs=2)\n # Rename the checkpoint directory back.\n gfile.DeleteRecursively(checkpoint_dir)\n os.rename(checkpoint_dir2, checkpoint_dir)\n # This should succeed as there's checkpoint.\n sess = sm.prepare_session(\n \"\",\n init_op=None,\n saver=saver,\n checkpoint_dir=checkpoint_dir,\n wait_for_checkpoint=True,\n max_wait_secs=2)\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"v:0\")).eval(session=sess))\n\n @test_util.run_v1_only(\"b/120545219\")\n def testRecoverSession(self):\n # Create a checkpoint.\n checkpoint_dir = os.path.join(self.get_temp_dir(), \"recover_session\")\n try:\n gfile.DeleteRecursively(checkpoint_dir)\n except errors.OpError:\n pass # Ignore\n gfile.MakeDirs(checkpoint_dir)\n\n with ops.Graph().as_default():\n v = variables.VariableV1(1, name=\"v\")\n sm = session_manager.SessionManager(\n ready_op=variables.assert_variables_initialized())\n saver = saver_lib.Saver({\"v\": v})\n sess, initialized = sm.recover_session(\n \"\", saver=saver, checkpoint_dir=checkpoint_dir)\n self.assertFalse(initialized)\n sess.run(v.initializer)\n self.assertEquals(1, sess.run(v))\n saver.save(sess, os.path.join(checkpoint_dir,\n \"recover_session_checkpoint\"))\n # Create a new Graph and SessionManager and recover.\n with ops.Graph().as_default():\n v = variables.VariableV1(2, name=\"v\")\n with self.cached_session():\n 
self.assertEqual(False, variables.is_variable_initialized(v).eval())\n sm2 = session_manager.SessionManager(\n ready_op=variables.assert_variables_initialized())\n saver = saver_lib.Saver({\"v\": v})\n sess, initialized = sm2.recover_session(\n \"\", saver=saver, checkpoint_dir=checkpoint_dir)\n self.assertTrue(initialized)\n self.assertEqual(\n True,\n variables.is_variable_initialized(\n sess.graph.get_tensor_by_name(\"v:0\")).eval(session=sess))\n self.assertEquals(1, sess.run(v))\n\n @test_util.run_v1_only(\"b/120545219\")\n def testWaitForSessionReturnsNoneAfterTimeout(self):\n with ops.Graph().as_default():\n variables.VariableV1(1, name=\"v\")\n sm = session_manager.SessionManager(\n ready_op=variables.assert_variables_initialized(),\n recovery_wait_secs=1)\n\n # Set max_wait_secs to allow us to try a few times.\n with self.assertRaises(errors.DeadlineExceededError):\n sm.wait_for_session(master=\"\", max_wait_secs=3)\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.ops.math_ops.reduce_all",
"tensorflow.python.distribute.distribution_strategy_context.get_cross_replica_context",
"tensorflow.python.ops.math_ops.is_finite",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.distribute.distribution_strategy_context.has_strategy",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.training.tracking.base.TrackableReference",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.variable_scope.variable",
"tensorflow.python.ops.control_flow_ops.no_op",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.control_flow_ops.cond",
"tensorflow.python.ops.math_ops.cast"
],
[
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.math_ops.cumsum",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.gather_nd",
"tensorflow.python.ops.nn.top_k",
"tensorflow.python.ops.math_ops.is_nan",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.ops.math_ops.cast"
],
[
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.array_ops.unstack",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.check_ops.assert_greater_equal",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.framework.ops.convert_to_tensor",
"numpy.iinfo",
"tensorflow.python.ops.math_ops.floor",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.math_ops.cast"
],
[
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.gen_array_ops.reshape",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.platform.test.main",
"numpy.random.randn",
"tensorflow.python.ops.math_ops.matmul"
],
[
"numpy.random.random",
"numpy.random.seed",
"tensorflow.python.ops.gradients.gradients",
"tensorflow.python.ops.control_flow_ops.while_loop",
"numpy.repeat",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.framework.ops.convert_n_to_tensor",
"tensorflow.python.ops.math_ops.accumulate_n",
"tensorflow.python.ops.variables.Variable",
"numpy.random.rand",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.variables.global_variables_initializer",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
],
[
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.random_ops.random_normal",
"tensorflow.python.framework.tensor_shape.dimension_value",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.ops.array_ops.stack"
],
[
"numpy.array",
"tensorflow.python.platform.test.main",
"numpy.iinfo",
"tensorflow.python.ops.array_ops.dequantize"
],
[
"tensorflow.python.ops.parsing_ops.FixedLenFeature",
"tensorflow.contrib.slim.python.slim.data.dataset.Dataset",
"tensorflow.contrib.slim.python.slim.queues.QueueRunners",
"tensorflow.contrib.slim.python.slim.data.tfexample_decoder.Tensor",
"tensorflow.contrib.slim.python.slim.data.test_utils.create_tfrecord_files",
"tensorflow.python.platform.gfile.MakeDirs",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.platform.gfile.Exists",
"tensorflow.python.platform.test.main",
"tensorflow.contrib.slim.python.slim.data.tfexample_decoder.Image",
"tensorflow.contrib.slim.python.slim.data.dataset_data_provider.DatasetDataProvider",
"tensorflow.contrib.slim.python.slim.data.tfexample_decoder.TFExampleDecoder",
"tensorflow.python.ops.image_ops.resize_bilinear",
"tensorflow.python.client.session.Session",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.array_ops.expand_dims"
],
[
"tensorflow.python.distribute.model_collection.simple_models.SimpleSequentialModel",
"tensorflow.python.distribute.model_collection.simple_models.SimpleFunctionalModel",
"tensorflow.python.distribute.model_collection.simple_models.SimpleSubclassModel"
],
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"numpy.sqrt",
"tensorflow.python.ops.gen_math_ops.mat_mul",
"tensorflow.python.ops.gen_math_ops.cast",
"tensorflow.python.ops.init_ops.constant_initializer",
"tensorflow.python.ops.init_ops.random_normal_initializer",
"tensorflow.python.ops.init_ops.random_uniform_initializer",
"tensorflow.python.keras.engine.input_spec.InputSpec",
"tensorflow.python.ops.gen_math_ops.cos",
"numpy.tan",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.nn.bias_add",
"numpy.random.uniform",
"tensorflow.python.keras.initializers.serialize"
],
[
"tensorflow.python.data.experimental.ops.snapshot.snapshot",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.ops.gen_array_ops.broadcast_to",
"tensorflow.python.data.ops.dataset_ops.make_one_shot_iterator",
"tensorflow.python.client.session.Session",
"tensorflow.python.platform.test.main",
"tensorflow.python.platform.test.get_temp_dir"
],
[
"tensorflow.python.ops.linalg_ops_impl.eye",
"tensorflow.python.util.deprecation.deprecated_endpoints",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.framework.constant_op.constant_v1",
"tensorflow.python.ops.array_ops.diag_part",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.random_ops.truncated_normal",
"tensorflow.python.ops.array_ops.slice",
"tensorflow.python.util.deprecation.deprecated_args",
"tensorflow.python.ops.array_ops.matrix_transpose",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.util.deprecation.deprecated_arg_values",
"tensorflow.python.ops.gen_linalg_ops.qr",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.math_ops.sign",
"tensorflow.python.ops.math_ops.multiply",
"tensorflow.python.ops.random_ops.random_normal",
"numpy.isscalar",
"tensorflow.python.ops.random_ops.random_uniform"
],
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"numpy.reshape",
"tensorflow.python.ops.distributions.bijector_test_util.assert_bijective_and_finite",
"tensorflow.python.ops.array_ops.placeholder",
"numpy.random.randn",
"tensorflow.python.platform.test.main",
"numpy.random.RandomState",
"tensorflow.contrib.distributions.python.ops.bijectors.reshape.Reshape"
],
[
"tensorflow.python.ops.array_ops.split",
"tensorflow.python.keras.backend.int_shape",
"tensorflow.python.keras.regularizers.get",
"tensorflow.python.keras.utils.generic_utils.has_arg",
"tensorflow.python.keras.utils.conv_utils.conv_output_length",
"tensorflow.python.keras.backend.bias_add",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.python.keras.backend.is_keras_tensor",
"tensorflow.python.keras.backend.sum",
"tensorflow.python.keras.constraints.get",
"tensorflow.python.keras.backend.update",
"tensorflow.python.keras.activations.get",
"tensorflow.python.keras.layers.recurrent._standardize_args",
"tensorflow.python.keras.utils.conv_utils.normalize_tuple",
"tensorflow.python.keras.regularizers.serialize",
"tensorflow.python.keras.backend.rnn",
"tensorflow.python.keras.activations.serialize",
"tensorflow.python.keras.constraints.serialize",
"tensorflow.python.keras.engine.input_spec.InputSpec",
"tensorflow.python.keras.backend.zeros_like",
"tensorflow.python.keras.utils.conv_utils.normalize_data_format",
"tensorflow.python.keras.initializers.Ones",
"tensorflow.python.keras.backend.set_value",
"tensorflow.python.keras.initializers.get",
"tensorflow.python.keras.backend.conv2d",
"tensorflow.python.keras.initializers.serialize",
"tensorflow.python.keras.utils.conv_utils.normalize_padding"
],
[
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.init_ops.zeros_initializer",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.ops.math_ops.reduce_mean",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.ops.init_ops.ones_initializer",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.math_ops.cast"
],
[
"tensorflow.python.ops.boosted_trees_ops.quantile_add_summaries",
"tensorflow.python.ops.boosted_trees_ops.get_bucket_boundaries",
"tensorflow.python.ops.gen_boosted_trees_ops.is_boosted_trees_quantile_stream_resource_initialized",
"tensorflow.python.ops.gen_boosted_trees_ops.boosted_trees_quantile_stream_resource_handle_op",
"tensorflow.python.ops.boosted_trees_ops.quantile_flush",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.resources.shared_resources",
"tensorflow.python.ops.boosted_trees_ops.create_quantile_stream_resource",
"tensorflow.python.ops.boosted_trees_ops.make_quantile_summaries",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.boosted_trees_ops.boosted_trees_bucketize",
"tensorflow.python.ops.boosted_trees_ops.QuantileAccumulator",
"tensorflow.python.platform.googletest.main",
"numpy.array",
"tensorflow.python.ops.resources.register_resource",
"tensorflow.python.training.saver.Saver",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.math_ops.complex",
"tensorflow.python.ops.math_ops.rsqrt",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.math_ops.sqrt",
"tensorflow.python.framework.tensor_shape.dimension_value",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.signal.fft_ops.rfft",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.math_ops.cast"
],
[
"tensorflow.contrib.saved_model.python.saved_model.reader.read_saved_model",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.client.session.Session"
],
[
"tensorflow.python.ops.gen_array_ops.fill",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.array_ops.gather_nd",
"tensorflow.python.ops.math_ops.reduce_any",
"tensorflow.python.ops.random_ops.multinomial",
"tensorflow.python.ops.control_flow_ops.cond",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.ops.math_ops.logical_not",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.math_ops.reduce_all",
"tensorflow.python.ops.math_ops.argmax",
"tensorflow.python.ops.math_ops.less",
"tensorflow.python.ops.array_ops.where",
"tensorflow.python.ops.math_ops.reduce_prod",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.math_ops.logical_or",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.ops.embedding_ops.embedding_lookup",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.ops.array_ops.scatter_nd",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.math_ops.sigmoid",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.random_ops.random_uniform"
],
[
"numpy.linspace",
"tensorflow.python.util.nest.is_sequence",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.training.saver.import_meta_graph",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.lookup_ops.tables_initializer",
"tensorflow.python.platform.gfile.Glob",
"tensorflow.python.data.ops.dataset_ops.Options",
"tensorflow.python.framework.ops.add_to_collection",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.data.experimental.ops.iterator_ops.make_saveable_from_iterator",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.training.saver.Saver",
"tensorflow.python.util.nest.flatten"
],
[
"tensorflow.python.platform.test.main",
"tensorflow.python.data.ops.dataset_ops.Dataset.list_files"
],
[
"tensorflow.python.framework.ops.device",
"numpy.linalg.norm",
"tensorflow.python.training.server_lib.ClusterSpec",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.client.session.Session",
"tensorflow.python.platform.test.main",
"tensorflow.python.training.server_lib.Server",
"tensorflow.contrib.opt.python.training.variable_clipping_optimizer.VariableClippingOptimizer",
"numpy.array",
"tensorflow.python.training.gradient_descent.GradientDescentOptimizer",
"tensorflow.python.framework.constant_op.constant"
],
[
"numpy.expand_dims",
"scipy.special.gamma",
"numpy.power",
"numpy.reshape",
"tensorflow.contrib.distributions.python.ops.relaxed_onehot_categorical.RelaxedOneHotCategorical",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.platform.test.main",
"tensorflow.contrib.distributions.python.ops.relaxed_onehot_categorical.ExpRelaxedOneHotCategorical",
"numpy.random.uniform",
"numpy.array",
"numpy.exp",
"tensorflow.python.ops.random_ops.random_uniform",
"numpy.sum",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.contrib.learn.python.learn.ops.softmax_classifier",
"tensorflow.contrib.learn.python.learn.ops.categorical_variable",
"tensorflow.contrib.learn.python.learn.ops.embedding_lookup",
"tensorflow.python.ops.array_ops.placeholder",
"numpy.random.randn",
"tensorflow.python.platform.test.main",
"numpy.transpose",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.framework.random_seed.set_random_seed",
"numpy.random.randint"
],
[
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.examples.get_started.regression.imports85.types.keys",
"tensorflow.examples.get_started.regression.dnn_regression.main",
"tensorflow.examples.get_started.regression.linear_regression_categorical.main",
"tensorflow.examples.get_started.regression.custom_regression.main",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.platform.test.mock.patch.dict"
],
[
"tensorflow.python.ops.math_ops.logical_and",
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.ops.math_ops.pow",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.python.ops.linalg_ops.norm",
"tensorflow.python.ops.math_ops.sqrt",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.ops.math_ops.cast"
],
[
"tensorflow.device",
"matplotlib.pyplot.imshow",
"tensorflow.gfile.Exists",
"tensorflow.stack",
"tensorflow.gfile.GFile",
"tensorflow.contrib.eager.python.tfe.run",
"tensorflow.gfile.MakeDirs",
"tensorflow.contrib.summary.always_record_summaries",
"tensorflow.train.AdamOptimizer",
"tensorflow.string_split",
"tensorflow.string_to_number",
"tensorflow.data.TextLineDataset",
"tensorflow.contrib.eager.python.tfe.num_gpus",
"tensorflow.contrib.eager.python.tfe.Iterator",
"tensorflow.decode_raw",
"tensorflow.train.get_or_create_global_step",
"tensorflow.train.get_global_step",
"tensorflow.nn.dropout",
"tensorflow.contrib.eager.python.tfe.metrics.Mean",
"tensorflow.nn.rnn_cell.BasicLSTMCell",
"tensorflow.gather_nd",
"tensorflow.unstack",
"tensorflow.shape",
"matplotlib.pyplot.title",
"tensorflow.contrib.summary.record_summaries_every_n_global_steps",
"tensorflow.identity",
"matplotlib.pyplot.show",
"tensorflow.transpose",
"tensorflow.gfile.Copy",
"tensorflow.expand_dims",
"tensorflow.contrib.summary.scalar",
"tensorflow.squared_difference"
],
[
"tensorflow.enable_eager_execution",
"tensorflow.contrib.eager.python.examples.l2hmc.l2hmc.compute_loss",
"tensorflow.contrib.summary.always_record_summaries",
"tensorflow.train.AdamOptimizer",
"tensorflow.Graph",
"tensorflow.test.main",
"tensorflow.ConfigProto",
"tensorflow.reset_default_graph",
"tensorflow.enable_resource_variables",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.contrib.eager.python.examples.l2hmc.l2hmc.loss_and_grads",
"tensorflow.contrib.eager.python.examples.l2hmc.l2hmc.Dynamics",
"tensorflow.contrib.eager.python.examples.l2hmc.l2hmc.get_scg_energy_fn",
"tensorflow.contrib.eager.defun",
"tensorflow.contrib.training.HParams",
"tensorflow.contrib.summary.create_file_writer",
"numpy.random.normal",
"tensorflow.contrib.summary.scalar",
"tensorflow.test.is_gpu_available",
"tensorflow.random_normal"
],
[
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.contrib.slim.python.slim.nets.resnet_v1.resnet_v1_block",
"tensorflow.contrib.slim.python.slim.nets.resnet_utils.subsample",
"tensorflow.contrib.slim.python.slim.nets.resnet_v1.resnet_v1",
"numpy.arange",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.contrib.layers.python.layers.utils.convert_collection_to_dict",
"tensorflow.contrib.framework.python.ops.arg_scope",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.contrib.slim.python.slim.nets.resnet_utils.conv2d_same",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.contrib.layers.conv2d",
"tensorflow.python.framework.random_seed.set_random_seed",
"tensorflow.python.ops.math_ops.range",
"tensorflow.contrib.slim.python.slim.nets.resnet_utils.stack_blocks_dense",
"tensorflow.contrib.slim.python.slim.nets.resnet_utils.resnet_arg_scope",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.enable_eager_execution",
"tensorflow.keras.backend.image_data_format",
"tensorflow.python.keras.optimizer_v2.rmsprop.RMSProp",
"tensorflow.keras.layers.Dense",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.layers.Conv2D",
"tensorflow.cast",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.python.distribute.mirrored_strategy.MirroredStrategy",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.layers.Flatten",
"tensorflow.app.run"
],
[
"tensorflow.python.ops.variables.assert_variables_initialized",
"tensorflow.python.ops.variables.is_variable_initialized",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.training.checkpoint_management.latest_checkpoint",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.variables.report_uninitialized_variables",
"tensorflow.python.framework.test_util.run_v1_only",
"tensorflow.python.platform.gfile.DeleteRecursively",
"tensorflow.python.platform.gfile.MakeDirs",
"tensorflow.python.platform.test.main",
"tensorflow.python.training.session_manager.SessionManager",
"tensorflow.python.ops.variables.global_variables",
"tensorflow.python.ops.variables.VariableV1",
"tensorflow.python.training.server_lib.Server.create_local_server",
"tensorflow.python.client.session.Session",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.training.saver.Saver"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"1.7",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.3",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"1.4",
"2.7",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"1.0",
"2.6",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.13",
"1.5",
"1.7"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"1.4",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.6",
"2.3",
"2.4",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"1.4",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"1.10",
"1.12"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
quickgrid/Chunkmogrify | [
"10fb9ab3eef0fb50cec0e474ab48333032ee3c3b"
] | [
"styleclip_mapper.py"
] | [
"import math\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.nn import Module\n\ndef fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):\n rest_dim = [1] * (input.ndim - bias.ndim - 1)\n input = input.cuda()\n if input.ndim == 3:\n return (\n F.leaky_relu(\n input + bias.view(1, *rest_dim, bias.shape[0]), negative_slope=negative_slope\n )\n * scale\n )\n else:\n return (\n F.leaky_relu(\n input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope\n )\n * scale\n )\n\nclass PixelNorm(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, input):\n return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)\n\n\nclass EqualLinear(nn.Module):\n def __init__(\n self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None\n ):\n super().__init__()\n\n self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))\n\n if bias:\n self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))\n\n else:\n self.bias = None\n\n self.activation = activation\n\n self.scale = (1 / math.sqrt(in_dim)) * lr_mul\n self.lr_mul = lr_mul\n\n def forward(self, input):\n if self.activation:\n out = F.linear(input, self.weight * self.scale)\n out = fused_leaky_relu(out, self.bias * self.lr_mul)\n\n else:\n out = F.linear(\n input, self.weight * self.scale, bias=self.bias * self.lr_mul\n )\n\n return out\n\n def __repr__(self):\n return (\n f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'\n )\n\nclass Mapper(Module):\n\n def __init__(self, opts):\n super(Mapper, self).__init__()\n\n self.opts = opts\n layers = [PixelNorm()]\n\n for i in range(4):\n layers.append(\n EqualLinear(\n 512, 512, lr_mul=0.01, activation='fused_lrelu'\n )\n )\n\n self.mapping = nn.Sequential(*layers)\n\n\n def forward(self, x):\n x = self.mapping(x)\n return x\n\n\nclass SingleMapper(Module):\n\n def __init__(self, opts):\n super(SingleMapper, 
self).__init__()\n\n self.opts = opts\n\n self.mapping = Mapper(opts)\n\n def forward(self, x):\n out = self.mapping(x)\n return out\n\n\nclass LevelsMapper(Module):\n\n def __init__(self, opts):\n super(LevelsMapper, self).__init__()\n\n self.opts = opts\n\n if not opts.no_coarse_mapper:\n self.course_mapping = Mapper(opts)\n if not opts.no_medium_mapper:\n self.medium_mapping = Mapper(opts)\n if not opts.no_fine_mapper:\n self.fine_mapping = Mapper(opts)\n\n def forward(self, x):\n x_coarse = x[:, :4, :]\n x_medium = x[:, 4:8, :]\n x_fine = x[:, 8:, :]\n\n if not self.opts.no_coarse_mapper:\n x_coarse = self.course_mapping(x_coarse)\n else:\n x_coarse = torch.zeros_like(x_coarse)\n if not self.opts.no_medium_mapper:\n x_medium = self.medium_mapping(x_medium)\n else:\n x_medium = torch.zeros_like(x_medium)\n if not self.opts.no_fine_mapper:\n x_fine = self.fine_mapping(x_fine)\n else:\n x_fine = torch.zeros_like(x_fine)\n\n\n out = torch.cat([x_coarse, x_medium, x_fine], dim=1)\n\n return out\n\ndef get_keys(d, name):\n if 'state_dict' in d:\n d = d['state_dict']\n d_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name}\n return d_filt\n\n\nclass StyleCLIPMapper(nn.Module):\n\n def __init__(self, opts):\n super().__init__()\n self.opts = opts\n # Define architecture\n self.mapper = self.set_mapper()\n # Load weights if needed\n self.load_weights()\n\n def set_mapper(self):\n if self.opts.mapper_type == 'SingleMapper':\n mapper = SingleMapper(self.opts)\n elif self.opts.mapper_type == 'LevelsMapper':\n mapper = LevelsMapper(self.opts)\n else:\n raise Exception('{} is not a valid mapper'.format(self.opts.mapper_type))\n return mapper\n\n def load_weights(self):\n if self.opts.checkpoint_path is not None:\n print('Loading from checkpoint: {}'.format(self.opts.checkpoint_path))\n ckpt = torch.load(self.opts.checkpoint_path, map_location='cpu')\n self.mapper.load_state_dict(get_keys(ckpt, 'mapper'), strict=True)\n"
] | [
[
"torch.nn.Sequential",
"torch.mean",
"torch.load",
"torch.cat",
"torch.randn",
"torch.zeros",
"torch.zeros_like",
"torch.nn.functional.linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RomaKoks/collie_recs | [
"bc8979c8dbf68deefb030336d50f07f788cf1667",
"bc8979c8dbf68deefb030336d50f07f788cf1667",
"bc8979c8dbf68deefb030336d50f07f788cf1667",
"bc8979c8dbf68deefb030336d50f07f788cf1667"
] | [
"collie/metrics.py",
"tests/test_cross_validation.py",
"collie/loss/metadata_utils.py",
"tests/fixtures/loss_fixtures.py"
] | [
"from typing import Any, Callable, Iterable, List, Optional, Tuple, Union\nimport warnings\n\nimport numpy as np\nimport pytorch_lightning\nfrom scipy.sparse import csr_matrix\nimport torch\nfrom torchmetrics import Metric\nfrom torchmetrics.functional import auroc\nfrom tqdm.auto import tqdm\n\nimport collie\nfrom collie.interactions import ExplicitInteractions, Interactions, InteractionsDataLoader\nfrom collie.model import BasePipeline\n\n\ndef _get_user_item_pairs(user_ids: Union[np.array, torch.tensor],\n n_items: int,\n device: Union[str, torch.device]) -> Tuple[torch.tensor, torch.tensor]:\n \"\"\"\n Create tensors pairing each input user ID with each item ID.\n\n Parameters\n ----------\n user_ids: np.array or torch.tensor, 1-d\n Iterable[int] of users to score\n n_items: int\n Number of items in the training data\n device: string\n Device to store tensors on\n\n Returns\n -------\n users: torch.tensor, 1-d\n Tensor with ``n_items`` copies of each user ID\n items: torch.tensor, 1-d\n Tensor with ``len(user_ids)`` copies of each item ID\n\n Example\n -------\n .. 
code-block:: python\n\n >>> user_ids = np.array([10, 11, 12])\n >>> n_items = 4\n >>> user, item = _get_user_item_pairs(user_ids: user_ids, n_items: 4, device: 'cpu'):\n >>> user\n np.array([10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12])\n >>> item\n np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3])\n\n \"\"\"\n # Added because sometimes we call this function with ``n_items`` as ``np.int64`` type which\n # breaks ``repeat_interleave``.\n if isinstance(n_items, np.int64):\n n_items = n_items.item()\n\n users = torch.tensor(\n user_ids,\n dtype=torch.int64,\n requires_grad=False,\n device=device,\n ).repeat_interleave(n_items)\n\n items = torch.arange(\n start=0,\n end=n_items,\n requires_grad=False,\n device=device,\n ).repeat(len(user_ids))\n\n return users, items\n\n\ndef get_preds(model: BasePipeline,\n user_ids: Union[np.array, torch.tensor],\n n_items: int,\n device: Union[str, torch.device]) -> torch.tensor:\n \"\"\"\n Returns a ``n_users x n_items`` tensor with the item IDs of recommended products for each user\n ID.\n\n Parameters\n ----------\n model: collie.model.BasePipeline\n Model that can take a (user_id, item_id) pair as input and return a recommendation score\n user_ids: np.array or torch.tensor\n Iterable[int] of users to score\n n_items: int\n Number of items in the training data\n device: string\n Device torch should use\n\n Returns\n -------\n predicted_scores: torch.tensor\n Tensor of shape ``n_users x n_items``\n\n \"\"\"\n user, item = _get_user_item_pairs(user_ids, n_items, device)\n\n with torch.no_grad():\n predicted_scores = model(user, item)\n\n return predicted_scores.view(-1, n_items)\n\n\ndef _get_labels(targets: csr_matrix,\n user_ids: Union[np.array, torch.tensor],\n preds: Union[np.array, torch.tensor],\n device: str) -> torch.tensor:\n \"\"\"\n Returns a binary array indicating which of the recommended products are in each user's target\n set.\n\n Parameters\n ----------\n targets: scipy.sparse.csr_matrix\n Interaction matrix 
containing user and item IDs\n user_ids: np.array or torch.tensor\n Users corresponding to the recommendations in the top k predictions\n preds: torch.tensor\n Top ``k`` item IDs to recommend to each user of shape (n_users x k)\n device: string\n Device torch should use\n\n Returns\n -------\n labels: torch.tensor\n Tensor with the same dimensions as input ``preds``\n\n \"\"\"\n return torch.tensor(\n (targets[user_ids[:, None], np.array(preds.detach().cpu())] > 0)\n .astype('double')\n .toarray(),\n requires_grad=False,\n device=device,\n )\n\n\ndef mapk(targets: csr_matrix,\n user_ids: Union[np.array, torch.tensor],\n preds: Union[np.array, torch.tensor],\n k: int = 10) -> float:\n \"\"\"\n Calculate the mean average precision at K (MAP@K) score for each user.\n\n Parameters\n ----------\n targets: scipy.sparse.csr_matrix\n Interaction matrix containing user and item IDs\n user_ids: np.array or torch.tensor\n Users corresponding to the recommendations in the top k predictions\n preds: torch.tensor\n Tensor of shape (n_users x n_items) with each user's scores for each item\n k: int\n Number of recommendations to consider per user\n\n Returns\n -------\n mapk_score: float\n\n \"\"\"\n device = preds.device\n n_users = preds.shape[0]\n\n try:\n predicted_items = preds.topk(k, dim=1).indices\n except RuntimeError as e:\n raise ValueError(\n f'Ensure ``k`` ({k}) is less than the number of items ({preds.shape[1]}):', str(e)\n )\n\n topk_labeled = _get_labels(targets, user_ids, predicted_items, device)\n accuracy = topk_labeled.int()\n\n weights = (\n 1.0 / torch.arange(\n start=1,\n end=k+1,\n dtype=torch.float64,\n requires_grad=False,\n device=device\n )\n ).repeat(n_users, 1)\n\n denominator = torch.min(\n torch.tensor(k, device=device, dtype=torch.int).repeat(len(user_ids)),\n torch.tensor(targets[user_ids].getnnz(axis=1), device=device)\n )\n\n res = ((accuracy * accuracy.cumsum(axis=1) * weights).sum(axis=1)) / denominator\n res[torch.isnan(res)] = 0\n\n return 
res.mean().item()\n\n\ndef mrr(targets: csr_matrix,\n user_ids: Union[np.array, torch.tensor],\n preds: Union[np.array, torch.tensor],\n k: Optional[Any] = None) -> float:\n \"\"\"\n Calculate the mean reciprocal rank (MRR) of the input predictions.\n\n Parameters\n ----------\n targets: scipy.sparse.csr_matrix\n Interaction matrix containing user and item IDs\n user_ids: np.array or torch.tensor\n Users corresponding to the recommendations in the top k predictions\n preds: torch.tensor\n Tensor of shape (n_users x n_items) with each user's scores for each item\n k: Any\n Ignored, included only for compatibility with ``mapk``\n\n Returns\n -------\n mrr_score: float\n\n \"\"\"\n predicted_items = preds.topk(preds.shape[1], dim=1).indices\n labeled = _get_labels(targets, user_ids, predicted_items, device=preds.device)\n\n # weighting each 0/1 by position so that topk returns index of *first* postive result\n position_weight = 1.0/(\n torch.arange(1, targets.shape[1] + 1, device=preds.device)\n .repeat(len(user_ids), 1)\n .float()\n )\n labeled_weighted = (labeled.float() * position_weight)\n\n highest_score, rank = labeled_weighted.topk(k=1)\n\n reciprocal_rank = 1.0/(rank.float() + 1)\n reciprocal_rank[highest_score == 0] = 0\n\n return reciprocal_rank.mean().item()\n\n\ndef auc(targets: csr_matrix,\n user_ids: Union[np.array, torch.tensor],\n preds: Union[np.array, torch.tensor],\n k: Optional[Any] = None) -> float:\n \"\"\"\n Calculate the area under the ROC curve (AUC) for each user and average the results.\n\n Parameters\n ----------\n targets: scipy.sparse.csr_matrix\n Interaction matrix containing user and item IDs\n user_ids: np.array or torch.tensor\n Users corresponding to the recommendations in the top k predictions\n preds: torch.tensor\n Tensor of shape (n_users x n_items) with each user's scores for each item\n k: Any\n Ignored, included only for compatibility with ``mapk``\n\n Returns\n -------\n auc_score: float\n\n \"\"\"\n agg = 0\n for i, user_id 
in enumerate(user_ids):\n target_tensor = torch.tensor(\n targets[user_id].toarray(),\n device=preds.device,\n dtype=torch.long\n ).view(-1)\n # many models' ``preds`` may be unbounded if a final activation layer is not applied\n # we have to normalize ``preds`` here to avoid a ``ValueError`` stating that ``preds``\n # should be probabilities, but values were detected outside of [0,1] range\n auc = auroc(torch.sigmoid(preds[i, :]), target=target_tensor, pos_label=1)\n agg += auc\n\n return (agg/len(user_ids)).item()\n\n\ndef evaluate_in_batches(\n metric_list: Iterable[Callable[\n [csr_matrix, Union[np.array, torch.tensor], Union[np.array, torch.tensor], Optional[int]],\n float\n ]],\n test_interactions: collie.interactions.Interactions,\n model: collie.model.BasePipeline,\n k: int = 10,\n batch_size: int = 20,\n logger: pytorch_lightning.loggers.base.LightningLoggerBase = None,\n verbose: bool = True,\n) -> List[float]:\n \"\"\"\n Evaluate a model with potentially several different metrics.\n\n Memory constraints require that most test sets will need to be evaluated in batches. This\n function handles the looping and batching boilerplate needed to properly evaluate the model\n without running out of memory.\n\n Parameters\n ----------\n metric_list: list of functions\n List of evaluation functions to apply. Each function must accept keyword arguments:\n\n * ``targets``\n\n * ``user_ids``\n\n * ``preds``\n\n * ``k``\n\n test_interactions: collie.interactions.Interactions\n Interactions to use as labels\n model: collie.model.BasePipeline\n Model that can take a (user_id, item_id) pair as input and return a recommendation score\n k: int\n Number of recommendations to consider per user. This is ignored by some metrics\n batch_size: int\n Number of users to score in a single batch. 
For best efficiency, this number should be as\n high as possible without running out of memory\n logger: pytorch_lightning.loggers.base.LightningLoggerBase\n If provided, will log outputted metrics dictionary using the ``log_metrics`` method with\n keys being the string representation of ``metric_list`` and values being\n ``evaluation_results``. Additionally, if ``model.hparams.num_epochs_completed`` exists, this\n will be logged as well, making it possible to track metrics progress over the course of\n model training\n verbose: bool\n Display progress bar and print statements during function execution\n\n Returns\n -------\n evaluation_results: list\n List of floats, with each metric value corresponding to the respective function passed in\n ``metric_list``\n\n Examples\n --------\n .. code-block:: python\n\n from collie.metrics import auc, evaluate_in_batches, mapk, mrr\n\n\n map_10_score, mrr_score, auc_score = evaluate_in_batches(\n metric_list=[mapk, mrr, auc],\n test_interactions=test,\n model=model,\n )\n\n print(map_10_score, mrr_score, auc_score)\n\n \"\"\"\n if not isinstance(test_interactions, Interactions):\n raise ValueError(\n '``test_interactions`` must be of type ``Interactions``, not '\n f'{type(test_interactions)}. 
Try using ``explicit_evaluate_in_batches`` instead.'\n )\n\n device = _get_evaluate_in_batches_device(model=model)\n model.to(device)\n model._move_any_external_data_to_device()\n\n test_users = np.unique(test_interactions.mat.row)\n targets = test_interactions.mat.tocsr()\n\n if len(test_users) < batch_size:\n batch_size = len(test_users)\n\n accumulators = [0] * len(metric_list)\n\n data_to_iterate_over = range(int(np.ceil(len(test_users) / batch_size)))\n if verbose:\n data_to_iterate_over = tqdm(data_to_iterate_over)\n\n for i in data_to_iterate_over:\n user_range = test_users[i * batch_size:(i + 1) * batch_size]\n preds = get_preds(model, user_range, test_interactions.num_items, device)\n for metric_ind, metric in enumerate(metric_list):\n score = metric(targets=targets, user_ids=user_range, preds=preds, k=k)\n accumulators[metric_ind] += (score * len(user_range))\n\n all_scores = [acc_score / len(test_users) for acc_score in accumulators]\n\n if logger is not None:\n _log_metrics(model=model,\n logger=logger,\n metric_list=metric_list,\n all_scores=all_scores,\n verbose=verbose)\n\n return all_scores[0] if len(all_scores) == 1 else all_scores\n\n\ndef explicit_evaluate_in_batches(\n metric_list: Iterable[Metric],\n test_interactions: collie.interactions.ExplicitInteractions,\n model: collie.model.BasePipeline,\n logger: pytorch_lightning.loggers.base.LightningLoggerBase = None,\n verbose: bool = True,\n **kwargs,\n) -> List[float]:\n \"\"\"\n Evaluate a model with potentially several different metrics.\n\n Memory constraints require that most test sets will need to be evaluated in batches. This\n function handles the looping and batching boilerplate needed to properly evaluate the model\n without running out of memory.\n\n Parameters\n ----------\n metric_list: list of ``torchmetrics.Metric``\n List of evaluation functions to apply. 
Each function must accept arguments for predictions\n and targets, in order\n test_interactions: collie.interactions.ExplicitInteractions\n model: collie.model.BasePipeline\n Model that can take a (user_id, item_id) pair as input and return a recommendation score\n batch_size: int\n Number of users to score in a single batch. For best efficiency, this number should be as\n high as possible without running out of memory\n logger: pytorch_lightning.loggers.base.LightningLoggerBase\n If provided, will log outputted metrics dictionary using the ``log_metrics`` method with\n keys being the string representation of ``metric_list`` and values being\n ``evaluation_results``. Additionally, if ``model.hparams.num_epochs_completed`` exists, this\n will be logged as well, making it possible to track metrics progress over the course of\n model training\n verbose: bool\n Display progress bar and print statements during function execution\n **kwargs: keyword arguments\n Additional arguments sent to the ``InteractionsDataLoader``\n\n Returns\n ----------\n evaluation_results: list\n List of floats, with each metric value corresponding to the respective function passed in\n ``metric_list``\n\n Examples\n -------------\n .. code-block:: python\n\n import torchmetrics\n\n from collie.metrics import explicit_evaluate_in_batches\n\n\n mse_score, mae_score = evaluate_in_batches(\n metric_list=[torchmetrics.MeanSquaredError(), torchmetrics.MeanAbsoluteError()],\n test_interactions=test,\n model=model,\n )\n\n print(mse_score, mae_score)\n\n \"\"\"\n if not isinstance(test_interactions, ExplicitInteractions):\n raise ValueError(\n '``test_interactions`` must be of type ``ExplicitInteractions``, not '\n f'{type(test_interactions)}. 
Try using ``evaluate_in_batches`` instead.'\n )\n\n try:\n device = _get_evaluate_in_batches_device(model=model)\n model.to(device)\n model._move_any_external_data_to_device()\n\n test_loader = InteractionsDataLoader(interactions=test_interactions,\n **kwargs)\n\n data_to_iterate_over = test_loader\n if verbose:\n data_to_iterate_over = tqdm(test_loader)\n\n for batch in data_to_iterate_over:\n users, items, ratings = batch\n\n # move data to batch before sending to model\n users = users.to(device)\n items = items.to(device)\n ratings = ratings.cpu()\n\n preds = model(users, items)\n\n for metric in metric_list:\n metric(preds.cpu(), ratings)\n\n all_scores = [metric.compute() for metric in metric_list]\n\n if logger is not None:\n _log_metrics(model=model,\n logger=logger,\n metric_list=metric_list,\n all_scores=all_scores,\n verbose=verbose)\n\n return all_scores[0] if len(all_scores) == 1 else all_scores\n finally:\n for metric in metric_list:\n metric.reset()\n\n\ndef _get_evaluate_in_batches_device(model: BasePipeline):\n device = getattr(model, 'device', None)\n\n if torch.cuda.is_available() and str(device) == 'cpu':\n warnings.warn('CUDA available but model device is set to CPU - is this desired?')\n\n if device is None:\n if torch.cuda.is_available():\n warnings.warn(\n '``model.device`` attribute is ``None``. 
Since GPU is available, putting model on '\n 'GPU.'\n )\n device = 'cuda:0'\n else:\n device = 'cpu'\n\n return device\n\n\ndef _log_metrics(model: BasePipeline,\n logger: pytorch_lightning.loggers.base.LightningLoggerBase,\n metric_list: List[Union[Callable[..., Any], Metric]],\n all_scores: List[float],\n verbose: bool):\n try:\n step = model.hparams.get('num_epochs_completed')\n except torch.nn.modules.module.ModuleAttributeError:\n # if, somehow, there is no ``model.hparams`` attribute, this shouldn't fail\n step = None\n\n try:\n metrics_dict = dict(zip([x.__name__ for x in metric_list], all_scores))\n except AttributeError:\n metrics_dict = dict(zip([type(x).__name__ for x in metric_list], all_scores))\n\n if verbose:\n print(f'Logging metrics {metrics_dict} to ``logger``...')\n\n logger.log_metrics(metrics=metrics_dict, step=step)\n",
"import numpy as np\nimport pandas as pd\nimport pytest\nfrom scipy.sparse import coo_matrix\n\nfrom collie.cross_validation import random_split, stratified_split\nfrom collie.interactions import ExplicitInteractions, Interactions\n\n\ndef test_bad_random_split_HDF5Interactions(hdf5_interactions):\n with pytest.raises(AssertionError):\n random_split(\n interactions=hdf5_interactions,\n )\n\n\ndef test_bad_stratified_split_HDF5Interactions(hdf5_interactions):\n with pytest.raises(AssertionError):\n stratified_split(\n interactions=hdf5_interactions,\n )\n\n\[email protected]('data_type', ['implicit', 'explicit'])\ndef test_random_split(implicit_interactions_to_split,\n explicit_interactions_to_split,\n data_type):\n if data_type == 'implicit':\n interactions_class = Interactions\n interactions_kwargs = {\n 'check_num_negative_samples_is_valid': False,\n }\n interactions_to_split = implicit_interactions_to_split\n else:\n interactions_class = ExplicitInteractions\n interactions_kwargs = {}\n interactions_to_split = explicit_interactions_to_split\n\n train_expected_df = pd.DataFrame(\n data={\n 'user_id': [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4],\n 'item_id': [0, 1, 2, 3, 4, 5, 8, 1, 3, 4, 1, 3, 4, 2, 2, 4],\n 'rating': [1, 2, 3, 4, 5, 4, 1, 1, 3, 4, 2, 4, 5, 5, 3, 5],\n }\n )\n train_expected = interactions_class(\n mat=coo_matrix(\n (\n train_expected_df['rating'],\n (train_expected_df['user_id'], train_expected_df['item_id']),\n ),\n shape=(interactions_to_split.num_users, interactions_to_split.num_items),\n ),\n allow_missing_ids=True,\n **interactions_kwargs,\n )\n\n validate_expected_df = pd.DataFrame(\n data={'user_id': [3, 4, 4], 'item_id': [1, 1, 5], 'rating': [1, 2, 4]}\n )\n validate_expected = interactions_class(\n mat=coo_matrix(\n (\n validate_expected_df['rating'],\n (validate_expected_df['user_id'], validate_expected_df['item_id']),\n ),\n shape=(interactions_to_split.num_users, interactions_to_split.num_items),\n ),\n allow_missing_ids=True,\n 
**interactions_kwargs,\n )\n\n test_expected_df = pd.DataFrame(\n data={\n 'user_id': [0, 0, 1, 2, 3],\n 'item_id': [6, 7, 2, 2, 4],\n 'rating': [3, 2, 2, 3, 4],\n }\n )\n test_expected = interactions_class(\n mat=coo_matrix(\n (\n test_expected_df['rating'],\n (test_expected_df['user_id'], test_expected_df['item_id']),\n ),\n shape=(interactions_to_split.num_users, interactions_to_split.num_items),\n ),\n allow_missing_ids=True,\n **interactions_kwargs,\n )\n\n (train_actual, validate_actual, test_actual) = random_split(\n interactions=interactions_to_split, val_p=0.1, test_p=0.2, seed=42\n )\n\n np.testing.assert_array_equal(train_actual.toarray(), train_expected.toarray())\n np.testing.assert_array_equal(\n validate_actual.toarray(), validate_expected.toarray()\n )\n np.testing.assert_array_equal(test_actual.toarray(), test_expected.toarray())\n\n assert (\n train_actual.num_users\n == train_expected.num_users\n == validate_actual.num_users\n == validate_expected.num_users\n == test_actual.num_users\n == test_expected.num_users\n )\n\n assert (\n train_actual.num_items\n == train_expected.num_items\n == validate_actual.num_items\n == validate_expected.num_items\n == test_actual.num_items\n == test_expected.num_items\n )\n\n assert (\n type(train_actual)\n == type(train_expected)\n == type(validate_actual)\n == type(validate_expected)\n == type(test_actual)\n == type(test_expected)\n == interactions_class\n )\n\n\ndef test_random_split_with_users_with_only_one_interaction(\n interactions_to_split_with_users_with_only_one_interaction,\n):\n # unlike for ``stratified_split``, this should work without error\n random_split(\n interactions=interactions_to_split_with_users_with_only_one_interaction,\n )\n\n\[email protected]('data_type', ['implicit', 'explicit'])\ndef test_stratified_split(implicit_interactions_to_split,\n explicit_interactions_to_split,\n data_type):\n if data_type == 'implicit':\n interactions_class = Interactions\n interactions_kwargs = {\n 
'check_num_negative_samples_is_valid': False,\n }\n interactions_to_split = implicit_interactions_to_split\n else:\n interactions_class = ExplicitInteractions\n interactions_kwargs = {}\n interactions_to_split = explicit_interactions_to_split\n\n train_expected_df = pd.DataFrame(\n data={\n 'user_id': [0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 4, 4],\n 'item_id': [1, 2, 3, 4, 6, 8, 1, 2, 3, 4, 2, 4, 5],\n 'rating': [2, 3, 4, 5, 3, 1, 1, 2, 4, 5, 5, 5, 4],\n }\n )\n train_expected = interactions_class(\n mat=coo_matrix(\n (\n train_expected_df['rating'],\n (train_expected_df['user_id'], train_expected_df['item_id']),\n ),\n shape=(interactions_to_split.num_users, interactions_to_split.num_items),\n ),\n allow_missing_ids=True,\n **interactions_kwargs,\n )\n\n validate_expected_df = pd.DataFrame(\n data={\n 'user_id': [0, 1, 2, 3, 4],\n 'item_id': [7, 3, 2, 1, 2],\n 'rating': [2, 3, 3, 1, 3],\n }\n )\n validate_expected = interactions_class(\n mat=coo_matrix(\n (\n validate_expected_df['rating'],\n (validate_expected_df['user_id'], validate_expected_df['item_id']),\n ),\n shape=(interactions_to_split.num_users, interactions_to_split.num_items),\n ),\n allow_missing_ids=True,\n **interactions_kwargs,\n )\n\n test_expected_df = pd.DataFrame(\n data={\n 'user_id': [0, 0, 1, 2, 3, 4],\n 'item_id': [0, 5, 4, 1, 4, 1],\n 'rating': [1, 4, 4, 2, 4, 2],\n }\n )\n test_expected = interactions_class(\n mat=coo_matrix(\n (\n test_expected_df['rating'],\n (test_expected_df['user_id'], test_expected_df['item_id']),\n ),\n shape=(interactions_to_split.num_users, interactions_to_split.num_items),\n ),\n allow_missing_ids=True,\n **interactions_kwargs,\n )\n\n (train_actual, validate_actual, test_actual) = stratified_split(\n interactions=interactions_to_split, val_p=0.1, test_p=0.2, seed=46\n )\n\n np.testing.assert_array_equal(train_actual.toarray(), train_expected.toarray())\n np.testing.assert_array_equal(\n validate_actual.toarray(), validate_expected.toarray()\n )\n 
np.testing.assert_array_equal(test_actual.toarray(), test_expected.toarray())\n\n assert (\n train_actual.num_users\n == train_expected.num_users\n == validate_actual.num_users\n == validate_expected.num_users\n == test_actual.num_users\n == test_expected.num_users\n )\n\n assert (\n train_actual.num_items\n == train_expected.num_items\n == validate_actual.num_items\n == validate_expected.num_items\n == test_actual.num_items\n == test_expected.num_items\n )\n\n assert (\n type(train_actual)\n == type(train_expected)\n == type(validate_actual)\n == type(validate_expected)\n == type(test_actual)\n == type(test_expected)\n == interactions_class\n )\n\n\[email protected]('processes', [0, -1])\ndef test_stratified_split_with_users_with_only_one_interaction_raises_error(\n interactions_to_split_with_users_with_only_one_interaction,\n processes\n):\n with pytest.raises(\n ValueError,\n match='Unable to stratify split on users - the ``interactions`` object contains users '\n 'with a single interaction. 
Either set ``force_split = True`` to put all users '\n 'with a single interaction in the training set or run '\n '``collie.utils.remove_users_with_fewer_than_n_interactions`` first.'\n ):\n stratified_split(\n interactions=interactions_to_split_with_users_with_only_one_interaction,\n test_p=0.2,\n seed=42,\n processes=processes,\n )\n\n\[email protected]('processes', [0, -1])\ndef test_stratified_split_with_users_with_only_one_interaction_force_split(\n interactions_to_split_with_users_with_only_one_interaction,\n processes\n):\n users_with_only_one_interaction = [0, 5, 6]\n\n (train_actual, _, _) = stratified_split(\n interactions=interactions_to_split_with_users_with_only_one_interaction,\n val_p=0.1,\n test_p=0.2,\n seed=42,\n processes=processes,\n force_split=True\n )\n\n assert all(user in train_actual[:][0][0].tolist() for user in users_with_only_one_interaction)\n\n\nclass TestSplitsWithWrongP:\n def test_combined_too_large_random(self, implicit_interactions_to_split):\n with pytest.raises(ValueError):\n random_split(interactions=implicit_interactions_to_split, val_p=0.9, test_p=0.2)\n\n def test_combined_too_large_stratified(self, implicit_interactions_to_split):\n with pytest.raises(ValueError):\n stratified_split(interactions=implicit_interactions_to_split, val_p=0.9, test_p=0.2)\n\n def test_combined_equal_one_random(self, implicit_interactions_to_split):\n with pytest.raises(ValueError):\n random_split(interactions=implicit_interactions_to_split, val_p=0.7, test_p=0.3)\n\n def test_combined_equal_one_stratified(self, implicit_interactions_to_split):\n with pytest.raises(ValueError):\n stratified_split(interactions=implicit_interactions_to_split, val_p=0.7, test_p=0.3)\n\n def test_val_negative_but_combined_good_random(self, implicit_interactions_to_split):\n with pytest.raises(ValueError):\n random_split(interactions=implicit_interactions_to_split, val_p=-0.1, test_p=0.3)\n\n def test_val_negative_but_combined_good_stratified(self, 
implicit_interactions_to_split):\n with pytest.raises(ValueError):\n stratified_split(interactions=implicit_interactions_to_split, val_p=-0.1, test_p=0.3)\n\n def test_test_p_too_large_random(self, implicit_interactions_to_split):\n with pytest.raises(ValueError):\n random_split(interactions=implicit_interactions_to_split, test_p=1.1)\n\n def test_test_p_too_large_stratified(self, implicit_interactions_to_split):\n with pytest.raises(ValueError):\n stratified_split(interactions=implicit_interactions_to_split, test_p=1.1)\n\n def test_test_p_equal_one_random(self, implicit_interactions_to_split):\n with pytest.raises(ValueError):\n random_split(interactions=implicit_interactions_to_split, test_p=1)\n\n def test_test_p_equal_one_stratified(self, implicit_interactions_to_split):\n with pytest.raises(ValueError):\n stratified_split(interactions=implicit_interactions_to_split, test_p=1)\n\n def test_test_p_negative_random(self, implicit_interactions_to_split):\n with pytest.raises(ValueError):\n random_split(interactions=implicit_interactions_to_split, test_p=-0.7)\n\n def test_test_p_negative_stratified(self, implicit_interactions_to_split):\n with pytest.raises(ValueError):\n stratified_split(interactions=implicit_interactions_to_split, test_p=-0.7)\n\n\ndef test_splits_vary_number_of_processes(implicit_interactions_to_split):\n train_1, test_1 = stratified_split(interactions=implicit_interactions_to_split,\n seed=42,\n processes=-1)\n train_2, test_2 = stratified_split(interactions=implicit_interactions_to_split,\n seed=42,\n processes=0)\n train_3, test_3 = stratified_split(interactions=implicit_interactions_to_split,\n seed=42,\n processes=1)\n train_4, test_4 = stratified_split(interactions=implicit_interactions_to_split,\n seed=42,\n processes=2)\n\n # transitive property in action here\n np.testing.assert_array_equal(train_1.toarray(), train_2.toarray())\n np.testing.assert_array_equal(train_2.toarray(), train_3.toarray())\n 
np.testing.assert_array_equal(train_3.toarray(), train_4.toarray())\n\n np.testing.assert_array_equal(test_1.toarray(), test_2.toarray())\n np.testing.assert_array_equal(test_2.toarray(), test_3.toarray())\n np.testing.assert_array_equal(test_3.toarray(), test_4.toarray())\n",
"from typing import Dict, Optional\n\nimport torch\n\n\ndef ideal_difference_from_metadata(\n positive_items: torch.tensor,\n negative_items: torch.tensor,\n metadata: Optional[Dict[str, torch.tensor]],\n metadata_weights: Optional[Dict[str, float]],\n) -> torch.tensor:\n \"\"\"\n Helper function to calculate the ideal score difference between the positive and negative items.\n\n Without considering metadata, the ideal score difference would be 1.0 (since the function looks\n at a pair of items, one positive item and one negative item). Taking metadata into\n consideration, the ideal score difference should be between 0 and 1 if there is a partial match\n (not the same item, but matching metadata - e.g. the same film genre). This function calculates\n that ideal difference when there is metadata available.\n\n Metadata passed in to this function is independent of metadata given to the model during\n training - it can be the same data or a different set. For example, one might use genre\n embeddings as metadata during training and use genre labels as metadata during loss calculation\n (since all metadata passed in to this function must be categorical).\n\n Parameters\n ----------\n positive_items: torch.tensor, 1-d\n Tensor containing IDs for known positive items\n negative_items: torch.tensor, 1-d\n Tensor containing IDs for sampled negative items\n metadata: dict\n Keys should be strings identifying each metadata type that match keys in\n ``metadata_weights``. Values should be a ``torch.tensor`` of shape (num_items x 1). Each\n tensor should contain categorical metadata information about items (e.g. a number\n representing the genre of the item)\n metadata_weights: dict\n Keys should be strings identifying each metadata type that match keys in ``metadata``.\n Values should be the amount of weight to place on a match of that type of metadata, with the\n sum of all values ``<= 1``.\n e.g. 
If ``metadata_weights = {'genre': .3, 'director': .2}``, then an item is:\n\n * a 100% match if it's the same item,\n\n * a 50% match if it's a different item with the same genre and same director,\n\n * a 30% match if it's a different item with the same genre and different director,\n\n * a 20% match if it's a different item with a different genre and same director,\n\n * a 0% match if it's a different item with a different genre and different director,\n which is equivalent to the loss without any partial credit\n\n Returns\n -------\n ideal difference: torch.tensor\n Tensor with the same shape as ``positive_items``, with each element between 0 and 1\n\n \"\"\"\n weight_sum = sum(metadata_weights.values())\n if weight_sum > 1:\n raise ValueError(f'sum of metadata weights was {weight_sum}, must be <=1')\n\n match_frac = torch.zeros(positive_items.shape).to(positive_items.device)\n for k, array in metadata.items():\n array = array.squeeze()\n match_frac += (\n array[positive_items.long()] == array[negative_items.long()]\n ).int().to(positive_items.device)*metadata_weights[k]\n\n return 1.0 - match_frac\n",
"import pytest\nimport torch\n\n\nSCORES = torch.tensor([\n 1.1, 1.3, 4.7, -7.234,\n -1.3, 0.7, 4.7, -2.2468,\n -4.7, 1.3, 3.56, -0.8924,\n 4.01, 2.7, 3.7, -5.2468,\n 3.89, 12.8, -1.7, -0.8143,\n])\n\n\[email protected]()\ndef positive_items():\n return torch.tensor([0, 1, 2, 3])\n\n\[email protected]()\ndef negative_items():\n return torch.tensor([4, 5, 6, 7])\n\n\[email protected]()\ndef many_negative_items():\n return torch.tensor([\n [4, 5, 6, 7],\n [8, 9, 10, 11],\n [12, 13, 14, 15],\n [16, 17, 18, 19],\n ])\n\n\[email protected]()\ndef positive_scores(positive_items):\n return SCORES[positive_items]\n\n\[email protected]()\ndef negative_scores(negative_items):\n return SCORES[negative_items]\n\n\[email protected]()\ndef many_negative_scores(many_negative_items):\n return SCORES[many_negative_items]\n\n\[email protected]()\ndef metadata_a():\n return torch.tensor([\n 0, 1, 1, 2,\n 0, 0, 0, 1,\n 2, 1, 1, 2,\n 0, 0, 1, 2,\n 0, 0, 1, 2,\n ])\n\n\[email protected]()\ndef metadata_a_diff():\n return torch.tensor([.8, 1, 1, 1])\n\n\[email protected]()\ndef metadata_b():\n return torch.tensor([\n 1, 2, 2, 3,\n 3, 2, 1, 3,\n 3, 2, 2, 2,\n 1, 1, 2, 2,\n 1, 1, 2, 2,\n ])\n\n\[email protected]()\ndef metadata_a_and_2_diff():\n return torch.tensor([\n [.8, .7, 1, .7],\n [1, .5, .5, .8],\n [.5, 1, .5, .8],\n [.5, 1, .5, .8],\n ])\n"
] | [
[
"torch.sigmoid",
"torch.isnan",
"numpy.unique",
"torch.tensor",
"torch.no_grad",
"torch.cuda.is_available",
"torch.arange"
],
[
"scipy.sparse.coo_matrix",
"pandas.DataFrame"
],
[
"torch.zeros"
],
[
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RausellLab/tiresias | [
"2acca303a0f6b4b1be784f597a59c1a883dbe43d"
] | [
"src/pipeline/validation/loocv/bagging_logistic_regression.py"
] | [
"import ray\nimport mlflow\nimport torch\nfrom src.models import Bagging\nfrom src.models import LogisticRegression\nfrom src.evaluation import loocv\nfrom src.utils import data_loaders\nfrom src.utils import data_savers\nfrom src.utils import mlflow as u_mlflow\n\n\nRUN_NAME = \"Bagging Logistic Regression\"\nMODEL_NAME = \"bagging-logistic-regression\"\n\n\n#@ray.remote(num_gpus=1)\ndef bagging_logistic_regression(\n embeddings_file, node_labels_file, node_features_file, use_cuda, params, metadata\n):\n mlflow.set_experiment(\"LOOCV\")\n\n with mlflow.start_run(run_name=RUN_NAME):\n mlflow.log_param(\"model\", MODEL_NAME)\n\n u_mlflow.add_params(**params)\n u_mlflow.add_metadata(metadata)\n mlflow.set_tag(\"use_cuda\", use_cuda)\n\n labels = data_loaders.load_labels(node_labels_file, use_cuda=use_cuda)\n n_nodes = labels.size(0)\n\n embeddings = None\n node_features = None\n\n if embeddings_file is not None:\n mlflow.log_param(\"embeddings\", True)\n mlflow.log_artifact(embeddings_file, \"inputs\")\n embeddings = data_loaders.load_embeddings(\n embeddings_file, use_cuda=use_cuda\n )\n else:\n mlflow.log_param(\"embeddings\", False)\n\n if node_features_file is not None:\n mlflow.log_param(\"node_features\", True)\n mlflow.log_artifact(node_features_file, \"inputs\")\n node_features = data_loaders.load_node_features(\n node_features_file, use_cuda\n )\n else:\n mlflow.log_param(\"node_features\", False)\n\n if embeddings is not None and node_features is not None:\n in_features = torch.cat((embeddings, node_features), dim=1)\n elif embeddings is not None:\n in_features = embeddings\n elif node_features is not None:\n in_features = node_features\n\n print(RUN_NAME)\n ranks_df = loocv.run(\n labels=labels,\n model_class=Bagging,\n bagging_model=LogisticRegression,\n features=in_features,\n **params\n )\n\n data_savers.save_ranks(ranks_df, n_nodes, RUN_NAME, params)\n"
] | [
[
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
iwasakishuto/Keras-Imitation | [
"8ac0cd7c8912d49d13b19a0182ad534c0781fbfe",
"8ac0cd7c8912d49d13b19a0182ad534c0781fbfe"
] | [
"kerasy/initializers.py",
"kerasy/utils/generic_utils.py"
] | [
"# coding: utf-8\nimport re\nimport numpy as np\nfrom scipy import stats\nfrom abc import ABCMeta, abstractmethod\n\nfrom .utils import mk_class_get\nfrom .utils import handleKeyError\nfrom .utils import handleRandomState\n\nclass KerasyAbstInitializer(metaclass=ABCMeta):\n def __init__(self):\n self.name = re.sub(r\"([a-z])([A-Z])\", r\"\\1_\\2\", self.__class__.__name__).lower()\n\n @abstractmethod\n def __call__(self, shape, dtype=None):\n raise NotImplementedError\n\nclass Zeros(KerasyAbstInitializer):\n def __call__(self, shape, dtype=None):\n return np.zeros(shape=shape, dtype=dtype)\n\nclass Ones(KerasyAbstInitializer):\n def __call__(self, shape, dtype=None):\n return np.ones(shape=shape, dtype=dtype)\n\nclass Constant(KerasyAbstInitializer):\n def __call__(self, shape, value=0, dtype=None):\n return np.full(shape=shape, fill_value=value, dtype=dtype)\n\nclass RandomNormal(KerasyAbstInitializer):\n def __call__(self, shape, mean=0, stddev=0.05, dtype=None, seed=None):\n rnd = handleRandomState(seed)\n return rnd.normal(size=shape, loc=mean, scale=stddev).astype(dtype)\n\nclass RandomUniform(KerasyAbstInitializer):\n def __call__(self, shape, minval=-0.05, maxval=0.05, dtype=None, seed=None):\n rnd = handleRandomState(seed)\n return rnd.uniform(size=shape, low=minval, high=maxval)\n\nclass TruncatedNormal(KerasyAbstInitializer):\n def __call__(self, shape, mean=0.0, stddev=0.05, dtype=None, seed=None):\n X = stats.truncnorm(\n (-stddev - mean) / stddev,\n (stddev - mean) / stddev,\n loc=mean,\n scale=stddev,\n )\n return X.rvs(size=shape,random_state=seed).astype(dtype)\n\nclass VarianceScaling(KerasyAbstInitializer):\n def __call__(self, shape, scale=1.0, mode='fan_in', distribution='normal', dtype=None, seed=None):\n fan_in, fan_out = _compute_fans(shape)\n n = fan_in if mode==\"fan_in\" else fan_out if mode==\"fan_out\" else (fan_in+fan_out)/2\n scale /= max(1., n)\n if distribution=='normal':\n # 0.879... 
= scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)\n stddev = np.sqrt(scale) / .87962566103423978\n return TruncatedNormal()(shape=shape, mean=0.0, stddev=stddev, dtype=dtype, seed=seed)\n else:\n limit = np.sqrt(3 * scale)\n return RandomUniform()(shape=shape, minval=-limit, maxval=limit, dtype=dtype, seed=seed)\n\nclass Orthogonal(KerasyAbstInitializer):\n def __call__(self, shape, gain=1.0, dtype=None, seed=None):\n rnd = handleRandomState(seed)\n num_rows = 1\n for dim in shape[:-1]:\n num_rows *= dim\n num_cols = shape[-1]\n flat_shape = (num_rows, num_cols)\n a = rnd.normal(loc=0.0, scale=1.0, size=flat_shape).astype(dtype)\n u, _, v = np.linalg.svd(a, full_matrices=False)\n # Pick the one with the correct shape.\n q = u if u.shape == flat_shape else v\n q = q.reshape(shape)\n return gain * q[:shape[0], :shape[1]]\n\nclass Identity(KerasyAbstInitializer):\n def __call__(self, shape, dtype=None, gain=1.0):\n if len(shape) != 2 or shape[0]!=shape[1]:\n raise ValueError('Identity matrix initializer can only be used for 2D Square matrices.')\n return gain * np.eye(N=shape[0], dtype=dtype)\n\nclass GlorotNormal(KerasyAbstInitializer):\n def __call__(self, shape, dtype=None, seed=None):\n return VarianceScaling()(\n shape=shape,\n scale=1.,\n mode='fan_avg',\n distribution='normal',\n dtype=dtype,\n seed=seed\n )\n\nclass GlorotUniform(KerasyAbstInitializer):\n def __call__(self, shape, dtype=None, seed=None):\n return VarianceScaling()(\n shape=shape,\n scale=1.,\n mode='fan_avg',\n distribution='uniform',\n dtype=dtype,\n seed=seed\n )\n\nclass HeNormal(KerasyAbstInitializer):\n def __call__(self, shape, dtype=None, seed=None):\n return VarianceScaling()(\n shape=shape,\n scale=2.,\n mode='fan_in',\n distribution='normal',\n dtype=dtype,\n seed=seed\n )\n\nclass LeCunNormal(KerasyAbstInitializer):\n def __call__(self, shape, dtype=None, seed=None):\n return VarianceScaling()(\n shape=shape,\n scale=1.,\n mode='fan_in',\n distribution='normal',\n 
dtype=dtype,\n seed=seed\n )\n\nclass HeUniform(KerasyAbstInitializer):\n def __call__(self, shape, dtype=None, seed=None):\n return VarianceScaling()(\n shape=shape,\n scale=2.,\n mode='fan_in',\n distribution='uniform',\n dtype=dtype,\n seed=seed\n )\n\nclass LeCunUniform(KerasyAbstInitializer):\n def __call__(self, shape, dtype=None, seed=None):\n return VarianceScaling()(\n shape=shape,\n scale=1.,\n mode='fan_in',\n distribution='uniform',\n dtype=dtype,\n seed=seed\n )\n\ndef _compute_fans(shape, data_format='channels_last'):\n \"\"\"Computes the number of input and output units for a weight shape.\n @param shape : Integer shape tuple.\n @param data_format: Image data format to use for convolution kernels.\n @return fan_in : size of the input shape.\n @return fan_out : size of the output shape.\n \"\"\"\n if len(shape) == 2:\n fan_in,fan_out = shape\n elif len(shape) in {3, 4, 5}:\n # Assuming convolution kernels (1D, 2D or 3D).\n # TH kernel shape: (depth, input_depth, ...)\n # TF kernel shape: (..., input_depth, depth)\n if data_format == 'channels_first':\n receptive_field_size = np.prod(shape[2:])\n fan_in = shape[1] * receptive_field_size\n fan_out = shape[0] * receptive_field_size\n elif data_format == 'channels_last':\n receptive_field_size = np.prod(shape[:-2])\n fan_in = shape[-2] * receptive_field_size\n fan_out = shape[-1] * receptive_field_size\n else:\n raise ValueError('Invalid data_format: ' + data_format)\n else:\n # No specific assumptions.\n fan_in = np.sqrt(np.prod(shape))\n fan_out = np.sqrt(np.prod(shape))\n return fan_in, fan_out\n\nall = KerasyInitializerFunctions = {\n 'zeros' : Zeros,\n 'ones' : Ones,\n 'constant' : Constant,\n 'random_normal' : RandomNormal,\n 'random_uniform' : RandomUniform,\n 'truncated_normal' : TruncatedNormal,\n 'variance_scaling' : VarianceScaling,\n 'orthogonal' : Orthogonal,\n 'identity' : Identity,\n 'glorot_normal' : GlorotNormal,\n 'glorot_uniform' : GlorotUniform,\n 'he_normal' : HeNormal,\n 
'lecun_normal' : LeCunNormal,\n 'he_uniform' : HeUniform,\n 'lecun_uniform' : LeCunUniform,\n}\n\nget = mk_class_get(\n all_classes=KerasyInitializerFunctions,\n kerasy_abst_class=[KerasyAbstInitializer],\n genre=\"initializer\"\n)\n",
"# coding: utf-8\nimport os\nimport re\nimport sys\nimport time\nimport datetime\nimport numpy as np\nfrom collections import defaultdict\n\ndef flatten_dual(lst):\n return [e for sublist in lst for e in sublist]\n\ndef get_varname(var, scope_=globals()):\n for varname,val in scope_.items():\n if id(val)==id(var):\n return varname\n\ndef disp_var_globals(*varnames, head_=True, align_=True, scope_=globals()):\n \"\"\"\n def func():\n a = \"hoge\"\n b = 123\n c = [1,\"1\"]\n disp_var_globals(\"a\",\"b\",\"c\",scope=locals())\n\n func()\n >>> a: hoge\n >>> b: 123\n >>> c: [1, '1']\n \"\"\"\n if head_: print(f\"#=== VARIABLE INFO ===\")\n digit = max([len(e) for e in varnames]) if align_ else 1\n for var in varnames:\n print(f\"{var:<{digit}}: {scope_.get(var)}\")\n\ndef disp_val_globals(*values, head_=True, align_=True, scope_=globals()):\n \"\"\"\n def func():\n a = \"hoge\"\n b = 123\n c = [1,\"1\"]\n disp_val_globals(a,b,c,scope=locals())\n\n func()\n >>> a: hoge\n >>> b: 123\n >>> c: [1, '1']\n \"\"\"\n if head_: print(f\"#=== VARIABLE INFO ===\")\n names = [get_varname(val, scope_=scope_) for val in values]\n digit = max([len(e) for e in names]) if align_ else 1\n for name,val in zip(names, values):\n print(f\"{name:<{digit}}: {val}\")\n\ndef disp_val_shapes(*values, head_=True, align_=True, scope_=globals()):\n if head_: print(f\"#=== ARRAY SHAPES ===\")\n names = [get_varname(val, scope_=scope_) for val in values]\n digit = max([len(e) for e in names]) + 6 if align_ else 1\n for name,val in zip(names, values):\n print(f\"{name+'.shape':<{digit}}: {val.shape}\")\n\n_UID_PREFIXES = defaultdict(int)\ndef get_uid(prefix=\"\"):\n _UID_PREFIXES[prefix] += 1\n return _UID_PREFIXES[prefix]\n\nclass priColor:\n BLACK = '\\033[30m'\n RED = '\\033[31m'\n GREEN = '\\033[32m'\n YELLOW = '\\033[33m'\n BLUE = '\\033[34m'\n PURPLE = '\\033[35m'\n CYAN = '\\033[36m'\n WHITE = '\\033[37m'\n RETURN = '\\033[07m' # 反転\n ACCENT = '\\033[01m' # 強調\n FLASH = '\\033[05m' # 点滅\n 
RED_FLASH = '\\033[05;41m' # 赤背景+点滅\n END = '\\033[0m'\n\n @staticmethod\n def color(value, color=None):\n if color is None:\n return str(value)\n else:\n color = color.upper()\n handleKeyError(priColor.__dict__.keys(), color=color)\n return f\"{priColor.__dict__[color.upper()]}{value}{priColor.END}\"\n\ndef handleKeyError(lst, msg_=\"\", **kwargs):\n k,v = kwargs.popitem()\n if v not in lst:\n lst = ', '.join([f\"'{e}'\" for e in lst])\n raise KeyError(f\"Please chose the argment `{k}` from {lst}.\\n\\033[32m{msg_}\\033[0m\")\n\ndef handleTypeError(types, msg_=\"\", **kwargs):\n type2str = lambda t: re.sub(r\"<class '(.*?)'>\", r\"\\033[34m\\1\\033[0m\", str(t))\n k,v = kwargs.popitem()\n if not any([isinstance(v,t) for t in types]):\n str_true_types = ', '.join([type2str(t) for t in types])\n srt_false_type = type2str(type(v))\n if len(types)==1:\n err_msg = f\"must be {str_true_types}\"\n else:\n err_msg = f\"must be one of {str_true_types}\"\n raise TypeError(f\"`{k}` {err_msg}, not {srt_false_type}.\\n\\033[32m{msg_}\\033[0m\")\n\ndef urlDecorate(url, addDate=True):\n \"\"\" Decorate URL like Wget. (Add datetime information and coloring url to blue.) 
\"\"\"\n now = datetime.datetime.now().strftime(\"--%Y-%m-%d %H:%M:%S-- \") if addDate else \"\"\n return now + priColor.color(url, color=\"BLUE\")\n\ndef measure_complexity(func, *args, repetitions_=10, **kwargs):\n times=0\n metrics=[]\n if \"random_state\" in kwargs:\n base_seed = kwargs.get(\"random_state\")\n for i in range(repetitions_):\n kwargs[\"random_state\"] = base_seed+i\n s = time.time()\n ret = func(*args, **kwargs)\n times += time.time()-s\n metrics.append(ret)\n else:\n for _ in range(repetitions_):\n s = time.time()\n ret = func(*args, **kwargs)\n times += time.time()-s\n metrics.append(ret)\n if metrics[0] is None:\n return times/repetitions_\n else:\n return (times/repetitions_, metrics)\n\ndef has_not_attrs(obj, *names):\n return [name for name in names if not hasattr(obj, name)]\n\ndef has_all_attrs(obj, *names):\n return sum([1 for name in names if not hasattr(obj, name)])==0\n\ndef handleRandomState(seed):\n \"\"\" Turn `np.random.RandomState` \"\"\"\n if seed is None:\n return np.random.mtrand._rand\n if isinstance(seed, np.random.RandomState):\n return seed\n if isinstance(seed, int):\n return np.random.RandomState(seed)\n raise ValueError(f\"Could not conver {seed} to numpy.random.RandomState instance.\")\n\ndef fout_args(*args, sep=\"\\t\"):\n return sep.join([str(e) for e in args])+\"\\n\"\n\nf_aligns = [\"<\", \">\", \"=\", \"^\"]\nf_signs = [\"+\", \"-\", \" \", \"\"]\nf_grouping_options = [\"_\", \",\", \"\"]\nf_types = [\"b\", \"c\", \"d\", \"e\", \"E\", \"f\", \"F\", \"g\", \"G\", \"n\", \"o\", \"s\", \"x\", \"X\", \"%\"]\n\ndef format_spec_create(width=0, align=\">\", sign=\"\", zero_padding=False,\n grouping_option=\"\", fmt=\"\"):\n \"\"\"\n Create a function which returns a formatted text.\n ~~~~~\n * Source Code : https://github.com/python/cpython/blob/3.8/Lib/string.py\n * Documentation: https://docs.python.org/3/library/string.html#format-specification-mini-language\n\n format_spec = 
[[fill]align][sign][#][0][width][grouping_option][.precision][type]\n =========================\n @params align : [[fill]align]\n @params sign : [sign]\n @params zero_padding : [0]\n @params width : [width]\n @params grouping_option : [grouping_option]\n @params fmt : [.precision][type]\n @return lambda : <function __main__.<lambda>(fill)>\n \"\"\"\n handleKeyError(lst=f_aligns, align=align)\n handleKeyError(lst=f_signs, sign=sign)\n handleKeyError(lst=f_grouping_options, grouping_option=grouping_option)\n if len(fmt)>0:\n handleKeyError(lst=f_types, fmt=fmt[-1])\n zero = \"0\" if zero_padding else \"\"\n handleTypeError(types=[int], width=width)\n return lambda fill : f\"{fill:{align}{sign}{zero}{width}{grouping_option}{fmt}}\"\n\ndef print_func_create(width=0, align=\">\", sign=\"\", zero_padding=False,\n grouping_option=\"\", fmt=\"\", color=\"black\",\n left_side_bar=\"\", right_side_bar=\"\",\n left_margin=0, right_margin=0, end=\"\\n\"):\n \"\"\"\n Create a function which prints a formatted text.\n Please see also the function `format_spec_create`.\n ==============================\n @params color : string color\n @params left(right)_side_bar : (str)\n @params left(right)_margin : (int)\n @params end : string appended after the last value, default a newline.\n @return lambda : <function __main__.<lambda>(fill)>\n \"\"\"\n format_spec = format_spec_create(width, align=align, sign=sign,\n zero_padding=zero_padding,\n grouping_option=grouping_option, fmt=fmt)\n def print_func(fill):\n info = f\"{left_side_bar}{' '*left_margin}\"\n info += priColor.color(format_spec(fill), color=color)\n info += f\"{' '*right_margin}{right_side_bar}\"\n print(info, end=end)\n return print_func\n\nclass Table():\n def __init__(self):\n self.cols = {}\n self.table_width = 1\n self.head = None\n\n def _disp_title(self):\n for colname, options in self.cols.items():\n if \"print_values\" not in options:\n continue\n print_func = options.get(\"print_title\")\n print_func(colname)\n 
print(\"|\")\n\n def _disp_border(self, table_width=None, mark=\"=\"):\n table_width = self.table_width if table_width is None else table_width\n print(mark*table_width)\n\n def _disp_values(self, head=None):\n head = self.head if head is None else head\n for i in range(head):\n for colname, options in self.cols.items():\n if \"print_values\" not in options:\n continue\n print_func = options.get(\"print_values\")\n values = options.get(\"values\")\n print_func(values[i])\n print(\"|\")\n\n def show(self, head=None, table_width=None, mark=\"=\"):\n self._disp_title()\n self._disp_border(table_width=table_width, mark=mark)\n self._disp_values(head=head)\n\n def set_cols(self, colname, values, width=None, align=\">\", sign=\"\",\n zero_padding=False, grouping_option=\"\", fmt=\"\", color=\"black\",\n left_margin=0, right_margin=0):\n title_width = len(str(colname))\n if width is None:\n format_spec = format_spec_create(\n width=0, align=align, sign=sign, zero_padding=zero_padding,\n grouping_option=grouping_option, fmt=fmt\n )\n width = len(max([format_spec(v) for v in values], key=len))\n width = max(width, title_width)\n self.table_width += width + left_margin + right_margin + 1\n\n print_values = print_func_create(\n width=width, align=align, sign=sign, zero_padding=zero_padding,\n grouping_option=grouping_option, fmt=fmt, color=color,\n left_side_bar=\"|\", right_side_bar=\"\", end=\"\",\n left_margin=left_margin, right_margin=right_margin,\n )\n print_title = print_func_create(\n width=width, align=\"^\", sign=\"\", zero_padding=False,\n grouping_option=\"\", fmt=\"\", color=\"ACCENT\",\n left_side_bar=\"|\", right_side_bar=\"\", end=\"\",\n left_margin=left_margin, right_margin=right_margin,\n )\n self.cols.update({colname: dict(\n print_values=print_values, print_title=print_title, values=values\n )})\n if self.head is None:\n self.head = len(values)\n"
] | [
[
"numpy.linalg.svd",
"numpy.sqrt",
"scipy.stats.truncnorm",
"numpy.eye",
"numpy.full",
"numpy.ones",
"numpy.prod",
"numpy.zeros"
],
[
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kartik-hegde/mindmappings | [
"e96f2a287da2a93c4af0794a3bab1211bc95ba0a"
] | [
"mindmappings/costModel/timeloop/model_cnn.py"
] | [
"import random\nimport itertools\nimport subprocess as sp\nimport os\nimport shutil\nfrom subprocess import STDOUT\nimport os, sys\nimport numpy as np\n\nfrom mindmappings.parameters import Parameters\nfrom mindmappings.costModel.timeloop.timeloop import Timeloop\nfrom mindmappings.utils.utils import factors, replicate\nexamples = Parameters(algorithm='CNN-layer')\n\nclass Model_CNN(Timeloop):\n \"\"\"\n This is an object implemeted to support Timeloop on a specific architecture for CNN Layer\n \"\"\"\n\n def __init__(self, problem=[16,256,256,3,3,14,14], parameters=examples, arch=examples.ARCHITECTURE):\n \n # Create the problem specifications.\n self.arch, self.problem = self.defineProblem(arch, problem)\n self.parameters = parameters\n # Generate the search space.\n self.references = self.refGen()\n\n def refGen(self):\n \"\"\"\n Generates a search space. Might include invalid points.\n \"\"\"\n\n numHierarchy = self.arch['numHierarchy']\n numBanks = self.arch['numBanks']\n tiled_dimensions = self.problem['dimension_names']\n bounds = self.problem['dimension_sizes'] # this should be in the same order as the above\n\n # Tile generation\n ref_tiles = [self.getTileGen(bounds[d], numHierarchy+1) for d in range(len(bounds))]\n # Corner case, we want minimum two items (don't ask why, ask SKOPT guys)\n ref_tiles = [replicate(r) for r in ref_tiles]\n\n # Loop order generation\n ref_loop_orders = [''.join(p) for p in list(itertools.permutations(tiled_dimensions))]\n\n # Partition generation (Hardcoded as of now for 3 partitions) - write a recursive function if you need generalization\n ref_partition = [list(([i,j,b-(i+j)] for i in range(1, b, 1)\n for j in range(1, b-i, 1))) for b in numBanks]\n return ref_tiles, ref_loop_orders, ref_partition\n\n def checkTileValidity(self, tile_choices, mapping_partitions):\n \"\"\"\n Make sure the tiles fits in the buffers. 
This is an optimization to prune out the space.\n Timeloop does not require this.\n \"\"\"\n # print(tile_choices, mapping_partitions)\n L2_partitions, L1_partitions = mapping_partitions\n\n # Buffer sizes\n L2_input, L2_weight, L2_psum = [self.arch['bank_sizes'][0]*L2_partitions[i] for i in range(3)]\n L1_input, L1_weight, L1_psum = [self.arch['bank_sizes'][1]*L1_partitions[i] for i in range(3)]\n\n # Tile sizes\n N,C,K,R,S,P,Q = [np.prod(list(zip(*tile_choices))[i][1:]) for i in range(7)]\n L2_input_tile, L2_weight_tile, L2_psum_tile = N*(P+R-1)*(Q+S-1)*C, K*R*S*C, P*Q*K*N\n N,C,K,R,S,P,Q = [np.prod(list(zip(*tile_choices))[i][2:]) for i in range(7)]\n L1_input_tile, L1_weight_tile, L1_psum_tile = N*(P+R-1)*(Q+S-1)*C, K*R*S*C, P*Q*K*N\n\n if( (L2_input_tile>L2_input) or (L2_weight_tile>L2_weight) or (L2_psum_tile>L2_psum) or\n (L1_input_tile>L1_input) or (L1_weight_tile>L1_weight) or (L1_psum_tile>L1_psum)):\n return False\n else:\n return True\n\n def generateOracleCost(self, metric='RAW'):\n \"\"\"\n The oracle cost towards which we will guide the results towards. 
\n \"\"\"\n # Sophisticated Oracle (theoretical lower bound)\n\n # Get tensor sizes\n N,C,K,R,S,P,Q = self.problem['dimension_sizes']\n input_size, weight_size, output_size = [N*P*Q*C, R*S*C*K, N*P*Q*K] # Input, weight, output\n\n # Memory energy costs\n DRAM_cost = 200.0\n L2_buf, L1_buf = self.arch['buffer_access_energy']\n\n # Compute costs\n MAC_cost = self.arch['mac_energy']\n num_flops = N*R*S*C*P*Q*K\n num_PE = self.arch['numPEs']\n\n # Oracle costs per tensor per mem hierarchy\n L1_input_energy = input_size * L1_buf\n L1_weight_energy = weight_size * L1_buf\n L1_output_energy = output_size * L1_buf\n L2_input_energy = input_size * L2_buf\n L2_weight_energy = weight_size * L2_buf\n L2_output_energy = output_size * L2_buf\n DRAM_input_energy = input_size * DRAM_cost\n DRAM_weight_energy = weight_size * DRAM_cost\n DRAM_output_energy = output_size * DRAM_cost\n compute_energy = num_flops * MAC_cost\n PE_util = 1.0\n energy = sum([L1_input_energy,L1_weight_energy,L1_output_energy,\n L2_input_energy,L2_weight_energy,L2_output_energy,\n DRAM_input_energy, DRAM_weight_energy, DRAM_output_energy,\n compute_energy]) * 1e-6\n cycles = num_flops/num_PE\n\n cost_arr = np.array([L1_input_energy,L1_weight_energy,L1_output_energy,\n L2_output_energy,L2_weight_energy,L2_input_energy,\n DRAM_weight_energy, DRAM_input_energy, DRAM_output_energy,\n PE_util,energy,cycles])\n\n if(metric == 'RAW'):\n return cost_arr\n elif(metric == 'ENERGY'):\n return cost_arr[-2]*1e-6\n elif(metric == 'CYCLES'):\n return cost_arr[-1]*1e-9\n else:\n return cost_arr[-2]*cost_arr[-1]*1e-15\n\n def defineProblem(self, arch, bounds = [16,256,512,3,3,56,56]):\n \"\"\"\n Define a problem.\n \"\"\"\n\n # Arch Spec (only needed to change based on mapping)\n arch['bank_sizes'] = [arch['bufferSizes'][i]/(arch['numBanks'][i] * arch['bufferWidth'][i]) for i in range(arch['numHierarchy']-1)]\n\n # Define the domain\n dimensions = ['N','C','K','R','S','Q','P']\n problem = {'dimension_sizes': bounds, 
'dimension_names':dimensions}\n\n return arch, problem\n\n def writeConfig(self, mapping, paths, unique_ID):\n \"\"\"\n This is a highly specialized version to write out a config file, get the cost and return the validity of the mapping.\n \"\"\"\n OUTPUT_DIR, (CFG_FILE_OUT_ARCH, CFG_FILE_OUT_MAP, CFG_FILE_OUT_PROB, CFG_FILE_OUT_MODEL) = paths\n\n tiling, loop_orders, partitions = mapping\n numHierarchy = self.arch['numHierarchy']\n N,C,K,R,S,P,Q = self.problem['dimension_sizes']\n\n\n # Extract\n dim_factors = [' factors: N={0} C={1} K={2} R={3} S={4} P={5} Q={6}\\n'.format(*tiling[i]) for i in range(numHierarchy+1)]\n\n # Buffer sizes\n DRAM_factors, L2_factors, spatial_factors, L1_factors = dim_factors\n DRAM_orders, L2_orders, L1_orders = loop_orders\n L2_partitions, L1_partitions = partitions\n\n L2_input, L2_weight, L2_psum = [int(self.arch['bank_sizes'][0]*L2_partitions[i]) for i in range(3)]\n L1_input, L1_weight, L1_psum = [int(self.arch['bank_sizes'][1]*L1_partitions[i]) for i in range(3)]\n\n # Open the sample file\n with open(self.parameters.SAMPLE_CFG_FILE, 'r') as f:\n data = f.readlines()\n\n # Do the replacements\n data[20] = ' depth: {0}\\n'.format(L2_input)\n data[30] = ' depth: {0}\\n'.format(L2_weight)\n data[40] = ' depth: {0}\\n'.format(L2_psum)\n data[53] = ' depth: {0}\\n'.format(L1_input)\n data[64] = ' depth: {0}\\n'.format(L1_weight)\n data[75] = ' depth: {0}\\n'.format(L1_psum)\n data[91] = DRAM_factors\n data[92] = ' permutation: {0}\\n'.format(DRAM_orders)\n data[104] = L2_factors\n data[93] = ' - permutation: {0}\\n'.format(L2_orders)\n data[97] = ' - permutation: {0}\\n'.format(L2_orders)\n data[101] = ' - permutation: {0}\\n'.format(L2_orders)\n data[108] = spatial_factors\n data[109] = ' - permutation: {0}\\n'.format(L1_orders)\n data[113] = ' - permutation: {0}\\n'.format(L1_orders)\n data[117] = ' - permutation: {0}\\n'.format(L1_orders)\n data[120] = L1_factors\n data[201] = ' C: {0}\\n'.format(C)\n data[202] = ' K: 
{0}\\n'.format(K)\n data[203] = ' R: {0}\\n'.format(R)\n data[204] = ' S: {0}\\n'.format(S)\n data[205] = ' P: {0}\\n'.format(P)\n data[206] = ' Q: {0}\\n'.format(Q)\n data[207] = ' N: {0}\\n'.format(N)\n\n data[211] = ' out_prefix: {0}'.format(unique_ID)\n\n # Write the file back\n with open(CFG_FILE_OUT_ARCH, 'w') as f:\n f.writelines(data[:88])\n with open(CFG_FILE_OUT_MAP, 'w') as f:\n f.writelines(data[88:164])\n with open(CFG_FILE_OUT_PROB, 'w') as f:\n f.writelines(data[164:209])\n with open(CFG_FILE_OUT_MODEL, 'w') as f:\n f.writelines(data[209:])\n\n os.chdir(OUTPUT_DIR)\n # print(OUTPUT_DIR)\n\n # Run the config file and check the validity\n command = [ self.parameters.COSTMODEL_EXECUTABLE,\n CFG_FILE_OUT_ARCH,\n CFG_FILE_OUT_MAP,\n CFG_FILE_OUT_PROB,\n CFG_FILE_OUT_MODEL\n ]\n DEVNULL = open(os.devnull, 'wb')\n prnt = sp.call(command, shell=False,stdout=DEVNULL , stderr=DEVNULL)\n\n if(prnt ==0):\n return True\n else:\n return False\n # try:\n # DEVNULL = open(os.devnull, 'wb')\n # prnt = sp.call(command, shell=False,stdout=DEVNULL, stderr=STDOUT)\n # print(prnt)\n # # os.system(COSTMODEL_EXECUTABLE + ' ' + CFG_FILE_OUT)\n # except:\n # return False\n\n return True\n\n def parse(self, PATH):\n \"\"\"\n Parse the output file to get the stats we want.\n \"\"\"\n with open(PATH, 'r') as f:\n data=f.readlines()\n\n energy_IDs = [2,5, 8, 11,14,17,20,23,26]\n energy_count = 0\n\n cost = []\n\n for idx,line in enumerate(data):\n if('Energy (total)' in line):\n energy_count += 1\n if(energy_count in energy_IDs):\n cost.append(float(data[idx].split(\" \")[-2]))\n elif(energy_count > 62):\n break\n cost.append(float(data[-24].split(\" \")[-1]))\n cost.append(float(data[-22].split(\" \")[-2]))\n cost.append(float(data[-23].split(\" \")[-1]))\n\n return cost\n\n def get_domain(self):\n \"\"\"\n Problem domain\n \"\"\"\n ref_tiles, ref_loop_orders, ref_partition = self.references\n\n domain = [\n {'name': 'Nt', 'type': 'discrete', 'domain': 
(0,len(ref_tiles[0])-1)},\n {'name': 'Ct', 'type': 'discrete', 'domain': (0,len(ref_tiles[1])-1)},\n {'name': 'Kt', 'type': 'discrete', 'domain': (0,len(ref_tiles[2])-1)},\n {'name': 'Rt', 'type': 'discrete', 'domain': (0,len(ref_tiles[3])-1)},\n {'name': 'St', 'type': 'discrete', 'domain': (0,len(ref_tiles[4])-1)},\n {'name': 'Pt', 'type': 'discrete', 'domain': (0,len(ref_tiles[5])-1)},\n {'name': 'Qt', 'type': 'discrete', 'domain': (0,len(ref_tiles[6])-1)},\n {'name': 'loop_order_DRAM', 'type': 'discrete', 'domain': (0,len(ref_loop_orders)-1)},\n {'name': 'loop_order_L2', 'type': 'discrete', 'domain': (0,len(ref_loop_orders)-1)},\n {'name': 'loop_order_L1', 'type': 'discrete', 'domain': (0,len(ref_loop_orders)-1)},\n {'name': 'buffer_part_L2', 'type': 'discrete', 'domain': (0,len(ref_partition[0])-1)},\n {'name': 'buffer_part_L1', 'type': 'discrete', 'domain': (0,len(ref_partition[1])-1)}\n ]\n\n return domain\n\n def parseMetaMapping(self, meta_mapping):\n \"\"\"\n Given a flat mapping, turn it to a mapping that cost model understands.\n \"\"\"\n\n # Extracct\n numHierarchy = self.arch['numHierarchy']\n parHierarchy = self.arch['parallelHierarchy']\n bound = self.problem['dimension_sizes'] # this should be in the same order as the above\n ref_tiles, ref_loop_orders, ref_partition = self.references\n\n tiling, orders, partitions = meta_mapping[:7], meta_mapping[7:10], meta_mapping[10:12]\n\n tile_choices = [ref_tiles[idx][int(t)] for idx,t in enumerate(tiling)]\n mapping_tiles = [list(zip(*tile_choices))[h] for h in range(numHierarchy+1)]\n mapping_orders = [ref_loop_orders[int(o)] for o in orders]\n mapping_partitions = [ref_partition[idx][int(p)] for idx,p in enumerate(partitions)]\n\n return [mapping_tiles, mapping_orders, mapping_partitions]\n\n def getInputVector(self, mapping):\n \"\"\"\n This function returns a flattened mapping vector.\n \"\"\"\n \n # Extract\n tiling, loop_orders, partitions = mapping\n\n ##### Form input tuple: Hyperparameters + 
Mapping\n\n # Hyperparameters\n input_hyperparams = self.problem['dimension_sizes']\n # Tiling is represented as is\n input_tiling = [item for tile_factors in tiling for item in tile_factors]\n # Loop order is mentioned as the index of each of the dimension.\n input_loop_order = [lord.index(dim) for lord in loop_orders for dim in self.problem['dimension_names']]\n # Partition is mentioned as is.\n input_partitions = [item for partition_sizes in partitions for item in partition_sizes]\n\n # Club them to form input vector\n return input_hyperparams + input_tiling + input_loop_order + input_partitions"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Krishna00111/Machine-Learning-from-Scratch | [
"5d6f5b1a2096acbb57a060385e471123b77b9a68",
"5d6f5b1a2096acbb57a060385e471123b77b9a68"
] | [
"mlfromscratch/supervised_learning/bayesian_regression.py",
"mlfromscratch/utils/misc.py"
] | [
"from __future__ import print_function, division\r\nimport numpy as np\r\nfrom scipy.stats import chi2, multivariate_normal\r\nfrom mlfromscratch.utils import mean_squared_error, train_test_split, polynomial_features\r\n\r\n\r\n\r\nclass BayesianRegression(object):\r\n \"\"\"Bayesian regression model. If poly_degree is specified the features will\r\n be transformed to with a polynomial basis function, which allows for polynomial\r\n regression. Assumes Normal prior and likelihood for the weights and scaled inverse\r\n chi-squared prior and likelihood for the variance of the weights.\r\n\r\n Parameters:\r\n -----------\r\n n_draws: float\r\n The number of simulated draws from the posterior of the parameters.\r\n mu0: array\r\n The mean values of the prior Normal distribution of the parameters.\r\n omega0: array\r\n The precision matrix of the prior Normal distribution of the parameters.\r\n nu0: float\r\n The degrees of freedom of the prior scaled inverse chi squared distribution.\r\n sigma_sq0: float\r\n The scale parameter of the prior scaled inverse chi squared distribution.\r\n poly_degree: int\r\n The polynomial degree that the features should be transformed to. Allows\r\n for polynomial regression.\r\n cred_int: float\r\n The credible interval (ETI in this impl.). 95 => 95% credible interval of the posterior\r\n of the parameters.\r\n\r\n Reference:\r\n https://github.com/mattiasvillani/BayesLearnCourse/raw/master/Slides/BayesLearnL5.pdf\r\n \"\"\"\r\n def __init__(self, n_draws, mu0, omega0, nu0, sigma_sq0, poly_degree=0, cred_int=95):\r\n self.w = None\r\n self.n_draws = n_draws\r\n self.poly_degree = poly_degree\r\n self.cred_int = cred_int\r\n\r\n # Prior parameters\r\n self.mu0 = mu0\r\n self.omega0 = omega0\r\n self.nu0 = nu0\r\n self.sigma_sq0 = sigma_sq0\r\n\r\n # Allows for simulation from the scaled inverse chi squared\r\n # distribution. 
Assumes the variance is distributed according to\r\n # this distribution.\r\n # Reference:\r\n # https://en.wikipedia.org/wiki/Scaled_inverse_chi-squared_distribution\r\n def _draw_scaled_inv_chi_sq(self, n, df, scale):\r\n X = chi2.rvs(size=n, df=df)\r\n sigma_sq = df * scale / X\r\n return sigma_sq\r\n\r\n def fit(self, X, y):\r\n\r\n # If polynomial transformation\r\n if self.poly_degree:\r\n X = polynomial_features(X, degree=self.poly_degree)\r\n\r\n n_samples, n_features = np.shape(X)\r\n\r\n X_X = X.T.dot(X)\r\n\r\n # Least squares approximate of beta\r\n beta_hat = np.linalg.pinv(X_X).dot(X.T).dot(y)\r\n\r\n # The posterior parameters can be determined analytically since we assume\r\n # conjugate priors for the likelihoods.\r\n\r\n # Normal prior / likelihood => Normal posterior\r\n mu_n = np.linalg.pinv(X_X + self.omega0).dot(X_X.dot(beta_hat)+self.omega0.dot(self.mu0))\r\n omega_n = X_X + self.omega0\r\n # Scaled inverse chi-squared prior / likelihood => Scaled inverse chi-squared posterior\r\n nu_n = self.nu0 + n_samples\r\n sigma_sq_n = (1.0/nu_n)*(self.nu0*self.sigma_sq0 + \\\r\n (y.T.dot(y) + self.mu0.T.dot(self.omega0).dot(self.mu0) - mu_n.T.dot(omega_n.dot(mu_n))))\r\n\r\n # Simulate parameter values for n_draws\r\n beta_draws = np.empty((self.n_draws, n_features))\r\n for i in range(self.n_draws):\r\n sigma_sq = self._draw_scaled_inv_chi_sq(n=1, df=nu_n, scale=sigma_sq_n)\r\n beta = multivariate_normal.rvs(size=1, mean=mu_n[:,0], cov=sigma_sq*np.linalg.pinv(omega_n))\r\n # Save parameter draws\r\n beta_draws[i, :] = beta\r\n\r\n # Select the mean of the simulated variables as the ones used to make predictions\r\n self.w = np.mean(beta_draws, axis=0)\r\n\r\n # Lower and upper boundary of the credible interval\r\n l_eti = 50 - self.cred_int/2\r\n u_eti = 50 + self.cred_int/2\r\n self.eti = np.array([[np.percentile(beta_draws[:,i], q=l_eti), np.percentile(beta_draws[:,i], q=u_eti)] \\\r\n for i in range(n_features)])\r\n\r\n def predict(self, X, 
eti=False):\r\n\r\n # If polynomial transformation\r\n if self.poly_degree:\r\n X = polynomial_features(X, degree=self.poly_degree)\r\n\r\n y_pred = X.dot(self.w)\r\n # If the lower and upper boundaries for the 95%\r\n # equal tail interval should be returned\r\n if eti:\r\n lower_w = self.eti[:, 0]\r\n upper_w = self.eti[:, 1]\r\n y_lower_pred = X.dot(lower_w)\r\n y_upper_pred = X.dot(upper_w)\r\n return y_pred, y_lower_pred, y_upper_pred\r\n\r\n return y_pred\r\n",
"import progressbar\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as cmx\r\nimport matplotlib.colors as colors\r\nimport numpy as np\r\n\r\nfrom mlfromscratch.utils.data_operation import calculate_covariance_matrix\r\nfrom mlfromscratch.utils.data_operation import calculate_correlation_matrix\r\nfrom mlfromscratch.utils.data_manipulation import standardize\r\n\r\nbar_widgets = [\r\n 'Training: ', progressbar.Percentage(), ' ', progressbar.Bar(marker=\"-\", left=\"[\", right=\"]\"),\r\n ' ', progressbar.ETA()\r\n]\r\n\r\nclass Plot():\r\n def __init__(self): \r\n self.cmap = plt.get_cmap('viridis')\r\n\r\n def _transform(self, X, dim):\r\n covariance = calculate_covariance_matrix(X)\r\n eigenvalues, eigenvectors = np.linalg.eig(covariance)\r\n # Sort eigenvalues and eigenvector by largest eigenvalues\r\n idx = eigenvalues.argsort()[::-1]\r\n eigenvalues = eigenvalues[idx][:dim]\r\n eigenvectors = np.atleast_1d(eigenvectors[:, idx])[:, :dim]\r\n # Project the data onto principal components\r\n X_transformed = X.dot(eigenvectors)\r\n\r\n return X_transformed\r\n\r\n\r\n def plot_regression(self, lines, title, axis_labels=None, mse=None, scatter=None, legend={\"type\": \"lines\", \"loc\": \"lower right\"}):\r\n \r\n if scatter:\r\n scatter_plots = scatter_labels = []\r\n for s in scatter:\r\n scatter_plots += [plt.scatter(s[\"x\"], s[\"y\"], color=s[\"color\"], s=s[\"size\"])]\r\n scatter_labels += [s[\"label\"]]\r\n scatter_plots = tuple(scatter_plots)\r\n scatter_labels = tuple(scatter_labels)\r\n\r\n for l in lines:\r\n li = plt.plot(l[\"x\"], l[\"y\"], color=s[\"color\"], linewidth=l[\"width\"], label=l[\"label\"])\r\n\r\n if mse:\r\n plt.suptitle(title)\r\n plt.title(\"MSE: %.2f\" % mse, fontsize=10)\r\n else:\r\n plt.title(title)\r\n\r\n if axis_labels:\r\n plt.xlabel(axis_labels[\"x\"])\r\n plt.ylabel(axis_labels[\"y\"])\r\n\r\n if legend[\"type\"] == \"lines\":\r\n plt.legend(loc=\"lower_left\")\r\n elif 
legend[\"type\"] == \"scatter\" and scatter:\r\n plt.legend(scatter_plots, scatter_labels, loc=legend[\"loc\"])\r\n\r\n plt.show()\r\n\r\n\r\n\r\n # Plot the dataset X and the corresponding labels y in 2D using PCA.\r\n def plot_in_2d(self, X, y=None, title=None, accuracy=None, legend_labels=None):\r\n X_transformed = self._transform(X, dim=2)\r\n x1 = X_transformed[:, 0]\r\n x2 = X_transformed[:, 1]\r\n class_distr = []\r\n\r\n y = np.array(y).astype(int)\r\n\r\n colors = [self.cmap(i) for i in np.linspace(0, 1, len(np.unique(y)))]\r\n\r\n # Plot the different class distributions\r\n for i, l in enumerate(np.unique(y)):\r\n _x1 = x1[y == l]\r\n _x2 = x2[y == l]\r\n _y = y[y == l]\r\n class_distr.append(plt.scatter(_x1, _x2, color=colors[i]))\r\n\r\n # Plot legend\r\n if not legend_labels is None: \r\n plt.legend(class_distr, legend_labels, loc=1)\r\n\r\n # Plot title\r\n if title:\r\n if accuracy:\r\n perc = 100 * accuracy\r\n plt.suptitle(title)\r\n plt.title(\"Accuracy: %.1f%%\" % perc, fontsize=10)\r\n else:\r\n plt.title(title)\r\n\r\n # Axis labels\r\n plt.xlabel('Principal Component 1')\r\n plt.ylabel('Principal Component 2')\r\n\r\n plt.show()\r\n\r\n # Plot the dataset X and the corresponding labels y in 3D using PCA.\r\n def plot_in_3d(self, X, y=None):\r\n X_transformed = self._transform(X, dim=3)\r\n x1 = X_transformed[:, 0]\r\n x2 = X_transformed[:, 1]\r\n x3 = X_transformed[:, 2]\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111, projection='3d')\r\n ax.scatter(x1, x2, x3, c=y)\r\n plt.show()\r\n\r\n\r\n"
] | [
[
"scipy.stats.chi2.rvs",
"numpy.percentile",
"numpy.linalg.pinv",
"numpy.shape",
"numpy.mean",
"numpy.empty"
],
[
"numpy.array",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"numpy.unique",
"matplotlib.pyplot.scatter",
"numpy.linalg.eig",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.get_cmap",
"numpy.atleast_1d",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
matthew-brett/statsmodels | [
"915c9dc2d762c5592ac17a7cf5f1cc957fcbde1c",
"915c9dc2d762c5592ac17a7cf5f1cc957fcbde1c",
"915c9dc2d762c5592ac17a7cf5f1cc957fcbde1c"
] | [
"scikits/statsmodels/datasets/stackloss/data.py",
"scikits/statsmodels/nonparametric/bandwidths.py",
"scikits/statsmodels/genmod/families/links.py"
] | [
"\"\"\"Stack loss data\"\"\"\n\n__all__ = ['COPYRIGHT','TITLE','SOURCE','DESCRSHORT','DESCRLONG','NOTE', 'load']\n\n\n__docformat__ = 'restructuredtext'\n\nCOPYRIGHT = \"\"\"This is public domain. \"\"\"\nTITLE = __doc__\nSOURCE = \"\"\"\nBrownlee, K. A. (1965), \"Statistical Theory and Methodology in\nScience and Engineering\", 2nd edition, New York:Wiley.\n\"\"\"\n\nDESCRSHORT = \"\"\"Stack loss plant data of Brownlee (1965)\"\"\"\n\nDESCRLONG = \"\"\"The stack loss plant data of Brownlee (1965) contains\n21 days of measurements from a plant's oxidation of ammonia to nitric acid.\nThe nitric oxide pollutants are captured in an absorption tower.\"\"\"\n\nNOTE = \"\"\"\nNumber of Observations - 21\n\nNumber of Variables - 4\n\nVariable name definitions::\n\n STACKLOSS - 10 times the percentage of ammonia going into the plant that\n escapes from the absoroption column\n AIRFLOW - Rate of operation of the plant\n WATERTEMP - Cooling water temperature in the absorption tower\n ACIDCONC - Acid concentration of circulating acid minus 50 times 10.\n\"\"\"\n\nfrom numpy import recfromtxt, column_stack, array\nfrom scikits.statsmodels.datasets import Dataset\nfrom os.path import dirname, abspath\n\ndef load():\n \"\"\"\n Load the stack loss data and returns a Dataset class instance.\n\n Returns\n --------\n Dataset instance:\n See DATASET_PROPOSAL.txt for more information.\n \"\"\"\n filepath = dirname(abspath(__file__))\n data = recfromtxt(open(filepath + '/stackloss.csv',\"rb\"), delimiter=\",\",\n names=True, dtype=float)\n names = list(data.dtype.names)\n endog = array(data[names[0]], dtype=float)\n endog_name = names[0]\n exog = column_stack(data[i] for i in names[1:]).astype(float)\n exog_name = names[1:]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset\n",
"import numpy as np\nfrom scipy.stats import scoreatpercentile as sap\n\n#from scipy.stats import norm\n\ndef _select_sigma(X):\n \"\"\"\n Returns the smaller of std(X, ddof=1) or normalized IQR(X) over axis 0.\n\n References\n ----------\n Silverman (1986) p.47\n \"\"\"\n# normalize = norm.ppf(.75) - norm.ppf(.25)\n normalize = 1.349\n# IQR = np.subtract.reduce(percentile(X, [75,25],\n# axis=axis), axis=axis)/normalize\n IQR = (sap(X, 75) - sap(X, 25))/normalize\n return np.minimum(np.std(X, axis=0, ddof=1), IQR)\n\n\n## Univariate Rule of Thumb Bandwidths ##\ndef bw_scott(x):\n \"\"\"\n Scott's Rule of Thumb\n\n Parameter\n ---------\n x : array-like\n Array for which to get the bandwidth\n\n Returns\n -------\n bw : float\n The estimate of the bandwidth\n\n Notes\n -----\n Returns 1.059 * A * n ** (-1/5.)\n\n A = min(std(x, ddof=1), IQR/1.349)\n IQR = np.subtract.reduce(np.percentile(x, [75,25]))\n\n References\n ---------- ::\n\n Scott, D.W. (1992) `Multivariate Density Estimation: Theory, Practice, and\n Visualization.`\n \"\"\"\n A = _select_sigma(x)\n n = len(x)\n return 1.059 * A * n ** -.2\n\ndef bw_silverman(x):\n \"\"\"f\n Silverman's Rule of Thumb\n\n Parameter\n ---------\n x : array-like\n Array for which to get the bandwidth\n\n Returns\n -------\n bw : float\n The estimate of the bandwidth\n\n Notes\n -----\n Returns .9 * A * n ** (-1/5.)\n\n A = min(std(x, ddof=1), IQR/1.349)\n IQR = np.subtract.reduce(np.percentile(x, [75,25]))\n\n References\n ---------- ::\n\n Silverman, B.W. 
(1986) `Density Estimation.`\n \"\"\"\n A = _select_sigma(x)\n n = len(x)\n return .9 * A * n ** -.2\n\n## Plug-In Methods ##\n\n## Least Squares Cross-Validation ##\n\n## Helper Functions ##\n\nbandwidth_funcs = dict(scott=bw_scott,silverman=bw_silverman)\n\ndef select_bandwidth(X, bw, kernel):\n \"\"\"\n Selects bandwidth\n \"\"\"\n bw = bw.lower()\n if bw not in [\"scott\",\"silverman\"]:\n raise ValueError(\"Bandwidth %s not understood\" % bw)\n#TODO: uncomment checks when we have non-rule of thumb bandwidths for diff. kernels\n# if kernel == \"gauss\":\n return bandwidth_funcs[bw](X)\n# else:\n# raise ValueError(\"Only Gaussian Kernels are currently supported\")\n\n",
"'''\nDefines the link functions to be used with GLM families.\n'''\n\nimport numpy as np\nimport scipy.stats\n\n#TODO: are the instance actually \"aliases\"\n# I used this terminology in varfuncs as well -ss\n\nclass Link(object):\n\n \"\"\"\n A generic link function for one-parameter exponential family.\n\n `Link` does nothing, but lays out the methods expected of any subclass.\n \"\"\"\n\n def __call__(self, p):\n \"\"\"\n Return the value of the link function. This is just a placeholder.\n\n Parameters\n ----------\n p : array-like\n Probabilities\n\n Returns\n -------\n The value of the link function g(p) = z\n \"\"\"\n return NotImplementedError\n\n def inverse(self, z):\n \"\"\"\n Inverse of the link function. Just a placeholder.\n\n Parameters\n ----------\n z : array-like\n `z` is usually the linear predictor of the transformed variable\n in the IRLS algorithm for GLM.\n\n Returns\n -------\n The value of the inverse of the link function g^(-1)(z) = p\n\n\n \"\"\"\n return NotImplementedError\n\n def deriv(self, p):\n \"\"\"\n Derivative of the link function g'(p). Just a placeholder.\n\n Parameters\n ----------\n p : array-like\n\n Returns\n -------\n The value of the derivative of the link function g'(p)\n \"\"\"\n return NotImplementedError\n\nclass Logit(Link):\n \"\"\"\n The logit transform\n\n Notes\n -----\n call and derivative use a private method _clean to make trim p by\n 1e-10 so that p is in (0,1)\n\n Alias of Logit:\n logit = Logit()\n \"\"\"\n\n tol = 1.0e-10\n\n def _clean(self, p):\n \"\"\"\n Clip logistic values to range (tol, 1-tol)\n\n Parameters\n -----------\n p : array-like\n Probabilities\n\n Returns\n --------\n pclip : array\n Clipped probabilities\n \"\"\"\n return np.clip(p, Logit.tol, 1. 
- Logit.tol)\n\n def __call__(self, p):\n \"\"\"\n The logit transform\n\n Parameters\n ----------\n p : array-like\n Probabilities\n\n Returns\n -------\n z : array\n Logit transform of `p`\n\n Notes\n -----\n g(p) = log(p / (1 - p))\n \"\"\"\n p = self._clean(p)\n return np.log(p / (1. - p))\n\n def inverse(self, z):\n \"\"\"\n Inverse of the logit transform\n\n Parameters\n ----------\n z : array-like\n The value of the logit transform at `p`\n\n Returns\n -------\n p : array\n Probabilities\n\n Notes\n -----\n g^(-1)(z) = exp(z)/(1+exp(z))\n \"\"\"\n t = np.exp(z)\n return t / (1. + t)\n\n def deriv(self, p):\n\n \"\"\"\n Derivative of the logit transform\n\n Parameters\n ----------\n p: array-like\n Probabilities\n\n Returns\n -------\n g'(p) : array\n Value of the derivative of logit transform at `p`\n\n Notes\n -----\n g'(p) = 1 / (p * (1 - p))\n\n Alias for `Logit`:\n logit = Logit()\n \"\"\"\n p = self._clean(p)\n return 1. / (p * (1 - p))\n\n#logit = Logit()\nclass logit(Logit):\n pass\n\nclass Power(Link):\n \"\"\"\n The power transform\n\n Parameters\n ----------\n power : float\n The exponent of the power transform\n\n Notes\n -----\n Aliases of Power:\n inverse = Power(power=-1)\n sqrt = Power(power=.5)\n inverse_squared = Power(power=-2.)\n identity = Power(power=1.)\n \"\"\"\n\n def __init__(self, power=1.):\n self.power = power\n\n def __call__(self, p):\n \"\"\"\n Power transform link function\n\n Parameters\n ----------\n p : array-like\n Mean parameters\n\n Returns\n -------\n z : array-like\n Power transform of x\n\n Notes\n -----\n g(p) = x**self.power\n \"\"\"\n\n return np.power(p, self.power)\n\n def inverse(self, z):\n \"\"\"\n Inverse of the power transform link function\n\n\n Parameters\n ----------\n `z` : array-like\n Value of the transformed mean parameters at `p`\n\n Returns\n -------\n `p` : array\n Mean parameters\n\n Notes\n -----\n g^(-1)(z`) = `z`**(1/`power`)\n \"\"\"\n return np.power(z, 1. 
/ self.power)\n\n def deriv(self, p):\n \"\"\"\n Derivative of the power transform\n\n Parameters\n ----------\n p : array-like\n Mean parameters\n\n Returns\n --------\n g'(p) : array\n Derivative of power transform of `p`\n\n Notes\n -----\n g'(`p`) = `power` * `p`**(`power` - 1)\n \"\"\"\n return self.power * np.power(p, self.power - 1)\n\n#inverse = Power(power=-1.)\nclass inverse_power(Power):\n \"\"\"\n The inverse transform\n\n Notes\n -----\n g(p) = 1/p\n\n Alias of statsmodels.family.links.Power(power=-1.)\n \"\"\"\n def __init__(self):\n super(inverse_power, self).__init__(power=-1.)\n\n#sqrt = Power(power=0.5)\nclass sqrt(Power):\n \"\"\"\n The square-root transform\n\n Notes\n -----\n g(`p`) = sqrt(`p`)\n\n Alias of statsmodels.family.links.Power(power=.5)\n \"\"\"\n def __init__(self):\n super(sqrt, self).__init__(power=.5)\n\nclass inverse_squared(Power):\n#inverse_squared = Power(power=-2.)\n \"\"\"\n The inverse squared transform\n\n Notes\n -----\n g(`p`) = 1/(`p`\\ \\*\\*2)\n\n Alias of statsmodels.family.links.Power(power=2.)\n \"\"\"\n def __init__(self):\n super(inverse_squared, self).__init__(power=-2.)\n\nclass identity(Power):\n \"\"\"\n The identity transform\n\n Notes\n -----\n g(`p`) = `p`\n\n Alias of statsmodels.family.links.Power(power=1.)\n \"\"\"\n def __init__(self):\n super(identity, self).__init__(power=1.)\n\nclass Log(Link):\n \"\"\"\n The log transform\n\n Notes\n -----\n call and derivative call a private method _clean to trim the data by\n 1e-10 so that p is in (0,1). 
log is an alias of Log.\n \"\"\"\n\n tol = 1.0e-10\n\n def _clean(self, x):\n return np.clip(x, Logit.tol, np.inf)\n\n def __call__(self, p, **extra):\n \"\"\"\n Log transform link function\n\n Parameters\n ----------\n x : array-like\n Mean parameters\n\n Returns\n -------\n z : array\n log(x)\n\n Notes\n -----\n g(p) = log(p)\n \"\"\"\n x = self._clean(p)\n return np.log(p)\n\n def inverse(self, z):\n \"\"\"\n Inverse of log transform link function\n\n Parameters\n ----------\n z : array\n The inverse of the link function at `p`\n\n Returns\n -------\n p : array\n The mean probabilities given the value of the inverse `z`\n\n Notes\n -----\n g^{-1}(z) = exp(z)\n \"\"\"\n return np.exp(z)\n\n def deriv(self, p):\n \"\"\"\n Derivative of log transform link function\n\n Parameters\n ----------\n p : array-like\n Mean parameters\n\n Returns\n -------\n g'(p) : array\n derivative of log transform of x\n\n Notes\n -----\n g(x) = 1/x\n \"\"\"\n p = self._clean(p)\n return 1. / p\n\nclass log(Log):\n \"\"\"\n The log transform\n\n Notes\n -----\n log is a an alias of Log.\n \"\"\"\n pass\n\n#TODO: the CDFLink is untested\nclass CDFLink(Logit):\n \"\"\"\n The use the CDF of a scipy.stats distribution\n\n CDFLink is a subclass of logit in order to use its _clean method\n for the link and its derivative.\n\n Parameters\n ----------\n dbn : scipy.stats distribution\n Default is dbn=scipy.stats.norm\n\n Notes\n -----\n The CDF link is untested.\n \"\"\"\n\n def __init__(self, dbn=scipy.stats.norm):\n self.dbn = dbn\n\n def __call__(self, p):\n \"\"\"\n CDF link function\n\n Parameters\n ----------\n p : array-like\n Mean parameters\n\n Returns\n -------\n z : array\n (ppf) inverse of CDF transform of p\n\n Notes\n -----\n g(`p`) = `dbn`.ppf(`p`)\n \"\"\"\n p = self._clean(p)\n return self.dbn.ppf(p)\n\n def inverse(self, z):\n \"\"\"\n The inverse of the CDF link\n\n Parameters\n ----------\n z : array-like\n The value of the inverse of the link function at `p`\n\n Returns\n 
-------\n p : array\n Mean probabilities. The value of the inverse of CDF link of `z`\n\n Notes\n -----\n g^(-1)(`z`) = `dbn`.cdf(`z`)\n \"\"\"\n return self.dbn.cdf(z)\n\n def deriv(self, p):\n \"\"\"\n Derivative of CDF link\n\n Parameters\n ----------\n p : array-like\n mean parameters\n\n Returns\n -------\n g'(p) : array\n The derivative of CDF transform at `p`\n\n Notes\n -----\n g'(`p`) = 1./ `dbn`.pdf(`p`)\n \"\"\"\n# Or is it\n# g'(`p`) = 1/`dbn`.pdf(`dbn`.ppf(`p`))\n#TODO: make sure this is correct.\n#can we just have a numerical approximation?\n p = self._clean(p)\n return 1. / self.dbn.pdf(p)\n\n#probit = CDFLink()\nclass probit(CDFLink):\n \"\"\"\n The probit (standard normal CDF) transform\n\n Notes\n --------\n g(p) = scipy.stats.norm.ppf(p)\n\n probit is an alias of CDFLink.\n \"\"\"\n pass\n\nclass cauchy(CDFLink):\n \"\"\"\n The Cauchy (standard Cauchy CDF) transform\n\n Notes\n -----\n g(p) = scipy.stats.cauchy.ppf(p)\n\n cauchy is an alias of CDFLink with dbn=scipy.stats.cauchy\n \"\"\"\n def __init__(self):\n super(cauchy, self).__init__(dbn=scipy.stats.cauchy)\n\n#TODO: CLogLog is untested\nclass CLogLog(Logit):\n \"\"\"\n The complementary log-log transform\n\n CLogLog inherits from Logit in order to have access to its _clean method\n for the link and its derivative.\n\n Notes\n -----\n CLogLog is untested.\n \"\"\"\n def __call__(self, p):\n \"\"\"\n C-Log-Log transform link function\n\n Parameters\n ----------\n p : array\n Mean parameters\n\n Returns\n -------\n z : array\n The CLogLog transform of `p`\n\n Notes\n -----\n g(p) = log(-log(1-p))\n \"\"\"\n p = self._clean(p)\n return np.log(-np.log(1-p))\n\n def inverse(self, z):\n \"\"\"\n Inverse of C-Log-Log transform link function\n\n\n Parameters\n ----------\n z : array-like\n The value of the inverse of the CLogLog link function at `p`\n\n Returns\n -------\n p : array\n Mean parameters\n\n Notes\n -----\n g^(-1)(`z`) = 1-exp(-exp(`z`))\n \"\"\"\n return 1-np.exp(-np.exp(z))\n\n def 
deriv(self, p):\n \"\"\"\n Derivatve of C-Log-Log transform link function\n\n Parameters\n ----------\n p : array-like\n Mean parameters\n\n Returns\n -------\n g'(p) : array\n The derivative of the CLogLog transform link function\n\n Notes\n -----\n g'(p) = - 1 / (log(p) * p)\n \"\"\"\n p = self._clean(p)\n return 1. / ((p-1)*(np.log(1-p)))\n\nclass cloglog(CLogLog):\n \"\"\"\n The CLogLog transform link function.\n\n Notes\n -----\n g(`p`) = log(-log(1-`p`))\n\n cloglog is an alias for CLogLog\n cloglog = CLogLog()\n \"\"\"\n pass\n\nclass NegativeBinomial(object):\n '''\n The negative binomial link function\n\n Parameters\n ----------\n alpha : float, optional\n Alpha is the ancillary parameter of the Negative Binomial link function.\n It is assumed to be nonstochastic. The default value is 1. Permissible\n values are usually assumed to be in (.01,2).\n '''\n\n tol = 1.0e-10\n\n def __init__(self, alpha=1.):\n self.alpha = alpha\n\n def _clean(self, x):\n return np.clip(x, NegativeBinomial.tol, np.inf)\n\n def __call__(self, x):\n '''\n Negative Binomial transform link function\n\n Parameters\n ----------\n p : array-like\n Mean parameters\n\n Returns\n -------\n z : array\n The negative binomial transform of `p`\n\n Notes\n -----\n g(p) = log(p/(p + 1/alpha))\n '''\n p = self._clean(p)\n return np.log(p/(p+1/self.alpha))\n\n def inverse(self, z):\n '''\n Inverse of the negative binomial transform\n\n Parameters\n -----------\n z : array-like\n The value of the inverse of the negative binomial link at `p`.\n Returns\n -------\n p : array\n Mean parameters\n\n Notes\n -----\n g^(-1)(z) = exp(z)/(alpha*(1-exp(z)))\n '''\n return np.exp(z)/(self.alpha*(1-np.exp(z)))\n\n def deriv(self,p):\n '''\n Derivative of the negative binomial transform\n\n Parameters\n ----------\n p : array-like\n Mean parameters\n\n Returns\n -------\n g'(p) : array\n The derivative of the negative binomial transform link function\n\n Notes\n -----\n g'(x) = 1/(x+alpha*x^2)\n '''\n return 
1/(p+self.alpha*p**2)\n\nclass nbinom(NegativeBinomial):\n \"\"\"\n The negative binomial link function.\n\n Notes\n -----\n g(p) = log(p/(p + 1/alpha))\n\n nbinom is an alias of NegativeBinomial.\n nbinom = NegativeBinomial(alpha=1.)\n \"\"\"\n pass\n"
] | [
[
"numpy.array",
"numpy.column_stack"
],
[
"numpy.std",
"scipy.stats.scoreatpercentile"
],
[
"numpy.log",
"numpy.exp",
"numpy.power",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vutuanhai237/QuantumTomographyProject | [
"78058e3faece2209e46c9f9e16a1c38cdb33e7e2"
] | [
"codes/qtm/qcompilation.py"
] | [
"import qtm.base\nimport qtm.optimizer\nimport qtm.loss\nimport qtm.utilities\nimport numpy as np\nimport typing, types\nimport qiskit\nimport matplotlib.pyplot as plt\n\nclass QuantumCompilation():\n def __init__(self) -> None:\n self.u = None\n self.vdagger = None\n self.is_trained = False\n self.optimizer = None\n self.loss_func = None\n self.thetas = None\n self.thetass = []\n self.loss_values = []\n self.fidelities = []\n self.traces = []\n self.kwargs = None\n return\n\n def __init__(self, u: typing.Union[types.FunctionType, qiskit.QuantumCircuit], vdagger: typing.Union[types.FunctionType, qiskit.QuantumCircuit], optimizer: typing.Union[types.FunctionType, str], loss_func: typing.Union[types.FunctionType, str], thetas: np.ndarray = np.array([]), **kwargs):\n \"\"\"_summary_\n\n Args:\n - u (typing.Union[types.FunctionType, qiskit.QuantumCircuit]): In quantum state preparation problem, this is the ansatz. In tomography, this is the circuit that generate random Haar state.\n - vdagger (typing.Union[types.FunctionType, qiskit.QuantumCircuit]): In quantum tomography problem, this is the ansatz. In state preparation, this is the circuit that generate random Haar state.\n - optimizer (typing.Union[types.FunctionType, str]): You can put either string or function here. If type string, qcompilation produces some famous optimizers such as: 'sgd', 'adam', 'qng-fubini-study', 'qng-qfim', 'qng-adam'.\n - loss_func (typing.Union[types.FunctionType, str]): You can put either string or function here. If type string, qcompilation produces some famous optimizers such as: 'loss_basic' (1 - p0) and 'loss_fubini_study' (\\sqrt{(1 - p0)}).\n - thetas (np.ndarray, optional): initial parameters. Note that it must fit with your ansatz. 
Defaults to np.array([]).\n \"\"\"\n self.set_u(u)\n self.set_vdagger(vdagger)\n self.set_optimizer(optimizer)\n self.set_loss_func(loss_func)\n self.set_kwargs(**kwargs)\n self.set_thetas(thetas)\n return\n\n def set_u(self, _u: typing.Union[types.FunctionType, qiskit.QuantumCircuit]):\n \"\"\"In quantum state preparation problem, this is the ansatz. In tomography, this is the circuit that generate random Haar state.\n\n Args:\n - _u (typing.Union[types.FunctionType, qiskit.QuantumCircuit]): init circuit\n \"\"\"\n if callable(_u) or isinstance(_u, qiskit.QuantumCircuit):\n self.u = _u\n else:\n raise ValueError('The U part must be a function f: thetas -> qiskit.QuantumCircuit or a determined quantum circuit')\n return\n\n def set_vdagger(self, _vdagger):\n \"\"\"In quantum state tomography problem, this is the ansatz. In state preparation, this is the circuit that generate random Haar state.\n\n Args:\n - _vdagger (typing.Union[types.FunctionType, qiskit.QuantumCircuit]): init circuit\n \"\"\"\n if callable(_vdagger) or isinstance(_vdagger, qiskit.QuantumCircuit):\n self.vdagger = _vdagger\n else:\n raise ValueError('The V dagger part must be a function f: thetas -> qiskit.QuantumCircuit or a determined quantum circuit')\n return\n\n def set_loss_func(self, _loss_func: typing.Union[types.FunctionType, str]):\n \"\"\"Set the loss function for compiler\n\n Args:\n - _loss_func (typing.Union[types.FunctionType, str])\n\n Raises:\n ValueError: when you pass wrong type\n \"\"\"\n if callable(_loss_func):\n self.loss_func = _loss_func\n elif isinstance(_loss_func, str):\n if _loss_func == 'loss-basic':\n self.loss_func = qtm.loss.loss_basis\n elif _loss_func == 'loss-fubini-study':\n self.loss_func = qtm.loss.loss_fubini_study\n else:\n raise ValueError('The loss function must be a function f: measurement value -> loss value or string in [\"loss_basic\", \"loss_fubini_study\"]')\n return\n\n def set_optimizer(self, _optimizer: typing.Union[types.FunctionType, str]):\n 
\"\"\"Change the optimizer of the compiler\n\n Args:\n - _optimizer (typing.Union[types.FunctionType, str])\n\n Raises:\n ValueError: when you pass wrong type\n \"\"\"\n if callable(_optimizer):\n self.optimizer = _optimizer\n elif isinstance(_optimizer,str):\n if _optimizer == 'sgd':\n self.optimizer = qtm.optimizer.sgd\n elif _optimizer == 'adam':\n self.optimizer = qtm.optimizer.adam\n elif _optimizer == 'qng-fubini-study':\n self.optimizer = qtm.optimizer.qng_fubini_study\n elif _optimizer == 'qng-qfim':\n self.optimizer = qtm.optimizer.qng_qfim\n elif _optimizer == 'qng-adam':\n self.optimizer = qtm.optimizer.qng_adam\n else:\n raise ValueError('The optimizer must be a function f: thetas -> thetas or string in [\"sgd\", \"adam\", \"qng_qfim\", \"qng_fubini_study\", \"qng_adam\"]')\n return\n \n def set_num_step(self, _num_step: int):\n \"\"\"Set the number of iteration for compiler\n\n Args:\n - _num_step (int): number of iterations\n\n Raises:\n ValueError: when you pass a nasty value\n \"\"\"\n if _num_step > 0 and isinstance(_num_step, int):\n self.num_step = _num_step\n else:\n raise ValueError('Number of iterations must be a integer, such that 10 or 100.')\n return\n\n def set_thetas(self, _thetas: np.ndarray):\n \"\"\"Set parameter, it will be updated at each iteration\n\n Args:\n _thetas (np.ndarray): parameter for u or vdagger\n \"\"\"\n if isinstance(_thetas, np.ndarray):\n self.thetas = _thetas\n else:\n raise ValueError('The parameter must be numpy array')\n return\n\n def set_kwargs(self, **kwargs):\n \"\"\"Arguments supported for u or vdagger only. Ex: number of layer\n \"\"\"\n self.__dict__.update(**kwargs)\n self.kwargs = kwargs\n return\n\n def fit(self, num_steps: int = 100, verbose: int = 0):\n \"\"\"Optimize the thetas parameters\n\n Args:\n - num_steps: number of iterations\n - verbose (int, optional): 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per 10 steps. 
Verbose 1 is good for timing training time, verbose 2 if you want to log loss values to a file. Please install package tdqm if you want to use verbose 1. \n \n \"\"\"\n self.thetass, self.loss_values = qtm.base.fit(\n self.u, self.vdagger, self.thetas, num_steps, self.loss_func, self.optimizer, verbose, is_return_all_thetas=True, **self.kwargs)\n self.is_trained = True\n if callable(self.u):\n self.traces, self.fidelities = qtm.utilities.calculate_state_preparation_metrics(self.u, self.vdagger, self.thetass, **self.kwargs)\n else:\n self.traces, self.fidelities = qtm.utilities.calculate_state_tomography_metrics(self.u, self.vdagger, self.thetass, **self.kwargs)\n return\n\n def save(self, metric: str = \"\", text = \"\", path = './', save_all: bool = False):\n \"\"\"_summary_\n\n Args:\n - metric (str)\n - text (str): Defaults to './'. Additional file name string\n - path (str, optional): Defaults to './'.\n - save_all (bool, optional): Save thetass, fidelity, trace and loss_value if save_all = True\n\n Raises:\n ValueError: if save_all = False and metric is not right.\n \"\"\"\n if save_all:\n np.savetxt(path + \"/thetass\" + text + \".csv\", self.thetass, delimiter=\",\")\n np.savetxt(path + \"/fidelities\"+ text + \".csv\", self.fidelities, delimiter=\",\")\n np.savetxt(path + \"/traces\" + text + \".csv\", self.traces, delimiter=\",\")\n np.savetxt(path + \"/loss_values\" + text + \".csv\", self.loss_values, delimiter=\",\")\n else:\n if metric == 'thetas':\n np.savetxt(path + \"/thetass\" + text + \".csv\", self.thetass, delimiter=\",\")\n elif metric == 'fidelity':\n np.savetxt(path + \"/fidelities\" + text + \".csv\", self.fidelities, delimiter=\",\")\n elif metric == 'trace':\n np.savetxt(path + \"/traces\" + text + \".csv\", self.traces, delimiter=\",\")\n elif metric == 'loss_value':\n np.savetxt(path + \"/loss_values\" + text + \".csv\", self.loss_values, delimiter=\",\")\n else:\n raise ValueError('The metric must be thetas, fidelity, trace or 
loss_value')\n print(\"Saved \" + metric + \" at \" + path)\n return\n\n def reset(self):\n \"\"\"Delete all current property of compiler\n \"\"\"\n self.u = None\n self.vdagger = None\n self.is_trained = False\n self.optimizer = None\n self.loss_func = None\n self.num_step = 0\n self.thetas = None\n self.thetass = []\n self.loss_values = []\n return\n"
] | [
[
"numpy.savetxt",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
snoopycrimecop/scripts | [
"7ec5df31e83f1fda7efc02aca3f3426174547308"
] | [
"omero/util_scripts/Combine_Images.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n-----------------------------------------------------------------------------\n Copyright (C) 2006-2014 University of Dundee. All rights reserved.\n\n\n This program is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2 of the License, or\n (at your option) any later version.\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License along\n with this program; if not, write to the Free Software Foundation, Inc.,\n 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\n------------------------------------------------------------------------------\n\nThis script takes a number of images (or Z-stacks) and merges them to create\nadditional C, T, Z dimensions.\n\n@author Will Moore \n<a href=\"mailto:[email protected]\">[email protected]</a>\n@version 3.0\n<small>\n(<b>Internal version:</b> $Revision: $Date: $)\n</small>\n@since 3.0-Beta4.2\n\n\"\"\"\n\nimport re\nfrom numpy import zeros\n\nimport omero\nimport omero.scripts as scripts\nfrom omero.gateway import BlitzGateway\nimport omero.constants\nfrom omero.rtypes import rstring, rlong, robject\nimport omero.util.script_utils as script_utils\n\nCOLOURS = script_utils.COLOURS\n\nDEFAULT_T_REGEX = \"_T\"\nDEFAULT_Z_REGEX = \"_Z\"\nDEFAULT_C_REGEX = \"_C\"\n\nchannel_regexes = {\n DEFAULT_C_REGEX: r'_C(?P<C>.+?)(_|$)',\n \"C\": r'C(?P<C>\\w+?)',\n \"_c\": r'_c(?P<C>\\w+?)',\n \"_w\": r'_w(?P<C>\\w+?)',\n \"None (single channel)\": False}\n\nz_regexes = {\n DEFAULT_Z_REGEX: r'_Z(?P<Z>\\d+)',\n \"Z\": r'Z(?P<Z>\\d+)',\n \"_z\": r'_z(?P<Z>\\d+)',\n \"None (single z section)\": 
False}\n\ntime_regexes = {\n DEFAULT_T_REGEX: r'_T(?P<T>\\d+)',\n \"T\": r'T(?P<T>\\d+)',\n \"_t\": r'_t(?P<T>\\d+)',\n \"None (single time point)\": False}\n\n\ndef get_plane(raw_pixel_store, pixels, the_z, the_c, the_t):\n \"\"\"\n This method downloads the specified plane of the OMERO image and returns\n it as a numpy array.\n\n @param session The OMERO session\n @param imageId The ID of the image to download\n @param pixels The pixels object, with pixelsType\n @param imageName The name of the image to write. If no path, saved in\n the current directory.\n \"\"\"\n\n # get the plane\n pixels_id = pixels.getId().getValue()\n raw_pixel_store.setPixelsId(pixels_id, True)\n return script_utils.download_plane(\n raw_pixel_store, pixels, the_z, the_c, the_t)\n\n\ndef manually_assign_images(parameter_map, image_ids, source_z):\n\n size_z = source_z\n size_c = 1\n size_t = 1\n\n dims = []\n dim_sizes = [1, 1, 1] # at least 1 in each dimension\n dim_map = {\"C\": \"Size_C\", \"Z\": \"Size_Z\", \"T\": \"Size_T\"}\n dimension_params = [\"Dimension_1\", \"Dimension_2\", \"Dimension_3\"]\n\n for i, d in enumerate(dimension_params):\n if d in parameter_map and len(parameter_map[d]) > 0:\n # First letter of 'Channel' or 'Time' or 'Z'\n dim = parameter_map[d][0]\n dims.append(dim)\n if dim == \"Z\" and source_z > 1:\n continue\n size_param = dim_map[dim]\n if size_param in parameter_map:\n dim_sizes[i] = parameter_map[size_param]\n else:\n dim_sizes[i] = len(image_ids) // \\\n (dim_sizes[0] * dim_sizes[1] * dim_sizes[2])\n\n index = 0\n\n image_map = {} # map of (z,c,t) : imageId\n\n for dim3 in range(dim_sizes[2]):\n for dim2 in range(dim_sizes[1]):\n for dim1 in range(dim_sizes[0]):\n if index >= len(image_ids):\n break\n z, c, t = (0, 0, 0)\n ddd = (dim1, dim2, dim3)\n # bit of a hack, but this somehow does my head in!!\n for i, d in enumerate(dims):\n if d == \"C\":\n c = ddd[i]\n size_c = max(size_c, c+1)\n elif d == \"T\":\n t = ddd[i]\n size_t = max(size_t, t+1)\n elif d 
== \"Z\":\n z = ddd[i]\n size_z = max(size_z, z+1)\n # handle Z stacks...\n if source_z > 1:\n for src_z in range(source_z):\n image_map[(src_z, c, t)] = (image_ids[index], src_z)\n else:\n image_map[(z, c, t)] = (image_ids[index], 0)\n index += 1\n\n return (size_z, size_c, size_t, image_map)\n\n\ndef assign_images_by_regex(parameter_map, image_ids, query_service, source_z,\n id_name_map=None):\n\n c = None\n regex_channel = channel_regexes[parameter_map[\"Channel_Name_Pattern\"]]\n if regex_channel:\n c = re.compile(regex_channel)\n\n t = None\n regex_t = time_regexes[parameter_map[\"Time_Name_Pattern\"]]\n if regex_t:\n t = re.compile(regex_t)\n\n z = None\n regex_z = z_regexes[parameter_map[\"Z_Name_Pattern\"]]\n if regex_z:\n z = re.compile(regex_z)\n\n # other parameters we need to determine\n size_z = source_z\n size_t = 1\n z_start = None # could be 0 or 1 ?\n t_start = None\n\n image_map = {} # map of (z,c,t) : imageId\n channels = []\n\n if id_name_map is None:\n id_name_map = get_image_names(query_service, image_ids)\n\n # assign each (imageId,zPlane) to combined image (z,c,t) by name.\n for iid in image_ids:\n name = id_name_map[iid]\n if t:\n t_search = t.search(name)\n if c:\n c_search = c.search(name)\n\n if t is None or t_search is None:\n the_t = 0\n else:\n the_t = int(t_search.group('T'))\n\n if c is None or c_search is None:\n c_name = \"0\"\n else:\n c_name = c_search.group('C')\n if c_name in channels:\n the_c = channels.index(c_name)\n else:\n the_c = len(channels)\n channels.append(c_name)\n\n size_t = max(size_t, the_t+1)\n if t_start is None:\n t_start = the_t\n else:\n t_start = min(t_start, the_t)\n\n # we have T and C now. 
Need to check if source images are Z stacks\n if source_z > 1:\n z_start = 0\n for src_z in range(source_z):\n image_map[(src_z, the_c, the_t)] = (iid, src_z)\n else:\n if z:\n z_search = z.search(name)\n\n if z is None or z_search is None:\n the_z = 0\n else:\n the_z = int(z_search.group('Z'))\n\n size_z = max(size_z, the_z+1)\n if z_start is None:\n z_start = the_z\n else:\n z_start = min(z_start, the_z)\n\n # every plane comes from z=0\n image_map[(the_z, the_c, the_t)] = (iid, 0)\n\n # if indexes were 1-based (or higher), need to shift indexes accordingly.\n if t_start > 0 or z_start > 0:\n size_t = size_t-t_start\n size_z = size_z-z_start\n i_map = {}\n for key, value in image_map.items():\n z, c, t = key\n i_map[(z-z_start, c, t-t_start)] = value\n else:\n i_map = image_map\n\n c_names = {}\n for c, name in enumerate(channels):\n c_names[c] = name\n return (size_z, c_names, size_t, i_map)\n\n\ndef get_image_names(query_service, image_ids):\n id_string = \",\".join([str(i) for i in image_ids])\n query_string = \"select i from Image i where i.id in (%s)\" % id_string\n images = query_service.findAllByQuery(query_string, None)\n id_map = {}\n for i in images:\n iid = i.getId().getValue()\n name = i.getName().getValue()\n id_map[iid] = name\n return id_map\n\n\ndef pick_pixel_sizes(pixel_sizes):\n \"\"\"\n Process a list of pixel sizes and pick sizes to set for new image.\n If we have different sizes from different images, return None\n \"\"\"\n pix_size = None\n for px in pixel_sizes:\n if px is None:\n continue\n if pix_size is None:\n pix_size = px\n else:\n # compare - if different, return None\n if (pix_size.getValue() != px.getValue() or\n pix_size.getUnit() != px.getUnit()):\n return None\n return pix_size\n\n\ndef make_single_image(services, parameter_map, image_ids, dataset, colour_map):\n \"\"\"\n This takes the images specified by image_ids, sorts them in to Z,C,T\n dimensions according to parameters in the parameter_map, assembles them\n into a new 
Image, which is saved in dataset.\n \"\"\"\n\n if len(image_ids) == 0:\n return\n\n rendering_engine = services[\"renderingEngine\"]\n query_service = services[\"queryService\"]\n pixels_service = services[\"pixelsService\"]\n raw_pixel_store = services[\"rawPixelStore\"]\n raw_pixel_store_upload = services[\"rawPixelStoreUpload\"]\n update_service = services[\"updateService\"]\n container_service = services[\"containerService\"]\n\n # Filter images by name if user has specified filter.\n id_name_map = None\n if \"Filter_Names\" in parameter_map:\n filter_string = parameter_map[\"Filter_Names\"]\n if len(filter_string) > 0:\n id_name_map = get_image_names(query_service, image_ids)\n image_ids = [i for i in image_ids\n if id_name_map[i].find(filter_string) > -1]\n\n image_id = image_ids[0]\n\n # get pixels, with pixelsType, from the first image\n query_string = \"select p from Pixels p join fetch p.image i join \"\\\n \"fetch p.pixelsType pt where i.id='%d'\" % image_id\n pixels = query_service.findByQuery(query_string, None)\n # use the pixels type object we got from the first image.\n pixels_type = pixels.getPixelsType()\n\n # combined image will have same X and Y sizes...\n size_x = pixels.getSizeX().getValue()\n size_y = pixels.getSizeY().getValue()\n # if we have a Z stack, use this in new image (don't combine Z)\n source_z = pixels.getSizeZ().getValue()\n\n # Now we need to find where our planes are coming from.\n # imageMap is a map of destination:source, defined as (newX, newY,\n # newZ):(imageId, z)\n if \"Manually_Define_Dimensions\" in parameter_map and \\\n parameter_map[\"Manually_Define_Dimensions\"]:\n size_z, size_c, size_t, image_map = manually_assign_images(\n parameter_map, image_ids, source_z)\n c_names = {}\n else:\n size_z, c_names, size_t, image_map = assign_images_by_regex(\n parameter_map, image_ids, query_service, source_z, id_name_map)\n size_c = len(c_names)\n\n if \"Channel_Names\" in parameter_map:\n for c, name in 
enumerate(parameter_map[\"Channel_Names\"]):\n c_names[c] = name\n\n image_name = \"combinedImage\"\n description = \"created from image Ids: %s\" % image_ids\n\n channel_list = range(size_c)\n iid = pixels_service.createImage(size_x, size_y, size_z, size_t,\n channel_list, pixels_type, image_name,\n description)\n image = container_service.getImages(\"Image\", [iid.getValue()], None)[0]\n\n pixels_id = image.getPrimaryPixels().getId().getValue()\n raw_pixel_store_upload.setPixelsId(pixels_id, True)\n\n pixel_sizes = {'x': [], 'y': []}\n for the_c in range(size_c):\n min_value = 0\n max_value = 0\n for the_z in range(size_z):\n for the_t in range(size_t):\n if (the_z, the_c, the_t) in image_map:\n image_id, plane_z = image_map[(the_z, the_c, the_t)]\n query_string = \"select p from Pixels p join fetch \"\\\n \"p.image i join fetch p.pixelsType pt where \"\\\n \"i.id='%d'\" % image_id\n pixels = query_service.findByQuery(query_string,\n None)\n plane_2d = get_plane(raw_pixel_store, pixels, plane_z,\n 0, 0)\n # Note pixels sizes (may be None)\n pixel_sizes['x'].append(pixels.getPhysicalSizeX())\n pixel_sizes['y'].append(pixels.getPhysicalSizeY())\n else:\n plane_2d = zeros((size_y, size_x))\n script_utils.upload_plane(raw_pixel_store_upload,\n plane_2d, the_z, the_c, the_t)\n min_value = min(min_value, plane_2d.min())\n max_value = max(max_value, plane_2d.max())\n pixels_service.setChannelGlobalMinMax(pixels_id, the_c,\n float(min_value),\n float(max_value))\n rgba = COLOURS[\"White\"]\n if the_c in colour_map:\n rgba = colour_map[the_c]\n script_utils.reset_rendering_settings(rendering_engine, pixels_id,\n the_c, min_value, max_value,\n rgba)\n\n # rename new channels\n pixels = rendering_engine.getPixels()\n # has channels loaded - (getting Pixels from image doesn't)\n i = 0\n for c in pixels.iterateChannels():\n # c is an instance of omero.model.ChannelI\n if i >= len(c_names):\n break\n lc = c.getLogicalChannel() # returns omero.model.LogicalChannelI\n 
lc.setName(rstring(c_names[i]))\n update_service.saveObject(lc)\n i += 1\n\n # Set pixel sizes if known\n pix_size_x = pick_pixel_sizes(pixel_sizes['x'])\n pix_size_y = pick_pixel_sizes(pixel_sizes['y'])\n if pix_size_x is not None or pix_size_y is not None:\n # reload to avoid OptimisticLockException\n pixels = services[\"queryService\"].get('Pixels',\n pixels.getId().getValue())\n if pix_size_x is not None:\n pixels.setPhysicalSizeX(pix_size_x)\n if pix_size_y is not None:\n pixels.setPhysicalSizeY(pix_size_y)\n services[\"updateService\"].saveObject(pixels)\n\n # put the image in dataset, if specified.\n if dataset and dataset.canLink():\n link = omero.model.DatasetImageLinkI()\n link.parent = omero.model.DatasetI(dataset.getId(), False)\n link.child = omero.model.ImageI(image.getId().getValue(), False)\n update_service.saveAndReturnObject(link)\n else:\n link = None\n\n return image, link\n\n\ndef combine_images(conn, parameter_map):\n\n # get the services we need\n services = {}\n services[\"containerService\"] = conn.getContainerService()\n services[\"renderingEngine\"] = conn.createRenderingEngine()\n services[\"queryService\"] = conn.getQueryService()\n services[\"pixelsService\"] = conn.getPixelsService()\n services[\"rawPixelStore\"] = conn.c.sf.createRawPixelsStore()\n services[\"rawPixelStoreUpload\"] = conn.c.sf.createRawPixelsStore()\n services[\"updateService\"] = conn.getUpdateService()\n services[\"rawFileStore\"] = conn.createRawFileStore()\n\n query_service = services[\"queryService\"]\n\n colour_map = {}\n if \"Channel_Colours\" in parameter_map:\n for c, colour in enumerate(parameter_map[\"Channel_Colours\"]):\n if colour in COLOURS:\n colour_map[c] = COLOURS[colour]\n\n # Get images or datasets\n message = \"\"\n objects, log_message = script_utils.get_objects(conn, parameter_map)\n message += log_message\n if not objects:\n return None, message\n\n # get the images IDs from list (in order) or dataset (sorted by name)\n output_images = []\n 
links = []\n\n data_type = parameter_map[\"Data_Type\"]\n if data_type == \"Image\":\n dataset = None\n objects.sort(key=lambda x: (x.getName())) # Sort images by name\n image_ids = [image.id for image in objects]\n # get dataset from first image\n query_string = \"select i from Image i join fetch i.datasetLinks idl\"\\\n \" join fetch idl.parent where i.id in (%s)\" % image_ids[0]\n image = query_service.findByQuery(query_string, None)\n if image:\n for link in image.iterateDatasetLinks():\n ds = link.parent\n dataset = conn.getObject(\"Dataset\", ds.getId().getValue())\n break # only use 1st dataset\n new_img, link = make_single_image(services, parameter_map, image_ids,\n dataset, colour_map)\n if new_img:\n output_images.append(new_img)\n if link:\n links.append(link)\n else:\n for dataset in objects:\n images = list(dataset.listChildren())\n if not images:\n continue\n images.sort(key=lambda x: (x.getName()))\n image_ids = [i.getId() for i in images]\n new_img, link = make_single_image(services, parameter_map,\n image_ids, dataset, colour_map)\n if new_img:\n output_images.append(new_img)\n if link:\n links.append(link)\n\n # try and close any stateful services\n for s in services:\n try:\n s.close()\n except Exception:\n pass\n\n if output_images:\n if len(output_images) > 1:\n message += \"%s new images created\" % len(output_images)\n else:\n message += \"New image created\"\n if not links or not len(links) == len(output_images):\n message += \" but could not be attached\"\n else:\n message += \"No image created\"\n message += \".\"\n\n return output_images, message\n\n\ndef run_script():\n \"\"\"\n The main entry point of the script, as called by the client via the\n scripting service, passing the required parameters.\n \"\"\"\n ckeys = list(COLOURS.keys())\n ckeys.sort()\n c_options = [rstring(col) for col in ckeys]\n data_types = [rstring('Dataset'), rstring('Image')]\n first_dim = [rstring('Time'), rstring('Channel'), rstring('Z')]\n extra_dims = 
[rstring(''), rstring('Time'), rstring('Channel'),\n rstring('Z')]\n channel_regs = [rstring(r) for r in channel_regexes.keys()]\n z_regs = [rstring(r) for r in z_regexes.keys()]\n t_regs = [rstring(r) for r in time_regexes.keys()]\n\n client = scripts.client(\n 'Combine_Images.py',\n \"\"\"Combine several single-plane images (or Z-stacks) into one with \\\ngreater Z, C, T dimensions.\nSee http://help.openmicroscopy.org/scripts.html\"\"\",\n\n scripts.String(\n \"Data_Type\", optional=False, grouping=\"1\",\n description=\"Use all the images in specified 'Datasets' or choose\"\n \" individual 'Images'.\", values=data_types, default=\"Image\"),\n\n scripts.List(\n \"IDs\", optional=False, grouping=\"2\",\n description=\"List of Dataset IDs or Image IDs to \"\n \"combine.\").ofType(rlong(0)),\n\n scripts.String(\n \"Filter_Names\", grouping=\"2.1\",\n description=\"Filter the images by names that contain this value\"),\n\n scripts.Bool(\n \"Auto_Define_Dimensions\", grouping=\"3\", default=True,\n description=\"\"\"Choose new dimensions with respect to the order of\"\n \" the input images. See URL above.\"\"\"),\n\n scripts.String(\n \"Channel_Name_Pattern\", grouping=\"3.1\", default=DEFAULT_C_REGEX,\n values=channel_regs,\n description=\"\"\"Auto-pick images by channel in the image name\"\"\"),\n\n scripts.String(\n \"Z_Name_Pattern\", grouping=\"3.2\",\n default=DEFAULT_Z_REGEX, values=z_regs,\n description=\"\"\"Auto-pick images by Z-index in the image name\"\"\"),\n\n scripts.String(\n \"Time_Name_Pattern\", grouping=\"3.3\", default=DEFAULT_T_REGEX,\n values=t_regs,\n description=\"\"\"Auto-pick images by T-index in the image name\"\"\"),\n\n scripts.Bool(\n \"Manually_Define_Dimensions\", grouping=\"4\", default=False,\n description=\"\"\"Choose new dimensions with respect to the order of\"\n \" the input images. 
See URL above.\"\"\"),\n\n scripts.String(\n \"Dimension_1\", grouping=\"4.1\",\n description=\"The first Dimension to change\", values=first_dim),\n\n scripts.String(\n \"Dimension_2\", grouping=\"4.2\", values=extra_dims, default=\"\",\n description=\"The second Dimension to change. Only specify this if\"\n \" combining multiple dimensions.\"),\n\n scripts.String(\n \"Dimension_3\", grouping=\"4.3\", values=extra_dims, default=\"\",\n description=\"The third Dimension to change. Only specify this if\"\n \" combining multiple dimensions.\"),\n\n scripts.Int(\n \"Size_Z\", grouping=\"4.4\",\n description=\"Number of Z planes in new image\", min=1),\n\n scripts.Int(\n \"Size_C\", grouping=\"4.5\",\n description=\"Number of channels in new image\", min=1),\n\n scripts.Int(\n \"Size_T\", grouping=\"4.6\",\n description=\"Number of time-points in new image\", min=1),\n\n scripts.List(\n \"Channel_Colours\", grouping=\"7\",\n description=\"List of Colors for channels.\", default=\"White\",\n values=c_options).ofType(rstring(\"\")),\n\n scripts.List(\n \"Channel_Names\", grouping=\"8\",\n description=\"List of Names for channels in the new image.\"),\n\n version=\"4.2.0\",\n authors=[\"William Moore\", \"OME Team\"],\n institutions=[\"University of Dundee\"],\n contact=\"[email protected]\",\n )\n\n try:\n parameter_map = client.getInputs(unwrap=True)\n\n conn = BlitzGateway(client_obj=client)\n\n # create the combined image\n images, message = combine_images(conn, parameter_map)\n\n client.setOutput(\"Message\", rstring(message))\n if images:\n if len(images) == 1:\n client.setOutput(\"Combined_Image\", robject(images[0]))\n elif len(images) > 1:\n client.setOutput(\"First_Image\", robject(images[0]))\n\n finally:\n client.closeSession()\n\n\nif __name__ == \"__main__\":\n run_script()\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yecharlie/keras-retinanet | [
"abcc99ecb75293b895b4e1be274ddc84e76f9c84"
] | [
"keras_retinanet/bin/evaluate.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"\nCopyright 2017-2018 Fizyr (https://fizyr.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport numpy as np\n\nimport yaml\nimport keras\nimport tensorflow as tf\n\n# Allow relative imports when being executed as script.\nif __name__ == \"__main__\" and __package__ is None:\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))\n import keras_retinanet.bin # noqa: F401\n __package__ = \"keras_retinanet.bin\"\n\n# Change these to absolute imports if you copy this script outside the keras_retinanet package.\nfrom keras_retinanet import models\nfrom keras_retinanet.preprocessing.csv_generator import CSVGenerator\nfrom keras_retinanet.preprocessing.pascal_voc import PascalVocGenerator\nfrom keras_retinanet.utils.eval import evaluate\nfrom keras_retinanet.utils.keras_version import check_keras_version\nfrom keras_retinanet.models.retinanet import AnchorParameters\n\n\ndef get_session():\n \"\"\" Construct a modified tf session.\n \"\"\"\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n return tf.Session(config=config)\n\n#def get_absolute_name(name,prefix):\n# return name if os.path.exists(name) else os.path.join(prefix,name)\n\ndef get_anchors_params(anchors_in=None):\n if anchors_in:\n anchors_in = open(anchors_in,'r')\n anchors_params = yaml.load(anchors_in)\n anchors_params.update(ratios=np.array(anchors_params['ratios'],keras.backend.floatx())) \n 
anchors_params.update(scales=np.array(anchors_params['scales'],keras.backend.floatx())) \n else:\n #just use the default params.\n anchors_params = {'sizes':AnchorParameters.default.sizes,\n 'ratios':AnchorParameters.default.ratios,\n 'scales':AnchorParameters.default.scales,\n 'strides':AnchorParameters.default.strides}\n \n return anchors_params\n\ndef create_generator(args):\n \"\"\" Create generators for evaluation.\n \"\"\"\n if args.dataset_type == 'coco':\n # import here to prevent unnecessary dependency on cocoapi\n from ..preprocessing.coco import CocoGenerator\n\n validation_generator = CocoGenerator(\n args.coco_path,\n 'val2017',\n image_min_side=args.image_min_side,\n image_max_side=args.image_max_side\n )\n elif args.dataset_type == 'pascal':\n validation_generator = PascalVocGenerator(\n args.pascal_path,\n 'test',\n image_min_side=args.image_min_side,\n image_max_side=args.image_max_side\n )\n elif args.dataset_type == 'csv':\n validation_generator = CSVGenerator(\n args.annotations,\n args.classes,\n image_min_side=args.image_min_side,\n image_max_side=args.image_max_side,\n )\n else:\n raise ValueError('Invalid data type received: {}'.format(args.dataset_type))\n\n return validation_generator\n\ndef parse_args(args):\n \"\"\" Parse the arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description='Evaluation script for a RetinaNet network.')\n subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')\n subparsers.required = True\n\n coco_parser = subparsers.add_parser('coco')\n coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')\n\n pascal_parser = subparsers.add_parser('pascal')\n pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. 
/tmp/VOCdevkit).')\n\n csv_parser = subparsers.add_parser('csv')\n csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for evaluation.')\n csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')\n\n parser.add_argument('model', help='Path to RetinaNet model.')\n\n parser.add_argument(\"--convert-model\", help='Convert the model to an inference model (ie. the input is a training model).', action='store_true')\n parser.add_argument('--backbone', help='The backbone of the model.', default='resnet50')\n parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')\n parser.add_argument('--score-threshold', help='Threshold on score to filter detections with (defaults to 0.05).', default=0.05, type=float)\n parser.add_argument('--iou-threshold', help='IoU Threshold to count for a positive detection (defaults to 0.5).', default=0.5, type=float)\n parser.add_argument('--max-detections', help='Max Detections per image (defaults to 100).', default=100, type=int)\n parser.add_argument('--save-path', help='Path for saving images with detections (doesn\\'t work for COCO).')\n parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=800)\n parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333)\n parser.add_argument('--anchors', help='Load anchors parameters by a yaml file.',default=None)\n\n return parser.parse_args(args)\n\ndef main(args=None):\n # parse arguments\n if args is None:\n args = sys.argv[1:]\n args = parse_args(args)\n\n # make sure keras is the minimum required version\n check_keras_version()\n\n # optionally choose specific GPU\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n keras.backend.tensorflow_backend.set_session(get_session())\n\n # make save path if it doesn't exist\n if args.save_path is not None and not 
os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n \n if not args.anchors:\n #automatically search the snapshot path for anchors configure\n #if it doesn't exist, then default anchors paramaters are assumed.\n anchors_path = os.path.join(os.path.dirname(args.model),\"anchors.yaml\")\n anchors_path = anchors_path if os.path.exists(anchors_path) else None\n else:\n anchors_path = args.anchors\n anchors_dict = get_anchors_params(anchors_path)\n anchors_params = AnchorParameters(**anchors_dict)\n\n # create the generator\n #(It's ok not to update anchors args, as we only use the generator for load images and annotations.)\n generator = create_generator(args)\n\n # load the model\n print('Loading model, this may take a second...')\n model = models.load_model(args.model, backbone_name=args.backbone, convert=args.convert_model,anchor_parameters = anchors_params)\n\n # print model summary\n # print(model.summary())\n\n # start evaluation\n if args.dataset_type == 'coco':\n from ..utils.coco_eval import evaluate_coco\n evaluate_coco(generator, model, args.score_threshold)\n else:\n average_precisions = evaluate(\n generator,\n model,\n iou_threshold=args.iou_threshold,\n score_threshold=args.score_threshold,\n max_detections=args.max_detections,\n save_path=args.save_path\n )\n\n # print evaluation\n present_classes = 0\n precision = 0\n for label, (average_precision, num_annotations) in average_precisions.items():\n print('{:.0f} instances of class'.format(num_annotations),\n generator.label_to_name(label), 'with average precision: {:.4f}'.format(average_precision))\n if num_annotations > 0:\n present_classes += 1\n precision += average_precision\n print('mAP: {:.4f}'.format(precision / present_classes))\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"tensorflow.ConfigProto",
"tensorflow.Session"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
ishatserka/MachineLearningAndDataAnalysisCoursera | [
"c54661f13857d5bcb0095ba2fb12f5a403a4a70f",
"e82e772df2f4aec162cb34ac6127df10d14a625a",
"1985d4c73fabd5f08f54b922e73a9306e09c77a5",
"e82e772df2f4aec162cb34ac6127df10d14a625a",
"e82e772df2f4aec162cb34ac6127df10d14a625a",
"e82e772df2f4aec162cb34ac6127df10d14a625a",
"e82e772df2f4aec162cb34ac6127df10d14a625a",
"e82e772df2f4aec162cb34ac6127df10d14a625a"
] | [
"venv/Lib/site-packages/pybrain2/rl/learners/valuebased/interface.py",
"venv/Lib/site-packages/tensorflow/contrib/periodic_resample/python/ops/gen_periodic_resample_op.py",
"venv/Lib/site-packages/seaborn/tests/test_axisgrid.py",
"venv/Lib/site-packages/gensim/models/ldamodel.py",
"venv/Lib/site-packages/pybrain3/supervised/evolino/networkwrapper.py",
"venv/Lib/site-packages/gensim/test/test_coherencemodel.py",
"venv/Lib/site-packages/pybrain3/optimization/finitedifference/pgpe.py",
"venv/Lib/site-packages/tensorflow/python/ops/math_ops.py"
] | [
"__author__ = 'Thomas Rueckstiess, [email protected]'\n\nfrom pybrain.utilities import abstractMethod\nfrom pybrain.structure.modules import Table, Module, TanhLayer, LinearLayer, BiasUnit\nfrom pybrain.structure.connections import FullConnection\nfrom pybrain.structure.networks import FeedForwardNetwork\nfrom pybrain.structure.parametercontainer import ParameterContainer\nfrom pybrain.tools.shortcuts import buildNetwork\nfrom pybrain.utilities import one_to_n\n\nfrom scipy import argmax, array, r_, asarray, where\nfrom random import choice\n\n\nclass ActionValueInterface(object):\n \"\"\" Interface for different ActionValue modules, like the\n ActionValueTable or the ActionValueNetwork.\n \"\"\"\n\n numActions = None\n\n def getMaxAction(self, state):\n abstractMethod()\n\n def getActionValues(self, state):\n abstractMethod()\n\n\nclass ActionValueTable(Table, ActionValueInterface):\n \"\"\" A special table that is used for Value Estimation methods\n in Reinforcement Learning. This table is used for value-based\n TD algorithms like Q or SARSA.\n \"\"\"\n\n def __init__(self, numStates, numActions, name=None):\n Module.__init__(self, 1, 1, name)\n ParameterContainer.__init__(self, numStates * numActions)\n self.numRows = numStates\n self.numColumns = numActions\n\n @property\n def numActions(self):\n return self.numColumns\n\n def _forwardImplementation(self, inbuf, outbuf):\n \"\"\" Take a vector of length 1 (the state coordinate) and return\n the action with the maximum value over all actions for this state.\n \"\"\"\n outbuf[0] = self.getMaxAction(inbuf[0])\n\n def getMaxAction(self, state):\n \"\"\" Return the action with the maximal value for the given state. 
\"\"\"\n values = self.params.reshape(self.numRows, self.numColumns)[state, :].flatten()\n action = where(values == max(values))[0]\n action = choice(action)\n return action\n\n def getActionValues(self, state):\n return self.params.reshape(self.numRows, self.numColumns)[state, :].flatten()\n\n def initialize(self, value=0.0):\n \"\"\" Initialize the whole table with the given value. \"\"\"\n self._params[:] = value\n\n\nclass ActionValueNetwork(Module, ActionValueInterface):\n \"\"\" A network that approximates action values for continuous state /\n discrete action RL environments. To receive the maximum action\n for a given state, a forward pass is executed for all discrete\n actions, and the maximal action is returned. This network is used\n for the NFQ algorithm. \"\"\"\n\n def __init__(self, dimState, numActions, name=None):\n Module.__init__(self, dimState, 1, name)\n self.network = buildNetwork(dimState + numActions, dimState + numActions, 1)\n self.numActions = numActions\n\n def _forwardImplementation(self, inbuf, outbuf):\n \"\"\" takes the state vector and return the discrete action with\n the maximum value over all actions for this state.\n \"\"\"\n outbuf[0] = self.getMaxAction(asarray(inbuf))\n\n def getMaxAction(self, state):\n \"\"\" Return the action with the maximal value for the given state. \"\"\"\n return argmax(self.getActionValues(state))\n\n def getActionValues(self, state):\n \"\"\" Run forward activation for each of the actions and returns all values. \"\"\"\n values = array([self.network.activate(r_[state, one_to_n(i, self.numActions)]) for i in range(self.numActions)])\n return values\n\n def getValue(self, state, action):\n return self.network.activate(r_[state, one_to_n(action, self.numActions)])",
"\"\"\"Python wrappers around TensorFlow ops.\n\nThis file is MACHINE GENERATED! Do not edit.\n\"\"\"\n\nimport collections as _collections\n\nfrom tensorflow.python.eager import execute as _execute\nfrom tensorflow.python.eager import context as _context\nfrom tensorflow.python.eager import core as _core\nfrom tensorflow.python.framework import dtypes as _dtypes\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\n\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\n# Needed to trigger the call to _set_call_cpp_shape_fn.\nfrom tensorflow.python.framework import common_shapes as _common_shapes\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\nfrom tensorflow.python.framework import ops as _ops\nfrom tensorflow.python.framework import op_def_library as _op_def_library\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export('PeriodicResample')\ndef periodic_resample(values, shape, name=None):\n r\"\"\"Periodically resample elements of a tensor to conform to `shape`.\n\n This function implements a slightly more generic version of the subpixel\n convolutions found in this [paper](https://arxiv.org/abs/1609.05158).\n\n The formula for computing the elements in the `output` tensor is as follows:\n `T` = `values` tensor of rank `R`\n `S` = desired `shape` of output tensor (vector of length `R`)\n `P` = `output` tensor of rank `R`\n \\((T_1,\\ldots,T_R)\\) = shape(`T`)\n \\([S_1,\\ldots,S_q,\\ldots,S_R]\\) = elements of vector `S`\n\n A single element in `S` is left unspecified (denoted \\(S_q=-1\\)).\n Let \\(f_i\\) denote the (possibly non-integer) factor that relates the original\n dimension to the desired dimensions, \\(S_i=f_i T_i\\), for \\(i\\neq q\\) where\n \\(f_i>0\\).\n Define the following:\n \\(g_i=\\lceil f_i\\rceil\\)\n \\(t=\\prod_i T_i\\)\n \\(s=\\prod_{i\\neq q} S_i\\)\n \\(S_q\\) can then be defined as by \\(S_q=\\lfloor t/s\\rfloor\\).\n The elements of the resulting tensor are 
defined as\n \\(P_{s_1,\\ldots,s_R}=T_{h_1,\\ldots,h_q,\\ldots,h_R}\\).\n The \\(h_i\\) (\\(i\\neq q\\)) are defined by \\(h_i=\\lfloor s_i/g_i\\rfloor\\).\n \\(h_q=S_q\\sum_{j\\neq q}^{q-1}G_j \\mathrm{mod}(s_j,g_j) + s_q\\), where\n \\(G_j=\\prod_{i}^{j-1}g_i\\) (\\(G_0=1\\)).\n\n One drawback of this method is that whenever the output dimensions are slightly\n less than integer multiples of the input dimensions, many of the tensor elements\n are repeated in an inefficient way. This is resolved by specifying that all\n desired dimensions are integer multiples of the input tensor.\n\n For example:\n\n ```prettyprint\n `input` is [[ 0 1 2 3]\n [ 4 5 6 7]\n [ 8 9 10 11]]\n\n tf.periodic_resample(input, [6, None]) ==> [[ 0 1]\n [ 2 3]\n [ 4 5]\n [ 6 7]\n [ 8 9]\n [10 11]]\n ```\n\n Args:\n values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`, `uint32`, `uint64`, `bfloat16`.\n The tensor of rank `R` to periodic_resample\n shape: A `tf.TensorShape` or list of `ints`.\n A 1-D tensor representing the desired shape of the output tensor.\n Exactly one element of this tensor must have the value `None` which represents\n that this dimension of `values` can be adjusted downward in order to\n accommodate increases in other dimensions. The specified sizes of the\n non-adjustable dimensions must by at least as large as in the `values` tensor.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `values`.\n Periodically resampled tensor that has dimensions specified as in\n `shape` except that the dimension specified as `None` will be minimally\n decreased as necessary.\n \"\"\"\n shape = _execute.make_shape(shape, \"shape\")\n _ctx = _context.context()\n if _ctx.in_graph_mode():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"PeriodicResample\", values=values, shape=shape, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op.get_attr(\"T\"), \"shape\", _op.get_attr(\"shape\"))\n else:\n _attr_T, (values,) = _execute.args_to_matching_eager([values], _ctx)\n _inputs_flat = [values]\n _attrs = (\"T\", _attr_T, \"shape\", shape)\n _result = _execute.execute(b\"PeriodicResample\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"PeriodicResample\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\ndef _InitOpDefLibrary(op_list_proto_bytes):\n op_list = _op_def_pb2.OpList()\n op_list.ParseFromString(op_list_proto_bytes)\n _op_def_registry.register_op_list(op_list)\n op_def_lib = _op_def_library.OpDefLibrary()\n op_def_lib.add_op_list(op_list)\n return op_def_lib\n# op {\n# name: \"PeriodicResample\"\n# input_arg {\n# name: \"values\"\n# type_attr: \"T\"\n# }\n# output_arg {\n# name: \"output\"\n# type_attr: \"T\"\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_FLOAT\n# type: DT_DOUBLE\n# type: DT_INT64\n# type: DT_INT32\n# type: DT_UINT8\n# type: DT_UINT16\n# type: DT_INT16\n# type: DT_INT8\n# type: DT_COMPLEX64\n# type: DT_COMPLEX128\n# type: DT_QINT8\n# type: DT_QUINT8\n# type: DT_QINT32\n# type: DT_HALF\n# type: DT_UINT32\n# type: DT_UINT64\n# type: DT_BFLOAT16\n# }\n# }\n# }\n# attr {\n# name: \"shape\"\n# type: \"shape\"\n# }\n# }\n_op_def_lib = _InitOpDefLibrary(b\"\\n^\\n\\020PeriodicResample\\022\\013\\n\\006values\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\" 
\\n\\001T\\022\\004type:\\025\\n\\0232\\021\\001\\002\\t\\003\\004\\021\\005\\006\\010\\022\\013\\014\\r\\023\\026\\027\\016\\\"\\016\\n\\005shape\\022\\005shape\")\n",
"import warnings\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom distutils.version import LooseVersion\n\nimport nose.tools as nt\nimport numpy.testing as npt\nfrom numpy.testing.decorators import skipif\ntry:\n import pandas.testing as tm\nexcept ImportError:\n import pandas.util.testing as tm\n\nfrom . import PlotTestCase\nfrom .. import axisgrid as ag\nfrom .. import rcmod\nfrom ..palettes import color_palette\nfrom ..distributions import kdeplot, _freedman_diaconis_bins\nfrom ..categorical import pointplot\nfrom ..utils import categorical_order\n\nrs = np.random.RandomState(0)\n\nold_matplotlib = LooseVersion(mpl.__version__) < \"1.4\"\npandas_has_categoricals = LooseVersion(pd.__version__) >= \"0.15\"\n\n\nclass TestFacetGrid(PlotTestCase):\n\n df = pd.DataFrame(dict(x=rs.normal(size=60),\n y=rs.gamma(4, size=60),\n a=np.repeat(list(\"abc\"), 20),\n b=np.tile(list(\"mn\"), 30),\n c=np.tile(list(\"tuv\"), 20),\n d=np.tile(list(\"abcdefghij\"), 6)))\n\n def test_self_data(self):\n\n g = ag.FacetGrid(self.df)\n nt.assert_is(g.data, self.df)\n\n def test_self_fig(self):\n\n g = ag.FacetGrid(self.df)\n nt.assert_is_instance(g.fig, plt.Figure)\n\n def test_self_axes(self):\n\n g = ag.FacetGrid(self.df, row=\"a\", col=\"b\", hue=\"c\")\n for ax in g.axes.flat:\n nt.assert_is_instance(ax, plt.Axes)\n\n def test_axes_array_size(self):\n\n g1 = ag.FacetGrid(self.df)\n nt.assert_equal(g1.axes.shape, (1, 1))\n\n g2 = ag.FacetGrid(self.df, row=\"a\")\n nt.assert_equal(g2.axes.shape, (3, 1))\n\n g3 = ag.FacetGrid(self.df, col=\"b\")\n nt.assert_equal(g3.axes.shape, (1, 2))\n\n g4 = ag.FacetGrid(self.df, hue=\"c\")\n nt.assert_equal(g4.axes.shape, (1, 1))\n\n g5 = ag.FacetGrid(self.df, row=\"a\", col=\"b\", hue=\"c\")\n nt.assert_equal(g5.axes.shape, (3, 2))\n\n for ax in g5.axes.flat:\n nt.assert_is_instance(ax, plt.Axes)\n\n def test_single_axes(self):\n\n g1 = ag.FacetGrid(self.df)\n 
nt.assert_is_instance(g1.ax, plt.Axes)\n\n g2 = ag.FacetGrid(self.df, row=\"a\")\n with nt.assert_raises(AttributeError):\n g2.ax\n\n g3 = ag.FacetGrid(self.df, col=\"a\")\n with nt.assert_raises(AttributeError):\n g3.ax\n\n g4 = ag.FacetGrid(self.df, col=\"a\", row=\"b\")\n with nt.assert_raises(AttributeError):\n g4.ax\n\n def test_col_wrap(self):\n\n g = ag.FacetGrid(self.df, col=\"d\")\n nt.assert_equal(g.axes.shape, (1, 10))\n nt.assert_is(g.facet_axis(0, 8), g.axes[0, 8])\n\n g_wrap = ag.FacetGrid(self.df, col=\"d\", col_wrap=4)\n nt.assert_equal(g_wrap.axes.shape, (10,))\n nt.assert_is(g_wrap.facet_axis(0, 8), g_wrap.axes[8])\n nt.assert_equal(g_wrap._ncol, 4)\n nt.assert_equal(g_wrap._nrow, 3)\n\n with nt.assert_raises(ValueError):\n g = ag.FacetGrid(self.df, row=\"b\", col=\"d\", col_wrap=4)\n\n df = self.df.copy()\n df.loc[df.d == \"j\"] = np.nan\n g_missing = ag.FacetGrid(df, col=\"d\")\n nt.assert_equal(g_missing.axes.shape, (1, 9))\n\n g_missing_wrap = ag.FacetGrid(df, col=\"d\", col_wrap=4)\n nt.assert_equal(g_missing_wrap.axes.shape, (9,))\n\n def test_normal_axes(self):\n\n null = np.empty(0, object).flat\n\n g = ag.FacetGrid(self.df)\n npt.assert_array_equal(g._bottom_axes, g.axes.flat)\n npt.assert_array_equal(g._not_bottom_axes, null)\n npt.assert_array_equal(g._left_axes, g.axes.flat)\n npt.assert_array_equal(g._not_left_axes, null)\n npt.assert_array_equal(g._inner_axes, null)\n\n g = ag.FacetGrid(self.df, col=\"c\")\n npt.assert_array_equal(g._bottom_axes, g.axes.flat)\n npt.assert_array_equal(g._not_bottom_axes, null)\n npt.assert_array_equal(g._left_axes, g.axes[:, 0].flat)\n npt.assert_array_equal(g._not_left_axes, g.axes[:, 1:].flat)\n npt.assert_array_equal(g._inner_axes, null)\n\n g = ag.FacetGrid(self.df, row=\"c\")\n npt.assert_array_equal(g._bottom_axes, g.axes[-1, :].flat)\n npt.assert_array_equal(g._not_bottom_axes, g.axes[:-1, :].flat)\n npt.assert_array_equal(g._left_axes, g.axes.flat)\n npt.assert_array_equal(g._not_left_axes, 
null)\n npt.assert_array_equal(g._inner_axes, null)\n\n g = ag.FacetGrid(self.df, col=\"a\", row=\"c\")\n npt.assert_array_equal(g._bottom_axes, g.axes[-1, :].flat)\n npt.assert_array_equal(g._not_bottom_axes, g.axes[:-1, :].flat)\n npt.assert_array_equal(g._left_axes, g.axes[:, 0].flat)\n npt.assert_array_equal(g._not_left_axes, g.axes[:, 1:].flat)\n npt.assert_array_equal(g._inner_axes, g.axes[:-1, 1:].flat)\n\n def test_wrapped_axes(self):\n\n null = np.empty(0, object).flat\n\n g = ag.FacetGrid(self.df, col=\"a\", col_wrap=2)\n npt.assert_array_equal(g._bottom_axes,\n g.axes[np.array([1, 2])].flat)\n npt.assert_array_equal(g._not_bottom_axes, g.axes[:1].flat)\n npt.assert_array_equal(g._left_axes, g.axes[np.array([0, 2])].flat)\n npt.assert_array_equal(g._not_left_axes, g.axes[np.array([1])].flat)\n npt.assert_array_equal(g._inner_axes, null)\n\n def test_figure_size(self):\n\n g = ag.FacetGrid(self.df, row=\"a\", col=\"b\")\n npt.assert_array_equal(g.fig.get_size_inches(), (6, 9))\n\n g = ag.FacetGrid(self.df, row=\"a\", col=\"b\", size=6)\n npt.assert_array_equal(g.fig.get_size_inches(), (12, 18))\n\n g = ag.FacetGrid(self.df, col=\"c\", size=4, aspect=.5)\n npt.assert_array_equal(g.fig.get_size_inches(), (6, 4))\n\n def test_figure_size_with_legend(self):\n\n g1 = ag.FacetGrid(self.df, col=\"a\", hue=\"c\", size=4, aspect=.5)\n npt.assert_array_equal(g1.fig.get_size_inches(), (6, 4))\n g1.add_legend()\n nt.assert_greater(g1.fig.get_size_inches()[0], 6)\n\n g2 = ag.FacetGrid(self.df, col=\"a\", hue=\"c\", size=4, aspect=.5,\n legend_out=False)\n npt.assert_array_equal(g2.fig.get_size_inches(), (6, 4))\n g2.add_legend()\n npt.assert_array_equal(g2.fig.get_size_inches(), (6, 4))\n\n def test_legend_data(self):\n\n g1 = ag.FacetGrid(self.df, hue=\"a\")\n g1.map(plt.plot, \"x\", \"y\")\n g1.add_legend()\n palette = color_palette(n_colors=3)\n\n nt.assert_equal(g1._legend.get_title().get_text(), \"a\")\n\n a_levels = sorted(self.df.a.unique())\n\n lines = 
g1._legend.get_lines()\n nt.assert_equal(len(lines), len(a_levels))\n\n for line, hue in zip(lines, palette):\n nt.assert_equal(line.get_color(), hue)\n\n labels = g1._legend.get_texts()\n nt.assert_equal(len(labels), len(a_levels))\n\n for label, level in zip(labels, a_levels):\n nt.assert_equal(label.get_text(), level)\n\n def test_legend_data_missing_level(self):\n\n g1 = ag.FacetGrid(self.df, hue=\"a\", hue_order=list(\"azbc\"))\n g1.map(plt.plot, \"x\", \"y\")\n g1.add_legend()\n\n b, g, r, p = color_palette(n_colors=4)\n palette = [b, r, p]\n\n nt.assert_equal(g1._legend.get_title().get_text(), \"a\")\n\n a_levels = sorted(self.df.a.unique())\n\n lines = g1._legend.get_lines()\n nt.assert_equal(len(lines), len(a_levels))\n\n for line, hue in zip(lines, palette):\n nt.assert_equal(line.get_color(), hue)\n\n labels = g1._legend.get_texts()\n nt.assert_equal(len(labels), 4)\n\n for label, level in zip(labels, list(\"azbc\")):\n nt.assert_equal(label.get_text(), level)\n\n def test_get_boolean_legend_data(self):\n\n self.df[\"b_bool\"] = self.df.b == \"m\"\n g1 = ag.FacetGrid(self.df, hue=\"b_bool\")\n g1.map(plt.plot, \"x\", \"y\")\n g1.add_legend()\n palette = color_palette(n_colors=2)\n\n nt.assert_equal(g1._legend.get_title().get_text(), \"b_bool\")\n\n b_levels = list(map(str, categorical_order(self.df.b_bool)))\n\n lines = g1._legend.get_lines()\n nt.assert_equal(len(lines), len(b_levels))\n\n for line, hue in zip(lines, palette):\n nt.assert_equal(line.get_color(), hue)\n\n labels = g1._legend.get_texts()\n nt.assert_equal(len(labels), len(b_levels))\n\n for label, level in zip(labels, b_levels):\n nt.assert_equal(label.get_text(), level)\n\n def test_legend_options(self):\n\n g1 = ag.FacetGrid(self.df, hue=\"b\")\n g1.map(plt.plot, \"x\", \"y\")\n g1.add_legend()\n\n def test_legendout_with_colwrap(self):\n\n g = ag.FacetGrid(self.df, col=\"d\", hue='b',\n col_wrap=4, legend_out=False)\n g.map(plt.plot, \"x\", \"y\", linewidth=3)\n g.add_legend()\n\n def 
test_subplot_kws(self):\n\n g = ag.FacetGrid(self.df, despine=False,\n subplot_kws=dict(projection=\"polar\"))\n for ax in g.axes.flat:\n nt.assert_true(\"PolarAxesSubplot\" in str(type(ax)))\n\n @skipif(old_matplotlib)\n def test_gridspec_kws(self):\n ratios = [3, 1, 2]\n sizes = [0.46, 0.15, 0.31]\n\n gskws = dict(width_ratios=ratios, height_ratios=ratios)\n g = ag.FacetGrid(self.df, col='c', row='a', gridspec_kws=gskws)\n\n # clear out all ticks\n for ax in g.axes.flat:\n ax.set_xticks([])\n ax.set_yticks([])\n\n g.fig.tight_layout()\n widths, heights = np.meshgrid(sizes, sizes)\n for n, ax in enumerate(g.axes.flat):\n npt.assert_almost_equal(\n ax.get_position().width,\n widths.flatten()[n],\n decimal=2\n )\n npt.assert_almost_equal(\n ax.get_position().height,\n heights.flatten()[n],\n decimal=2\n )\n\n @skipif(old_matplotlib)\n def test_gridspec_kws_col_wrap(self):\n ratios = [3, 1, 2, 1, 1]\n sizes = [0.46, 0.15, 0.31]\n\n gskws = dict(width_ratios=ratios)\n with warnings.catch_warnings():\n warnings.resetwarnings()\n warnings.simplefilter(\"always\")\n npt.assert_warns(UserWarning, ag.FacetGrid, self.df, col='d',\n col_wrap=5, gridspec_kws=gskws)\n\n @skipif(not old_matplotlib)\n def test_gridsic_kws_old_mpl(self):\n ratios = [3, 1, 2]\n sizes = [0.46, 0.15, 0.31]\n\n gskws = dict(width_ratios=ratios, height_ratios=ratios)\n with warnings.catch_warnings():\n warnings.resetwarnings()\n warnings.simplefilter(\"always\")\n npt.assert_warns(UserWarning, ag.FacetGrid, self.df, col='c',\n row='a', gridspec_kws=gskws)\n\n def test_data_generator(self):\n\n g = ag.FacetGrid(self.df, row=\"a\")\n d = list(g.facet_data())\n nt.assert_equal(len(d), 3)\n\n tup, data = d[0]\n nt.assert_equal(tup, (0, 0, 0))\n nt.assert_true((data[\"a\"] == \"a\").all())\n\n tup, data = d[1]\n nt.assert_equal(tup, (1, 0, 0))\n nt.assert_true((data[\"a\"] == \"b\").all())\n\n g = ag.FacetGrid(self.df, row=\"a\", col=\"b\")\n d = list(g.facet_data())\n nt.assert_equal(len(d), 6)\n\n tup, 
data = d[0]\n nt.assert_equal(tup, (0, 0, 0))\n nt.assert_true((data[\"a\"] == \"a\").all())\n nt.assert_true((data[\"b\"] == \"m\").all())\n\n tup, data = d[1]\n nt.assert_equal(tup, (0, 1, 0))\n nt.assert_true((data[\"a\"] == \"a\").all())\n nt.assert_true((data[\"b\"] == \"n\").all())\n\n tup, data = d[2]\n nt.assert_equal(tup, (1, 0, 0))\n nt.assert_true((data[\"a\"] == \"b\").all())\n nt.assert_true((data[\"b\"] == \"m\").all())\n\n g = ag.FacetGrid(self.df, hue=\"c\")\n d = list(g.facet_data())\n nt.assert_equal(len(d), 3)\n tup, data = d[1]\n nt.assert_equal(tup, (0, 0, 1))\n nt.assert_true((data[\"c\"] == \"u\").all())\n\n def test_map(self):\n\n g = ag.FacetGrid(self.df, row=\"a\", col=\"b\", hue=\"c\")\n g.map(plt.plot, \"x\", \"y\", linewidth=3)\n\n lines = g.axes[0, 0].lines\n nt.assert_equal(len(lines), 3)\n\n line1, _, _ = lines\n nt.assert_equal(line1.get_linewidth(), 3)\n x, y = line1.get_data()\n mask = (self.df.a == \"a\") & (self.df.b == \"m\") & (self.df.c == \"t\")\n npt.assert_array_equal(x, self.df.x[mask])\n npt.assert_array_equal(y, self.df.y[mask])\n\n def test_map_dataframe(self):\n\n g = ag.FacetGrid(self.df, row=\"a\", col=\"b\", hue=\"c\")\n plot = lambda x, y, data=None, **kws: plt.plot(data[x], data[y], **kws)\n g.map_dataframe(plot, \"x\", \"y\", linestyle=\"--\")\n\n lines = g.axes[0, 0].lines\n nt.assert_equal(len(lines), 3)\n\n line1, _, _ = lines\n nt.assert_equal(line1.get_linestyle(), \"--\")\n x, y = line1.get_data()\n mask = (self.df.a == \"a\") & (self.df.b == \"m\") & (self.df.c == \"t\")\n npt.assert_array_equal(x, self.df.x[mask])\n npt.assert_array_equal(y, self.df.y[mask])\n\n def test_set(self):\n\n g = ag.FacetGrid(self.df, row=\"a\", col=\"b\")\n xlim = (-2, 5)\n ylim = (3, 6)\n xticks = [-2, 0, 3, 5]\n yticks = [3, 4.5, 6]\n g.set(xlim=xlim, ylim=ylim, xticks=xticks, yticks=yticks)\n for ax in g.axes.flat:\n npt.assert_array_equal(ax.get_xlim(), xlim)\n npt.assert_array_equal(ax.get_ylim(), ylim)\n 
npt.assert_array_equal(ax.get_xticks(), xticks)\n npt.assert_array_equal(ax.get_yticks(), yticks)\n\n def test_set_titles(self):\n\n g = ag.FacetGrid(self.df, row=\"a\", col=\"b\")\n g.map(plt.plot, \"x\", \"y\")\n\n # Test the default titles\n nt.assert_equal(g.axes[0, 0].get_title(), \"a = a | b = m\")\n nt.assert_equal(g.axes[0, 1].get_title(), \"a = a | b = n\")\n nt.assert_equal(g.axes[1, 0].get_title(), \"a = b | b = m\")\n\n # Test a provided title\n g.set_titles(\"{row_var} == {row_name} \\/ {col_var} == {col_name}\")\n nt.assert_equal(g.axes[0, 0].get_title(), \"a == a \\/ b == m\")\n nt.assert_equal(g.axes[0, 1].get_title(), \"a == a \\/ b == n\")\n nt.assert_equal(g.axes[1, 0].get_title(), \"a == b \\/ b == m\")\n\n # Test a single row\n g = ag.FacetGrid(self.df, col=\"b\")\n g.map(plt.plot, \"x\", \"y\")\n\n # Test the default titles\n nt.assert_equal(g.axes[0, 0].get_title(), \"b = m\")\n nt.assert_equal(g.axes[0, 1].get_title(), \"b = n\")\n\n # test with dropna=False\n g = ag.FacetGrid(self.df, col=\"b\", hue=\"b\", dropna=False)\n g.map(plt.plot, 'x', 'y')\n\n def test_set_titles_margin_titles(self):\n\n g = ag.FacetGrid(self.df, row=\"a\", col=\"b\", margin_titles=True)\n g.map(plt.plot, \"x\", \"y\")\n\n # Test the default titles\n nt.assert_equal(g.axes[0, 0].get_title(), \"b = m\")\n nt.assert_equal(g.axes[0, 1].get_title(), \"b = n\")\n nt.assert_equal(g.axes[1, 0].get_title(), \"\")\n\n # Test the row \"titles\"\n nt.assert_equal(g.axes[0, 1].texts[0].get_text(), \"a = a\")\n nt.assert_equal(g.axes[1, 1].texts[0].get_text(), \"a = b\")\n\n # Test a provided title\n g.set_titles(col_template=\"{col_var} == {col_name}\")\n nt.assert_equal(g.axes[0, 0].get_title(), \"b == m\")\n nt.assert_equal(g.axes[0, 1].get_title(), \"b == n\")\n nt.assert_equal(g.axes[1, 0].get_title(), \"\")\n\n def test_set_ticklabels(self):\n\n g = ag.FacetGrid(self.df, row=\"a\", col=\"b\")\n g.map(plt.plot, \"x\", \"y\")\n xlab = [l.get_text() + \"h\" for l in g.axes[1, 
0].get_xticklabels()]\n ylab = [l.get_text() for l in g.axes[1, 0].get_yticklabels()]\n\n g.set_xticklabels(xlab)\n g.set_yticklabels(rotation=90)\n\n got_x = [l.get_text() + \"h\" for l in g.axes[1, 1].get_xticklabels()]\n got_y = [l.get_text() for l in g.axes[0, 0].get_yticklabels()]\n npt.assert_array_equal(got_x, xlab)\n npt.assert_array_equal(got_y, ylab)\n\n x, y = np.arange(10), np.arange(10)\n df = pd.DataFrame(np.c_[x, y], columns=[\"x\", \"y\"])\n g = ag.FacetGrid(df).map(pointplot, \"x\", \"y\", order=x)\n g.set_xticklabels(step=2)\n got_x = [int(l.get_text()) for l in g.axes[0, 0].get_xticklabels()]\n npt.assert_array_equal(x[::2], got_x)\n\n g = ag.FacetGrid(self.df, col=\"d\", col_wrap=5)\n g.map(plt.plot, \"x\", \"y\")\n g.set_xticklabels(rotation=45)\n g.set_yticklabels(rotation=75)\n for ax in g._bottom_axes:\n for l in ax.get_xticklabels():\n nt.assert_equal(l.get_rotation(), 45)\n for ax in g._left_axes:\n for l in ax.get_yticklabels():\n nt.assert_equal(l.get_rotation(), 75)\n\n def test_set_axis_labels(self):\n\n g = ag.FacetGrid(self.df, row=\"a\", col=\"b\")\n g.map(plt.plot, \"x\", \"y\")\n xlab = 'xx'\n ylab = 'yy'\n\n g.set_axis_labels(xlab, ylab)\n\n got_x = [ax.get_xlabel() for ax in g.axes[-1, :]]\n got_y = [ax.get_ylabel() for ax in g.axes[:, 0]]\n npt.assert_array_equal(got_x, xlab)\n npt.assert_array_equal(got_y, ylab)\n\n def test_axis_lims(self):\n\n g = ag.FacetGrid(self.df, row=\"a\", col=\"b\", xlim=(0, 4), ylim=(-2, 3))\n nt.assert_equal(g.axes[0, 0].get_xlim(), (0, 4))\n nt.assert_equal(g.axes[0, 0].get_ylim(), (-2, 3))\n\n def test_data_orders(self):\n\n g = ag.FacetGrid(self.df, row=\"a\", col=\"b\", hue=\"c\")\n\n nt.assert_equal(g.row_names, list(\"abc\"))\n nt.assert_equal(g.col_names, list(\"mn\"))\n nt.assert_equal(g.hue_names, list(\"tuv\"))\n nt.assert_equal(g.axes.shape, (3, 2))\n\n g = ag.FacetGrid(self.df, row=\"a\", col=\"b\", hue=\"c\",\n row_order=list(\"bca\"),\n col_order=list(\"nm\"),\n 
hue_order=list(\"vtu\"))\n\n nt.assert_equal(g.row_names, list(\"bca\"))\n nt.assert_equal(g.col_names, list(\"nm\"))\n nt.assert_equal(g.hue_names, list(\"vtu\"))\n nt.assert_equal(g.axes.shape, (3, 2))\n\n g = ag.FacetGrid(self.df, row=\"a\", col=\"b\", hue=\"c\",\n row_order=list(\"bcda\"),\n col_order=list(\"nom\"),\n hue_order=list(\"qvtu\"))\n\n nt.assert_equal(g.row_names, list(\"bcda\"))\n nt.assert_equal(g.col_names, list(\"nom\"))\n nt.assert_equal(g.hue_names, list(\"qvtu\"))\n nt.assert_equal(g.axes.shape, (4, 3))\n\n def test_palette(self):\n\n rcmod.set()\n\n g = ag.FacetGrid(self.df, hue=\"c\")\n nt.assert_equal(g._colors, color_palette(n_colors=3))\n\n g = ag.FacetGrid(self.df, hue=\"d\")\n nt.assert_equal(g._colors, color_palette(\"husl\", 10))\n\n g = ag.FacetGrid(self.df, hue=\"c\", palette=\"Set2\")\n nt.assert_equal(g._colors, color_palette(\"Set2\", 3))\n\n dict_pal = dict(t=\"red\", u=\"green\", v=\"blue\")\n list_pal = color_palette([\"red\", \"green\", \"blue\"], 3)\n g = ag.FacetGrid(self.df, hue=\"c\", palette=dict_pal)\n nt.assert_equal(g._colors, list_pal)\n\n list_pal = color_palette([\"green\", \"blue\", \"red\"], 3)\n g = ag.FacetGrid(self.df, hue=\"c\", hue_order=list(\"uvt\"),\n palette=dict_pal)\n nt.assert_equal(g._colors, list_pal)\n\n def test_hue_kws(self):\n\n kws = dict(marker=[\"o\", \"s\", \"D\"])\n g = ag.FacetGrid(self.df, hue=\"c\", hue_kws=kws)\n g.map(plt.plot, \"x\", \"y\")\n\n for line, marker in zip(g.axes[0, 0].lines, kws[\"marker\"]):\n nt.assert_equal(line.get_marker(), marker)\n\n def test_dropna(self):\n\n df = self.df.copy()\n hasna = pd.Series(np.tile(np.arange(6), 10), dtype=np.float)\n hasna[hasna == 5] = np.nan\n df[\"hasna\"] = hasna\n g = ag.FacetGrid(df, dropna=False, row=\"hasna\")\n nt.assert_equal(g._not_na.sum(), 60)\n\n g = ag.FacetGrid(df, dropna=True, row=\"hasna\")\n nt.assert_equal(g._not_na.sum(), 50)\n\n def test_unicode_column_label_with_rows(self):\n\n # use a smaller copy of the default 
testing data frame:\n df = self.df.copy()\n df = df[[\"a\", \"b\", \"x\"]]\n\n # rename column 'a' (which will be used for the columns in the grid)\n # by using a Unicode string:\n unicode_column_label = u\"\\u01ff\\u02ff\\u03ff\"\n df = df.rename(columns={\"a\": unicode_column_label})\n\n # ensure that the data frame columns have the expected names:\n nt.assert_equal(list(df.columns), [unicode_column_label, \"b\", \"x\"])\n\n # plot the grid -- if successful, no UnicodeEncodingError should\n # occur:\n g = ag.FacetGrid(df, col=unicode_column_label, row=\"b\")\n g = g.map(plt.plot, \"x\")\n\n def test_unicode_column_label_no_rows(self):\n\n # use a smaller copy of the default testing data frame:\n df = self.df.copy()\n df = df[[\"a\", \"x\"]]\n\n # rename column 'a' (which will be used for the columns in the grid)\n # by using a Unicode string:\n unicode_column_label = u\"\\u01ff\\u02ff\\u03ff\"\n df = df.rename(columns={\"a\": unicode_column_label})\n\n # ensure that the data frame columns have the expected names:\n nt.assert_equal(list(df.columns), [unicode_column_label, \"x\"])\n\n # plot the grid -- if successful, no UnicodeEncodingError should\n # occur:\n g = ag.FacetGrid(df, col=unicode_column_label)\n g = g.map(plt.plot, \"x\")\n\n def test_unicode_row_label_with_columns(self):\n\n # use a smaller copy of the default testing data frame:\n df = self.df.copy()\n df = df[[\"a\", \"b\", \"x\"]]\n\n # rename column 'b' (which will be used for the rows in the grid)\n # by using a Unicode string:\n unicode_row_label = u\"\\u01ff\\u02ff\\u03ff\"\n df = df.rename(columns={\"b\": unicode_row_label})\n\n # ensure that the data frame columns have the expected names:\n nt.assert_equal(list(df.columns), [\"a\", unicode_row_label, \"x\"])\n\n # plot the grid -- if successful, no UnicodeEncodingError should\n # occur:\n g = ag.FacetGrid(df, col=\"a\", row=unicode_row_label)\n g = g.map(plt.plot, \"x\")\n\n def test_unicode_row_label_no_columns(self):\n\n # use a smaller 
copy of the default testing data frame:\n df = self.df.copy()\n df = df[[\"b\", \"x\"]]\n\n # rename column 'b' (which will be used for the rows in the grid)\n # by using a Unicode string:\n unicode_row_label = u\"\\u01ff\\u02ff\\u03ff\"\n df = df.rename(columns={\"b\": unicode_row_label})\n\n # ensure that the data frame columns have the expected names:\n nt.assert_equal(list(df.columns), [unicode_row_label, \"x\"])\n\n # plot the grid -- if successful, no UnicodeEncodingError should\n # occur:\n g = ag.FacetGrid(df, row=unicode_row_label)\n g = g.map(plt.plot, \"x\")\n\n def test_unicode_content_with_row_and_column(self):\n\n df = self.df.copy()\n\n # replace content of column 'a' (which will form the columns in the\n # grid) by Unicode characters:\n unicode_column_val = np.repeat((u'\\u01ff', u'\\u02ff', u'\\u03ff'), 20)\n df[\"a\"] = unicode_column_val\n\n # make sure that the replacement worked as expected:\n nt.assert_equal(\n list(df[\"a\"]),\n [u'\\u01ff'] * 20 + [u'\\u02ff'] * 20 + [u'\\u03ff'] * 20)\n\n # plot the grid -- if successful, no UnicodeEncodingError should\n # occur:\n g = ag.FacetGrid(df, col=\"a\", row=\"b\")\n g = g.map(plt.plot, \"x\")\n\n def test_unicode_content_no_rows(self):\n\n df = self.df.copy()\n\n # replace content of column 'a' (which will form the columns in the\n # grid) by Unicode characters:\n unicode_column_val = np.repeat((u'\\u01ff', u'\\u02ff', u'\\u03ff'), 20)\n df[\"a\"] = unicode_column_val\n\n # make sure that the replacement worked as expected:\n nt.assert_equal(\n list(df[\"a\"]),\n [u'\\u01ff'] * 20 + [u'\\u02ff'] * 20 + [u'\\u03ff'] * 20)\n\n # plot the grid -- if successful, no UnicodeEncodingError should\n # occur:\n g = ag.FacetGrid(df, col=\"a\")\n g = g.map(plt.plot, \"x\")\n\n def test_unicode_content_no_columns(self):\n\n df = self.df.copy()\n\n # replace content of column 'a' (which will form the rows in the\n # grid) by Unicode characters:\n unicode_column_val = np.repeat((u'\\u01ff', u'\\u02ff', 
u'\\u03ff'), 20)\n df[\"b\"] = unicode_column_val\n\n # make sure that the replacement worked as expected:\n nt.assert_equal(\n list(df[\"b\"]),\n [u'\\u01ff'] * 20 + [u'\\u02ff'] * 20 + [u'\\u03ff'] * 20)\n\n # plot the grid -- if successful, no UnicodeEncodingError should\n # occur:\n g = ag.FacetGrid(df, row=\"b\")\n g = g.map(plt.plot, \"x\")\n\n @skipif(not pandas_has_categoricals)\n def test_categorical_column_missing_categories(self):\n\n df = self.df.copy()\n df['a'] = df['a'].astype('category')\n\n g = ag.FacetGrid(df[df['a'] == 'a'], col=\"a\", col_wrap=1)\n\n nt.assert_equal(g.axes.shape, (len(df['a'].cat.categories),))\n\n def test_categorical_warning(self):\n\n g = ag.FacetGrid(self.df, col=\"b\")\n with warnings.catch_warnings():\n warnings.resetwarnings()\n warnings.simplefilter(\"always\")\n npt.assert_warns(UserWarning, g.map, pointplot, \"b\", \"x\")\n\n\nclass TestPairGrid(PlotTestCase):\n\n rs = np.random.RandomState(sum(map(ord, \"PairGrid\")))\n df = pd.DataFrame(dict(x=rs.normal(size=80),\n y=rs.randint(0, 4, size=(80)),\n z=rs.gamma(3, size=80),\n a=np.repeat(list(\"abcd\"), 20),\n b=np.repeat(list(\"abcdefgh\"), 10)))\n\n def test_self_data(self):\n\n g = ag.PairGrid(self.df)\n nt.assert_is(g.data, self.df)\n\n def test_ignore_datelike_data(self):\n\n df = self.df.copy()\n df['date'] = pd.date_range('2010-01-01', periods=len(df), freq='d')\n result = ag.PairGrid(self.df).data\n expected = df.drop('date', axis=1)\n tm.assert_frame_equal(result, expected)\n\n def test_self_fig(self):\n\n g = ag.PairGrid(self.df)\n nt.assert_is_instance(g.fig, plt.Figure)\n\n def test_self_axes(self):\n\n g = ag.PairGrid(self.df)\n for ax in g.axes.flat:\n nt.assert_is_instance(ax, plt.Axes)\n\n def test_default_axes(self):\n\n g = ag.PairGrid(self.df)\n nt.assert_equal(g.axes.shape, (3, 3))\n nt.assert_equal(g.x_vars, [\"x\", \"y\", \"z\"])\n nt.assert_equal(g.y_vars, [\"x\", \"y\", \"z\"])\n nt.assert_true(g.square_grid)\n\n def 
test_specific_square_axes(self):\n\n vars = [\"z\", \"x\"]\n g = ag.PairGrid(self.df, vars=vars)\n nt.assert_equal(g.axes.shape, (len(vars), len(vars)))\n nt.assert_equal(g.x_vars, vars)\n nt.assert_equal(g.y_vars, vars)\n nt.assert_true(g.square_grid)\n\n def test_specific_nonsquare_axes(self):\n\n x_vars = [\"x\", \"y\"]\n y_vars = [\"z\", \"y\", \"x\"]\n g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)\n nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))\n nt.assert_equal(g.x_vars, x_vars)\n nt.assert_equal(g.y_vars, y_vars)\n nt.assert_true(not g.square_grid)\n\n x_vars = [\"x\", \"y\"]\n y_vars = \"z\"\n g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)\n nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))\n nt.assert_equal(g.x_vars, list(x_vars))\n nt.assert_equal(g.y_vars, list(y_vars))\n nt.assert_true(not g.square_grid)\n\n def test_specific_square_axes_with_array(self):\n\n vars = np.array([\"z\", \"x\"])\n g = ag.PairGrid(self.df, vars=vars)\n nt.assert_equal(g.axes.shape, (len(vars), len(vars)))\n nt.assert_equal(g.x_vars, list(vars))\n nt.assert_equal(g.y_vars, list(vars))\n nt.assert_true(g.square_grid)\n\n def test_specific_nonsquare_axes_with_array(self):\n\n x_vars = np.array([\"x\", \"y\"])\n y_vars = np.array([\"z\", \"y\", \"x\"])\n g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)\n nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))\n nt.assert_equal(g.x_vars, list(x_vars))\n nt.assert_equal(g.y_vars, list(y_vars))\n nt.assert_true(not g.square_grid)\n\n def test_size(self):\n\n g1 = ag.PairGrid(self.df, size=3)\n npt.assert_array_equal(g1.fig.get_size_inches(), (9, 9))\n\n g2 = ag.PairGrid(self.df, size=4, aspect=.5)\n npt.assert_array_equal(g2.fig.get_size_inches(), (6, 12))\n\n g3 = ag.PairGrid(self.df, y_vars=[\"z\"], x_vars=[\"x\", \"y\"],\n size=2, aspect=2)\n npt.assert_array_equal(g3.fig.get_size_inches(), (8, 2))\n\n def test_map(self):\n\n vars = [\"x\", \"y\", \"z\"]\n g1 = ag.PairGrid(self.df)\n 
g1.map(plt.scatter)\n\n for i, axes_i in enumerate(g1.axes):\n for j, ax in enumerate(axes_i):\n x_in = self.df[vars[j]]\n y_in = self.df[vars[i]]\n x_out, y_out = ax.collections[0].get_offsets().T\n npt.assert_array_equal(x_in, x_out)\n npt.assert_array_equal(y_in, y_out)\n\n g2 = ag.PairGrid(self.df, \"a\")\n g2.map(plt.scatter)\n\n for i, axes_i in enumerate(g2.axes):\n for j, ax in enumerate(axes_i):\n x_in = self.df[vars[j]]\n y_in = self.df[vars[i]]\n for k, k_level in enumerate(\"abcd\"):\n x_in_k = x_in[self.df.a == k_level]\n y_in_k = y_in[self.df.a == k_level]\n x_out, y_out = ax.collections[k].get_offsets().T\n npt.assert_array_equal(x_in_k, x_out)\n npt.assert_array_equal(y_in_k, y_out)\n\n def test_map_nonsquare(self):\n\n x_vars = [\"x\"]\n y_vars = [\"y\", \"z\"]\n g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)\n g.map(plt.scatter)\n\n x_in = self.df.x\n for i, i_var in enumerate(y_vars):\n ax = g.axes[i, 0]\n y_in = self.df[i_var]\n x_out, y_out = ax.collections[0].get_offsets().T\n npt.assert_array_equal(x_in, x_out)\n npt.assert_array_equal(y_in, y_out)\n\n def test_map_lower(self):\n\n vars = [\"x\", \"y\", \"z\"]\n g = ag.PairGrid(self.df)\n g.map_lower(plt.scatter)\n\n for i, j in zip(*np.tril_indices_from(g.axes, -1)):\n ax = g.axes[i, j]\n x_in = self.df[vars[j]]\n y_in = self.df[vars[i]]\n x_out, y_out = ax.collections[0].get_offsets().T\n npt.assert_array_equal(x_in, x_out)\n npt.assert_array_equal(y_in, y_out)\n\n for i, j in zip(*np.triu_indices_from(g.axes)):\n ax = g.axes[i, j]\n nt.assert_equal(len(ax.collections), 0)\n\n def test_map_upper(self):\n\n vars = [\"x\", \"y\", \"z\"]\n g = ag.PairGrid(self.df)\n g.map_upper(plt.scatter)\n\n for i, j in zip(*np.triu_indices_from(g.axes, 1)):\n ax = g.axes[i, j]\n x_in = self.df[vars[j]]\n y_in = self.df[vars[i]]\n x_out, y_out = ax.collections[0].get_offsets().T\n npt.assert_array_equal(x_in, x_out)\n npt.assert_array_equal(y_in, y_out)\n\n for i, j in 
zip(*np.tril_indices_from(g.axes)):\n ax = g.axes[i, j]\n nt.assert_equal(len(ax.collections), 0)\n\n @skipif(old_matplotlib)\n def test_map_diag(self):\n\n g1 = ag.PairGrid(self.df)\n g1.map_diag(plt.hist)\n\n for ax in g1.diag_axes:\n nt.assert_equal(len(ax.patches), 10)\n\n g2 = ag.PairGrid(self.df)\n g2.map_diag(plt.hist, bins=15)\n\n for ax in g2.diag_axes:\n nt.assert_equal(len(ax.patches), 15)\n\n g3 = ag.PairGrid(self.df, hue=\"a\")\n g3.map_diag(plt.hist)\n\n for ax in g3.diag_axes:\n nt.assert_equal(len(ax.patches), 40)\n\n g4 = ag.PairGrid(self.df, hue=\"a\")\n g4.map_diag(plt.hist, histtype='step')\n\n for ax in g4.diag_axes:\n for ptch in ax.patches:\n nt.assert_equal(ptch.fill, False)\n\n @skipif(old_matplotlib)\n def test_map_diag_color(self):\n\n color = \"red\"\n rgb_color = mpl.colors.colorConverter.to_rgba(color)\n\n g1 = ag.PairGrid(self.df)\n g1.map_diag(plt.hist, color=color)\n\n for ax in g1.diag_axes:\n for patch in ax.patches:\n nt.assert_equals(patch.get_facecolor(), rgb_color)\n\n g2 = ag.PairGrid(self.df)\n g2.map_diag(kdeplot, color='red')\n\n for ax in g2.diag_axes:\n for line in ax.lines:\n nt.assert_equals(line.get_color(), color)\n\n @skipif(old_matplotlib)\n def test_map_diag_palette(self):\n\n pal = color_palette(n_colors=len(self.df.a.unique()))\n g = ag.PairGrid(self.df, hue=\"a\")\n g.map_diag(kdeplot)\n\n for ax in g.diag_axes:\n for line, color in zip(ax.lines, pal):\n nt.assert_equals(line.get_color(), color)\n\n @skipif(old_matplotlib)\n def test_map_diag_and_offdiag(self):\n\n vars = [\"x\", \"y\", \"z\"]\n g = ag.PairGrid(self.df)\n g.map_offdiag(plt.scatter)\n g.map_diag(plt.hist)\n\n for ax in g.diag_axes:\n nt.assert_equal(len(ax.patches), 10)\n\n for i, j in zip(*np.triu_indices_from(g.axes, 1)):\n ax = g.axes[i, j]\n x_in = self.df[vars[j]]\n y_in = self.df[vars[i]]\n x_out, y_out = ax.collections[0].get_offsets().T\n npt.assert_array_equal(x_in, x_out)\n npt.assert_array_equal(y_in, y_out)\n\n for i, j in 
zip(*np.tril_indices_from(g.axes, -1)):\n ax = g.axes[i, j]\n x_in = self.df[vars[j]]\n y_in = self.df[vars[i]]\n x_out, y_out = ax.collections[0].get_offsets().T\n npt.assert_array_equal(x_in, x_out)\n npt.assert_array_equal(y_in, y_out)\n\n for i, j in zip(*np.diag_indices_from(g.axes)):\n ax = g.axes[i, j]\n nt.assert_equal(len(ax.collections), 0)\n\n def test_palette(self):\n\n rcmod.set()\n\n g = ag.PairGrid(self.df, hue=\"a\")\n nt.assert_equal(g.palette, color_palette(n_colors=4))\n\n g = ag.PairGrid(self.df, hue=\"b\")\n nt.assert_equal(g.palette, color_palette(\"husl\", 8))\n\n g = ag.PairGrid(self.df, hue=\"a\", palette=\"Set2\")\n nt.assert_equal(g.palette, color_palette(\"Set2\", 4))\n\n dict_pal = dict(a=\"red\", b=\"green\", c=\"blue\", d=\"purple\")\n list_pal = color_palette([\"red\", \"green\", \"blue\", \"purple\"], 4)\n g = ag.PairGrid(self.df, hue=\"a\", palette=dict_pal)\n nt.assert_equal(g.palette, list_pal)\n\n list_pal = color_palette([\"purple\", \"blue\", \"red\", \"green\"], 4)\n g = ag.PairGrid(self.df, hue=\"a\", hue_order=list(\"dcab\"),\n palette=dict_pal)\n nt.assert_equal(g.palette, list_pal)\n\n def test_hue_kws(self):\n\n kws = dict(marker=[\"o\", \"s\", \"d\", \"+\"])\n g = ag.PairGrid(self.df, hue=\"a\", hue_kws=kws)\n g.map(plt.plot)\n\n for line, marker in zip(g.axes[0, 0].lines, kws[\"marker\"]):\n nt.assert_equal(line.get_marker(), marker)\n\n g = ag.PairGrid(self.df, hue=\"a\", hue_kws=kws,\n hue_order=list(\"dcab\"))\n g.map(plt.plot)\n\n for line, marker in zip(g.axes[0, 0].lines, kws[\"marker\"]):\n nt.assert_equal(line.get_marker(), marker)\n\n @skipif(old_matplotlib)\n def test_hue_order(self):\n\n order = list(\"dcab\")\n g = ag.PairGrid(self.df, hue=\"a\", hue_order=order)\n g.map(plt.plot)\n\n for line, level in zip(g.axes[1, 0].lines, order):\n x, y = line.get_xydata().T\n npt.assert_array_equal(x, self.df.loc[self.df.a == level, \"x\"])\n npt.assert_array_equal(y, self.df.loc[self.df.a == level, \"y\"])\n\n 
plt.close(\"all\")\n\n g = ag.PairGrid(self.df, hue=\"a\", hue_order=order)\n g.map_diag(plt.plot)\n\n for line, level in zip(g.axes[0, 0].lines, order):\n x, y = line.get_xydata().T\n npt.assert_array_equal(x, self.df.loc[self.df.a == level, \"x\"])\n npt.assert_array_equal(y, self.df.loc[self.df.a == level, \"x\"])\n\n plt.close(\"all\")\n\n g = ag.PairGrid(self.df, hue=\"a\", hue_order=order)\n g.map_lower(plt.plot)\n\n for line, level in zip(g.axes[1, 0].lines, order):\n x, y = line.get_xydata().T\n npt.assert_array_equal(x, self.df.loc[self.df.a == level, \"x\"])\n npt.assert_array_equal(y, self.df.loc[self.df.a == level, \"y\"])\n\n plt.close(\"all\")\n\n g = ag.PairGrid(self.df, hue=\"a\", hue_order=order)\n g.map_upper(plt.plot)\n\n for line, level in zip(g.axes[0, 1].lines, order):\n x, y = line.get_xydata().T\n npt.assert_array_equal(x, self.df.loc[self.df.a == level, \"y\"])\n npt.assert_array_equal(y, self.df.loc[self.df.a == level, \"x\"])\n\n plt.close(\"all\")\n\n @skipif(old_matplotlib)\n def test_hue_order_missing_level(self):\n\n order = list(\"dcaeb\")\n g = ag.PairGrid(self.df, hue=\"a\", hue_order=order)\n g.map(plt.plot)\n\n for line, level in zip(g.axes[1, 0].lines, order):\n x, y = line.get_xydata().T\n npt.assert_array_equal(x, self.df.loc[self.df.a == level, \"x\"])\n npt.assert_array_equal(y, self.df.loc[self.df.a == level, \"y\"])\n\n plt.close(\"all\")\n\n g = ag.PairGrid(self.df, hue=\"a\", hue_order=order)\n g.map_diag(plt.plot)\n\n for line, level in zip(g.axes[0, 0].lines, order):\n x, y = line.get_xydata().T\n npt.assert_array_equal(x, self.df.loc[self.df.a == level, \"x\"])\n npt.assert_array_equal(y, self.df.loc[self.df.a == level, \"x\"])\n\n plt.close(\"all\")\n\n g = ag.PairGrid(self.df, hue=\"a\", hue_order=order)\n g.map_lower(plt.plot)\n\n for line, level in zip(g.axes[1, 0].lines, order):\n x, y = line.get_xydata().T\n npt.assert_array_equal(x, self.df.loc[self.df.a == level, \"x\"])\n npt.assert_array_equal(y, 
self.df.loc[self.df.a == level, \"y\"])\n\n plt.close(\"all\")\n\n g = ag.PairGrid(self.df, hue=\"a\", hue_order=order)\n g.map_upper(plt.plot)\n\n for line, level in zip(g.axes[0, 1].lines, order):\n x, y = line.get_xydata().T\n npt.assert_array_equal(x, self.df.loc[self.df.a == level, \"y\"])\n npt.assert_array_equal(y, self.df.loc[self.df.a == level, \"x\"])\n\n plt.close(\"all\")\n\n def test_nondefault_index(self):\n\n df = self.df.copy().set_index(\"b\")\n\n vars = [\"x\", \"y\", \"z\"]\n g1 = ag.PairGrid(df)\n g1.map(plt.scatter)\n\n for i, axes_i in enumerate(g1.axes):\n for j, ax in enumerate(axes_i):\n x_in = self.df[vars[j]]\n y_in = self.df[vars[i]]\n x_out, y_out = ax.collections[0].get_offsets().T\n npt.assert_array_equal(x_in, x_out)\n npt.assert_array_equal(y_in, y_out)\n\n g2 = ag.PairGrid(df, \"a\")\n g2.map(plt.scatter)\n\n for i, axes_i in enumerate(g2.axes):\n for j, ax in enumerate(axes_i):\n x_in = self.df[vars[j]]\n y_in = self.df[vars[i]]\n for k, k_level in enumerate(\"abcd\"):\n x_in_k = x_in[self.df.a == k_level]\n y_in_k = y_in[self.df.a == k_level]\n x_out, y_out = ax.collections[k].get_offsets().T\n npt.assert_array_equal(x_in_k, x_out)\n npt.assert_array_equal(y_in_k, y_out)\n\n @skipif(old_matplotlib)\n def test_pairplot(self):\n\n vars = [\"x\", \"y\", \"z\"]\n g = ag.pairplot(self.df)\n\n for ax in g.diag_axes:\n nt.assert_equal(len(ax.patches), 10)\n\n for i, j in zip(*np.triu_indices_from(g.axes, 1)):\n ax = g.axes[i, j]\n x_in = self.df[vars[j]]\n y_in = self.df[vars[i]]\n x_out, y_out = ax.collections[0].get_offsets().T\n npt.assert_array_equal(x_in, x_out)\n npt.assert_array_equal(y_in, y_out)\n\n for i, j in zip(*np.tril_indices_from(g.axes, -1)):\n ax = g.axes[i, j]\n x_in = self.df[vars[j]]\n y_in = self.df[vars[i]]\n x_out, y_out = ax.collections[0].get_offsets().T\n npt.assert_array_equal(x_in, x_out)\n npt.assert_array_equal(y_in, y_out)\n\n for i, j in zip(*np.diag_indices_from(g.axes)):\n ax = g.axes[i, j]\n 
nt.assert_equal(len(ax.collections), 0)\n\n @skipif(old_matplotlib)\n def test_pairplot_reg(self):\n\n vars = [\"x\", \"y\", \"z\"]\n g = ag.pairplot(self.df, kind=\"reg\")\n\n for ax in g.diag_axes:\n nt.assert_equal(len(ax.patches), 10)\n\n for i, j in zip(*np.triu_indices_from(g.axes, 1)):\n ax = g.axes[i, j]\n x_in = self.df[vars[j]]\n y_in = self.df[vars[i]]\n x_out, y_out = ax.collections[0].get_offsets().T\n npt.assert_array_equal(x_in, x_out)\n npt.assert_array_equal(y_in, y_out)\n\n nt.assert_equal(len(ax.lines), 1)\n nt.assert_equal(len(ax.collections), 2)\n\n for i, j in zip(*np.tril_indices_from(g.axes, -1)):\n ax = g.axes[i, j]\n x_in = self.df[vars[j]]\n y_in = self.df[vars[i]]\n x_out, y_out = ax.collections[0].get_offsets().T\n npt.assert_array_equal(x_in, x_out)\n npt.assert_array_equal(y_in, y_out)\n\n nt.assert_equal(len(ax.lines), 1)\n nt.assert_equal(len(ax.collections), 2)\n\n for i, j in zip(*np.diag_indices_from(g.axes)):\n ax = g.axes[i, j]\n nt.assert_equal(len(ax.collections), 0)\n\n @skipif(old_matplotlib)\n def test_pairplot_kde(self):\n\n vars = [\"x\", \"y\", \"z\"]\n g = ag.pairplot(self.df, diag_kind=\"kde\")\n\n for ax in g.diag_axes:\n nt.assert_equal(len(ax.lines), 1)\n\n for i, j in zip(*np.triu_indices_from(g.axes, 1)):\n ax = g.axes[i, j]\n x_in = self.df[vars[j]]\n y_in = self.df[vars[i]]\n x_out, y_out = ax.collections[0].get_offsets().T\n npt.assert_array_equal(x_in, x_out)\n npt.assert_array_equal(y_in, y_out)\n\n for i, j in zip(*np.tril_indices_from(g.axes, -1)):\n ax = g.axes[i, j]\n x_in = self.df[vars[j]]\n y_in = self.df[vars[i]]\n x_out, y_out = ax.collections[0].get_offsets().T\n npt.assert_array_equal(x_in, x_out)\n npt.assert_array_equal(y_in, y_out)\n\n for i, j in zip(*np.diag_indices_from(g.axes)):\n ax = g.axes[i, j]\n nt.assert_equal(len(ax.collections), 0)\n\n @skipif(old_matplotlib)\n def test_pairplot_markers(self):\n\n vars = [\"x\", \"y\", \"z\"]\n markers = [\"o\", \"x\", \"s\", \"d\"]\n g = 
ag.pairplot(self.df, hue=\"a\", vars=vars, markers=markers)\n nt.assert_equal(g.hue_kws[\"marker\"], markers)\n plt.close(\"all\")\n\n with nt.assert_raises(ValueError):\n g = ag.pairplot(self.df, hue=\"a\", vars=vars, markers=markers[:-2])\n\n\nclass TestJointGrid(PlotTestCase):\n\n rs = np.random.RandomState(sum(map(ord, \"JointGrid\")))\n x = rs.randn(100)\n y = rs.randn(100)\n x_na = x.copy()\n x_na[10] = np.nan\n x_na[20] = np.nan\n data = pd.DataFrame(dict(x=x, y=y, x_na=x_na))\n\n def test_margin_grid_from_arrays(self):\n\n g = ag.JointGrid(self.x, self.y)\n npt.assert_array_equal(g.x, self.x)\n npt.assert_array_equal(g.y, self.y)\n\n def test_margin_grid_from_series(self):\n\n g = ag.JointGrid(self.data.x, self.data.y)\n npt.assert_array_equal(g.x, self.x)\n npt.assert_array_equal(g.y, self.y)\n\n def test_margin_grid_from_dataframe(self):\n\n g = ag.JointGrid(\"x\", \"y\", self.data)\n npt.assert_array_equal(g.x, self.x)\n npt.assert_array_equal(g.y, self.y)\n\n def test_margin_grid_from_dataframe_bad_variable(self):\n\n with nt.assert_raises(ValueError):\n g = ag.JointGrid(\"x\", \"bad_column\", self.data)\n\n def test_margin_grid_axis_labels(self):\n\n g = ag.JointGrid(\"x\", \"y\", self.data)\n\n xlabel, ylabel = g.ax_joint.get_xlabel(), g.ax_joint.get_ylabel()\n nt.assert_equal(xlabel, \"x\")\n nt.assert_equal(ylabel, \"y\")\n\n g.set_axis_labels(\"x variable\", \"y variable\")\n xlabel, ylabel = g.ax_joint.get_xlabel(), g.ax_joint.get_ylabel()\n nt.assert_equal(xlabel, \"x variable\")\n nt.assert_equal(ylabel, \"y variable\")\n\n def test_dropna(self):\n\n g = ag.JointGrid(\"x_na\", \"y\", self.data, dropna=False)\n nt.assert_equal(len(g.x), len(self.x_na))\n\n g = ag.JointGrid(\"x_na\", \"y\", self.data, dropna=True)\n nt.assert_equal(len(g.x), pd.notnull(self.x_na).sum())\n\n def test_axlims(self):\n\n lim = (-3, 3)\n g = ag.JointGrid(\"x\", \"y\", self.data, xlim=lim, ylim=lim)\n\n nt.assert_equal(g.ax_joint.get_xlim(), lim)\n 
nt.assert_equal(g.ax_joint.get_ylim(), lim)\n\n nt.assert_equal(g.ax_marg_x.get_xlim(), lim)\n nt.assert_equal(g.ax_marg_y.get_ylim(), lim)\n\n def test_marginal_ticks(self):\n\n g = ag.JointGrid(\"x\", \"y\", self.data)\n nt.assert_true(~len(g.ax_marg_x.get_xticks()))\n nt.assert_true(~len(g.ax_marg_y.get_yticks()))\n\n def test_bivariate_plot(self):\n\n g = ag.JointGrid(\"x\", \"y\", self.data)\n g.plot_joint(plt.plot)\n\n x, y = g.ax_joint.lines[0].get_xydata().T\n npt.assert_array_equal(x, self.x)\n npt.assert_array_equal(y, self.y)\n\n def test_univariate_plot(self):\n\n g = ag.JointGrid(\"x\", \"x\", self.data)\n g.plot_marginals(kdeplot)\n\n _, y1 = g.ax_marg_x.lines[0].get_xydata().T\n y2, _ = g.ax_marg_y.lines[0].get_xydata().T\n npt.assert_array_equal(y1, y2)\n\n def test_plot(self):\n\n g = ag.JointGrid(\"x\", \"x\", self.data)\n g.plot(plt.plot, kdeplot)\n\n x, y = g.ax_joint.lines[0].get_xydata().T\n npt.assert_array_equal(x, self.x)\n npt.assert_array_equal(y, self.x)\n\n _, y1 = g.ax_marg_x.lines[0].get_xydata().T\n y2, _ = g.ax_marg_y.lines[0].get_xydata().T\n npt.assert_array_equal(y1, y2)\n\n def test_annotate(self):\n\n g = ag.JointGrid(\"x\", \"y\", self.data)\n rp = stats.pearsonr(self.x, self.y)\n\n g.annotate(stats.pearsonr)\n annotation = g.ax_joint.legend_.texts[0].get_text()\n nt.assert_equal(annotation, \"pearsonr = %.2g; p = %.2g\" % rp)\n\n g.annotate(stats.pearsonr, stat=\"correlation\")\n annotation = g.ax_joint.legend_.texts[0].get_text()\n nt.assert_equal(annotation, \"correlation = %.2g; p = %.2g\" % rp)\n\n def rsquared(x, y):\n return stats.pearsonr(x, y)[0] ** 2\n\n r2 = rsquared(self.x, self.y)\n g.annotate(rsquared)\n annotation = g.ax_joint.legend_.texts[0].get_text()\n nt.assert_equal(annotation, \"rsquared = %.2g\" % r2)\n\n template = \"{stat} = {val:.3g} (p = {p:.3g})\"\n g.annotate(stats.pearsonr, template=template)\n annotation = g.ax_joint.legend_.texts[0].get_text()\n nt.assert_equal(annotation, 
template.format(stat=\"pearsonr\",\n val=rp[0], p=rp[1]))\n\n def test_space(self):\n\n g = ag.JointGrid(\"x\", \"y\", self.data, space=0)\n\n joint_bounds = g.ax_joint.bbox.bounds\n marg_x_bounds = g.ax_marg_x.bbox.bounds\n marg_y_bounds = g.ax_marg_y.bbox.bounds\n\n nt.assert_equal(joint_bounds[2], marg_x_bounds[2])\n nt.assert_equal(joint_bounds[3], marg_y_bounds[3])\n\n\nclass TestJointPlot(PlotTestCase):\n\n rs = np.random.RandomState(sum(map(ord, \"jointplot\")))\n x = rs.randn(100)\n y = rs.randn(100)\n data = pd.DataFrame(dict(x=x, y=y))\n\n def test_scatter(self):\n\n g = ag.jointplot(\"x\", \"y\", self.data)\n nt.assert_equal(len(g.ax_joint.collections), 1)\n\n x, y = g.ax_joint.collections[0].get_offsets().T\n npt.assert_array_equal(self.x, x)\n npt.assert_array_equal(self.y, y)\n\n x_bins = _freedman_diaconis_bins(self.x)\n nt.assert_equal(len(g.ax_marg_x.patches), x_bins)\n\n y_bins = _freedman_diaconis_bins(self.y)\n nt.assert_equal(len(g.ax_marg_y.patches), y_bins)\n\n def test_reg(self):\n\n g = ag.jointplot(\"x\", \"y\", self.data, kind=\"reg\")\n nt.assert_equal(len(g.ax_joint.collections), 2)\n\n x, y = g.ax_joint.collections[0].get_offsets().T\n npt.assert_array_equal(self.x, x)\n npt.assert_array_equal(self.y, y)\n\n x_bins = _freedman_diaconis_bins(self.x)\n nt.assert_equal(len(g.ax_marg_x.patches), x_bins)\n\n y_bins = _freedman_diaconis_bins(self.y)\n nt.assert_equal(len(g.ax_marg_y.patches), y_bins)\n\n nt.assert_equal(len(g.ax_joint.lines), 1)\n nt.assert_equal(len(g.ax_marg_x.lines), 1)\n nt.assert_equal(len(g.ax_marg_y.lines), 1)\n\n def test_resid(self):\n\n g = ag.jointplot(\"x\", \"y\", self.data, kind=\"resid\")\n nt.assert_equal(len(g.ax_joint.collections), 1)\n nt.assert_equal(len(g.ax_joint.lines), 1)\n nt.assert_equal(len(g.ax_marg_x.lines), 0)\n nt.assert_equal(len(g.ax_marg_y.lines), 1)\n\n def test_hex(self):\n\n g = ag.jointplot(\"x\", \"y\", self.data, kind=\"hex\")\n nt.assert_equal(len(g.ax_joint.collections), 1)\n\n 
x_bins = _freedman_diaconis_bins(self.x)\n nt.assert_equal(len(g.ax_marg_x.patches), x_bins)\n\n y_bins = _freedman_diaconis_bins(self.y)\n nt.assert_equal(len(g.ax_marg_y.patches), y_bins)\n\n def test_kde(self):\n\n g = ag.jointplot(\"x\", \"y\", self.data, kind=\"kde\")\n\n nt.assert_true(len(g.ax_joint.collections) > 0)\n nt.assert_equal(len(g.ax_marg_x.collections), 1)\n nt.assert_equal(len(g.ax_marg_y.collections), 1)\n\n nt.assert_equal(len(g.ax_marg_x.lines), 1)\n nt.assert_equal(len(g.ax_marg_y.lines), 1)\n\n def test_color(self):\n\n g = ag.jointplot(\"x\", \"y\", self.data, color=\"purple\")\n\n purple = mpl.colors.colorConverter.to_rgb(\"purple\")\n scatter_color = g.ax_joint.collections[0].get_facecolor()[0, :3]\n nt.assert_equal(tuple(scatter_color), purple)\n\n hist_color = g.ax_marg_x.patches[0].get_facecolor()[:3]\n nt.assert_equal(hist_color, purple)\n\n def test_annotation(self):\n\n g = ag.jointplot(\"x\", \"y\", self.data)\n nt.assert_equal(len(g.ax_joint.legend_.get_texts()), 1)\n\n g = ag.jointplot(\"x\", \"y\", self.data, stat_func=None)\n nt.assert_is(g.ax_joint.legend_, None)\n\n def test_hex_customise(self):\n\n # test that default gridsize can be overridden\n g = ag.jointplot(\"x\", \"y\", self.data, kind=\"hex\",\n joint_kws=dict(gridsize=5))\n nt.assert_equal(len(g.ax_joint.collections), 1)\n a = g.ax_joint.collections[0].get_array()\n nt.assert_equal(28, a.shape[0]) # 28 hexagons expected for gridsize 5\n\n def test_bad_kind(self):\n\n with nt.assert_raises(ValueError):\n ag.jointplot(\"x\", \"y\", self.data, kind=\"not_a_kind\")\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2011 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\n\"\"\"\n**For a faster implementation of LDA (parallelized for multicore machines), see** :mod:`gensim.models.ldamulticore`.\n\nLatent Dirichlet Allocation (LDA) in Python.\n\nThis module allows both LDA model estimation from a training corpus and inference of topic\ndistribution on new, unseen documents. The model can also be updated with new documents\nfor online training.\n\nThe core estimation code is based on the `onlineldavb.py` script by M. Hoffman [1]_, see\n**Hoffman, Blei, Bach: Online Learning for Latent Dirichlet Allocation, NIPS 2010.**\n\nThe algorithm:\n\n* is **streamed**: training documents may come in sequentially, no random access required,\n* runs in **constant memory** w.r.t. the number of documents: size of the\n training corpus does not affect memory footprint, can process corpora larger than RAM, and\n* is **distributed**: makes use of a cluster of machines, if available, to\n speed up model estimation.\n\n.. 
[1] http://www.cs.princeton.edu/~mdhoffma\n\n\"\"\"\n\n\nimport logging\nimport numpy as np\nimport numbers\nfrom random import sample\nimport os\n\nfrom gensim import interfaces, utils, matutils\nfrom gensim.matutils import dirichlet_expectation\nfrom gensim.models import basemodel\nfrom gensim.matutils import kullback_leibler, hellinger, jaccard_distance\n\nfrom itertools import chain\nfrom scipy.special import gammaln, psi # gamma function utils\nfrom scipy.special import polygamma\nfrom six.moves import xrange\nimport six\n\n# log(sum(exp(x))) that tries to avoid overflow\ntry:\n # try importing from here if older scipy is installed\n from scipy.maxentropy import logsumexp\nexcept ImportError:\n # maxentropy has been removed in recent releases, logsumexp now in misc\n from scipy.misc import logsumexp\n\n\nlogger = logging.getLogger('gensim.models.ldamodel')\n\n\ndef update_dir_prior(prior, N, logphat, rho):\n \"\"\"\n Updates a given prior using Newton's method, described in\n **Huang: Maximum Likelihood Estimation of Dirichlet Distribution Parameters.**\n http://jonathan-huang.org/research/dirichlet/dirichlet.pdf\n \"\"\"\n dprior = np.copy(prior)\n gradf = N * (psi(np.sum(prior)) - psi(prior) + logphat)\n\n c = N * polygamma(1, np.sum(prior))\n q = -N * polygamma(1, prior)\n\n b = np.sum(gradf / q) / (1 / c + np.sum(1 / q))\n\n dprior = -(gradf - b) / q\n\n if all(rho * dprior + prior > 0):\n prior += rho * dprior\n else:\n logger.warning(\"updated prior not positive\")\n\n return prior\n\n\nclass LdaState(utils.SaveLoad):\n \"\"\"\n Encapsulate information for distributed computation of LdaModel objects.\n\n Objects of this class are sent over the network, so try to keep them lean to\n reduce traffic.\n\n \"\"\"\n def __init__(self, eta, shape):\n self.eta = eta\n self.sstats = np.zeros(shape)\n self.numdocs = 0\n\n def reset(self):\n \"\"\"\n Prepare the state for a new EM iteration (reset sufficient stats).\n\n \"\"\"\n self.sstats[:] = 0.0\n self.numdocs 
= 0\n\n def merge(self, other):\n \"\"\"\n Merge the result of an E step from one node with that of another node\n (summing up sufficient statistics).\n\n The merging is trivial and after merging all cluster nodes, we have the\n exact same result as if the computation was run on a single node (no\n approximation).\n\n \"\"\"\n assert other is not None\n self.sstats += other.sstats\n self.numdocs += other.numdocs\n\n def blend(self, rhot, other, targetsize=None):\n \"\"\"\n Given LdaState `other`, merge it with the current state. Stretch both to\n `targetsize` documents before merging, so that they are of comparable\n magnitude.\n\n Merging is done by average weighting: in the extremes, `rhot=0.0` means\n `other` is completely ignored; `rhot=1.0` means `self` is completely ignored.\n\n This procedure corresponds to the stochastic gradient update from Hoffman\n et al., algorithm 2 (eq. 14).\n\n \"\"\"\n assert other is not None\n if targetsize is None:\n targetsize = self.numdocs\n\n # stretch the current model's expected n*phi counts to target size\n if self.numdocs == 0 or targetsize == self.numdocs:\n scale = 1.0\n else:\n scale = 1.0 * targetsize / self.numdocs\n self.sstats *= (1.0 - rhot) * scale\n\n # stretch the incoming n*phi counts to target size\n if other.numdocs == 0 or targetsize == other.numdocs:\n scale = 1.0\n else:\n logger.info(\"merging changes from %i documents into a model of %i documents\",\n other.numdocs, targetsize)\n scale = 1.0 * targetsize / other.numdocs\n self.sstats += rhot * scale * other.sstats\n\n self.numdocs = targetsize\n\n def blend2(self, rhot, other, targetsize=None):\n \"\"\"\n Alternative, more simple blend.\n \"\"\"\n assert other is not None\n if targetsize is None:\n targetsize = self.numdocs\n\n # merge the two matrices by summing\n self.sstats += other.sstats\n self.numdocs = targetsize\n\n def get_lambda(self):\n return self.eta + self.sstats\n\n def get_Elogbeta(self):\n return 
dirichlet_expectation(self.get_lambda())\n# endclass LdaState\n\n\nclass LdaModel(interfaces.TransformationABC, basemodel.BaseTopicModel):\n \"\"\"\n The constructor estimates Latent Dirichlet Allocation model parameters based\n on a training corpus:\n\n >>> lda = LdaModel(corpus, num_topics=10)\n\n You can then infer topic distributions on new, unseen documents, with\n\n >>> doc_lda = lda[doc_bow]\n\n The model can be updated (trained) with new documents via\n\n >>> lda.update(other_corpus)\n\n Model persistency is achieved through its `load`/`save` methods.\n \"\"\"\n def __init__(self, corpus=None, num_topics=100, id2word=None,\n distributed=False, chunksize=2000, passes=1, update_every=1,\n alpha='symmetric', eta=None, decay=0.5, offset=1.0,\n eval_every=10, iterations=50, gamma_threshold=0.001,\n minimum_probability=0.01, random_state=None, ns_conf={},\n minimum_phi_value=0.01, per_word_topics=False):\n \"\"\"\n If given, start training from the iterable `corpus` straight away. If not given,\n the model is left untrained (presumably because you want to call `update()` manually).\n\n `num_topics` is the number of requested latent topics to be extracted from\n the training corpus.\n\n `id2word` is a mapping from word ids (integers) to words (strings). It is\n used to determine the vocabulary size, as well as for debugging and topic\n printing.\n\n `alpha` and `eta` are hyperparameters that affect sparsity of the document-topic\n (theta) and topic-word (lambda) distributions. Both default to a symmetric\n 1.0/num_topics prior.\n\n `alpha` can be set to an explicit array = prior of your choice. 
It also\n support special values of 'asymmetric' and 'auto': the former uses a fixed\n normalized asymmetric 1.0/topicno prior, the latter learns an asymmetric\n prior directly from your data.\n\n `eta` can be a scalar for a symmetric prior over topic/word\n distributions, or a vector of shape num_words, which can be used to\n impose (user defined) asymmetric priors over the word distribution.\n It also supports the special value 'auto', which learns an asymmetric\n prior over words directly from your data. `eta` can also be a matrix\n of shape num_topics x num_words, which can be used to impose\n asymmetric priors over the word distribution on a per-topic basis\n (can not be learned from data).\n\n Turn on `distributed` to force distributed computing (see the `web tutorial <http://radimrehurek.com/gensim/distributed.html>`_\n on how to set up a cluster of machines for gensim).\n\n Calculate and log perplexity estimate from the latest mini-batch every\n `eval_every` model updates (setting this to 1 slows down training ~2x;\n default is 10 for better performance). 
Set to None to disable perplexity estimation.\n\n `decay` and `offset` parameters are the same as Kappa and Tau_0 in\n Hoffman et al, respectively.\n\n `minimum_probability` controls filtering the topics returned for a document (bow).\n\n `random_state` can be a np.random.RandomState object or the seed for one\n\n Example:\n\n >>> lda = LdaModel(corpus, num_topics=100) # train model\n >>> print(lda[doc_bow]) # get topic probability distribution for a document\n >>> lda.update(corpus2) # update the LDA model with additional documents\n >>> print(lda[doc_bow])\n\n >>> lda = LdaModel(corpus, num_topics=50, alpha='auto', eval_every=5) # train asymmetric alpha from data\n\n \"\"\"\n\n # store user-supplied parameters\n self.id2word = id2word\n if corpus is None and self.id2word is None:\n raise ValueError('at least one of corpus/id2word must be specified, to establish input space dimensionality')\n\n if self.id2word is None:\n logger.warning(\"no word id mapping provided; initializing from corpus, assuming identity\")\n self.id2word = utils.dict_from_corpus(corpus)\n self.num_terms = len(self.id2word)\n elif len(self.id2word) > 0:\n self.num_terms = 1 + max(self.id2word.keys())\n else:\n self.num_terms = 0\n\n if self.num_terms == 0:\n raise ValueError(\"cannot compute LDA over an empty collection (no terms)\")\n\n self.distributed = bool(distributed)\n self.num_topics = int(num_topics)\n self.chunksize = chunksize\n self.decay = decay\n self.offset = offset\n self.minimum_probability = minimum_probability\n self.num_updates = 0\n\n self.passes = passes\n self.update_every = update_every\n self.eval_every = eval_every\n self.minimum_phi_value = minimum_phi_value\n self.per_word_topics = per_word_topics\n\n self.alpha, self.optimize_alpha = self.init_dir_prior(alpha, 'alpha')\n\n assert self.alpha.shape == (self.num_topics,), \"Invalid alpha shape. 
Got shape %s, but expected (%d, )\" % (str(self.alpha.shape), self.num_topics)\n\n if isinstance(eta, six.string_types):\n if eta == 'asymmetric':\n raise ValueError(\"The 'asymmetric' option cannot be used for eta\")\n\n self.eta, self.optimize_eta = self.init_dir_prior(eta, 'eta')\n\n self.random_state = utils.get_random_state(random_state)\n\n assert self.eta.shape == (self.num_terms,) or self.eta.shape == (self.num_topics, self.num_terms), (\n \"Invalid eta shape. Got shape %s, but expected (%d, 1) or (%d, %d)\" %\n (str(self.eta.shape), self.num_terms, self.num_topics, self.num_terms))\n\n # VB constants\n self.iterations = iterations\n self.gamma_threshold = gamma_threshold\n\n # set up distributed environment if necessary\n if not distributed:\n logger.info(\"using serial LDA version on this node\")\n self.dispatcher = None\n self.numworkers = 1\n else:\n if self.optimize_alpha:\n raise NotImplementedError(\"auto-optimizing alpha not implemented in distributed LDA\")\n # set up distributed version\n try:\n import Pyro4\n with utils.getNS(**ns_conf) as ns:\n from gensim.models.lda_dispatcher import LDA_DISPATCHER_PREFIX\n self.dispatcher = Pyro4.Proxy(ns.list(prefix=LDA_DISPATCHER_PREFIX)[LDA_DISPATCHER_PREFIX])\n logger.debug(\"looking for dispatcher at %s\" % str(self.dispatcher._pyroUri))\n self.dispatcher.initialize(id2word=self.id2word, num_topics=self.num_topics,\n chunksize=chunksize, alpha=alpha, eta=eta, distributed=False)\n self.numworkers = len(self.dispatcher.getworkers())\n logger.info(\"using distributed version with %i workers\" % self.numworkers)\n except Exception as err:\n logger.error(\"failed to initialize distributed LDA (%s)\", err)\n raise RuntimeError(\"failed to initialize distributed LDA (%s)\" % err)\n\n # Initialize the variational distribution q(beta|lambda)\n self.state = LdaState(self.eta, (self.num_topics, self.num_terms))\n self.state.sstats = self.random_state.gamma(100., 1. 
/ 100., (self.num_topics, self.num_terms))\n self.expElogbeta = np.exp(dirichlet_expectation(self.state.sstats))\n\n # if a training corpus was provided, start estimating the model right away\n if corpus is not None:\n use_numpy = self.dispatcher is not None\n self.update(corpus, chunks_as_numpy=use_numpy)\n\n def init_dir_prior(self, prior, name):\n if prior is None:\n prior = 'symmetric'\n\n if name == 'alpha':\n prior_shape = self.num_topics\n elif name == 'eta':\n prior_shape = self.num_terms\n else:\n raise ValueError(\"'name' must be 'alpha' or 'eta'\")\n\n is_auto = False\n\n if isinstance(prior, six.string_types):\n if prior == 'symmetric':\n logger.info(\"using symmetric %s at %s\", name, 1.0 / prior_shape)\n init_prior = np.asarray([1.0 / self.num_topics for i in xrange(prior_shape)])\n elif prior == 'asymmetric':\n init_prior = np.asarray([1.0 / (i + np.sqrt(prior_shape)) for i in xrange(prior_shape)])\n init_prior /= init_prior.sum()\n logger.info(\"using asymmetric %s %s\", name, list(init_prior))\n elif prior == 'auto':\n is_auto = True\n init_prior = np.asarray([1.0 / self.num_topics for i in xrange(prior_shape)])\n if name == 'alpha':\n logger.info(\"using autotuned %s, starting with %s\", name, list(init_prior))\n else:\n raise ValueError(\"Unable to determine proper %s value given '%s'\" % (name, prior))\n elif isinstance(prior, list):\n init_prior = np.asarray(prior)\n elif isinstance(prior, np.ndarray):\n init_prior = prior\n elif isinstance(prior, np.number) or isinstance(prior, numbers.Real):\n init_prior = np.asarray([prior] * prior_shape)\n else:\n raise ValueError(\"%s must be either a np array of scalars, list of scalars, or scalar\" % name)\n\n return init_prior, is_auto\n\n def __str__(self):\n return \"LdaModel(num_terms=%s, num_topics=%s, decay=%s, chunksize=%s)\" % \\\n (self.num_terms, self.num_topics, self.decay, self.chunksize)\n\n def sync_state(self):\n self.expElogbeta = np.exp(self.state.get_Elogbeta())\n\n def clear(self):\n 
\"\"\"Clear model state (free up some memory). Used in the distributed algo.\"\"\"\n self.state = None\n self.Elogbeta = None\n\n def inference(self, chunk, collect_sstats=False):\n \"\"\"\n Given a chunk of sparse document vectors, estimate gamma (parameters\n controlling the topic weights) for each document in the chunk.\n\n This function does not modify the model (=is read-only aka const). The\n whole input chunk of document is assumed to fit in RAM; chunking of a\n large corpus must be done earlier in the pipeline.\n\n If `collect_sstats` is True, also collect sufficient statistics needed\n to update the model's topic-word distributions, and return a 2-tuple\n `(gamma, sstats)`. Otherwise, return `(gamma, None)`. `gamma` is of shape\n `len(chunk) x self.num_topics`.\n\n Avoids computing the `phi` variational parameter directly using the\n optimization presented in **Lee, Seung: Algorithms for non-negative matrix factorization, NIPS 2001**.\n\n \"\"\"\n try:\n _ = len(chunk)\n except:\n # convert iterators/generators to plain list, so we have len() etc.\n chunk = list(chunk)\n if len(chunk) > 1:\n logger.debug(\"performing inference on a chunk of %i documents\", len(chunk))\n\n # Initialize the variational distribution q(theta|gamma) for the chunk\n gamma = self.random_state.gamma(100., 1. / 100., (len(chunk), self.num_topics))\n Elogtheta = dirichlet_expectation(gamma)\n expElogtheta = np.exp(Elogtheta)\n if collect_sstats:\n sstats = np.zeros_like(self.expElogbeta)\n else:\n sstats = None\n converged = 0\n\n # Now, for each document d update that document's gamma and phi\n # Inference code copied from Hoffman's `onlineldavb.py` (esp. 
the\n # Lee&Seung trick which speeds things up by an order of magnitude, compared\n # to Blei's original LDA-C code, cool!).\n for d, doc in enumerate(chunk):\n if len(doc) > 0 and not isinstance(doc[0][0], six.integer_types + (np.integer,)):\n # make sure the term IDs are ints, otherwise np will get upset\n ids = [int(id) for id, _ in doc]\n else:\n ids = [id for id, _ in doc]\n cts = np.array([cnt for _, cnt in doc])\n gammad = gamma[d, :]\n Elogthetad = Elogtheta[d, :]\n expElogthetad = expElogtheta[d, :]\n expElogbetad = self.expElogbeta[:, ids]\n\n # The optimal phi_{dwk} is proportional to expElogthetad_k * expElogbetad_w.\n # phinorm is the normalizer.\n # TODO treat zeros explicitly, instead of adding 1e-100?\n phinorm = np.dot(expElogthetad, expElogbetad) + 1e-100\n\n # Iterate between gamma and phi until convergence\n for _ in xrange(self.iterations):\n lastgamma = gammad\n # We represent phi implicitly to save memory and time.\n # Substituting the value of the optimal phi back into\n # the update for gamma gives this update. Cf. 
Lee&Seung 2001.\n gammad = self.alpha + expElogthetad * np.dot(cts / phinorm, expElogbetad.T)\n Elogthetad = dirichlet_expectation(gammad)\n expElogthetad = np.exp(Elogthetad)\n phinorm = np.dot(expElogthetad, expElogbetad) + 1e-100\n # If gamma hasn't changed much, we're done.\n meanchange = np.mean(abs(gammad - lastgamma))\n if (meanchange < self.gamma_threshold):\n converged += 1\n break\n gamma[d, :] = gammad\n if collect_sstats:\n # Contribution of document d to the expected sufficient\n # statistics for the M step.\n sstats[:, ids] += np.outer(expElogthetad.T, cts / phinorm)\n\n if len(chunk) > 1:\n logger.debug(\"%i/%i documents converged within %i iterations\",\n converged, len(chunk), self.iterations)\n\n if collect_sstats:\n # This step finishes computing the sufficient statistics for the\n # M step, so that\n # sstats[k, w] = \\sum_d n_{dw} * phi_{dwk}\n # = \\sum_d n_{dw} * exp{Elogtheta_{dk} + Elogbeta_{kw}} / phinorm_{dw}.\n sstats *= self.expElogbeta\n return gamma, sstats\n\n def do_estep(self, chunk, state=None):\n \"\"\"\n Perform inference on a chunk of documents, and accumulate the collected\n sufficient statistics in `state` (or `self.state` if None).\n\n \"\"\"\n if state is None:\n state = self.state\n gamma, sstats = self.inference(chunk, collect_sstats=True)\n state.sstats += sstats\n state.numdocs += gamma.shape[0] # avoids calling len(chunk) on a generator\n return gamma\n\n def update_alpha(self, gammat, rho):\n \"\"\"\n Update parameters for the Dirichlet prior on the per-document\n topic weights `alpha` given the last `gammat`.\n \"\"\"\n N = float(len(gammat))\n logphat = sum(dirichlet_expectation(gamma) for gamma in gammat) / N\n\n self.alpha = update_dir_prior(self.alpha, N, logphat, rho)\n logger.info(\"optimized alpha %s\", list(self.alpha))\n\n return self.alpha\n\n def update_eta(self, lambdat, rho):\n \"\"\"\n Update parameters for the Dirichlet prior on the per-topic\n word weights `eta` given the last `lambdat`.\n \"\"\"\n N 
= float(lambdat.shape[0])\n logphat = (sum(dirichlet_expectation(lambda_) for lambda_ in lambdat) / N).reshape((self.num_terms,))\n\n self.eta = update_dir_prior(self.eta, N, logphat, rho)\n\n return self.eta\n\n def log_perplexity(self, chunk, total_docs=None):\n \"\"\"\n Calculate and return per-word likelihood bound, using the `chunk` of\n documents as evaluation corpus. Also output the calculated statistics. incl.\n perplexity=2^(-bound), to log at INFO level.\n\n \"\"\"\n if total_docs is None:\n total_docs = len(chunk)\n corpus_words = sum(cnt for document in chunk for _, cnt in document)\n subsample_ratio = 1.0 * total_docs / len(chunk)\n perwordbound = self.bound(chunk, subsample_ratio=subsample_ratio) / (subsample_ratio * corpus_words)\n logger.info(\"%.3f per-word bound, %.1f perplexity estimate based on a held-out corpus of %i documents with %i words\" %\n (perwordbound, np.exp2(-perwordbound), len(chunk), corpus_words))\n return perwordbound\n\n def update(self, corpus, chunksize=None, decay=None, offset=None,\n passes=None, update_every=None, eval_every=None, iterations=None,\n gamma_threshold=None, chunks_as_numpy=False):\n \"\"\"\n Train the model with new documents, by EM-iterating over `corpus` until\n the topics converge (or until the maximum number of allowed iterations\n is reached). `corpus` must be an iterable (repeatable stream of documents),\n\n In distributed mode, the E step is distributed over a cluster of machines.\n\n This update also supports updating an already trained model (`self`)\n with new documents from `corpus`; the two models are then merged in\n proportion to the number of old vs. new documents. This feature is still\n experimental for non-stationary input streams.\n\n For stationary input (no topic drift in new documents), on the other hand,\n this equals the online update of Hoffman et al. and is guaranteed to\n converge for any `decay` in (0.5, 1.0>. 
Additionally, for smaller\n `corpus` sizes, an increasing `offset` may be beneficial (see\n Table 1 in Hoffman et al.)\n\n Args:\n corpus (gensim corpus): The corpus with which the LDA model should be updated.\n\n chunks_as_numpy (bool): Whether each chunk passed to `.inference` should be a np\n array of not. np can in some settings turn the term IDs\n into floats, these will be converted back into integers in\n inference, which incurs a performance hit. For distributed\n computing it may be desirable to keep the chunks as np\n arrays.\n\n For other parameter settings, see :class:`LdaModel` constructor.\n\n \"\"\"\n # use parameters given in constructor, unless user explicitly overrode them\n if decay is None:\n decay = self.decay\n if offset is None:\n offset = self.offset\n if passes is None:\n passes = self.passes\n if update_every is None:\n update_every = self.update_every\n if eval_every is None:\n eval_every = self.eval_every\n if iterations is None:\n iterations = self.iterations\n if gamma_threshold is None:\n gamma_threshold = self.gamma_threshold\n\n try:\n lencorpus = len(corpus)\n except:\n logger.warning(\"input corpus stream has no len(); counting documents\")\n lencorpus = sum(1 for _ in corpus)\n if lencorpus == 0:\n logger.warning(\"LdaModel.update() called with an empty corpus\")\n return\n\n if chunksize is None:\n chunksize = min(lencorpus, self.chunksize)\n\n self.state.numdocs += lencorpus\n\n if update_every:\n updatetype = \"online\"\n if passes == 1:\n updatetype += \" (single-pass)\"\n else:\n updatetype += \" (multi-pass)\"\n updateafter = min(lencorpus, update_every * self.numworkers * chunksize)\n else:\n updatetype = \"batch\"\n updateafter = lencorpus\n evalafter = min(lencorpus, (eval_every or 0) * self.numworkers * chunksize)\n\n updates_per_pass = max(1, lencorpus / updateafter)\n logger.info(\n \"running %s LDA training, %s topics, %i passes over \"\n \"the supplied corpus of %i documents, updating model once \"\n \"every %i 
documents, evaluating perplexity every %i documents, \"\n \"iterating %ix with a convergence threshold of %f\",\n updatetype, self.num_topics, passes, lencorpus,\n updateafter, evalafter, iterations,\n gamma_threshold)\n\n if updates_per_pass * passes < 10:\n logger.warning(\n \"too few updates, training might not converge; consider \"\n \"increasing the number of passes or iterations to improve accuracy\")\n\n # rho is the \"speed\" of updating; TODO try other fncs\n # pass_ + num_updates handles increasing the starting t for each pass,\n # while allowing it to \"reset\" on the first pass of each update\n def rho():\n return pow(offset + pass_ + (self.num_updates / chunksize), -decay)\n\n for pass_ in xrange(passes):\n if self.dispatcher:\n logger.info('initializing %s workers' % self.numworkers)\n self.dispatcher.reset(self.state)\n else:\n other = LdaState(self.eta, self.state.sstats.shape)\n dirty = False\n\n reallen = 0\n for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize, as_numpy=chunks_as_numpy)):\n reallen += len(chunk) # keep track of how many documents we've processed so far\n\n if eval_every and ((reallen == lencorpus) or ((chunk_no + 1) % (eval_every * self.numworkers) == 0)):\n self.log_perplexity(chunk, total_docs=lencorpus)\n\n if self.dispatcher:\n # add the chunk to dispatcher's job queue, so workers can munch on it\n logger.info('PROGRESS: pass %i, dispatching documents up to #%i/%i',\n pass_, chunk_no * chunksize + len(chunk), lencorpus)\n # this will eventually block until some jobs finish, because the queue has a small finite length\n self.dispatcher.putjob(chunk)\n else:\n logger.info('PROGRESS: pass %i, at document #%i/%i',\n pass_, chunk_no * chunksize + len(chunk), lencorpus)\n gammat = self.do_estep(chunk, other)\n\n if self.optimize_alpha:\n self.update_alpha(gammat, rho())\n\n dirty = True\n del chunk\n\n # perform an M step. 
determine when based on update_every, don't do this after every chunk\n if update_every and (chunk_no + 1) % (update_every * self.numworkers) == 0:\n if self.dispatcher:\n # distributed mode: wait for all workers to finish\n logger.info(\"reached the end of input; now waiting for all remaining jobs to finish\")\n other = self.dispatcher.getstate()\n self.do_mstep(rho(), other, pass_ > 0)\n del other # frees up memory\n\n if self.dispatcher:\n logger.info('initializing workers')\n self.dispatcher.reset(self.state)\n else:\n other = LdaState(self.eta, self.state.sstats.shape)\n dirty = False\n # endfor single corpus iteration\n if reallen != lencorpus:\n raise RuntimeError(\"input corpus size changed during training (don't use generators as input)\")\n\n if dirty:\n # finish any remaining updates\n if self.dispatcher:\n # distributed mode: wait for all workers to finish\n logger.info(\"reached the end of input; now waiting for all remaining jobs to finish\")\n other = self.dispatcher.getstate()\n self.do_mstep(rho(), other, pass_ > 0)\n del other\n dirty = False\n # endfor entire corpus update\n\n def do_mstep(self, rho, other, extra_pass=False):\n \"\"\"\n M step: use linear interpolation between the existing topics and\n collected sufficient statistics in `other` to update the topics.\n\n \"\"\"\n logger.debug(\"updating topics\")\n # update self with the new blend; also keep track of how much did\n # the topics change through this update, to assess convergence\n diff = np.log(self.expElogbeta)\n self.state.blend(rho, other)\n diff -= self.state.get_Elogbeta()\n self.sync_state()\n\n # print out some debug info at the end of each EM iteration\n self.print_topics(5)\n logger.info(\"topic diff=%f, rho=%f\", np.mean(np.abs(diff)), rho)\n\n if self.optimize_eta:\n self.update_eta(self.state.get_lambda(), rho)\n\n if not extra_pass:\n # only update if this isn't an additional pass\n self.num_updates += other.numdocs\n\n def bound(self, corpus, gamma=None, 
subsample_ratio=1.0):\n \"\"\"\n Estimate the variational bound of documents from `corpus`:\n E_q[log p(corpus)] - E_q[log q(corpus)]\n\n `gamma` are the variational parameters on topic weights for each `corpus`\n document (=2d matrix=what comes out of `inference()`).\n If not supplied, will be inferred from the model.\n\n \"\"\"\n score = 0.0\n _lambda = self.state.get_lambda()\n Elogbeta = dirichlet_expectation(_lambda)\n\n for d, doc in enumerate(corpus): # stream the input doc-by-doc, in case it's too large to fit in RAM\n if d % self.chunksize == 0:\n logger.debug(\"bound: at document #%i\", d)\n if gamma is None:\n gammad, _ = self.inference([doc])\n else:\n gammad = gamma[d]\n Elogthetad = dirichlet_expectation(gammad)\n\n # E[log p(doc | theta, beta)]\n score += np.sum(cnt * logsumexp(Elogthetad + Elogbeta[:, int(id)]) for id, cnt in doc)\n\n # E[log p(theta | alpha) - log q(theta | gamma)]; assumes alpha is a vector\n score += np.sum((self.alpha - gammad) * Elogthetad)\n score += np.sum(gammaln(gammad) - gammaln(self.alpha))\n score += gammaln(np.sum(self.alpha)) - gammaln(np.sum(gammad))\n\n # Compensate likelihood for when `corpus` above is only a sample of the whole corpus. 
This ensures\n # that the likelihood is always rougly on the same scale.\n score *= subsample_ratio\n\n # E[log p(beta | eta) - log q (beta | lambda)]; assumes eta is a scalar\n score += np.sum((self.eta - _lambda) * Elogbeta)\n score += np.sum(gammaln(_lambda) - gammaln(self.eta))\n\n if np.ndim(self.eta) == 0:\n sum_eta = self.eta * self.num_terms\n else:\n sum_eta = np.sum(self.eta)\n\n score += np.sum(gammaln(sum_eta) - gammaln(np.sum(_lambda, 1)))\n\n return score\n\n def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True):\n \"\"\"\n For `num_topics` number of topics, return `num_words` most significant words\n (10 words per topic, by default).\n\n The topics are returned as a list -- a list of strings if `formatted` is\n True, or a list of `(word, probability)` 2-tuples if False.\n\n If `log` is True, also output this result to log.\n\n Unlike LSA, there is no natural ordering between the topics in LDA.\n The returned `num_topics <= self.num_topics` subset of all topics is therefore\n arbitrary and may change between two LDA training runs.\n\n \"\"\"\n if num_topics < 0 or num_topics >= self.num_topics:\n num_topics = self.num_topics\n chosen_topics = range(num_topics)\n else:\n num_topics = min(num_topics, self.num_topics)\n\n # add a little random jitter, to randomize results around the same alpha\n sort_alpha = self.alpha + 0.0001 * self.random_state.rand(len(self.alpha))\n\n sorted_topics = list(matutils.argsort(sort_alpha))\n chosen_topics = sorted_topics[:num_topics // 2] + sorted_topics[-num_topics // 2:]\n\n shown = []\n\n topic = self.state.get_lambda()\n for i in chosen_topics:\n topic_ = topic[i]\n topic_ = topic_ / topic_.sum() # normalize to probability distribution\n bestn = matutils.argsort(topic_, num_words, reverse=True)\n topic_ = [(self.id2word[id], topic_[id]) for id in bestn]\n if formatted:\n topic_ = ' + '.join(['%.3f*\"%s\"' % (v, k) for k, v in topic_])\n\n shown.append((i, topic_))\n if log:\n 
logger.info(\"topic #%i (%.3f): %s\", i, self.alpha[i], topic_)\n\n return shown\n\n def show_topic(self, topicid, topn=10):\n \"\"\"\n Return a list of `(word, probability)` 2-tuples for the most probable\n words in topic `topicid`.\n\n Only return 2-tuples for the topn most probable words (ignore the rest).\n\n \"\"\"\n return [(self.id2word[id], value) for id, value in self.get_topic_terms(topicid, topn)]\n\n def get_topic_terms(self, topicid, topn=10):\n \"\"\"\n Return a list of `(word_id, probability)` 2-tuples for the most\n probable words in topic `topicid`.\n\n Only return 2-tuples for the topn most probable words (ignore the rest).\n\n \"\"\"\n topic = self.state.get_lambda()[topicid]\n topic = topic / topic.sum() # normalize to probability distribution\n bestn = matutils.argsort(topic, topn, reverse=True)\n return [(id, topic[id]) for id in bestn]\n\n def top_topics(self, corpus, num_words=20):\n \"\"\"\n Calculate the Umass topic coherence for each topic. Algorithm from\n **Mimno, Wallach, Talley, Leenders, McCallum: Optimizing Semantic Coherence in Topic Models, CEMNLP 2011.**\n \"\"\"\n is_corpus, corpus = utils.is_corpus(corpus)\n if not is_corpus:\n logger.warning(\"LdaModel.top_topics() called with an empty corpus\")\n return\n\n topics = []\n str_topics = []\n for topic in self.state.get_lambda():\n topic = topic / topic.sum() # normalize to probability distribution\n bestn = matutils.argsort(topic, topn=num_words, reverse=True)\n topics.append(bestn)\n beststr = [(topic[id], self.id2word[id]) for id in bestn]\n str_topics.append(beststr)\n\n # top_ids are limited to every topics top words. 
should not exceed the\n # vocabulary size.\n top_ids = set(chain.from_iterable(topics))\n\n # create a document occurence sparse matrix for each word\n doc_word_list = {}\n for id in top_ids:\n id_list = set()\n for n, document in enumerate(corpus):\n if id in frozenset(x[0] for x in document):\n id_list.add(n)\n\n doc_word_list[id] = id_list\n\n coherence_scores = []\n for t, top_words in enumerate(topics):\n # Calculate each coherence score C(t, top_words)\n coherence = 0.0\n # Sum of top words m=2..M\n for m in top_words[1:]:\n # m_docs is v_m^(t)\n m_docs = doc_word_list[m]\n m_index = np.where(top_words == m)[0][0]\n\n # Sum of top words l=1..m\n # i.e., all words ranked higher than the current word m\n for l in top_words[:m_index]:\n # l_docs is v_l^(t)\n l_docs = doc_word_list[l]\n\n # make sure this word appears in some documents.\n if len(l_docs) > 0:\n # co_doc_frequency is D(v_m^(t), v_l^(t))\n co_doc_frequency = len(m_docs.intersection(l_docs))\n\n # add to the coherence sum for these two words m, l\n coherence += np.log((co_doc_frequency + 1.0) / len(l_docs))\n\n coherence_scores.append((str_topics[t], coherence))\n\n top_topics = sorted(coherence_scores, key=lambda t: t[1], reverse=True)\n return top_topics\n\n def get_document_topics(self, bow, minimum_probability=None, minimum_phi_value=None, per_word_topics=False):\n \"\"\"\n Return topic distribution for the given document `bow`, as a list of\n (topic_id, topic_probability) 2-tuples.\n\n Ignore topics with very low probability (below `minimum_probability`).\n\n If per_word_topics is True, it also returns a list of topics, sorted in descending order of most likely topics for that word.\n It also returns a list of word_ids and each words corresponding topics' phi_values, multiplied by feature length (i.e, word count)\n\n \"\"\"\n if minimum_probability is None:\n minimum_probability = self.minimum_probability\n minimum_probability = max(minimum_probability, 1e-8) # never allow zero values in sparse 
output\n\n if minimum_phi_value is None:\n minimum_phi_value = self.minimum_probability\n minimum_phi_value = max(minimum_phi_value, 1e-8) # never allow zero values in sparse output\n\n # if the input vector is a corpus, return a transformed corpus\n is_corpus, corpus = utils.is_corpus(bow)\n if is_corpus:\n kwargs = dict(\n per_word_topics=per_word_topics,\n minimum_probability=minimum_probability,\n minimum_phi_value=minimum_phi_value\n )\n return self._apply(corpus, **kwargs)\n\n gamma, phis = self.inference([bow], collect_sstats=per_word_topics)\n topic_dist = gamma[0] / sum(gamma[0]) # normalize distribution\n\n document_topics = [\n (topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist)\n if topicvalue >= minimum_probability\n ]\n\n if not per_word_topics:\n return document_topics\n else:\n word_topic = [] # contains word and corresponding topic\n word_phi = [] # contains word and phi values\n for word_type, weight in bow:\n phi_values = [] # contains (phi_value, topic) pairing to later be sorted\n phi_topic = [] # contains topic and corresponding phi value to be returned 'raw' to user\n for topic_id in range(0, self.num_topics):\n if phis[topic_id][word_type] >= minimum_phi_value:\n # appends phi values for each topic for that word\n # these phi values are scaled by feature length\n phi_values.append((phis[topic_id][word_type], topic_id))\n phi_topic.append((topic_id, phis[topic_id][word_type]))\n\n # list with ({word_id => [(topic_0, phi_value), (topic_1, phi_value) ...]).\n word_phi.append((word_type, phi_topic))\n # sorts the topics based on most likely topic\n # returns a list like ({word_id => [topic_id_most_probable, topic_id_second_most_probable, ...]).\n sorted_phi_values = sorted(phi_values, reverse=True)\n topics_sorted = [x[1] for x in sorted_phi_values]\n word_topic.append((word_type, topics_sorted))\n return (document_topics, word_topic, word_phi) # returns 2-tuple\n\n def get_term_topics(self, word_id, 
minimum_probability=None):\n \"\"\"\n Returns most likely topics for a particular word in vocab.\n\n \"\"\"\n if minimum_probability is None:\n minimum_probability = self.minimum_probability\n minimum_probability = max(minimum_probability, 1e-8) # never allow zero values in sparse output\n\n # if user enters word instead of id in vocab, change to get id\n if isinstance(word_id, str):\n word_id = self.id2word.doc2bow([word_id])[0][0]\n\n values = []\n for topic_id in range(0, self.num_topics):\n if self.expElogbeta[topic_id][word_id] >= minimum_probability:\n values.append((topic_id, self.expElogbeta[topic_id][word_id]))\n\n return values\n\n def diff(self, other, distance=\"kullback_leibler\", num_words=100, n_ann_terms=10, normed=True):\n \"\"\"\n Calculate difference topic2topic between two Lda models\n `other` instances of `LdaMulticore` or `LdaModel`\n `distance` is function that will be applied to calculate difference between any topic pair.\n Available values: `kullback_leibler`, `hellinger` and `jaccard`\n `num_words` is quantity of most relevant words that used if distance == `jaccard` (also used for annotation)\n `n_ann_terms` is max quantity of words in intersection/symmetric difference between topics (used for annotation)\n Returns a matrix Z with shape (m1.num_topics, m2.num_topics), where Z[i][j] - difference between topic_i and topic_j\n and matrix annotation with shape (m1.num_topics, m2.num_topics, 2, None),\n where:\n\n annotation[i][j] = [[`int_1`, `int_2`, ...], [`diff_1`, `diff_2`, ...]] and\n `int_k` is word from intersection of `topic_i` and `topic_j` and\n `diff_l` is word from symmetric difference of `topic_i` and `topic_j`\n `normed` is a flag. 
If `true`, matrix Z will be normalized\n\n Example:\n\n >>> m1, m2 = LdaMulticore.load(path_1), LdaMulticore.load(path_2)\n >>> mdiff, annotation = m1.diff(m2)\n >>> print(mdiff) # get matrix with difference for each topic pair from `m1` and `m2`\n >>> print(annotation) # get array with positive/negative words for each topic pair from `m1` and `m2`\n\n \"\"\"\n\n distances = {\n \"kullback_leibler\": kullback_leibler,\n \"hellinger\": hellinger,\n \"jaccard\": jaccard_distance,\n }\n\n if distance not in distances:\n valid_keys = \", \".join(\"`{}`\".format(x) for x in distances.keys())\n raise ValueError(\"Incorrect distance, valid only {}\".format(valid_keys))\n\n if not isinstance(other, self.__class__):\n raise ValueError(\"The parameter `other` must be of type `{}`\".format(self.__name__))\n\n distance_func = distances[distance]\n d1, d2 = self.state.get_lambda(), other.state.get_lambda()\n t1_size, t2_size = d1.shape[0], d2.shape[0]\n\n fst_topics = [{w for (w, _) in self.show_topic(topic, topn=num_words)} for topic in xrange(t1_size)]\n snd_topics = [{w for (w, _) in other.show_topic(topic, topn=num_words)} for topic in xrange(t2_size)]\n\n if distance == \"jaccard\":\n d1, d2 = fst_topics, snd_topics\n\n z = np.zeros((t1_size, t2_size))\n for topic1 in range(t1_size):\n for topic2 in range(t2_size):\n z[topic1][topic2] = distance_func(d1[topic1], d2[topic2])\n\n if normed:\n if np.abs(np.max(z)) > 1e-8:\n z /= np.max(z)\n\n annotation = [[None] * t1_size for _ in range(t2_size)]\n\n for topic1 in range(t1_size):\n for topic2 in range(t2_size):\n pos_tokens = fst_topics[topic1] & snd_topics[topic2]\n neg_tokens = fst_topics[topic1].symmetric_difference(snd_topics[topic2])\n\n pos_tokens = sample(pos_tokens, min(len(pos_tokens), n_ann_terms))\n neg_tokens = sample(neg_tokens, min(len(neg_tokens), n_ann_terms))\n\n annotation[topic1][topic2] = [pos_tokens, neg_tokens]\n\n return z, annotation\n\n def __getitem__(self, bow, eps=None):\n \"\"\"\n Return topic 
distribution for the given document `bow`, as a list of\n (topic_id, topic_probability) 2-tuples.\n\n Ignore topics with very low probability (below `eps`).\n\n \"\"\"\n return self.get_document_topics(bow, eps, self.minimum_phi_value, self.per_word_topics)\n\n def save(self, fname, ignore=['state', 'dispatcher'], separately=None, *args, **kwargs):\n \"\"\"\n Save the model to file.\n\n Large internal arrays may be stored into separate files, with `fname` as prefix.\n\n `separately` can be used to define which arrays should be stored in separate files.\n\n `ignore` parameter can be used to define which variables should be ignored, i.e. left\n out from the pickled lda model. By default the internal `state` is ignored as it uses\n its own serialisation not the one provided by `LdaModel`. The `state` and `dispatcher`\n will be added to any ignore parameter defined.\n\n\n Note: do not save as a compressed file if you intend to load the file back with `mmap`.\n\n Note: If you intend to use models across Python 2/3 versions there are a few things to\n keep in mind:\n\n 1. The pickled Python dictionaries will not work across Python versions\n 2. The `save` method does not automatically save all np arrays using np, only\n those ones that exceed `sep_limit` set in `gensim.utils.SaveLoad.save`. 
The main\n concern here is the `alpha` array if for instance using `alpha='auto'`.\n\n Please refer to the wiki recipes section (https://github.com/piskvorky/gensim/wiki/Recipes-&-FAQ#q9-how-do-i-load-a-model-in-python-3-that-was-trained-and-saved-using-python-2)\n for an example on how to work around these issues.\n \"\"\"\n if self.state is not None:\n self.state.save(utils.smart_extension(fname, '.state'), *args, **kwargs)\n # Save the dictionary separately if not in 'ignore'.\n if 'id2word' not in ignore:\n utils.pickle(self.id2word, utils.smart_extension(fname, '.id2word'))\n\n # make sure 'state', 'id2word' and 'dispatcher' are ignored from the pickled object, even if\n # someone sets the ignore list themselves\n if ignore is not None and ignore:\n if isinstance(ignore, six.string_types):\n ignore = [ignore]\n ignore = [e for e in ignore if e] # make sure None and '' are not in the list\n ignore = list(set(['state', 'dispatcher', 'id2word']) | set(ignore))\n else:\n ignore = ['state', 'dispatcher', 'id2word']\n\n # make sure 'expElogbeta' and 'sstats' are ignored from the pickled object, even if\n # someone sets the separately list themselves.\n separately_explicit = ['expElogbeta', 'sstats']\n # Also add 'alpha' and 'eta' to separately list if they are set 'auto' or some\n # array manually.\n if (isinstance(self.alpha, six.string_types) and self.alpha == 'auto') or (isinstance(self.alpha, np.ndarray) and len(self.alpha.shape) != 1):\n separately_explicit.append('alpha')\n if (isinstance(self.eta, six.string_types) and self.eta == 'auto') or (isinstance(self.eta, np.ndarray) and len(self.eta.shape) != 1):\n separately_explicit.append('eta')\n # Merge separately_explicit with separately.\n if separately:\n if isinstance(separately, six.string_types):\n separately = [separately]\n separately = [e for e in separately if e] # make sure None and '' are not in the list\n separately = list(set(separately_explicit) | set(separately))\n else:\n separately = 
separately_explicit\n super(LdaModel, self).save(fname, ignore=ignore, separately=separately, *args, **kwargs)\n\n @classmethod\n def load(cls, fname, *args, **kwargs):\n \"\"\"\n Load a previously saved object from file (also see `save`).\n\n Large arrays can be memmap'ed back as read-only (shared memory) by setting `mmap='r'`:\n\n >>> LdaModel.load(fname, mmap='r')\n\n \"\"\"\n kwargs['mmap'] = kwargs.get('mmap', None)\n result = super(LdaModel, cls).load(fname, *args, **kwargs)\n\n # check if `random_state` attribute has been set after main pickle load\n # if set -> the model to be loaded was saved using a >= 0.13.2 version of Gensim\n # if not set -> the model to be loaded was saved using a < 0.13.2 version of Gensim, so set `random_state` as the default value\n if not hasattr(result, 'random_state'):\n result.random_state = utils.get_random_state(None) # using default value `get_random_state(None)`\n logging.warning(\"random_state not set so using default value\")\n\n state_fname = utils.smart_extension(fname, '.state')\n try:\n result.state = super(LdaModel, cls).load(state_fname, *args, **kwargs)\n except Exception as e:\n logging.warning(\"failed to load state from %s: %s\", state_fname, e)\n\n id2word_fname = utils.smart_extension(fname, '.id2word')\n # check if `id2word_fname` file is present on disk\n # if present -> the model to be loaded was saved using a >= 0.13.2 version of Gensim, so set `result.id2word` using the `id2word_fname` file\n # if not present -> the model to be loaded was saved using a < 0.13.2 version of Gensim, so `result.id2word` already set after the main pickle load\n if (os.path.isfile(id2word_fname)):\n try:\n result.id2word = utils.unpickle(id2word_fname)\n except Exception as e:\n logging.warning(\"failed to load id2word dictionary from %s: %s\", id2word_fname, e)\n return result\n# endclass LdaModel\n",
"__author__ = 'Michael Isik'\n\n\nfrom pybrain3.structure.networks.network import Network\nfrom pybrain3.structure.modules.lstm import LSTMLayer\nfrom pybrain3.structure.modules.linearlayer import LinearLayer\nfrom pybrain3.structure.connections.full import FullConnection\nfrom pybrain3.structure.modules.module import Module\nfrom pybrain3.structure.modules.biasunit import BiasUnit\n\nfrom numpy import zeros, array, append\n\n\nclass EvolinoNetwork(Module):\n def __init__(self, indim, outdim, hiddim=6):\n Module.__init__(self, indim, outdim)\n\n self._network = Network()\n self._in_layer = LinearLayer(indim + outdim)\n self._hid_layer = LSTMLayer(hiddim)\n self._out_layer = LinearLayer(outdim)\n self._bias = BiasUnit()\n\n self._network.addInputModule(self._in_layer)\n self._network.addModule(self._hid_layer)\n self._network.addModule(self._bias)\n self._network.addOutputModule(self._out_layer)\n\n\n self._hid_to_out_connection = FullConnection(self._hid_layer , self._out_layer)\n self._in_to_hid_connection = FullConnection(self._in_layer , self._hid_layer)\n self._network.addConnection(self._hid_to_out_connection)\n self._network.addConnection(self._in_to_hid_connection)\n self._network.addConnection(FullConnection(self._bias, self._hid_layer))\n\n self._network.sortModules()\n\n self.time = self._network.time\n self.backprojectionFactor = 0.01\n\n def reset(self):\n self._network.reset()\n\n\n def _washout(self, input, target, first_idx=None, last_idx=None):\n assert self.indim == len(input[0])\n assert self.outdim == len(target[0])\n assert len(input) == len(target)\n\n if first_idx is None: first_idx = 0\n if last_idx is None: last_idx = len(target) - 1\n raw_outputs = []\n for i in range(first_idx, last_idx + 1):\n backprojection = self._getLastOutput()\n backprojection *= self.backprojectionFactor\n full_inp = self._createFullInput(input[i], backprojection)\n self._activateNetwork(full_inp)\n raw_out = self._getRawOutput()\n# print \"RAWOUT: \", full_inp, \" 
--> \", raw_out, self._getLastOutput()\n raw_outputs.append(array(raw_out))\n self._setLastOutput(target[i])\n\n return array(raw_outputs)\n\n\n\n def _activateNetwork(self, input):\n assert len(input) == self._network.indim\n output = self._network.activate(input)\n self.time = self._network.time\n# print \"INNNNNNN=\", input, \" OUTPP=\", output\n return output\n\n def activate(self, input):\n assert len(input) == self.indim\n\n backprojection = self._getLastOutput()\n backprojection *= self.backprojectionFactor\n full_inp = self._createFullInput(input, backprojection)\n out = self._activateNetwork(full_inp)\n# print \"AAAAAACT: \", full_inp, \"-->\", out\n\n# self._setLastOutput(last_out*5)\n\n return out\n\n\n def calculateOutput(self, dataset, washout_calculation_ratio=(1, 2)):\n washout_calculation_ratio = array(washout_calculation_ratio, float)\n ratio = washout_calculation_ratio / sum(washout_calculation_ratio)\n\n # iterate through all sequences\n collected_input = None\n collected_output = None\n collected_target = None\n for i in range(dataset.getNumSequences()):\n\n seq = dataset.getSequence(i)\n input = seq[0]\n target = seq[1]\n\n washout_steps = int(len(input) * ratio[0])\n\n washout_input = input [ : washout_steps ]\n washout_target = target [ : washout_steps ]\n calculation_target = target [ washout_steps : ]\n\n\n # reset\n self.reset()\n\n # washout\n self._washout(washout_input, washout_target)\n\n\n # collect calculation data\n outputs = []\n inputs = []\n# for i in xrange(washout_steps, len(input)):\n for inp in input[washout_steps:]:\n out = self.activate(inp)\n# print out\n# print inp\n inputs.append(inp)\n outputs.append(out)\n\n # collect output and targets\n if collected_input is not None:\n collected_input = append(collected_input, inputs, axis=0)\n else:\n collected_input = array(inputs)\n# print collected_input; exit()\n\n if collected_output is not None:\n collected_output = append(collected_output, outputs, axis=0)\n else:\n 
collected_output = array(outputs)\n\n if collected_target is not None:\n collected_target = append(collected_target, calculation_target, axis=0)\n else:\n collected_target = calculation_target\n\n return collected_input, collected_output, collected_target\n\n def _createFullInput(self, input, output):\n if self.indim > 0:\n return append(input, output)\n else:\n return array(output)\n\n\n\n def _getLastOutput(self):\n if self.time == 0:\n return zeros(self.outdim)\n else:\n return self._out_layer.outputbuffer[self.time - 1]\n\n def _setLastOutput(self, output):\n self._out_layer.outputbuffer[self.time - 1][:] = output\n\n\n # ======================================================== Genome related ===\n\n\n def _validateGenomeLayer(self, layer):\n \"\"\" Validates the type and state of a layer\n \"\"\"\n assert isinstance(layer, LSTMLayer)\n assert not layer.peepholes\n\n\n def getGenome(self):\n \"\"\" Returns the Genome of the network.\n See class description for more details.\n \"\"\"\n return self._getGenomeOfLayer(self._hid_layer)\n\n\n def setGenome(self, weights):\n \"\"\" Sets the Genome of the network.\n See class description for more details.\n \"\"\"\n weights = deepcopy(weights)\n self._setGenomeOfLayer(self._hid_layer, weights)\n\n\n\n def _getGenomeOfLayer(self, layer):\n \"\"\" Returns the genome of a single layer.\n \"\"\"\n self._validateGenomeLayer(layer)\n\n dim = layer.outdim\n layer_weights = []\n\n connections = self._getInputConnectionsOfLayer(layer)\n\n for cell_idx in range(dim):\n # todo: the evolino paper uses a different order of weights for the genotype of a lstm cell\n cell_weights = []\n for c in connections:\n cell_weights += [\n c.params[ cell_idx + 0 * dim ],\n c.params[ cell_idx + 1 * dim ],\n c.params[ cell_idx + 2 * dim ],\n c.params[ cell_idx + 3 * dim ] ]\n\n layer_weights.append(cell_weights)\n return layer_weights\n\n\n\n\n\n def _setGenomeOfLayer(self, layer, weights):\n \"\"\" Sets the genome of a single layer.\n \"\"\"\n 
self._validateGenomeLayer(layer)\n\n dim = layer.outdim\n\n connections = self._getInputConnectionsOfLayer(layer)\n\n for cell_idx in range(dim):\n cell_weights = weights.pop(0)\n for c in connections:\n params = c.params\n params[cell_idx + 0 * dim] = cell_weights.pop(0)\n params[cell_idx + 1 * dim] = cell_weights.pop(0)\n params[cell_idx + 2 * dim] = cell_weights.pop(0)\n params[cell_idx + 3 * dim] = cell_weights.pop(0)\n assert not len(cell_weights)\n\n\n\n\n\n # ============================================ Linear Regression related ===\n\n def setOutputWeightMatrix(self, W):\n \"\"\" Sets the weight matrix of the output layer's input connection.\n \"\"\"\n c = self._hid_to_out_connection\n c.params[:] = W.flatten()\n\n def getOutputWeightMatrix(self):\n \"\"\" Sets the weight matrix of the output layer's input connection.\n \"\"\"\n c = self._hid_to_out_connection\n p = c.getParameters()\n return reshape(p, (c.outdim, c.indim))\n\n\n\n\n def _getRawOutput(self):\n \"\"\" Returns the current output of the last hidden layer.\n This is needed for linear regression, which calculates\n the weight matrix W of the full connection between this layer\n and the output layer.\n \"\"\"\n return copy(self._hid_layer.outputbuffer[self.time - 1])\n\n\n\n\n\n\n # ====================================================== Topology Helper ===\n\n\n\n def _getInputConnectionsOfLayer(self, layer):\n \"\"\" Returns a list of all input connections for the layer. 
\"\"\"\n connections = []\n for c in sum(list(self._network.connections.values()), []):\n if c.outmod is layer:\n if not isinstance(c, FullConnection):\n raise NotImplementedError(\"At the time there is only support for FullConnection\")\n connections.append(c)\n return connections\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nfrom numpy import reshape\nfrom copy import copy, deepcopy\n\n\nclass NetworkWrapper(object):\n \"\"\" Network wrapper class for Evolino Networks\n\n This class implements methods for extracting and setting the genome of\n the supplied network to allow its evolving.\n The genome of the network consists of the input weights of each hidden\n lstm neuron. The structure of the genome will be a list of lists,\n where the inner lists bundle all input weights of on neuron:\n [ [ neuron1's inweights ] , [ neuron2's inweights ] , ... ]\n The inner lists will be used as chromosomes inside the evolino framework.\n\n Also there are methods that help with the linear regression part.\n They can extract end set the weight matrix W for the last full-connection.\n\n At the moment the network must meet following constraints:\n - All hidden layers that have input connections must be of type LSTMLayer\n - The LSTMLayer do not use peepholes\n - There must be exactly one output-layer\n - There must be exactly one input-layer\n - There must be only one layer, that is connected to the output layer\n - The input layer must be connected to only one hidden layer\n - All used connections must be of type FullConnection\n\n When the network is supplied it will be augmented with a\n recurrent full connection from the output layer to the first hidden layer.\n So do not do this yourself.\n\n \"\"\"\n def __init__(self, network):\n \"\"\" :key network: The network to be wrapped\n \"\"\"\n self.network = network\n self._output_connection = None\n self._last_hidden_layer = None\n self._first_hidden_layer = None\n self._establishRecurrence()\n\n def getNetwork(self):\n \"\"\" Returns the 
Network \"\"\"\n return self.network\n\n def _establishRecurrence(self):\n \"\"\" Adds a recurrent full connection from the output layer to the first\n hidden layer.\n \"\"\"\n network = self.network\n outlayer = self.getOutputLayer()\n hid1layer = self.getFirstHiddenLayer()\n network.addRecurrentConnection(FullConnection(outlayer, hid1layer))\n\n\n # ======================================================== Genome related ===\n\n\n def _validateGenomeLayer(self, layer):\n \"\"\" Validates the type and state of a layer\n \"\"\"\n assert isinstance(layer, LSTMLayer)\n assert not layer.peepholes\n\n\n def getGenome(self):\n \"\"\" Returns the Genome of the network.\n See class description for more details.\n \"\"\"\n weights = []\n for layer in self.getHiddenLayers():\n if isinstance(layer, LSTMLayer):\n# if layer is not self._recurrence_layer:\n weights += self._getGenomeOfLayer(layer)\n return weights\n\n def setGenome(self, weights):\n \"\"\" Sets the Genome of the network.\n See class description for more details.\n \"\"\"\n weights = deepcopy(weights)\n for layer in self.getHiddenLayers():\n if isinstance(layer, LSTMLayer):\n# if layer is not self._recurrence_layer:\n self._setGenomeOfLayer(layer, weights)\n\n\n\n def _getGenomeOfLayer(self, layer):\n \"\"\" Returns the genome of a single layer.\n \"\"\"\n self._validateGenomeLayer(layer)\n\n dim = layer.outdim\n layer_weights = []\n\n connections = self._getInputConnectionsOfLayer(layer)\n\n for cell_idx in range(dim):\n # todo: the evolino paper uses a different order of weights for the genotype of a lstm cell\n cell_weights = []\n for c in connections:\n cell_weights += [\n c.getParameters()[ cell_idx + 0 * dim ],\n c.getParameters()[ cell_idx + 1 * dim ],\n c.getParameters()[ cell_idx + 2 * dim ],\n c.getParameters()[ cell_idx + 3 * dim ] ]\n\n layer_weights.append(cell_weights)\n return layer_weights\n\n\n\n\n\n def _setGenomeOfLayer(self, layer, weights):\n \"\"\" Sets the genome of a single layer.\n 
\"\"\"\n self._validateGenomeLayer(layer)\n\n dim = layer.outdim\n\n connections = self._getInputConnectionsOfLayer(layer)\n\n for cell_idx in range(dim):\n cell_weights = weights.pop(0)\n for c in connections:\n params = c.getParameters()\n params[cell_idx + 0 * dim] = cell_weights.pop(0)\n params[cell_idx + 1 * dim] = cell_weights.pop(0)\n params[cell_idx + 2 * dim] = cell_weights.pop(0)\n params[cell_idx + 3 * dim] = cell_weights.pop(0)\n assert not len(cell_weights)\n\n\n\n # ============================================ Linear Regression related ===\n\n def setOutputWeightMatrix(self, W):\n \"\"\" Sets the weight matrix of the output layer's input connection.\n \"\"\"\n c = self.getOutputConnection()\n p = c.getParameters()\n p[:] = W.flatten()\n\n def getOutputWeightMatrix(self):\n \"\"\" Sets the weight matrix of the output layer's input connection.\n \"\"\"\n c = self.getOutputConnection()\n p = c.getParameters()\n return reshape(p, (c.outdim, c.indim))\n\n\n def injectBackproject(self, injection):\n \"\"\" Injects a vector into the recurrent connection.\n This will be used in the evolino trainingsphase, where the target\n values need to be backprojected instead of the real output of the net.\n \n :key injection: vector of length self.network.outdim\n \"\"\"\n outlayer = self.getOutputLayer()\n outlayer.outputbuffer[self.network.time - 1][:] = injection\n\n\n def _getRawOutput(self):\n \"\"\" Returns the current output of the last hidden layer.\n This is needed for linear regression, which calculates\n the weight matrix W of the full connection between this layer\n and the output layer.\n \"\"\"\n return copy(self.getLastHiddenLayer().outputbuffer[self.network.time - 1])\n\n\n # ====================================================== Topology Helper ===\n\n\n def getOutputLayer(self):\n \"\"\" Returns the output layer \"\"\"\n assert len(self.network.outmodules) == 1\n return self.network.outmodules[0]\n\n\n\n def getOutputConnection(self):\n \"\"\" Returns 
the input connection of the output layer. \"\"\"\n if self._output_connection is None:\n outlayer = self.getOutputLayer()\n lastlayer = self.getLastHiddenLayer()\n for c in self.getConnections():\n if c.outmod is outlayer:\n assert c.inmod is lastlayer\n self._output_connection = c\n\n return self._output_connection\n\n\n\n def getLastHiddenLayer(self):\n \"\"\" Returns the last hidden layer. \"\"\"\n if self._last_hidden_layer is None:\n outlayer = self.getOutputLayer()\n layers = []\n for c in self.getConnections():\n if c.outmod is outlayer:\n# print c.inmod\n layers.append(c.inmod)\n\n assert len(layers) == 1\n self._last_hidden_layer = layers[0]\n return self._last_hidden_layer\n\n\n\n def getFirstHiddenLayer(self):\n \"\"\" Returns the first hidden layer. \"\"\"\n if self._first_hidden_layer is None:\n inlayer = self.getInputLayer()\n layers = []\n for c in self.getConnections():\n if c.inmod is inlayer:\n layers.append(c.outmod)\n\n assert len(layers) == 1\n self._first_hidden_layer = layers[0]\n return self._first_hidden_layer\n\n\n\n def getConnections(self):\n \"\"\" Returns a list of all connections. \"\"\"\n return sum(list(self.network.connections.values()), [])\n\n def getInputLayer(self):\n \"\"\" Returns the input layer. \"\"\"\n assert len(self.network.inmodules) == 1\n return self.network.inmodules[0]\n\n def _getInputConnectionsOfLayer(self, layer):\n \"\"\" Returns a list of all input connections for the layer. \"\"\"\n connections = []\n for c in sum(list(self.network.connections.values()), []):\n if c.outmod is layer:\n if not isinstance(c, FullConnection):\n raise NotImplementedError(\"At the time there is only support for FullConnection\")\n connections.append(c)\n return connections\n\n\n\n def getHiddenLayers(self):\n \"\"\" Returns a list of all hidden layers. 
\"\"\"\n layers = []\n network = self.network\n for m in network.modules:\n if m not in network.inmodules and m not in network.outmodules:\n layers.append(m)\n return layers\n\n\n\n\n\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2010 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nAutomated tests for checking transformation algorithms (the models package).\n\"\"\"\n\nimport logging\nimport os\nimport tempfile\nimport unittest\n\nimport numpy as np\n\nfrom gensim.corpora.dictionary import Dictionary\nfrom gensim.matutils import argsort\nfrom gensim.models.coherencemodel import CoherenceModel, boolean_document_based\nfrom gensim.models.ldamodel import LdaModel\nfrom gensim.models.wrappers import LdaMallet\nfrom gensim.models.wrappers import LdaVowpalWabbit\n\n\ndef testfile():\n # temporary data will be stored to this file\n return os.path.join(tempfile.gettempdir(), 'gensim_models.tst')\n\n\nclass TestCoherenceModel(unittest.TestCase):\n\n # set up vars used in testing (\"Deerwester\" from the web tutorial)\n texts = [\n ['human', 'interface', 'computer'],\n ['survey', 'user', 'computer', 'system', 'response', 'time'],\n ['eps', 'user', 'interface', 'system'],\n ['system', 'human', 'system', 'eps'],\n ['user', 'response', 'time'],\n ['trees'],\n ['graph', 'trees'],\n ['graph', 'minors', 'trees'],\n ['graph', 'minors', 'survey']\n ]\n dictionary = Dictionary(texts)\n\n @classmethod\n def setUpClass(cls):\n cls.corpus = [cls.dictionary.doc2bow(text) for text in cls.texts]\n\n def setUp(self):\n # Suppose given below are the topics which two different LdaModels come up with.\n # `topics1` is clearly better as it has a clear distinction between system-human\n # interaction and graphs. 
Hence both the coherence measures for `topics1` should be\n # greater.\n self.topics1 = [['human', 'computer', 'system', 'interface'],\n ['graph', 'minors', 'trees', 'eps']]\n self.topics2 = [['user', 'graph', 'minors', 'system'],\n ['time', 'graph', 'survey', 'minors']]\n self.ldamodel = LdaModel(\n corpus=self.corpus, id2word=self.dictionary, num_topics=2,\n passes=0, iterations=0)\n\n mallet_home = os.environ.get('MALLET_HOME', None)\n self.mallet_path = os.path.join(mallet_home, 'bin', 'mallet') if mallet_home else None\n if self.mallet_path:\n self.malletmodel = LdaMallet(\n mallet_path=self.mallet_path, corpus=self.corpus,\n id2word=self.dictionary, num_topics=2, iterations=0)\n\n vw_path = os.environ.get('VOWPAL_WABBIT_PATH', None)\n if not vw_path:\n logging.info(\n \"Environment variable 'VOWPAL_WABBIT_PATH' not specified,\"\n \" skipping sanity checks for LDA Model\")\n self.vw_path = None\n else:\n self.vw_path = vw_path\n self.vwmodel = LdaVowpalWabbit(\n self.vw_path, corpus=self.corpus, id2word=self.dictionary,\n num_topics=2, passes=0)\n\n def check_coherence_measure(self, coherence):\n \"\"\"Check provided topic coherence algorithm on given topics\"\"\"\n if coherence in boolean_document_based:\n kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence=coherence)\n else:\n kwargs = dict(texts=self.texts, dictionary=self.dictionary, coherence=coherence)\n\n cm1 = CoherenceModel(topics=self.topics1, **kwargs)\n cm2 = CoherenceModel(topics=self.topics2, **kwargs)\n self.assertGreater(cm1.get_coherence(), cm2.get_coherence())\n\n def testUMass(self):\n \"\"\"Test U_Mass topic coherence algorithm on given topics\"\"\"\n self.check_coherence_measure('u_mass')\n\n def testCv(self):\n \"\"\"Test C_v topic coherence algorithm on given topics\"\"\"\n self.check_coherence_measure('c_v')\n\n def testCuci(self):\n \"\"\"Test C_uci topic coherence algorithm on given topics\"\"\"\n self.check_coherence_measure('c_uci')\n\n def testCnpmi(self):\n 
\"\"\"Test C_npmi topic coherence algorithm on given topics\"\"\"\n self.check_coherence_measure('c_npmi')\n\n def testUMassLdaModel(self):\n \"\"\"Perform sanity check to see if u_mass coherence works with LDA Model\"\"\"\n # Note that this is just a sanity check because LDA does not guarantee a better coherence\n # value on the topics if iterations are increased. This can be seen here:\n # https://gist.github.com/dsquareindia/60fd9ab65b673711c3fa00509287ddde\n CoherenceModel(model=self.ldamodel, corpus=self.corpus, coherence='u_mass')\n\n def testCvLdaModel(self):\n \"\"\"Perform sanity check to see if c_v coherence works with LDA Model\"\"\"\n CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_v')\n\n def testCuciLdaModel(self):\n \"\"\"Perform sanity check to see if c_uci coherence works with LDA Model\"\"\"\n CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_uci')\n\n def testCnpmiLdaModel(self):\n \"\"\"Perform sanity check to see if c_npmi coherence works with LDA Model\"\"\"\n CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_npmi')\n\n def testUMassMalletModel(self):\n \"\"\"Perform sanity check to see if u_mass coherence works with LDA Mallet gensim wrapper\"\"\"\n if not self.mallet_path:\n return\n CoherenceModel(model=self.malletmodel, corpus=self.corpus, coherence='u_mass')\n\n def testCvMalletModel(self):\n \"\"\"Perform sanity check to see if c_v coherence works with LDA Mallet gensim wrapper\"\"\"\n if not self.mallet_path:\n return\n CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_v')\n\n def testCuciMalletModel(self):\n \"\"\"Perform sanity check to see if c_uci coherence works with LDA Mallet gensim wrapper\"\"\"\n if not self.mallet_path:\n return\n CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_uci')\n\n def testCnpmiMalletModel(self):\n \"\"\"Perform sanity check to see if c_npmi coherence works with LDA Mallet gensim wrapper\"\"\"\n if not 
self.mallet_path:\n return\n CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_npmi')\n\n def testUMassVWModel(self):\n \"\"\"Perform sanity check to see if u_mass coherence works with LDA VW gensim wrapper\"\"\"\n if not self.vw_path:\n return\n CoherenceModel(model=self.vwmodel, corpus=self.corpus, coherence='u_mass')\n\n def testCvVWModel(self):\n \"\"\"Perform sanity check to see if c_v coherence works with LDA VW gensim wrapper\"\"\"\n if not self.vw_path:\n return\n CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_v')\n\n def testCuciVWModel(self):\n \"\"\"Perform sanity check to see if c_uci coherence works with LDA VW gensim wrapper\"\"\"\n if not self.vw_path:\n return\n CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_uci')\n\n def testCnpmiVWModel(self):\n \"\"\"Perform sanity check to see if c_npmi coherence works with LDA VW gensim wrapper\"\"\"\n if not self.vw_path:\n return\n CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_npmi')\n\n def testErrors(self):\n \"\"\"Test if errors are raised on bad input\"\"\"\n # not providing dictionary\n self.assertRaises(\n ValueError, CoherenceModel, topics=self.topics1, corpus=self.corpus,\n coherence='u_mass')\n # not providing texts for c_v and instead providing corpus\n self.assertRaises(\n ValueError, CoherenceModel, topics=self.topics1, corpus=self.corpus,\n dictionary=self.dictionary, coherence='c_v')\n # not providing corpus or texts for u_mass\n self.assertRaises(\n ValueError, CoherenceModel, topics=self.topics1, dictionary=self.dictionary,\n coherence='u_mass')\n\n def testPersistence(self):\n fname = testfile()\n model = CoherenceModel(\n topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')\n model.save(fname)\n model2 = CoherenceModel.load(fname)\n self.assertTrue(model.get_coherence() == model2.get_coherence())\n\n def testPersistenceCompressed(self):\n fname = testfile() + '.gz'\n model = 
CoherenceModel(\n topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')\n model.save(fname)\n model2 = CoherenceModel.load(fname)\n self.assertTrue(model.get_coherence() == model2.get_coherence())\n\n def testPersistenceAfterProbabilityEstimationUsingCorpus(self):\n fname = testfile()\n model = CoherenceModel(\n topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')\n model.estimate_probabilities()\n model.save(fname)\n model2 = CoherenceModel.load(fname)\n self.assertIsNotNone(model2._accumulator)\n self.assertTrue(model.get_coherence() == model2.get_coherence())\n\n def testPersistenceAfterProbabilityEstimationUsingTexts(self):\n fname = testfile()\n model = CoherenceModel(\n topics=self.topics1, texts=self.texts, dictionary=self.dictionary, coherence='c_v')\n model.estimate_probabilities()\n model.save(fname)\n model2 = CoherenceModel.load(fname)\n self.assertIsNotNone(model2._accumulator)\n self.assertTrue(model.get_coherence() == model2.get_coherence())\n\n def testAccumulatorCachingSameSizeTopics(self):\n kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')\n cm1 = CoherenceModel(topics=self.topics1, **kwargs)\n cm1.estimate_probabilities()\n accumulator = cm1._accumulator\n self.assertIsNotNone(accumulator)\n cm1.topics = self.topics1\n self.assertEqual(accumulator, cm1._accumulator)\n cm1.topics = self.topics2\n self.assertEqual(None, cm1._accumulator)\n\n def testAccumulatorCachingTopicSubsets(self):\n kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')\n cm1 = CoherenceModel(topics=self.topics1, **kwargs)\n cm1.estimate_probabilities()\n accumulator = cm1._accumulator\n self.assertIsNotNone(accumulator)\n cm1.topics = [t[:2] for t in self.topics1]\n self.assertEqual(accumulator, cm1._accumulator)\n cm1.topics = self.topics1\n self.assertEqual(accumulator, cm1._accumulator)\n\n def testAccumulatorCachingWithModelSetting(self):\n 
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')\n cm1 = CoherenceModel(topics=self.topics1, **kwargs)\n cm1.estimate_probabilities()\n self.assertIsNotNone(cm1._accumulator)\n cm1.model = self.ldamodel\n topics = []\n for topic in self.ldamodel.state.get_lambda():\n bestn = argsort(topic, topn=cm1.topn, reverse=True)\n topics.append(bestn)\n self.assertTrue(np.array_equal(topics, cm1.topics))\n self.assertIsNone(cm1._accumulator)\n\n\nif __name__ == '__main__':\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)\n unittest.main()\n",
"__author__ = 'Frank Sehnke, [email protected], Tom Schaul'\n\nfrom scipy import ones, random\n\nfrom pybrain3.auxiliary import GradientDescent\nfrom .fd import FiniteDifferences\n\n\nclass PGPE(FiniteDifferences):\n \"\"\" Policy Gradients with Parameter Exploration (ICANN 2008).\"\"\"\n \n batchSize = 2 \n #:exploration type\n exploration = \"local\"\n #: specific settings for sigma updates\n learningRate = 0.2 \n #: specific settings for sigma updates\n sigmaLearningRate = 0.1\n #: Initial value of sigmas\n epsilon = 2.0\n #:lasso weight decay (0 to deactivate)\n wDecay = 0.0\n #:momentum term (0 to deactivate)\n momentum = 0.0\n #:rprop decent (False to deactivate)\n rprop = False\n \n def _additionalInit(self):\n if self.sigmaLearningRate is None:\n self.sigmaLearningRate = self.learningRate \n self.gdSig = GradientDescent()\n self.gdSig.alpha = self.sigmaLearningRate\n self.gdSig.rprop = self.rprop\n self.sigList = ones(self.numParameters) * self.epsilon #Stores the list of standard deviations (sigmas)\n self.gdSig.init(self.sigList)\n self.baseline = None\n \n def perturbation(self):\n \"\"\" Generate a difference vector with the given standard deviations \"\"\"\n return random.normal(0., self.sigList)\n \n def _learnStep(self):\n \"\"\" calculates the gradient and executes a step in the direction\n of the gradient, scaled with a learning rate alpha. \"\"\"\n deltas = self.perturbation()\n #reward of positive and negative perturbations\n reward1 = self._oneEvaluation(self.current + deltas) \n reward2 = self._oneEvaluation(self.current - deltas)\n self.mreward = (reward1 + reward2) / 2. \n if self.baseline is None: \n # first learning step\n self.baseline = self.mreward\n fakt = 0.\n fakt2 = 0. \n else: \n #calc the gradients\n if reward1 != reward2:\n #gradient estimate alla SPSA but with likelihood gradient and normalization\n fakt = (reward1 - reward2) / (2. 
* self.bestEvaluation - reward1 - reward2) \n else: \n fakt=0.\n #normalized sigma gradient with moving average baseline\n norm = (self.bestEvaluation-self.baseline)\n if norm != 0.0:\n fakt2=(self.mreward-self.baseline)/(self.bestEvaluation-self.baseline) \n else:\n fakt2 = 0.0\n #update baseline \n self.baseline = 0.9 * self.baseline + 0.1 * self.mreward \n # update parameters and sigmas\n self.current = self.gd(fakt * deltas - self.current * self.sigList * self.wDecay) \n if fakt2 > 0.: #for sigma adaption alg. follows only positive gradients\n if self.exploration == \"global\": \n #apply sigma update globally \n self.sigList = self.gdSig(fakt2 * ((self.deltas ** 2).sum() - (self.sigList ** 2).sum())\n / (self.sigList * float(self.numParameters)))\n elif self.exploration == \"local\":\n #apply sigma update locally\n self.sigList = self.gdSig(fakt2 * (deltas * deltas - self.sigList * self.sigList) / self.sigList) \n elif self.exploration == \"cma\":\n #I have to think about that - needs also an option in perturbation\n raise NotImplementedError()\n else:\n raise NotImplementedError(str(self.exploration) + \" not a known exploration parameter setting.\")\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Basic arithmetic operators.\n\nSee the @{$python/math_ops} guide.\n\n@@add\n@@subtract\n@@multiply\n@@scalar_mul\n@@div\n@@divide\n@@truediv\n@@floordiv\n@@realdiv\n@@truncatediv\n@@floor_div\n@@truncatemod\n@@floormod\n@@mod\n@@cross\n@@add_n\n@@abs\n@@negative\n@@sign\n@@reciprocal\n@@square\n@@round\n@@sqrt\n@@rsqrt\n@@pow\n@@exp\n@@expm1\n@@log\n@@log1p\n@@sinh\n@@cosh\n@@asinh\n@@acosh\n@@atanh\n@@ceil\n@@floor\n@@maximum\n@@minimum\n@@cos\n@@sin\n@@lbeta\n@@tan\n@@acos\n@@asin\n@@atan\n@@atan2\n@@lgamma\n@@digamma\n@@erf\n@@erfc\n@@squared_difference\n@@igamma\n@@igammac\n@@zeta\n@@polygamma\n@@betainc\n@@rint\n@@diag\n@@diag_part\n@@trace\n@@transpose\n@@eye\n@@matrix_diag\n@@matrix_diag_part\n@@matrix_band_part\n@@matrix_set_diag\n@@matrix_transpose\n@@matmul\n@@norm\n@@matrix_determinant\n@@matrix_inverse\n@@cholesky\n@@cholesky_solve\n@@matrix_exponential\n@@matrix_solve\n@@matrix_triangular_solve\n@@matrix_solve_ls\n@@qr\n@@self_adjoint_eig\n@@self_adjoint_eigvals\n@@svd\n@@tensordot\n@@complex\n@@conj\n@@imag\n@@angle\n@@real\n@@fft\n@@ifft\n@@fft2d\n@@ifft2d\n@@fft3d\n@@ifft3d\n@@reduce_sum\n@@reduce_prod\n@@reduce_min\n@@reduce_max\n@@reduce_mean\n@@reduce_all\n@@reduce_any\n@@reduce_logsumexp\n@@count_nonzero\n@@accumulate_n\n@@einsum\n@@bincount\n@@cumsu
m\n@@cumprod\n@@segment_sum\n@@segment_prod\n@@segment_min\n@@segment_max\n@@segment_mean\n@@unsorted_segment_sum\n@@unsorted_segment_max\n@@sparse_segment_sum\n@@sparse_segment_mean\n@@sparse_segment_sqrt_n\n@@argmin\n@@argmax\n@@setdiff1d\n@@where\n@@unique\n@@edit_distance\n@@invert_permutation\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import common_shapes\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_control_flow_ops\nfrom tensorflow.python.ops import gen_data_flow_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import gen_nn_ops\nfrom tensorflow.python.ops import gen_sparse_ops\nfrom tensorflow.python.ops import gen_spectral_ops\nfrom tensorflow.python.ops import gen_state_ops\nfrom tensorflow.python.ops import state_ops\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import\nfrom tensorflow.python.ops.gen_math_ops import *\n# pylint: enable=wildcard-import\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import deprecation\n\n# Aliases for some automatically-generated names.\nlinspace = gen_math_ops.lin_space\n\narg_max = deprecation.deprecated(None, \"Use `argmax` instead\")(arg_max) # pylint: disable=used-before-assignment\narg_min = deprecation.deprecated(None, \"Use `argmin` instead\")(arg_min) # pylint: disable=used-before-assignment\n\n\ndef _set_doc(doc):\n\n def _decorator(func):\n func.__doc__ = doc\n return 
func\n\n return _decorator\n\n\n# pylint: disable=redefined-builtin\[email protected]_args(None, \"Use the `axis` argument instead\",\n \"dimension\")\n@_set_doc(\n gen_math_ops.arg_max.__doc__.replace(\"dimensions\", \"axes\").replace(\n \"dimension\", \"axis\"))\ndef argmax(input,\n axis=None,\n name=None,\n dimension=None,\n output_type=dtypes.int64):\n if dimension is not None:\n if axis is not None:\n raise ValueError(\"Cannot specify both 'axis' and 'dimension'\")\n axis = dimension\n elif axis is None:\n axis = 0\n return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type)\n\n\[email protected]_args(None, \"Use the `axis` argument instead\",\n \"dimension\")\n@_set_doc(\n gen_math_ops.arg_min.__doc__.replace(\"dimensions\", \"axes\").replace(\n \"dimension\", \"axis\"))\ndef argmin(input,\n axis=None,\n name=None,\n dimension=None,\n output_type=dtypes.int64):\n if dimension is not None:\n if axis is not None:\n raise ValueError(\"Cannot specify both 'axis' and 'dimension'\")\n axis = dimension\n elif axis is None:\n axis = 0\n return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type)\n\n\n# pylint: enable=redefined-builtin\n\n\n# pylint: disable=anomalous-backslash-in-string,protected-access\n# pylint: disable=g-docstring-has-escape\ndef abs(x, name=None):\n r\"\"\"Computes the absolute value of a tensor.\n\n Given a tensor `x` of complex numbers, this operation returns a tensor of type\n `float32` or `float64` that is the absolute value of each element in `x`. All\n elements in `x` must be complex numbers of the form \\\\(a + bj\\\\). The\n absolute value is computed as \\\\( \\sqrt{a^2 + b^2}\\\\). 
For example:\n ```python\n x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])\n tf.abs(x) # [5.25594902, 6.60492229]\n ```\n\n Args:\n x: A `Tensor` or `SparseTensor` of type `float32`, `float64`, `int32`,\n `int64`, `complex64` or `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` the same size and type as `x` with absolute\n values.\n Note, for `complex64` or `complex128` input, the returned `Tensor` will be\n of type `float32` or `float64`, respectively.\n \"\"\"\n with ops.name_scope(name, \"Abs\", [x]) as name:\n if isinstance(x, sparse_tensor.SparseTensor):\n if x.values.dtype.is_complex:\n x_abs = gen_math_ops._complex_abs(\n x.values, Tout=x.values.dtype.real_dtype, name=name)\n return sparse_tensor.SparseTensor(\n indices=x.indices, values=x_abs, dense_shape=x.dense_shape)\n x_abs = gen_math_ops._abs(x.values, name=name)\n return sparse_tensor.SparseTensor(\n indices=x.indices, values=x_abs, dense_shape=x.dense_shape)\n else:\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.is_complex:\n return gen_math_ops._complex_abs(x, Tout=x.dtype.real_dtype, name=name)\n return gen_math_ops._abs(x, name=name)\n\n\n# pylint: enable=g-docstring-has-escape\n\n\n# pylint: disable=redefined-builtin\ndef _bucketize(input, boundaries, name=None):\n return gen_math_ops._bucketize(input=input, boundaries=boundaries, name=name)\n\n\n# pylint: enable=redefined-builtin\n\n\nclass DivideDelegateWithName(object):\n \"\"\"Use Python2/Python3 division delegation to implement divide for tensors.\"\"\"\n\n def __init__(self, x, name):\n \"\"\"Construct DivideDelegateWithName.\n\n Args:\n x: Tensor to use as left operand in operator overloads\n name: The name that is preferred for the op created.\n \"\"\"\n self.x = x\n self.name = name\n\n def __truediv__(self, y):\n return _truediv_python3(self.x, y, self.name)\n\n def __floordiv__(self, y):\n return floordiv(self.x, y, self.name)\n\n def __div__(self, y):\n return 
_div_python2(self.x, y, self.name)\n\n\ndef divide(x, y, name=None):\n \"\"\"Computes Python style division of `x` by `y`.\"\"\"\n\n if name is not None:\n # Cannot use tensors operator overload, because it has no way to track\n # override names. Use a dummy class to track the runtime division behavior\n return DivideDelegateWithName(x, name) / y\n else:\n return x / y\n\n\ndef multiply(x, y, name=None):\n return gen_math_ops._mul(x, y, name)\n\n\nmultiply.__doc__ = gen_math_ops._mul.__doc__.replace(\"Mul\", \"`tf.multiply`\")\n\n\n# TODO(aselle): put deprecation in after another round of global code changes\[email protected](\n \"2016-12-30\",\n \"`tf.mul(x, y)` is deprecated, please use `tf.multiply(x, y)` or `x * y`\")\ndef _mul(x, y, name=None):\n return gen_math_ops._mul(x, y, name)\n\n\n_mul.__doc__ = (\n gen_math_ops._mul.__doc__ + (\"\" if _mul.__doc__ is None else _mul.__doc__))\n\n\ndef subtract(x, y, name=None):\n return gen_math_ops._sub(x, y, name)\n\n\nsubtract.__doc__ = gen_math_ops._sub.__doc__.replace(\"`Sub`\", \"`tf.subtract`\")\n\n\n# TODO(aselle): put deprecation in after another round of global code changes\[email protected](\n \"2016-12-30\",\n \"`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`\")\ndef _sub(x, y, name=None):\n return gen_math_ops._sub(x, y, name)\n\n\n_sub.__doc__ = (\n gen_math_ops._sub.__doc__ + (\"\" if _sub.__doc__ is None else _sub.__doc__))\n\n\n# pylint: disable=g-docstring-has-escape\ndef negative(x, name=None):\n \"\"\"Computes numerical negative value element-wise.\n\n I.e., \\\\(y = -x\\\\).\n\n Args:\n x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor`, respectively. 
Has the same type as `x`.\n \"\"\"\n with ops.name_scope(name, \"Neg\", [x]) as name:\n if isinstance(x, sparse_tensor.SparseTensor):\n x_neg = gen_math_ops._neg(x.values, name=name)\n return sparse_tensor.SparseTensor(\n indices=x.indices, values=x_neg, dense_shape=x.dense_shape)\n else:\n return gen_math_ops._neg(x, name=name)\n\n\n# pylint: enable=g-docstring-has-escape\n\n\n# pylint: disable=g-docstring-has-escape\[email protected](\n \"2016-12-30\",\n \"`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`\")\ndef _neg(x, name=None):\n \"\"\"Computes numerical negative value element-wise.\n\n I.e., \\\\(y = -x\\\\).\n\n Args:\n x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n \"\"\"\n return negative(x, name)\n\n\n# pylint: enable=g-docstring-has-escape\n\n\ndef sign(x, name=None):\n \"\"\"Returns an element-wise indication of the sign of a number.\n\n `y = sign(x) = -1` if `x < 0`; 0 if `x == 0` or `tf.is_nan(x)`; 1 if `x > 0`.\n\n Zero is returned for NaN inputs.\n\n For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.\n\n Args:\n x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor`, respectively. 
Has the same type as `x`.\n\n @compatibility(numpy)\n Equivalent to numpy.sign except for the behavior for input values of NaN.\n @end_compatibility\n \"\"\"\n with ops.name_scope(name, \"Sign\", [x]) as name:\n if isinstance(x, sparse_tensor.SparseTensor):\n x_sign = gen_math_ops.sign(x.values, name=name)\n return sparse_tensor.SparseTensor(\n indices=x.indices, values=x_sign, dense_shape=x.dense_shape)\n else:\n return gen_math_ops.sign(x, name=name)\n\n\ndef square(x, name=None):\n r\"\"\"Computes square of x element-wise.\n\n I.e., \\\\(y = x * x = x^2\\\\).\n\n Args:\n x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor`. Has the same type as `x`.\n \"\"\"\n with ops.name_scope(name, \"Square\", [x]) as name:\n if isinstance(x, sparse_tensor.SparseTensor):\n x_square = gen_math_ops.square(x.values, name=name)\n return sparse_tensor.SparseTensor(\n indices=x.indices, values=x_square, dense_shape=x.dense_shape)\n else:\n return gen_math_ops.square(x, name=name)\n\n\ndef sqrt(x, name=None):\n r\"\"\"Computes square root of x element-wise.\n\n I.e., \\\\(y = \\sqrt{x} = x^{1/2}\\\\).\n\n Args:\n x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n `float32`, `float64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n \"\"\"\n with ops.name_scope(name, \"Sqrt\", [x]) as name:\n if isinstance(x, sparse_tensor.SparseTensor):\n x_sqrt = gen_math_ops.sqrt(x.values, name=name)\n return sparse_tensor.SparseTensor(\n indices=x.indices, values=x_sqrt, dense_shape=x.dense_shape)\n else:\n return gen_math_ops.sqrt(x, name=name)\n\n\ndef erf(x, name=None):\n \"\"\"Computes the Gauss error function of `x` element-wise.\n\n Args:\n x: A `Tensor` of `SparseTensor`. 
Must be one of the following types: `half`,\n `float32`, `float64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n \"\"\"\n with ops.name_scope(name, \"Erf\", [x]) as name:\n if isinstance(x, sparse_tensor.SparseTensor):\n x_erf = gen_math_ops.erf(x.values, name=name)\n return sparse_tensor.SparseTensor(\n indices=x.indices, values=x_erf, dense_shape=x.dense_shape)\n else:\n return gen_math_ops.erf(x, name=name)\n\n\ndef scalar_mul(scalar, x):\n \"\"\"Multiplies a scalar times a `Tensor` or `IndexedSlices` object.\n\n Intended for use in gradient code which might deal with `IndexedSlices`\n objects, which are easy to multiply by a scalar but more expensive to\n multiply with arbitrary tensors.\n\n Args:\n scalar: A 0-D scalar `Tensor`. Must have known shape.\n x: A `Tensor` or `IndexedSlices` to be scaled.\n\n Returns:\n `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.\n\n Raises:\n ValueError: if scalar is not a 0-D `scalar`.\n \"\"\"\n scalar = ops.convert_to_tensor(\n scalar, dtype=x.dtype.base_dtype, name=\"scalar\")\n shape = scalar.get_shape()\n if shape.ndims == 0:\n if isinstance(x, ops.IndexedSlices):\n return ops.IndexedSlices(scalar * x.values, x.indices, x.dense_shape)\n else:\n return scalar * x\n else:\n raise ValueError(\"Only scalar multiply works, got shape %s\" % shape)\n\n\ndef pow(x, y, name=None):\n r\"\"\"Computes the power of one value to another.\n\n Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for\n corresponding elements in `x` and `y`. 
For example:\n\n ```python\n x = tf.constant([[2, 2], [3, 3]])\n y = tf.constant([[8, 16], [2, 3]])\n tf.pow(x, y) # [[256, 65536], [9, 27]]\n ```\n\n Args:\n x: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,\n or `complex128`.\n y: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,\n or `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`.\n \"\"\"\n with ops.name_scope(name, \"Pow\", [x]) as name:\n return gen_math_ops._pow(x, y, name=name)\n\n\n# pylint: disable=redefined-builtin,redefined-outer-name\ndef complex(real, imag, name=None):\n r\"\"\"Converts two real numbers to a complex number.\n\n Given a tensor `real` representing the real part of a complex number, and a\n tensor `imag` representing the imaginary part of a complex number, this\n operation returns complex numbers elementwise of the form \\\\(a + bj\\\\), where\n *a* represents the `real` part and *b* represents the `imag` part.\n\n The input tensors `real` and `imag` must have the same shape.\n\n For example:\n\n ```python\n real = tf.constant([2.25, 3.25])\n imag = tf.constant([4.75, 5.75])\n tf.complex(real, imag) # [[2.25 + 4.75j], [3.25 + 5.75j]]\n ```\n\n Args:\n real: A `Tensor`. Must be one of the following types: `float32`,\n `float64`.\n imag: A `Tensor`. 
Must have the same type as `real`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `complex64` or `complex128`.\n \"\"\"\n real = ops.convert_to_tensor(real, name=\"real\")\n imag = ops.convert_to_tensor(imag, name=\"imag\")\n with ops.name_scope(name, \"Complex\", [real, imag]) as name:\n input_types = (real.dtype, imag.dtype)\n if input_types == (dtypes.float64, dtypes.float64):\n Tout = dtypes.complex128\n elif input_types == (dtypes.float32, dtypes.float32):\n Tout = dtypes.complex64\n else:\n raise TypeError(\"real and imag have incorrect types: \"\n \"{} {}\".format(real.dtype.name, imag.dtype.name))\n return gen_math_ops._complex(real, imag, Tout=Tout, name=name)\n\n\ndef real(input, name=None):\n r\"\"\"Returns the real part of a complex (or real) tensor.\n\n Given a tensor `input`, this operation returns a tensor of type `float` that\n is the real part of each element in `input` considered as a complex number.\n\n For example:\n\n ```python\n x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])\n tf.real(x) # [-2.25, 3.25]\n ```\n\n If `input` is already real, it is returned unchanged.\n\n Args:\n input: A `Tensor`. Must have numeric type.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float32` or `float64`.\n \"\"\"\n with ops.name_scope(name, \"Real\", [input]) as name:\n if input.dtype.is_complex:\n real_dtype = input.dtype.real_dtype\n return gen_math_ops.real(input, Tout=real_dtype, name=name)\n else:\n return input\n\n\ndef imag(input, name=None):\n r\"\"\"Returns the imaginary part of a complex (or real) tensor.\n\n Given a tensor `input`, this operation returns a tensor of type `float` that\n is the imaginary part of each element in `input` considered as a complex\n number. If `input` is real, a tensor of all zeros is returned.\n\n For example:\n\n ```python\n x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])\n tf.imag(x) # [4.75, 5.75]\n ```\n\n Args:\n input: A `Tensor`. 
Must be one of the following types: `float`, `double`,\n `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float32` or `float64`.\n \"\"\"\n with ops.name_scope(name, \"Imag\", [input]) as name:\n if input.dtype.is_complex:\n return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)\n else:\n return array_ops.zeros_like(input)\n\n\ndef angle(input, name=None):\n r\"\"\"Returns the element-wise argument of a complex (or real) tensor.\n\n Given a tensor `input`, this operation returns a tensor of type `float` that\n is the argument of each element in `input` considered as a complex number.\n\n The elements in `input` are considered to be complex numbers of the form\n \\\\(a + bj\\\\), where *a* is the real part and *b* is the imaginary part.\n If `input` is real then *b* is zero by definition.\n\n The argument returned by this function is of the form \\\\(atan2(b, a)\\\\).\n If `input` is real, a tensor of all zeros is returned.\n\n For example:\n\n ```\n # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]\n tf.angle(input) ==> [2.0132, 1.056]\n ```\n\n Args:\n input: A `Tensor`. Must be one of the following types: `float`, `double`,\n `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float32` or `float64`.\n \"\"\"\n with ops.name_scope(name, \"Angle\", [input]) as name:\n if input.dtype.is_complex:\n return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name)\n else:\n return array_ops.zeros_like(input)\n\n\n# pylint: enable=redefined-outer-name,redefined-builtin\n\n\ndef round(x, name=None):\n \"\"\"Rounds the values of a tensor to the nearest integer, element-wise.\n\n Rounds half to even. Also known as bankers rounding. 
If you want to round\n according to the current system rounding mode use tf::cint.\n For example:\n\n ```python\n x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])\n tf.round(x) # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]\n ```\n\n Args:\n x: A `Tensor` of type `float32` or `float64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of same shape and type as `x`.\n \"\"\"\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.is_integer:\n return x\n else:\n return gen_math_ops.round(x, name=name)\n\n\ndef cast(x, dtype, name=None):\n \"\"\"Casts a tensor to a new type.\n\n The operation casts `x` (in case of `Tensor`) or `x.values`\n (in case of `SparseTensor`) to `dtype`.\n\n For example:\n\n ```python\n x = tf.constant([1.8, 2.2], dtype=tf.float32)\n tf.cast(x, tf.int32) # [1, 2], dtype=tf.int32\n ```\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n dtype: The destination type.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `dtype`.\n \"\"\"\n base_type = dtypes.as_dtype(dtype).base_dtype\n with ops.name_scope(name, \"Cast\", [x]) as name:\n if isinstance(x, sparse_tensor.SparseTensor):\n values_cast = cast(x.values, base_type, name=name)\n return sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)\n else:\n # TODO(josh11b): If x is not already a Tensor, we could return\n # ops.convert_to_tensor(x, dtype=dtype, ...) here, but that\n # allows some conversions that cast() can't do, e.g. casting numbers to\n # strings.\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.base_dtype == base_type:\n return x\n return gen_math_ops.cast(x, base_type, name=name)\n\n\ndef saturate_cast(value, dtype, name=None):\n \"\"\"Performs a safe saturating cast of `value` to `dtype`.\n\n This function casts the input to `dtype` without applying any scaling. 
If\n there is a danger that values would over or underflow in the cast, this op\n applies the appropriate clamping before the cast.\n\n Args:\n value: A `Tensor`.\n dtype: The desired output `DType`.\n name: A name for the operation (optional).\n\n Returns:\n `value` safely cast to `dtype`.\n \"\"\"\n # When casting to a type with smaller representable range, clamp.\n # Note that this covers casting to unsigned types as well.\n with ops.name_scope(name, \"saturate_cast\", [value]) as name:\n value = ops.convert_to_tensor(value, name=\"value\")\n dtype = dtypes.as_dtype(dtype).base_dtype\n if value.dtype.min < dtype.min:\n value = gen_math_ops.maximum(value,\n ops.convert_to_tensor(\n dtype.min, dtype=value.dtype,\n name=\"min\"))\n if value.dtype.max > dtype.max:\n value = gen_math_ops.minimum(value,\n ops.convert_to_tensor(\n dtype.max, dtype=value.dtype,\n name=\"max\"))\n return cast(value, dtype, name=name)\n\n\ndef to_float(x, name=\"ToFloat\"):\n \"\"\"Casts a tensor to type `float32`.\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `float32`.\n \"\"\"\n return cast(x, dtypes.float32, name=name)\n\n\ndef to_double(x, name=\"ToDouble\"):\n \"\"\"Casts a tensor to type `float64`.\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `float64`.\n \"\"\"\n return cast(x, dtypes.float64, name=name)\n\n\ndef to_int32(x, name=\"ToInt32\"):\n \"\"\"Casts a tensor to type `int32`.\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.\n\n Raises:\n TypeError: If `x` cannot be cast to the 
`int32`.\n \"\"\"\n return cast(x, dtypes.int32, name=name)\n\n\ndef to_int64(x, name=\"ToInt64\"):\n \"\"\"Casts a tensor to type `int64`.\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `int64`.\n \"\"\"\n return cast(x, dtypes.int64, name=name)\n\n\ndef to_bfloat16(x, name=\"ToBFloat16\"):\n \"\"\"Casts a tensor to type `bfloat16`.\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `bfloat16`.\n \"\"\"\n return cast(x, dtypes.bfloat16, name=name)\n\n\nops.Tensor._override_operator(\"__neg__\", gen_math_ops._neg)\nops.Tensor._override_operator(\"__abs__\", abs)\n# __invert__ corresponds to the ~ operator. Here we follow the numpy convention\n# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean\n# tensors and will throw a TypeError if used on nonboolean arrays\nops.Tensor._override_operator(\"__invert__\", gen_math_ops.logical_not)\n\n\ndef _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):\n \"\"\"Register operators with different tensor and scalar versions.\n\n If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,\n sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.\n\n Args:\n func: the operator\n op_name: name of the operator being overridden\n clazz_object: class to override for. 
Either `Tensor` or `SparseTensor`.\n \"\"\"\n\n def binary_op_wrapper(x, y):\n with ops.name_scope(None, op_name, [x, y]) as name:\n if not isinstance(y, sparse_tensor.SparseTensor):\n try:\n y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name=\"y\")\n except TypeError:\n # If the RHS is not a tensor, it might be a tensor aware object\n # that can implement the operator with knowledge of itself\n # and the tensor.\n if hasattr(type(y), \"__r%s__\" % op_name):\n return NotImplemented\n else:\n raise\n return func(x, y, name=name)\n\n def binary_op_wrapper_sparse(sp_x, y):\n with ops.name_scope(None, op_name, [sp_x, y]) as name:\n y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name=\"y\")\n return sparse_tensor.SparseTensor(sp_x.indices,\n func(\n sp_x.indices,\n sp_x.values,\n sp_x.dense_shape,\n y,\n name=name), sp_x.dense_shape)\n\n def r_binary_op_wrapper(y, x):\n with ops.name_scope(None, op_name, [x, y]) as name:\n x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name=\"x\")\n return func(x, y, name=name)\n\n # Propagate func.__doc__ to the wrappers\n try:\n doc = func.__doc__\n except AttributeError:\n doc = None\n binary_op_wrapper.__doc__ = doc\n r_binary_op_wrapper.__doc__ = doc\n binary_op_wrapper_sparse.__doc__ = doc\n\n if clazz_object is ops.Tensor:\n clazz_object._override_operator(\"__%s__\" % op_name, binary_op_wrapper)\n del binary_op_wrapper\n clazz_object._override_operator(\"__r%s__\" % op_name, r_binary_op_wrapper)\n del r_binary_op_wrapper\n else:\n clazz_object._override_operator(\"__%s__\" % op_name,\n binary_op_wrapper_sparse)\n del binary_op_wrapper_sparse\n\n\n# Conversion table for __truediv__. 
None entries mean no conversion required.\n_TRUEDIV_TABLE = {\n dtypes.uint8: dtypes.float32,\n dtypes.int8: dtypes.float32,\n dtypes.uint16: dtypes.float32,\n dtypes.int16: dtypes.float32,\n dtypes.int32: dtypes.float64,\n dtypes.int64: dtypes.float64,\n dtypes.bfloat16: None,\n dtypes.float16: None,\n dtypes.float32: None,\n dtypes.float64: None,\n dtypes.complex64: None,\n dtypes.complex128: None,\n}\n\n\n# NOTE: the support of \"sparse (true)div dense\" is currently not baked in into\n# \"tf.(true_)div()\". Until such an API decision is made, the supported usage is\n# to explicitly use the \"/\" operator to invoke either truediv or div.\ndef _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):\n \"\"\"Internal helper function for 'sp_t / dense_t'.\"\"\"\n with ops.name_scope(name, \"truediv\",\n [sp_indices, sp_values, sp_shape, y]) as name:\n sp_values = ops.convert_to_tensor(sp_values, name=\"sp_values\")\n y = ops.convert_to_tensor(y, name=\"y\")\n x_dtype = sp_values.dtype.base_dtype\n y_dtype = y.dtype.base_dtype\n if x_dtype != y_dtype:\n raise TypeError(\"x and y must have the same dtype, got %r != %r\" %\n (x_dtype, y_dtype))\n try:\n dtype = _TRUEDIV_TABLE[x_dtype]\n except KeyError:\n raise TypeError(\"Invalid dtype %r in __truediv__\" % x_dtype)\n if dtype is not None:\n sp_values = cast(sp_values, dtype)\n y = cast(y, dtype)\n return gen_sparse_ops.sparse_dense_cwise_div(\n sp_indices, sp_values, sp_shape, y, name=name)\n\n\ndef _truediv_python3(x, y, name=None):\n with ops.name_scope(name, \"truediv\", [x, y]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n y = ops.convert_to_tensor(y, name=\"y\")\n x_dtype = x.dtype.base_dtype\n y_dtype = y.dtype.base_dtype\n if x_dtype != y_dtype:\n raise TypeError(\"x and y must have the same dtype, got %r != %r\" %\n (x_dtype, y_dtype))\n try:\n dtype = _TRUEDIV_TABLE[x_dtype]\n except KeyError:\n raise TypeError(\"Invalid dtype %r in __truediv__\" % x_dtype)\n if dtype is not None:\n x = 
cast(x, dtype)\n y = cast(y, dtype)\n return gen_math_ops._real_div(x, y, name=name)\n\n\ndef _div_python2(x, y, name=None):\n \"\"\"Divide two values using Python 2 semantics. Used for Tensor.__div__.\n\n Args:\n x: `Tensor` numerator of real numeric type.\n y: `Tensor` denominator of real numeric type.\n name: A name for the operation (optional).\n Returns:\n `x / y` returns the quotient of x and y.\n \"\"\"\n\n with ops.name_scope(name, \"div\", [x, y]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n y = ops.convert_to_tensor(y, name=\"y\", dtype=x.dtype.base_dtype)\n x_dtype = x.dtype.base_dtype\n y_dtype = y.dtype.base_dtype\n if x_dtype != y_dtype:\n raise TypeError(\"x and y must have the same dtype, got %r != %r\" %\n (x_dtype, y_dtype))\n if x_dtype.is_floating or x_dtype.is_complex:\n return gen_math_ops._real_div(x, y, name=name)\n else:\n return gen_math_ops._floor_div(x, y, name=name)\n\n\ndef truediv(x, y, name=None):\n \"\"\"Divides x / y elementwise (using Python 3 division operator semantics).\n\n NOTE: Prefer using the Tensor operator or tf.divide which obey Python\n division operator semantics.\n\n This function forces Python 3 division operator semantics where all integer\n arguments are cast to floating types first. This op is generated by normal\n `x / y` division in Python 3 and in Python 2.7 with\n `from __future__ import division`. If you want integer division that rounds\n down, use `x // y` or `tf.floordiv`.\n\n `x` and `y` must have the same numeric type. If the inputs are floating\n point, the output will have the same type. 
If the inputs are integral, the\n inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`\n and `int64` (matching the behavior of Numpy).\n\n Args:\n x: `Tensor` numerator of numeric type.\n y: `Tensor` denominator of numeric type.\n name: A name for the operation (optional).\n\n Returns:\n `x / y` evaluated in floating point.\n\n Raises:\n TypeError: If `x` and `y` have different dtypes.\n \"\"\"\n return _truediv_python3(x, y, name)\n\n\ndef div(x, y, name=None):\n \"\"\"Divides x / y elementwise (using Python 2 division operator semantics).\n\n NOTE: Prefer using the Tensor division operator or tf.divide which obey Python\n division operator semantics.\n\n This function divides `x` and `y`, forcing Python 2.7 semantics. That is,\n if one of `x` or `y` is a float, then the result will be a float.\n Otherwise, the output will be an integer type. Flooring semantics are used\n for integer division.\n\n Args:\n x: `Tensor` numerator of real numeric type.\n y: `Tensor` denominator of real numeric type.\n name: A name for the operation (optional).\n Returns:\n `x / y` returns the quotient of x and y.\n \"\"\"\n return _div_python2(x, y, name)\n\n\n# TODO(aselle): This should be removed\nmod = gen_math_ops._floor_mod\n\n\n# TODO(aselle): Deprecate this once all internal functionality uses\n# tf.truncatediv\ndef floordiv(x, y, name=None):\n \"\"\"Divides `x / y` elementwise, rounding toward the most negative integer.\n\n The same as `tf.div(x,y)` for integers, but uses `tf.floor(tf.div(x,y))` for\n floating point arguments so that the result is always an integer (though\n possibly an integer represented as floating point). 
This op is generated by\n `x // y` floor division in Python 3 and in Python 2.7 with\n `from __future__ import division`.\n\n Note that for efficiency, `floordiv` uses C semantics for negative numbers\n (unlike Python and Numpy).\n\n `x` and `y` must have the same type, and the result will have the same type\n as well.\n\n Args:\n x: `Tensor` numerator of real numeric type.\n y: `Tensor` denominator of real numeric type.\n name: A name for the operation (optional).\n\n Returns:\n `x / y` rounded down (except possibly towards zero for negative integers).\n\n Raises:\n TypeError: If the inputs are complex.\n \"\"\"\n with ops.name_scope(name, \"floordiv\", [x, y]) as name:\n return gen_math_ops._floor_div(x, y, name=name)\n\n\nrealdiv = gen_math_ops._real_div\ntruncatediv = gen_math_ops._truncate_div\n# TODO(aselle): Rename this to floordiv when we can.\nfloor_div = gen_math_ops._floor_div\ntruncatemod = gen_math_ops._truncate_mod\nfloormod = gen_math_ops._floor_mod\n\n\ndef _mul_dispatch(x, y, name=None):\n \"\"\"Dispatches cwise mul for \"Dense*Dense\" and \"Dense*Sparse\".\"\"\"\n is_tensor_y = isinstance(y, ops.Tensor)\n if is_tensor_y:\n return gen_math_ops._mul(x, y, name=name)\n else:\n assert isinstance(y, sparse_tensor.SparseTensor) # Case: Dense * Sparse.\n new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,\n y.dense_shape, x, name)\n return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)\n\n\n# NOTE(aselle): When integer division is added for sparse_dense_cwise,\n# div, truediv, and floordiv should be delegated appropriately for\n# Python sematnics, analogous to dense cwise tensor operations.\n_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, \"div\",\n sparse_tensor.SparseTensor)\n_OverrideBinaryOperatorHelper(_sparse_dense_truediv, \"truediv\",\n sparse_tensor.SparseTensor)\n_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, \"mul\",\n 
sparse_tensor.SparseTensor)\n\n_OverrideBinaryOperatorHelper(gen_math_ops.add, \"add\")\n_OverrideBinaryOperatorHelper(gen_math_ops._sub, \"sub\")\n_OverrideBinaryOperatorHelper(_mul_dispatch, \"mul\")\n_OverrideBinaryOperatorHelper(_div_python2, \"div\")\n_OverrideBinaryOperatorHelper(_truediv_python3, \"truediv\")\n_OverrideBinaryOperatorHelper(floordiv, \"floordiv\")\n_OverrideBinaryOperatorHelper(gen_math_ops._floor_mod, \"mod\")\n_OverrideBinaryOperatorHelper(pow, \"pow\")\n\n\ndef logical_xor(x, y, name=\"LogicalXor\"):\n \"\"\"x ^ y = (x | y) & ~(x & y).\"\"\"\n # TODO(alemi) Make this a cwise op if people end up relying on it.\n return gen_math_ops.logical_and(\n gen_math_ops.logical_or(x, y),\n gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),\n name=name)\n\n\n_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, \"and\")\n_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, \"or\")\n_OverrideBinaryOperatorHelper(logical_xor, \"xor\")\n\nops.Tensor._override_operator(\"__lt__\", gen_math_ops.less)\nops.Tensor._override_operator(\"__le__\", gen_math_ops.less_equal)\nops.Tensor._override_operator(\"__gt__\", gen_math_ops.greater)\nops.Tensor._override_operator(\"__ge__\", gen_math_ops.greater_equal)\n\n\ndef range(start, limit=None, delta=1, dtype=None, name=\"range\"):\n \"\"\"Creates a sequence of numbers.\n\n Creates a sequence of numbers that begins at `start` and extends by\n increments of `delta` up to but not including `limit`.\n\n The dtype of the resulting tensor is inferred from the inputs unless\n it is provided explicitly.\n\n Like the Python builtin `range`, `start` defaults to 0, so that\n `range(n) = range(0, n)`.\n\n For example:\n\n ```python\n start = 3\n limit = 18\n delta = 3\n tf.range(start, limit, delta) # [3, 6, 9, 12, 15]\n\n start = 3\n limit = 1\n delta = -0.5\n tf.range(start, limit, delta) # [3, 2.5, 2, 1.5]\n\n limit = 5\n tf.range(limit) # [0, 1, 2, 3, 4]\n ```\n\n Args:\n start: A 0-D `Tensor` (scalar). 
Acts as first entry in the range if\n `limit` is not None; otherwise, acts as range limit and first entry\n defaults to 0.\n limit: A 0-D `Tensor` (scalar). Upper limit of sequence,\n exclusive. If None, defaults to the value of `start` while the first\n entry of the range defaults to 0.\n delta: A 0-D `Tensor` (scalar). Number that increments\n `start`. Defaults to 1.\n dtype: The type of the elements of the resulting tensor.\n name: A name for the operation. Defaults to \"range\".\n\n Returns:\n An 1-D `Tensor` of type `dtype`.\n\n @compatibility(numpy)\n Equivalent to np.arange\n @end_compatibility\n \"\"\"\n if limit is None:\n start, limit = 0, start\n\n with ops.name_scope(name, \"Range\", [start, limit, delta]) as name:\n start = ops.convert_to_tensor(start, dtype=dtype, name=\"start\")\n limit = ops.convert_to_tensor(limit, dtype=dtype, name=\"limit\")\n delta = ops.convert_to_tensor(delta, dtype=dtype, name=\"delta\")\n\n # infer dtype if not explicitly provided\n if dtype is None:\n dtype_hierarchy = [\n dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64\n ]\n assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])\n inferred_dtype = max(\n [arg.dtype for arg in [start, limit, delta]],\n key=dtype_hierarchy.index)\n\n start = cast(start, inferred_dtype)\n limit = cast(limit, inferred_dtype)\n delta = cast(delta, inferred_dtype)\n\n return gen_math_ops._range(start, limit, delta, name=name)\n\n\n# Reduction operations\ndef _ReductionDims(x, axis, reduction_indices):\n \"\"\"Returns range(0, rank(x)) if reduction_indices is None.\"\"\"\n # TODO(aselle): Remove this after deprecation\n if reduction_indices is not None:\n if axis is not None:\n raise ValueError(\"Can't specify both axis' and 'reduction_indices'.\")\n axis = reduction_indices\n if axis is not None:\n return axis\n else:\n # Fast path: avoid creating Rank and Range ops if ndims is known.\n if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:\n return 
constant_op.constant(\n np.arange(x.get_shape().ndims), dtype=dtypes.int32)\n if (isinstance(x, sparse_tensor.SparseTensor) and\n x.dense_shape.get_shape().is_fully_defined()):\n rank = x.dense_shape.get_shape()[0].value # sparse.dense_shape is 1-D.\n return constant_op.constant(np.arange(rank), dtype=dtypes.int32)\n\n # Otherwise, we rely on Range and Rank to do the right thing at run-time.\n return range(0, array_ops.rank(x))\n\n\ndef _may_reduce_to_scalar(keepdims, axis, reduction_indices, output):\n \"\"\"Set a reduction's output's shape to be a scalar if we are certain.\"\"\"\n if (not output.shape.is_fully_defined()) and (not keepdims) and (\n axis is None) and (reduction_indices is None):\n output.set_shape(())\n return output\n\n\[email protected]_args(\n None, \"keep_dims is deprecated, use keepdims instead\", \"keep_dims\")\ndef reduce_sum(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the sum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[1, 1, 1], [1, 1, 1]])\n tf.reduce_sum(x) # 6\n tf.reduce_sum(x, 0) # [2, 2, 2]\n tf.reduce_sum(x, 1) # [3, 3]\n tf.reduce_sum(x, 1, keepdims=True) # [[3], [3]]\n tf.reduce_sum(x, [0, 1]) # 6\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default),\n reduces all dimensions. 
Must be in the range\n `[-rank(input_tensor), rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.sum\n @end_compatibility\n \"\"\"\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n if keepdims is None:\n keepdims = False\n\n return _may_reduce_to_scalar(keepdims, axis, reduction_indices,\n gen_math_ops._sum(\n input_tensor,\n _ReductionDims(input_tensor, axis,\n reduction_indices),\n keepdims,\n name=name))\n\n\[email protected]_args(\n None, \"keep_dims is deprecated, use keepdims instead\", \"keep_dims\")\ndef count_nonzero(input_tensor,\n axis=None,\n keepdims=None,\n dtype=dtypes.int64,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes number of nonzero elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n **NOTE** Floating point comparison to zero is done by exact floating point\n equality check. Small values are **not** rounded to zero for purposes of\n the nonzero check.\n\n For example:\n\n ```python\n x = tf.constant([[0, 1, 0], [1, 1, 0]])\n tf.count_nonzero(x) # 3\n tf.count_nonzero(x, 0) # [1, 2, 0]\n tf.count_nonzero(x, 1) # [1, 2]\n tf.count_nonzero(x, 1, keepdims=True) # [[1], [2]]\n tf.count_nonzero(x, [0, 1]) # 3\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should be of numeric type, or `bool`.\n axis: The dimensions to reduce. 
If `None` (the default),\n reduces all dimensions. Must be in the range\n `[-rank(input_tensor), rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n dtype: The output dtype; defaults to `tf.int64`.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor (number of nonzero values).\n \"\"\"\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n if keepdims is None:\n keepdims = False\n\n with ops.name_scope(name, \"count_nonzero\", [input_tensor]):\n input_tensor = ops.convert_to_tensor(input_tensor, name=\"input_tensor\")\n zero = input_tensor.dtype.as_numpy_dtype()\n return cast(\n reduce_sum(\n # int64 reduction happens on GPU\n to_int64(gen_math_ops.not_equal(input_tensor, zero)),\n axis=axis,\n keepdims=keepdims,\n reduction_indices=reduction_indices),\n dtype=dtype)\n\n\[email protected]_args(\n None, \"keep_dims is deprecated, use keepdims instead\", \"keep_dims\")\ndef reduce_mean(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the mean of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[1., 1.], [2., 2.]])\n tf.reduce_mean(x) # 1.5\n tf.reduce_mean(x, 0) # [1.5, 1.5]\n tf.reduce_mean(x, 1) # [1., 2.]\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default),\n reduces all dimensions. 
Must be in the range\n `[-rank(input_tensor), rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.mean\n\n Please note that `np.mean` has a `dtype` parameter that could be used to\n specify the output type. By default this is `dtype=float64`. On the other\n hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,\n for example:\n\n ```python\n x = tf.constant([1, 0, 1, 0])\n tf.reduce_mean(x) # 0\n y = tf.constant([1., 0., 1., 0.])\n tf.reduce_mean(y) # 0.5\n ```\n\n @end_compatibility\n \"\"\"\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n\n if keepdims is None:\n keepdims = False\n return _may_reduce_to_scalar(keepdims, axis, reduction_indices,\n gen_math_ops._mean(\n input_tensor,\n _ReductionDims(input_tensor, axis,\n reduction_indices),\n keepdims,\n name=name))\n\n\[email protected]_args(\n None, \"keep_dims is deprecated, use keepdims instead\", \"keep_dims\")\ndef reduce_prod(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the product of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default),\n reduces all dimensions. 
Must be in the range\n `[-rank(input_tensor), rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.prod\n @end_compatibility\n \"\"\"\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n\n if keepdims is None:\n keepdims = False\n return _may_reduce_to_scalar(keepdims, axis, reduction_indices,\n gen_math_ops._prod(\n input_tensor,\n _ReductionDims(input_tensor, axis,\n reduction_indices),\n keepdims,\n name=name))\n\n\[email protected]_args(\n None, \"keep_dims is deprecated, use keepdims instead\", \"keep_dims\")\ndef reduce_min(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the minimum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default),\n reduces all dimensions. 
Must be in the range\n `[-rank(input_tensor), rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.min\n @end_compatibility\n \"\"\"\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n if keepdims is None:\n keepdims = False\n return _may_reduce_to_scalar(keepdims, axis, reduction_indices,\n gen_math_ops._min(\n input_tensor,\n _ReductionDims(input_tensor, axis,\n reduction_indices),\n keepdims,\n name=name))\n\n\[email protected]_args(\n None, \"keep_dims is deprecated, use keepdims instead\", \"keep_dims\")\ndef reduce_max(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the maximum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default),\n reduces all dimensions. 
Must be in the range\n `[-rank(input_tensor), rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.max\n @end_compatibility\n \"\"\"\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n if keepdims is None:\n keepdims = False\n return _may_reduce_to_scalar(keepdims, axis, reduction_indices,\n gen_math_ops._max(\n input_tensor,\n _ReductionDims(input_tensor, axis,\n reduction_indices),\n keepdims,\n name=name))\n\n\[email protected]_args(\n None, \"keep_dims is deprecated, use keepdims instead\", \"keep_dims\")\ndef reduce_all(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the \"logical and\" of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[True, True], [False, False]])\n tf.reduce_all(x) # False\n tf.reduce_all(x, 0) # [False, False]\n tf.reduce_all(x, 1) # [True, False]\n ```\n\n Args:\n input_tensor: The boolean tensor to reduce.\n axis: The dimensions to reduce. If `None` (the default),\n reduces all dimensions. 
Must be in the range\n `[-rank(input_tensor), rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.all\n @end_compatibility\n \"\"\"\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n if keepdims is None:\n keepdims = False\n return _may_reduce_to_scalar(keepdims, axis, reduction_indices,\n gen_math_ops._all(\n input_tensor,\n _ReductionDims(input_tensor, axis,\n reduction_indices),\n keepdims,\n name=name))\n\n\[email protected]_args(\n None, \"keep_dims is deprecated, use keepdims instead\", \"keep_dims\")\ndef reduce_any(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the \"logical or\" of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[True, True], [False, False]])\n tf.reduce_any(x) # True\n tf.reduce_any(x, 0) # [True, True]\n tf.reduce_any(x, 1) # [True, False]\n ```\n\n Args:\n input_tensor: The boolean tensor to reduce.\n axis: The dimensions to reduce. If `None` (the default),\n reduces all dimensions. 
Must be in the range\n `[-rank(input_tensor), rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.any\n @end_compatibility\n \"\"\"\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n if keepdims is None:\n keepdims = False\n return _may_reduce_to_scalar(keepdims, axis, reduction_indices,\n gen_math_ops._any(\n input_tensor,\n _ReductionDims(input_tensor, axis,\n reduction_indices),\n keepdims,\n name=name))\n\n\[email protected]_args(\n None, \"keep_dims is deprecated, use keepdims instead\", \"keep_dims\")\ndef reduce_logsumexp(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes log(sum(exp(elements across dimensions of a tensor))).\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n This function is more numerically stable than log(sum(exp(input))). It avoids\n overflows caused by taking the exp of large inputs and underflows caused by\n taking the log of small inputs.\n\n For example:\n\n ```python\n x = tf.constant([[0., 0., 0.], [0., 0., 0.]])\n tf.reduce_logsumexp(x) # log(6)\n tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)]\n tf.reduce_logsumexp(x, 1) # [log(3), log(3)]\n tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]]\n tf.reduce_logsumexp(x, [0, 1]) # log(6)\n ```\n\n Args:\n input_tensor: The tensor to reduce. 
Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default),\n reduces all dimensions. Must be in the range\n `[-rank(input_tensor), rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n \"\"\"\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n if keepdims is None:\n keepdims = False\n with ops.name_scope(name, \"ReduceLogSumExp\", [input_tensor]) as name:\n raw_max = reduce_max(\n input_tensor,\n axis=axis,\n reduction_indices=reduction_indices,\n keepdims=True)\n my_max = array_ops.stop_gradient(\n array_ops.where(\n gen_math_ops.is_finite(raw_max), raw_max,\n array_ops.zeros_like(raw_max)))\n result = gen_math_ops.log(\n reduce_sum(\n gen_math_ops.exp(input_tensor - my_max),\n axis,\n keepdims=True,\n reduction_indices=reduction_indices)) + my_max\n if not keepdims:\n if isinstance(axis, int):\n axis = [axis]\n result = array_ops.squeeze(result, axis)\n return _may_reduce_to_scalar(keepdims, axis, reduction_indices, result)\n\n\ndef trace(x, name=None):\n \"\"\"Compute the trace of a tensor `x`.\n\n `trace(x)` returns the sum along the main diagonal of each inner-most matrix\n in x. 
If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output\n is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where\n\n `output[i, j, k, ..., l] = trace(x[i, j, i, ..., l, :, :])`\n\n For example:\n\n ```python\n x = tf.constant([[1, 2], [3, 4]])\n tf.trace(x) # 5\n\n x = tf.constant([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n tf.trace(x) # 15\n\n x = tf.constant([[[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]],\n [[-1, -2, -3],\n [-4, -5, -6],\n [-7, -8, -9]]])\n tf.trace(x) # [15, -15]\n ```\n\n Args:\n x: tensor.\n name: A name for the operation (optional).\n\n Returns:\n The trace of input tensor.\n \"\"\"\n with ops.name_scope(name, \"Trace\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)\n\n\ndef matmul(a,\n b,\n transpose_a=False,\n transpose_b=False,\n adjoint_a=False,\n adjoint_b=False,\n a_is_sparse=False,\n b_is_sparse=False,\n name=None):\n \"\"\"Multiplies matrix `a` by matrix `b`, producing `a` * `b`.\n\n The inputs must, following any transpositions, be tensors of rank >= 2\n where the inner 2 dimensions specify valid matrix multiplication arguments,\n and any further outer dimensions match.\n\n Both matrices must be of the same type. The supported types are:\n `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.\n\n Either matrix can be transposed or adjointed (conjugated and transposed) on\n the fly by setting one of the corresponding flag to `True`. These are `False`\n by default.\n\n If one or both of the matrices contain a lot of zeros, a more efficient\n multiplication algorithm can be used by setting the corresponding\n `a_is_sparse` or `b_is_sparse` flag to `True`. 
These are `False` by default.\n This optimization is only available for plain matrices (rank-2 tensors) with\n datatypes `bfloat16` or `float32`.\n\n For example:\n\n ```python\n # 2-D tensor `a`\n # [[1, 2, 3],\n # [4, 5, 6]]\n a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])\n\n # 2-D tensor `b`\n # [[ 7, 8],\n # [ 9, 10],\n # [11, 12]]\n b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])\n\n # `a` * `b`\n # [[ 58, 64],\n # [139, 154]]\n c = tf.matmul(a, b)\n\n\n # 3-D tensor `a`\n # [[[ 1, 2, 3],\n # [ 4, 5, 6]],\n # [[ 7, 8, 9],\n # [10, 11, 12]]]\n a = tf.constant(np.arange(1, 13, dtype=np.int32),\n shape=[2, 2, 3])\n\n # 3-D tensor `b`\n # [[[13, 14],\n # [15, 16],\n # [17, 18]],\n # [[19, 20],\n # [21, 22],\n # [23, 24]]]\n b = tf.constant(np.arange(13, 25, dtype=np.int32),\n shape=[2, 3, 2])\n\n # `a` * `b`\n # [[[ 94, 100],\n # [229, 244]],\n # [[508, 532],\n # [697, 730]]]\n c = tf.matmul(a, b)\n\n # Since python >= 3.5 the @ operator is supported (see PEP 465).\n # In TensorFlow, it simply calls the `tf.matmul()` function, so the\n # following lines are equivalent:\n d = a @ b @ [[10.], [11.]]\n d = tf.matmul(tf.matmul(a, b), [[10.], [11.]])\n ```\n\n Args:\n a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,\n `complex128` and rank > 1.\n b: `Tensor` with same type and rank as `a`.\n transpose_a: If `True`, `a` is transposed before multiplication.\n transpose_b: If `True`, `b` is transposed before multiplication.\n adjoint_a: If `True`, `a` is conjugated and transposed before\n multiplication.\n adjoint_b: If `True`, `b` is conjugated and transposed before\n multiplication.\n a_is_sparse: If `True`, `a` is treated as a sparse matrix.\n b_is_sparse: If `True`, `b` is treated as a sparse matrix.\n name: Name for the operation (optional).\n\n Returns:\n A `Tensor` of the same type as `a` and `b` where each inner-most matrix is\n the product of the corresponding matrices in `a` and `b`, e.g. 
if all\n transpose or adjoint attributes are `False`:\n\n `output`[..., i, j] = sum_k (`a`[..., i, k] * `b`[..., k, j]),\n for all indices i, j.\n\n Note: This is matrix product, not element-wise product.\n\n\n Raises:\n ValueError: If transpose_a and adjoint_a, or transpose_b and adjoint_b\n are both set to True.\n \"\"\"\n with ops.name_scope(name, \"MatMul\", [a, b]) as name:\n if transpose_a and adjoint_a:\n raise ValueError(\"Only one of transpose_a and adjoint_a can be True.\")\n if transpose_b and adjoint_b:\n raise ValueError(\"Only one of transpose_b and adjoint_b can be True.\")\n\n a = ops.convert_to_tensor(a, name=\"a\")\n b = ops.convert_to_tensor(b, name=\"b\")\n # TODO(apassos) remove _shape_tuple here when it is not needed.\n a_shape = a._shape_tuple() # pylint: disable=protected-access\n b_shape = b._shape_tuple() # pylint: disable=protected-access\n if (not a_is_sparse and\n not b_is_sparse) and ((a_shape is None or len(a_shape) > 2) and\n (b_shape is None or len(b_shape) > 2)):\n # BatchMatmul does not support transpose, so we conjugate the matrix and\n # use adjoint instead. Conj() is a noop for real matrices.\n if transpose_a:\n a = conj(a)\n adjoint_a = True\n if transpose_b:\n b = conj(b)\n adjoint_b = True\n return gen_math_ops._batch_mat_mul(\n a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)\n\n # Neither matmul nor sparse_matmul support adjoint, so we conjugate\n # the matrix and use transpose instead. 
Conj() is a noop for real\n # matrices.\n if adjoint_a:\n a = conj(a)\n transpose_a = True\n if adjoint_b:\n b = conj(b)\n transpose_b = True\n\n use_sparse_matmul = False\n if a_is_sparse or b_is_sparse:\n sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]\n use_sparse_matmul = (\n a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types)\n if a.dtype == dtypes.bfloat16 or b.dtype == dtypes.bfloat16:\n # matmul currently doesn't handle bfloat16 inputs.\n use_sparse_matmul = True\n if use_sparse_matmul:\n ret = sparse_matmul(\n a,\n b,\n transpose_a=transpose_a,\n transpose_b=transpose_b,\n a_is_sparse=a_is_sparse,\n b_is_sparse=b_is_sparse,\n name=name)\n # sparse_matmul always returns float32, even with\n # bfloat16 inputs. This prevents us from configuring bfloat16 training.\n # casting to bfloat16 also matches non-sparse matmul behavior better.\n if a.dtype == dtypes.bfloat16 and b.dtype == dtypes.bfloat16:\n ret = cast(ret, dtypes.bfloat16)\n return ret\n else:\n return gen_math_ops._mat_mul(\n a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)\n\n\n_OverrideBinaryOperatorHelper(matmul, \"matmul\")\n\nsparse_matmul = gen_math_ops._sparse_mat_mul\n\n\[email protected](\"MatMul\", \"flops\")\ndef _calc_mat_mul_flops(graph, node):\n \"\"\"Calculates the compute resources needed for MatMul.\"\"\"\n transpose_a = node.attr[\"transpose_a\"].b\n a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n a_shape.assert_is_fully_defined()\n if transpose_a:\n k = int(a_shape[0])\n else:\n k = int(a_shape[1])\n output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n output_shape.assert_is_fully_defined()\n output_count = np.prod(output_shape.as_list())\n return ops.OpStats(\"flops\", (k * output_count * 2))\n\n\ndef _as_indexed_slices(x, optimize=True):\n \"\"\"Convert 'x' to IndexedSlices.\n\n Convert a dense Tensor to a block-sparse IndexedSlices.\n\n Args:\n x: Either a Tensor object, or an 
IndexedSlices object.\n optimize: if true, attempt to optimize the conversion of 'x'.\n\n Returns:\n An IndexedSlices object.\n\n Raises:\n TypeError: If 'x' is not a Tensor or an IndexedSlices object.\n \"\"\"\n # TODO(touts): op_scope\n if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):\n raise TypeError(\"Not a Tensor or IndexedSlices: %s\" % type(x))\n if isinstance(x, ops.IndexedSlices):\n return x\n x_shape = array_ops.shape_internal(x, optimize=optimize)\n return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)\n\n\ndef _as_indexed_slices_list(inputs, optimize=True):\n \"\"\"Convert all elements of 'inputs' to IndexedSlices.\n\n Additionally, homogenize the types of all the indices to\n either int32 or int64.\n\n Args:\n inputs: List containing either Tensor or IndexedSlices objects.\n optimize: if true, attempt to optimize the conversion of each input.\n\n Returns:\n A list of IndexedSlices objects.\n\n Raises:\n TypeError: If 'inputs' is not a list or a tuple.\n \"\"\"\n if not isinstance(inputs, (list, tuple)):\n raise TypeError(\"Expected a list or tuple, not a %s\" % type(inputs))\n outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]\n with_int32_index = [\n o.indices for o in outputs if o.indices.dtype == dtypes.int32\n ]\n if not with_int32_index or len(with_int32_index) == len(outputs):\n return outputs\n casted_outputs = []\n for o in outputs:\n if o.indices.dtype == dtypes.int32:\n casted_outputs.append(\n ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),\n o.dense_shape))\n else:\n casted_outputs.append(o)\n return casted_outputs\n\n\ndef add_n(inputs, name=None):\n \"\"\"Adds all input tensors element-wise.\n\n Args:\n inputs: A list of `Tensor` objects, each with same shape and type.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of same shape and type as the elements of `inputs`.\n\n Raises:\n ValueError: If `inputs` don't all have same shape and dtype or the shape\n cannot be inferred.\n 
\"\"\"\n if not inputs or not isinstance(inputs, (list, tuple)):\n raise ValueError(\"inputs must be a list of at least one Tensor with the \"\n \"same dtype and shape\")\n inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)\n if not all(isinstance(x, ops.Tensor) for x in inputs):\n raise ValueError(\"inputs must be a list of at least one Tensor with the \"\n \"same dtype and shape\")\n\n if len(inputs) == 1:\n if name:\n return array_ops.identity(inputs[0], name=name)\n return inputs[0]\n return gen_math_ops._add_n(inputs, name=name)\n\n\ndef accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):\n \"\"\"Returns the element-wise sum of a list of tensors.\n\n Optionally, pass `shape` and `tensor_dtype` for shape and type checking,\n otherwise, these are inferred.\n\n NOTE: This operation is not differentiable and cannot be used if inputs depend\n on trainable variables. Please use `tf.add_n` for such cases.\n\n Aside from differentiability, `tf.accumulate_n` performs the same operation as\n `tf.add_n`, but does not wait for all of its inputs to be ready before\n beginning to sum. 
This can save memory if inputs are ready at different times,\n since minimum temporary storage is proportional to the output size rather than\n the inputs size.\n\n For example:\n\n ```python\n a = tf.constant([[1, 2], [3, 4]])\n b = tf.constant([[5, 0], [0, 6]])\n tf.accumulate_n([a, b, a]) # [[7, 4], [6, 14]]\n\n # Explicitly pass shape and type\n tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32) # [[7, 4],\n # [6, 14]]\n ```\n\n Args:\n inputs: A list of `Tensor` objects, each with same shape and type.\n shape: Shape of elements of `inputs`.\n tensor_dtype: The type of `inputs`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of same shape and type as the elements of `inputs`.\n\n Raises:\n ValueError: If `inputs` don't all have same shape and dtype or the shape\n cannot be inferred.\n \"\"\"\n if context.in_eager_mode():\n # TODO(apassos) remove this once the lifetime of eager variables gets\n # addressed.\n raise ValueError(\"accumulate_n not supported in eager mode\")\n if not inputs or not isinstance(inputs, (list, tuple)):\n raise ValueError(\"inputs must be a list of at least one Tensor with the \"\n \"same dtype and shape\")\n inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)\n if not all(isinstance(x, ops.Tensor) for x in inputs):\n raise ValueError(\"inputs must be a list of at least one Tensor with the \"\n \"same dtype and shape\")\n if not all(x.dtype == inputs[0].dtype for x in inputs):\n raise ValueError(\"inputs must be a list of at least one Tensor with the \"\n \"same dtype and shape\")\n if shape is not None:\n shape = tensor_shape.as_shape(shape)\n else:\n shape = tensor_shape.unknown_shape()\n for input_tensor in inputs:\n if isinstance(input_tensor, ops.Tensor):\n shape = shape.merge_with(input_tensor.get_shape())\n if tensor_dtype is None:\n tensor_dtype = inputs[0].dtype\n if tensor_dtype != inputs[0].dtype:\n raise TypeError(\"tensor_dtype is {}, but input is of type {}\".format(\n 
tensor_dtype, inputs[0].dtype))\n if len(inputs) == 1:\n return inputs[0]\n with ops.name_scope(name, \"AccumulateN\", inputs) as name:\n var = gen_state_ops._temporary_variable(\n shape=tensor_shape.vector(0), dtype=tensor_dtype)\n with ops.colocate_with(var):\n zeros = array_ops.zeros_like(gen_control_flow_ops._merge(inputs)[0])\n zeros.set_shape(shape)\n ref = state_ops.assign(var, zeros, validate_shape=False)\n update_ops = [\n state_ops.assign_add(ref, input_tensor, use_locking=True)\n for input_tensor in inputs\n ]\n with ops.control_dependencies(update_ops):\n return gen_state_ops._destroy_temporary_variable(\n ref, var_name=var.op.name, name=name)\n\n\ndef sigmoid(x, name=None):\n \"\"\"Computes sigmoid of `x` element-wise.\n\n Specifically, `y = 1 / (1 + exp(-x))`.\n\n Args:\n x: A Tensor with type `float16`, `float32`, `float64`, `complex64`,\n or `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor with the same type as `x`.\n\n @compatibility(numpy)\n Equivalent to np.scipy.special.expit\n @end_compatibility\n \"\"\"\n with ops.name_scope(name, \"Sigmoid\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n return gen_math_ops._sigmoid(x, name=name)\n\n\ndef log_sigmoid(x, name=None):\n \"\"\"Computes log sigmoid of `x` element-wise.\n\n Specifically, `y = log(1 / (1 + exp(-x)))`. 
For numerical stability,\n we use `y = -tf.nn.softplus(-x)`.\n\n Args:\n x: A Tensor with type `float32` or `float64`.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor with the same type as `x`.\n \"\"\"\n with ops.name_scope(name, \"LogSigmoid\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n return gen_math_ops._neg(gen_nn_ops.softplus(-x), name=name)\n\n\ndef tanh(x, name=None):\n \"\"\"Computes hyperbolic tangent of `x` element-wise.\n\n Args:\n x: A Tensor or SparseTensor with type `float16`, `float32`, `double`,\n `complex64`, or `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor or SparseTensor respectively with the same type as `x`.\n \"\"\"\n with ops.name_scope(name, \"Tanh\", [x]) as name:\n if isinstance(x, sparse_tensor.SparseTensor):\n x_tanh = gen_math_ops._tanh(x.values, name=name)\n return sparse_tensor.SparseTensor(\n indices=x.indices, values=x_tanh, dense_shape=x.dense_shape)\n else:\n return gen_math_ops._tanh(x, name=name)\n\n\ndef bincount(arr,\n weights=None,\n minlength=None,\n maxlength=None,\n dtype=dtypes.int32):\n \"\"\"Counts the number of occurrences of each value in an integer array.\n\n If `minlength` and `maxlength` are not given, returns a vector with length\n `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise.\n If `weights` are non-None, then index `i` of the output stores the sum of the\n value in `weights` at each index where the corresponding value in `arr` is\n `i`.\n\n Args:\n arr: An int32 tensor of non-negative values.\n weights: If non-None, must be the same shape as arr. 
For each value in\n `arr`, the bin will be incremented by the corresponding weight instead\n of 1.\n minlength: If given, ensures the output has length at least `minlength`,\n padding with zeros at the end if necessary.\n maxlength: If given, skips values in `arr` that are equal or greater than\n `maxlength`, ensuring that the output has length at most `maxlength`.\n dtype: If `weights` is None, determines the type of the output bins.\n\n Returns:\n A vector with the same dtype as `weights` or the given `dtype`. The bin\n values.\n \"\"\"\n arr = ops.convert_to_tensor(arr, name=\"arr\", dtype=dtypes.int32)\n array_is_nonempty = reduce_prod(array_ops.shape(arr)) > 0\n output_size = cast(array_is_nonempty, dtypes.int32) * (reduce_max(arr) + 1)\n if minlength is not None:\n minlength = ops.convert_to_tensor(\n minlength, name=\"minlength\", dtype=dtypes.int32)\n output_size = gen_math_ops.maximum(minlength, output_size)\n if maxlength is not None:\n maxlength = ops.convert_to_tensor(\n maxlength, name=\"maxlength\", dtype=dtypes.int32)\n output_size = gen_math_ops.minimum(maxlength, output_size)\n if weights is not None:\n weights = ops.convert_to_tensor(weights, name=\"weights\")\n return gen_math_ops.unsorted_segment_sum(weights, arr, output_size)\n weights = constant_op.constant([], dtype)\n return gen_math_ops.bincount(arr, output_size, weights)\n\n\ndef cumsum(x, axis=0, exclusive=False, reverse=False, name=None):\n \"\"\"Compute the cumulative sum of the tensor `x` along `axis`.\n\n By default, this op performs an inclusive cumsum, which means that the first\n element of the input is identical to the first element of the output:\n\n ```python\n tf.cumsum([a, b, c]) # [a, a + b, a + b + c]\n ```\n\n By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed\n instead:\n\n ```python\n tf.cumsum([a, b, c], exclusive=True) # [0, a, a + b]\n ```\n\n By setting the `reverse` kwarg to `True`, the cumsum is performed in the\n opposite direction:\n\n 
```python\n tf.cumsum([a, b, c], reverse=True) # [a + b + c, b + c, c]\n ```\n\n This is more efficient than using separate `tf.reverse` ops.\n\n The `reverse` and `exclusive` kwargs can also be combined:\n\n ```python\n tf.cumsum([a, b, c], exclusive=True, reverse=True) # [b + c, c, 0]\n ```\n\n Args:\n x: A `Tensor`. Must be one of the following types: `float32`, `float64`,\n `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,\n `complex128`, `qint8`, `quint8`, `qint32`, `half`.\n axis: A `Tensor` of type `int32` (default: 0). Must be in the range\n `[-rank(x), rank(x))`.\n exclusive: If `True`, perform exclusive cumsum.\n reverse: A `bool` (default: False).\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n \"\"\"\n with ops.name_scope(name, \"Cumsum\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n return gen_math_ops.cumsum(\n x, axis, exclusive=exclusive, reverse=reverse, name=name)\n\n\ndef cumprod(x, axis=0, exclusive=False, reverse=False, name=None):\n \"\"\"Compute the cumulative product of the tensor `x` along `axis`.\n\n By default, this op performs an inclusive cumprod, which means that the\n first element of the input is identical to the first element of the output:\n\n ```python\n tf.cumprod([a, b, c]) # [a, a * b, a * b * c]\n ```\n\n By setting the `exclusive` kwarg to `True`, an exclusive cumprod is\n performed\n instead:\n\n ```python\n tf.cumprod([a, b, c], exclusive=True) # [1, a, a * b]\n ```\n\n By setting the `reverse` kwarg to `True`, the cumprod is performed in the\n opposite direction:\n\n ```python\n tf.cumprod([a, b, c], reverse=True) # [a * b * c, b * c, c]\n ```\n\n This is more efficient than using separate `tf.reverse` ops.\n The `reverse` and `exclusive` kwargs can also be combined:\n\n ```python\n tf.cumprod([a, b, c], exclusive=True, reverse=True) # [b * c, c, 1]\n ```\n\n Args:\n x: A `Tensor`. 
Must be one of the following types: `float32`, `float64`,\n `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,\n `complex128`, `qint8`, `quint8`, `qint32`, `half`.\n axis: A `Tensor` of type `int32` (default: 0). Must be in the range\n `[-rank(x), rank(x))`.\n exclusive: If `True`, perform exclusive cumprod.\n reverse: A `bool` (default: False).\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n \"\"\"\n with ops.name_scope(name, \"Cumprod\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n return gen_math_ops.cumprod(\n x, axis, exclusive=exclusive, reverse=reverse, name=name)\n\n\ndef conj(x, name=None):\n r\"\"\"Returns the complex conjugate of a complex number.\n\n Given a tensor `input` of complex numbers, this operation returns a tensor of\n complex numbers that are the complex conjugate of each element in `input`. The\n complex numbers in `input` must be of the form \\\\(a + bj\\\\), where *a* is the\n real part and *b* is the imaginary part.\n\n The complex conjugate returned by this operation is of the form \\\\(a - bj\\\\).\n\n For example:\n\n # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]\n tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]\n\n If `x` is real, it is returned unchanged.\n\n Args:\n x: `Tensor` to conjugate. 
Must have numeric or variant type.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` that is the conjugate of `x` (with the same type).\n\n Raises:\n TypeError: If `x` is not a numeric tensor.\n \"\"\"\n if isinstance(x, ops.Tensor):\n dt = x.dtype\n if dt.is_floating or dt.is_integer:\n return x\n with ops.name_scope(name, \"Conj\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.is_complex or x.dtype == dtypes.variant:\n return gen_math_ops._conj(x, name=name)\n elif x.dtype.is_floating or x.dtype.is_integer:\n return x\n else:\n raise TypeError(\n \"Expected numeric or variant tensor, got dtype %r\" % x.dtype)\n\n\ndef _BroadcastShape(op):\n \"\"\"Common shape function for binary operators that broadcast their inputs.\"\"\"\n return [\n common_shapes.broadcast_shape(op.inputs[0].get_shape(),\n op.inputs[1].get_shape())\n ]\n\n\ndef reduced_shape(input_shape, axes):\n \"\"\"Helper function for reduction ops.\n\n Args:\n input_shape: 1-D Tensor, the shape of the Tensor being reduced.\n axes: 1-D Tensor, the reduction axes.\n Returns:\n A 1-D Tensor, the output shape as if keepdims were set to True.\n \"\"\"\n # Example:\n # cast needed for SparseTensor reductions\n input_shape = to_int32(input_shape) # [2, 3, 5, 7]\n axes = to_int32(axes) # [1, 2]\n\n input_rank = array_ops.size(input_shape) # 4\n axes = (axes + input_rank) % input_rank\n axes_shape = array_ops.shape(axes) # [2]\n return gen_data_flow_ops.dynamic_stitch( # [2, 1, 1, 7]\n [\n range(input_rank), # [0, 1, 2, 3]\n axes\n ], # [1, 2]\n [\n input_shape, # [2, 3, 5, 7]\n array_ops.fill(axes_shape, 1)\n ]) # [1, 1]\n\n\ndef sparse_segment_sum(data, indices, segment_ids, name=None,\n num_segments=None):\n r\"\"\"Computes the sum along sparse segments of a tensor.\n\n Read @{$math_ops#segmentation$the section on segmentation} for an explanation\n of segments.\n\n Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first\n dimension, selecting a 
subset of dimension 0, specified by `indices`.\n `segment_ids` is allowed to have missing ids, in which case the output will\n be zeros at those indices. In those cases `num_segments` is used to determine\n the size of the output.\n\n For example:\n\n ```python\n c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])\n\n # Select two rows, one segment.\n tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))\n # => [[0 0 0 0]]\n\n # Select two rows, two segment.\n tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))\n # => [[ 1 2 3 4]\n # [-1 -2 -3 -4]]\n\n # With missing segment ids.\n tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),\n num_segments=4)\n # => [[ 1 2 3 4]\n # [ 0 0 0 0]\n # [-1 -2 -3 -4]\n # [ 0 0 0 0]]\n\n # Select all rows, two segments.\n tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))\n # => [[0 0 0 0]\n # [5 6 7 8]]\n\n # Which is equivalent to:\n tf.segment_sum(c, tf.constant([0, 0, 1]))\n ```\n\n Args:\n data: A `Tensor` with data that will be assembled in the output.\n indices: A 1-D `Tensor` with indices into `data`. Has same rank as\n `segment_ids`.\n segment_ids: A 1-D `Tensor` with indices into the output `Tensor`.\n Values should be sorted and can be repeated.\n name: A name for the operation (optional).\n num_segments: An optional int32 scalar. 
Indicates the size of the output\n `Tensor`.\n\n Returns:\n A `tensor` of the shape as data, except for dimension 0 which\n has size `k`, the number of segments specified via `num_segments` or\n inferred for the last element in `segments_ids`.\n \"\"\"\n if num_segments is not None:\n return gen_math_ops.sparse_segment_sum_with_num_segments(\n data=data,\n indices=indices,\n segment_ids=segment_ids,\n num_segments=num_segments,\n name=name)\n else:\n return gen_math_ops.sparse_segment_sum(\n data=data,\n indices=indices,\n segment_ids=segment_ids,\n name=name)\n\n\ndef sparse_segment_mean(data, indices, segment_ids, name=None,\n num_segments=None):\n r\"\"\"Computes the mean along sparse segments of a tensor.\n\n Read @{$math_ops#segmentation$the section on segmentation} for an explanation\n of segments.\n\n Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first\n dimension, selecting a subset of dimension 0, specified by `indices`.\n `segment_ids` is allowed to have missing ids, in which case the output will\n be zeros at those indices. In those cases `num_segments` is used to determine\n the size of the output.\n\n Args:\n data: A `Tensor` with data that will be assembled in the output.\n indices: A 1-D `Tensor` with indices into `data`. Has same rank as\n `segment_ids`.\n segment_ids: A 1-D `Tensor` with indices into the output `Tensor`.\n Values should be sorted and can be repeated.\n name: A name for the operation (optional).\n num_segments: An optional int32 scalar. 
Indicates the size of the output\n `Tensor`.\n\n Returns:\n A `tensor` of the shape as data, except for dimension 0 which\n has size `k`, the number of segments specified via `num_segments` or\n inferred for the last element in `segments_ids`.\n \"\"\"\n if num_segments is not None:\n return gen_math_ops.sparse_segment_mean_with_num_segments(\n data=data,\n indices=indices,\n segment_ids=segment_ids,\n num_segments=num_segments,\n name=name)\n else:\n return gen_math_ops.sparse_segment_mean(\n data=data,\n indices=indices,\n segment_ids=segment_ids,\n name=name)\n\n\ndef sparse_segment_sqrt_n(data, indices, segment_ids, name=None,\n num_segments=None):\n r\"\"\"Computes the sum along sparse segments of a tensor divided by the sqrt(N).\n\n `N` is the size of the segment being reduced.\n\n Args:\n data: A `Tensor` with data that will be assembled in the output.\n indices: A 1-D `Tensor` with indices into `data`. Has same rank as\n `segment_ids`.\n segment_ids: A 1-D `Tensor` with indices into the output `Tensor`.\n Values should be sorted and can be repeated.\n name: A name for the operation (optional).\n num_segments: An optional int32 scalar. 
Indicates the size of the output\n `Tensor`.\n\n Returns:\n A `tensor` of the shape as data, except for dimension 0 which\n has size `k`, the number of segments specified via `num_segments` or\n inferred for the last element in `segments_ids`.\n \"\"\"\n if num_segments is not None:\n return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(\n data=data,\n indices=indices,\n segment_ids=segment_ids,\n num_segments=num_segments,\n name=name)\n else:\n return gen_math_ops.sparse_segment_sqrt_n(\n data=data,\n indices=indices,\n segment_ids=segment_ids,\n name=name)\n\n\ndef tensordot(a, b, axes, name=None):\n r\"\"\"Tensor contraction of a and b along specified axes.\n\n Tensordot (also known as tensor contraction) sums the product of elements\n from `a` and `b` over the indices specified by `a_axes` and `b_axes`.\n The lists `a_axes` and `b_axes` specify those pairs of axes along which to\n contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension\n as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`. The lists\n `a_axes` and `b_axes` must have identical length and consist of unique\n integers that specify valid axes for each of the tensors.\n\n This operation corresponds to `numpy.tensordot(a, b, axes)`.\n\n Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1`\n is equivalent to matrix multiplication.\n\n Example 2: When `a` and `b` are matrices (order 2), the case\n `axes = [[1], [0]]` is equivalent to matrix multiplication.\n\n Example 3: Suppose that \\\\(a_{ijk}\\\\) and \\\\(b_{lmn}\\\\) represent two\n tensors of order 3. 
Then, `contract(a, b, [[0], [2]])` is the order 4 tensor\n \\\\(c_{jklm}\\\\) whose entry\n corresponding to the indices \\\\((j,k,l,m)\\\\) is given by:\n\n \\\\( c_{jklm} = \\sum_i a_{ijk} b_{lmi} \\\\).\n\n In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.\n\n Args:\n a: `Tensor` of type `float32` or `float64`.\n b: `Tensor` with the same type as `a`.\n axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].\n If axes is a scalar, sum over the last N axes of a and the first N axes\n of b in order.\n If axes is a list or `Tensor` the first and second row contain the set of\n unique integers specifying axes along which the contraction is computed,\n for `a` and `b`, respectively. The number of axes for `a` and `b` must\n be equal.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with the same type as `a`.\n\n Raises:\n ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.\n IndexError: If the values in axes exceed the rank of the corresponding\n tensor.\n \"\"\"\n\n def _tensordot_reshape(a, axes, flipped=False):\n \"\"\"Helper method to perform transpose and reshape for contraction op.\n\n This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`\n using `array_ops.transpose` and `array_ops.reshape`. The method takes a\n tensor and performs the correct transpose and reshape operation for a given\n set of indices. It returns the reshaped tensor as well as a list of indices\n necessary to reshape the tensor again after matrix multiplication.\n\n Args:\n a: `Tensor`.\n axes: List or `int32` `Tensor` of unique indices specifying valid axes of\n `a`.\n flipped: An optional `bool`. Defaults to `False`. 
If `True`, the method\n assumes that `a` is the second argument in the contraction operation.\n\n Returns:\n A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is\n the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is\n either a list of integers or an `int32` `Tensor`, depending on whether\n the shape of a is fully specified, and free_dims_static is either a list\n of integers and None values, or None, representing the inferred\n static shape of the free dimensions\n \"\"\"\n if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):\n shape_a = a.get_shape().as_list()\n axes = [i if i >= 0 else i + len(shape_a) for i in axes]\n free = [i for i in xrange(len(shape_a)) if i not in axes]\n free_dims = [shape_a[i] for i in free]\n prod_free = int(np.prod([shape_a[i] for i in free]))\n prod_axes = int(np.prod([shape_a[i] for i in axes]))\n perm = list(axes) + free if flipped else free + list(axes)\n new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]\n reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)\n return reshaped_a, free_dims, free_dims\n else:\n if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):\n shape_a = a.get_shape().as_list()\n axes = [i if i >= 0 else i + len(shape_a) for i in axes]\n free = [i for i in xrange(len(shape_a)) if i not in axes]\n free_dims_static = [shape_a[i] for i in free]\n else:\n free_dims_static = None\n shape_a = array_ops.shape(a)\n rank_a = array_ops.rank(a)\n axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name=\"axes\")\n axes = cast(axes >= 0, dtypes.int32) * axes + cast(\n axes < 0, dtypes.int32) * (\n axes + rank_a)\n free, _ = array_ops.setdiff1d(range(rank_a), axes)\n free_dims = array_ops.gather(shape_a, free)\n axes_dims = array_ops.gather(shape_a, axes)\n prod_free_dims = reduce_prod(free_dims)\n prod_axes_dims = reduce_prod(axes_dims)\n perm = array_ops.concat([axes_dims, free_dims], 0)\n if 
flipped:\n perm = array_ops.concat([axes, free], 0)\n new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])\n else:\n perm = array_ops.concat([free, axes], 0)\n new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])\n reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)\n return reshaped_a, free_dims, free_dims_static\n\n def _tensordot_axes(a, axes):\n \"\"\"Generates two sets of contraction axes for the two tensor arguments.\"\"\"\n a_shape = a.get_shape()\n if isinstance(axes, compat.integral_types):\n if axes < 1:\n raise ValueError(\"'axes' must be at least 1.\")\n if a_shape.ndims is not None:\n return range(a_shape.ndims - axes, a_shape.ndims), range(axes)\n else:\n rank = array_ops.rank(a)\n return (range(rank - axes, rank, dtype=dtypes.int32),\n range(axes, dtype=dtypes.int32))\n elif isinstance(axes, (list, tuple)):\n if len(axes) != 2:\n raise ValueError(\"'axes' must be an integer or have length 2.\")\n a_axes = axes[0]\n b_axes = axes[1]\n if isinstance(a_axes, compat.integral_types) and \\\n isinstance(b_axes, compat.integral_types):\n a_axes = [a_axes]\n b_axes = [b_axes]\n if len(a_axes) != len(b_axes):\n raise ValueError(\n \"Different number of contraction axes 'a' and 'b', %s != %s.\" %\n (len(a_axes), len(b_axes)))\n return a_axes, b_axes\n else:\n axes = ops.convert_to_tensor(axes, name=\"axes\", dtype=dtypes.int32)\n return axes[0], axes[1]\n\n with ops.name_scope(name, \"Tensordot\", [a, b, axes]) as name:\n a = ops.convert_to_tensor(a, name=\"a\")\n b = ops.convert_to_tensor(b, name=\"b\")\n a_axes, b_axes = _tensordot_axes(a, axes)\n a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)\n b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(\n b, b_axes, True)\n ab_matmul = matmul(a_reshape, b_reshape)\n if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):\n return array_ops.reshape(ab_matmul, a_free_dims + b_free_dims, name=name)\n else:\n a_free_dims = 
ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)\n b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)\n product = array_ops.reshape(\n ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)\n if a_free_dims_static is not None and b_free_dims_static is not None:\n product.set_shape(a_free_dims_static + b_free_dims_static)\n return product\n\n\n# FFT ops were moved to tf.spectral. tf.fft symbols were part of the TensorFlow\n# 1.0 API so we leave these here for backwards compatibility.\nfft = gen_spectral_ops.fft\nifft = gen_spectral_ops.ifft\nfft2d = gen_spectral_ops.fft2d\nifft2d = gen_spectral_ops.ifft2d\nfft3d = gen_spectral_ops.fft3d\nifft3d = gen_spectral_ops.ifft3d\n"
] | [
[
"scipy.asarray"
],
[
"tensorflow.python.eager.execute.args_to_matching_eager",
"tensorflow.core.framework.op_def_pb2.OpList",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.eager.execute.record_gradient",
"tensorflow.python.framework.op_def_library.OpDefLibrary",
"tensorflow.python.eager.execute.make_shape",
"tensorflow.python.eager.context.context",
"tensorflow.python.framework.op_def_registry.register_op_list",
"tensorflow.python.eager.execute.execute"
],
[
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"matplotlib.pyplot.plot",
"numpy.testing.decorators.skipif",
"numpy.arange",
"numpy.diag_indices_from",
"matplotlib.pyplot.close",
"matplotlib.colors.colorConverter.to_rgba",
"numpy.repeat",
"pandas.notnull",
"scipy.stats.pearsonr",
"numpy.triu_indices_from",
"matplotlib.colors.colorConverter.to_rgb",
"numpy.array",
"numpy.meshgrid",
"numpy.random.RandomState",
"numpy.testing.assert_warns",
"numpy.tril_indices_from",
"numpy.testing.assert_array_equal",
"numpy.empty"
],
[
"numpy.dot",
"numpy.exp2",
"numpy.sqrt",
"numpy.asarray",
"numpy.max",
"numpy.zeros_like",
"numpy.exp",
"numpy.where",
"numpy.copy",
"numpy.outer",
"numpy.zeros",
"numpy.log",
"numpy.ndim",
"scipy.special.gammaln",
"scipy.special.polygamma",
"numpy.array",
"numpy.sum",
"scipy.special.psi",
"numpy.abs"
],
[
"numpy.reshape",
"numpy.append",
"numpy.array",
"numpy.zeros"
],
[
"numpy.array_equal"
],
[
"scipy.random.normal",
"scipy.ones"
],
[
"tensorflow.python.ops.gen_math_ops.real",
"tensorflow.python.ops.gen_math_ops._add_n",
"tensorflow.python.ops.gen_math_ops.sign",
"tensorflow.python.ops.gen_math_ops._batch_mat_mul",
"tensorflow.python.ops.gen_math_ops.sparse_segment_mean",
"tensorflow.python.framework.graph_util.tensor_shape_from_node_def_name",
"tensorflow.python.ops.array_ops.fill",
"tensorflow.python.ops.array_ops.matrix_diag_part",
"tensorflow.python.ops.gen_math_ops.not_equal",
"tensorflow.python.ops.gen_math_ops._mat_mul",
"tensorflow.python.util.deprecation.deprecated_args",
"tensorflow.python.ops.gen_math_ops.sparse_segment_mean_with_num_segments",
"tensorflow.python.ops.gen_math_ops._complex_abs",
"tensorflow.python.ops.gen_math_ops.minimum",
"tensorflow.python.ops.gen_math_ops._floor_div",
"tensorflow.python.ops.gen_math_ops._range",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.ops.gen_math_ops._sigmoid",
"tensorflow.python.framework.ops.convert_n_to_tensor_or_indexed_slices",
"tensorflow.python.ops.gen_math_ops.unsorted_segment_sum",
"tensorflow.python.ops.gen_sparse_ops.sparse_dense_cwise_div",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.ops.gen_math_ops.maximum",
"tensorflow.python.ops.gen_math_ops._mul.__doc__.replace",
"tensorflow.python.ops.gen_math_ops.arg_max.__doc__.replace",
"tensorflow.python.ops.gen_math_ops._conj",
"tensorflow.python.ops.gen_nn_ops.softplus",
"tensorflow.python.ops.gen_math_ops.arg_min",
"tensorflow.python.ops.gen_math_ops.bincount",
"tensorflow.python.ops.gen_math_ops.cumsum",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.ops.gen_math_ops.square",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.ops.gen_math_ops.erf",
"tensorflow.python.framework.ops.Tensor._override_operator",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.tensor_shape.unknown_shape",
"tensorflow.python.ops.gen_math_ops._pow",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.gen_math_ops.round",
"tensorflow.python.ops.gen_control_flow_ops._merge",
"tensorflow.python.ops.array_ops.shape_internal",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.gen_math_ops.sparse_segment_sum_with_num_segments",
"tensorflow.python.ops.gen_math_ops._abs",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.gen_sparse_ops.sparse_dense_cwise_mul",
"tensorflow.python.eager.context.in_eager_mode",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.gen_math_ops._complex",
"tensorflow.python.ops.state_ops.assign",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.gen_math_ops._tanh",
"tensorflow.python.ops.gen_math_ops._sub.__doc__.replace",
"tensorflow.python.ops.gen_math_ops._sub",
"tensorflow.python.ops.gen_state_ops._destroy_temporary_variable",
"tensorflow.python.framework.ops.IndexedSlices",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.gen_math_ops._bucketize",
"tensorflow.python.ops.gen_math_ops.sparse_segment_sqrt_n",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.gen_math_ops.angle",
"tensorflow.python.ops.gen_math_ops.sparse_segment_sqrt_n_with_num_segments",
"tensorflow.python.ops.gen_math_ops.cast",
"tensorflow.python.framework.ops.RegisterStatistics",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.gen_math_ops.is_finite",
"tensorflow.python.util.deprecation.deprecated_argument_lookup",
"tensorflow.python.ops.gen_math_ops.logical_or",
"tensorflow.python.ops.gen_math_ops.arg_min.__doc__.replace",
"tensorflow.python.ops.gen_math_ops._real_div",
"tensorflow.python.ops.gen_math_ops.imag",
"tensorflow.python.ops.gen_math_ops.exp",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.framework.ops.OpStats",
"numpy.arange",
"tensorflow.python.ops.gen_math_ops.arg_max",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.ops.gen_math_ops.logical_and",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.ops.gen_math_ops._mul",
"tensorflow.python.ops.gen_math_ops.cumprod",
"tensorflow.python.ops.gen_math_ops.sparse_segment_sum",
"tensorflow.python.framework.tensor_shape.vector",
"numpy.prod",
"tensorflow.python.ops.gen_math_ops.sqrt",
"tensorflow.python.framework.tensor_shape.as_shape",
"tensorflow.python.ops.gen_math_ops._neg",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.5",
"1.7",
"1.4"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
joshz123/tensorflow | [
"7841ca029060ab78e221e757d4b1ee6e3e0ffaa4",
"7841ca029060ab78e221e757d4b1ee6e3e0ffaa4",
"7841ca029060ab78e221e757d4b1ee6e3e0ffaa4",
"04f2870814d2773e09dcfa00cbe76a66a2c4de88",
"7841ca029060ab78e221e757d4b1ee6e3e0ffaa4",
"04f2870814d2773e09dcfa00cbe76a66a2c4de88",
"7841ca029060ab78e221e757d4b1ee6e3e0ffaa4",
"7841ca029060ab78e221e757d4b1ee6e3e0ffaa4",
"7841ca029060ab78e221e757d4b1ee6e3e0ffaa4",
"7841ca029060ab78e221e757d4b1ee6e3e0ffaa4",
"7841ca029060ab78e221e757d4b1ee6e3e0ffaa4",
"04f2870814d2773e09dcfa00cbe76a66a2c4de88",
"7841ca029060ab78e221e757d4b1ee6e3e0ffaa4",
"7841ca029060ab78e221e757d4b1ee6e3e0ffaa4",
"7841ca029060ab78e221e757d4b1ee6e3e0ffaa4",
"04f2870814d2773e09dcfa00cbe76a66a2c4de88",
"7841ca029060ab78e221e757d4b1ee6e3e0ffaa4",
"7841ca029060ab78e221e757d4b1ee6e3e0ffaa4",
"04f2870814d2773e09dcfa00cbe76a66a2c4de88",
"7841ca029060ab78e221e757d4b1ee6e3e0ffaa4",
"7841ca029060ab78e221e757d4b1ee6e3e0ffaa4"
] | [
"tensorflow/python/keras/layers/serialization.py",
"tensorflow/python/ops/linalg/linalg_impl.py",
"tensorflow/lite/testing/toco_convert.py",
"tensorflow/python/kernel_tests/gather_nd_op_test.py",
"tensorflow/python/distribute/cluster_resolver/tpu_cluster_resolver.py",
"tensorflow/python/autograph/core/converter_test.py",
"tensorflow/lite/testing/op_tests/resize_bilinear.py",
"tensorflow/python/debug/lib/source_utils.py",
"tensorflow/python/training/sync_replicas_optimizer_test.py",
"tensorflow/python/keras/engine/base_preprocessing_layer_test.py",
"tensorflow/python/data/kernel_tests/map_test.py",
"tensorflow/python/data/experimental/kernel_tests/prefetch_with_slack_test.py",
"tensorflow/lite/python/optimize/calibrator_test.py",
"tensorflow/python/distribute/combinations.py",
"tensorflow/python/data/experimental/kernel_tests/cardinality_test.py",
"tensorflow/python/eager/function_gradients_test.py",
"tensorflow/tools/common/public_api.py",
"tensorflow/python/keras/engine/training_generator_test.py",
"tensorflow/python/training/warm_starting_util_test.py",
"tensorflow/python/ops/signal/shape_ops.py",
"tensorflow/lite/testing/op_tests/concat.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Layer serialization/deserialization functions.\n\"\"\"\n# pylint: disable=wildcard-import\n# pylint: disable=unused-import\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport threading\n\nfrom tensorflow.python import tf2\nfrom tensorflow.python.keras.engine import base_layer\nfrom tensorflow.python.keras.engine import input_layer\nfrom tensorflow.python.keras.engine import input_spec\nfrom tensorflow.python.keras.layers import advanced_activations\nfrom tensorflow.python.keras.layers import convolutional\nfrom tensorflow.python.keras.layers import convolutional_recurrent\nfrom tensorflow.python.keras.layers import core\nfrom tensorflow.python.keras.layers import cudnn_recurrent\nfrom tensorflow.python.keras.layers import dense_attention\nfrom tensorflow.python.keras.layers import embeddings\nfrom tensorflow.python.keras.layers import local\nfrom tensorflow.python.keras.layers import merge\nfrom tensorflow.python.keras.layers import noise\nfrom tensorflow.python.keras.layers import normalization\nfrom tensorflow.python.keras.layers import normalization_v2\nfrom tensorflow.python.keras.layers import pooling\nfrom tensorflow.python.keras.layers import recurrent\nfrom tensorflow.python.keras.layers 
import recurrent_v2\nfrom tensorflow.python.keras.layers import rnn_cell_wrapper_v2\nfrom tensorflow.python.keras.layers import wrappers\nfrom tensorflow.python.keras.layers.preprocessing import image_preprocessing\nfrom tensorflow.python.keras.layers.preprocessing import normalization as preprocessing_normalization\nfrom tensorflow.python.keras.layers.preprocessing import normalization_v1 as preprocessing_normalization_v1\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.util import tf_inspect as inspect\nfrom tensorflow.python.util.tf_export import keras_export\n\n\nALL_MODULES = (\n base_layer,\n input_layer,\n advanced_activations,\n convolutional,\n convolutional_recurrent,\n core,\n cudnn_recurrent,\n dense_attention,\n embeddings,\n local,\n merge,\n noise,\n normalization,\n pooling,\n image_preprocessing,\n preprocessing_normalization_v1,\n recurrent,\n wrappers\n)\nALL_V2_MODULES = (\n rnn_cell_wrapper_v2,\n normalization_v2,\n recurrent_v2,\n preprocessing_normalization\n)\nFEATURE_COLUMN_V1_OBJECTS = {}\nFEATURE_COLUMN_V2_OBJECTS = {}\n# ALL_OBJECTS is meant to be a global mutable. 
Hence we need to make it\n# thread-local to avoid concurrent mutations.\nLOCAL = threading.local()\n\n\ndef inject_feature_column_v1_objects(name, cls):\n global FEATURE_COLUMN_V1_OBJECTS\n FEATURE_COLUMN_V1_OBJECTS[name] = cls\n\n\ndef inject_feature_column_v2_objects(name, cls):\n global FEATURE_COLUMN_V2_OBJECTS\n FEATURE_COLUMN_V2_OBJECTS[name] = cls\n\n\ndef populate_deserializable_objects():\n \"\"\"Populates dict ALL_OBJECTS with every built-in layer.\n \"\"\"\n global LOCAL\n if not hasattr(LOCAL, 'ALL_OBJECTS'):\n LOCAL.ALL_OBJECTS = {}\n LOCAL.GENERATED_WITH_V2 = None\n\n if LOCAL.ALL_OBJECTS and LOCAL.GENERATED_WITH_V2 == tf2.enabled():\n # Objects dict is already generated for the proper TF version:\n # do nothing.\n return\n\n LOCAL.ALL_OBJECTS = {}\n LOCAL.GENERATED_WITH_V2 = tf2.enabled()\n\n base_cls = base_layer.Layer\n generic_utils.populate_dict_with_module_objects(\n LOCAL.ALL_OBJECTS,\n ALL_MODULES,\n obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls))\n\n # Overwrite certain V1 objects with V2 versions\n if tf2.enabled():\n generic_utils.populate_dict_with_module_objects(\n LOCAL.ALL_OBJECTS,\n ALL_V2_MODULES,\n obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls))\n\n # These deserialization aliases are added for backward compatibility,\n # as in TF 1.13, \"BatchNormalizationV1\" and \"BatchNormalizationV2\"\n # were used as class name for v1 and v2 version of BatchNormalization,\n # respectively. 
Here we explicitly convert them to their canonical names.\n LOCAL.ALL_OBJECTS['BatchNormalizationV1'] = normalization.BatchNormalization\n LOCAL.ALL_OBJECTS[\n 'BatchNormalizationV2'] = normalization_v2.BatchNormalization\n\n # Prevent circular dependencies.\n from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top\n from tensorflow.python.keras.premade.linear import LinearModel # pylint: disable=g-import-not-at-top\n from tensorflow.python.keras.premade.wide_deep import WideDeepModel # pylint: disable=g-import-not-at-top\n\n LOCAL.ALL_OBJECTS['Input'] = input_layer.Input\n LOCAL.ALL_OBJECTS['InputSpec'] = input_spec.InputSpec\n LOCAL.ALL_OBJECTS['Network'] = models.Network\n LOCAL.ALL_OBJECTS['Model'] = models.Model\n LOCAL.ALL_OBJECTS['Sequential'] = models.Sequential\n LOCAL.ALL_OBJECTS['LinearModel'] = LinearModel\n LOCAL.ALL_OBJECTS['WideDeepModel'] = WideDeepModel\n\n if tf2.enabled():\n LOCAL.ALL_OBJECTS.update(FEATURE_COLUMN_V2_OBJECTS)\n else:\n LOCAL.ALL_OBJECTS.update(FEATURE_COLUMN_V1_OBJECTS)\n\n # Merge layers, function versions.\n LOCAL.ALL_OBJECTS['add'] = merge.add\n LOCAL.ALL_OBJECTS['subtract'] = merge.subtract\n LOCAL.ALL_OBJECTS['multiply'] = merge.multiply\n LOCAL.ALL_OBJECTS['average'] = merge.average\n LOCAL.ALL_OBJECTS['maximum'] = merge.maximum\n LOCAL.ALL_OBJECTS['minimum'] = merge.minimum\n LOCAL.ALL_OBJECTS['concatenate'] = merge.concatenate\n LOCAL.ALL_OBJECTS['dot'] = merge.dot\n\n\n@keras_export('keras.layers.serialize')\ndef serialize(layer):\n return generic_utils.serialize_keras_object(layer)\n\n\n@keras_export('keras.layers.deserialize')\ndef deserialize(config, custom_objects=None):\n \"\"\"Instantiates a layer from a config dictionary.\n\n Arguments:\n config: dict of the form {'class_name': str, 'config': dict}\n custom_objects: dict mapping class names (or function names)\n of custom (non-Keras) objects to class/functions\n\n Returns:\n Layer instance (may be Model, Sequential, Network, Layer...)\n 
\"\"\"\n populate_deserializable_objects()\n return generic_utils.deserialize_keras_object(\n config,\n module_objects=LOCAL.ALL_OBJECTS,\n custom_objects=custom_objects,\n printable_module_name='layer')\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Operations for linear algebra.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_linalg_ops\nfrom tensorflow.python.ops import linalg_ops\nfrom tensorflow.python.ops import map_fn\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import special_math_ops\nfrom tensorflow.python.util import dispatch\nfrom tensorflow.python.util.tf_export import tf_export\n\n# Linear algebra ops.\nband_part = array_ops.matrix_band_part\ncholesky = linalg_ops.cholesky\ncholesky_solve = linalg_ops.cholesky_solve\ndet = linalg_ops.matrix_determinant\nslogdet = gen_linalg_ops.log_matrix_determinant\ntf_export('linalg.slogdet')(slogdet)\ndiag = array_ops.matrix_diag\ndiag_part = array_ops.matrix_diag_part\neigh = linalg_ops.self_adjoint_eig\neigvalsh = linalg_ops.self_adjoint_eigvals\neinsum = 
special_math_ops.einsum\neye = linalg_ops.eye\ninv = linalg_ops.matrix_inverse\nlogm = gen_linalg_ops.matrix_logarithm\nlu = gen_linalg_ops.lu\ntf_export('linalg.logm')(logm)\nlstsq = linalg_ops.matrix_solve_ls\nnorm = linalg_ops.norm\nqr = linalg_ops.qr\nset_diag = array_ops.matrix_set_diag\nsolve = linalg_ops.matrix_solve\nsqrtm = linalg_ops.matrix_square_root\nsvd = linalg_ops.svd\ntensordot = math_ops.tensordot\ntrace = math_ops.trace\ntranspose = array_ops.matrix_transpose\ntriangular_solve = linalg_ops.matrix_triangular_solve\n\n\n@tf_export('linalg.logdet')\[email protected]_dispatch_support\ndef logdet(matrix, name=None):\n \"\"\"Computes log of the determinant of a hermitian positive definite matrix.\n\n ```python\n # Compute the determinant of a matrix while reducing the chance of over- or\n underflow:\n A = ... # shape 10 x 10\n det = tf.exp(tf.linalg.logdet(A)) # scalar\n ```\n\n Args:\n matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,\n or `complex128` with shape `[..., M, M]`.\n name: A name to give this `Op`. 
Defaults to `logdet`.\n\n Returns:\n The natural log of the determinant of `matrix`.\n\n @compatibility(numpy)\n Equivalent to numpy.linalg.slogdet, although no sign is returned since only\n hermitian positive definite matrices are supported.\n @end_compatibility\n \"\"\"\n # This uses the property that the log det(A) = 2*sum(log(real(diag(C))))\n # where C is the cholesky decomposition of A.\n with ops.name_scope(name, 'logdet', [matrix]):\n chol = gen_linalg_ops.cholesky(matrix)\n return 2.0 * math_ops.reduce_sum(\n math_ops.log(math_ops.real(array_ops.matrix_diag_part(chol))),\n axis=[-1])\n\n\n@tf_export('linalg.adjoint')\[email protected]_dispatch_support\ndef adjoint(matrix, name=None):\n \"\"\"Transposes the last two dimensions of and conjugates tensor `matrix`.\n\n For example:\n\n ```python\n x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],\n [4 + 4j, 5 + 5j, 6 + 6j]])\n tf.linalg.adjoint(x) # [[1 - 1j, 4 - 4j],\n # [2 - 2j, 5 - 5j],\n # [3 - 3j, 6 - 6j]]\n ```\n\n Args:\n matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,\n or `complex128` with shape `[..., M, M]`.\n name: A name to give this `Op` (optional).\n\n Returns:\n The adjoint (a.k.a. Hermitian transpose a.k.a. 
conjugate transpose) of\n matrix.\n \"\"\"\n with ops.name_scope(name, 'adjoint', [matrix]):\n matrix = ops.convert_to_tensor(matrix, name='matrix')\n return array_ops.matrix_transpose(matrix, conjugate=True)\n\n\n# This section is ported nearly verbatim from Eigen's implementation:\n# https://eigen.tuxfamily.org/dox/unsupported/MatrixExponential_8h_source.html\ndef _matrix_exp_pade3(matrix):\n \"\"\"3rd-order Pade approximant for matrix exponential.\"\"\"\n b = [120.0, 60.0, 12.0]\n b = [constant_op.constant(x, matrix.dtype) for x in b]\n ident = linalg_ops.eye(\n array_ops.shape(matrix)[-2],\n batch_shape=array_ops.shape(matrix)[:-2],\n dtype=matrix.dtype)\n matrix_2 = math_ops.matmul(matrix, matrix)\n tmp = matrix_2 + b[1] * ident\n matrix_u = math_ops.matmul(matrix, tmp)\n matrix_v = b[2] * matrix_2 + b[0] * ident\n return matrix_u, matrix_v\n\n\ndef _matrix_exp_pade5(matrix):\n \"\"\"5th-order Pade approximant for matrix exponential.\"\"\"\n b = [30240.0, 15120.0, 3360.0, 420.0, 30.0]\n b = [constant_op.constant(x, matrix.dtype) for x in b]\n ident = linalg_ops.eye(\n array_ops.shape(matrix)[-2],\n batch_shape=array_ops.shape(matrix)[:-2],\n dtype=matrix.dtype)\n matrix_2 = math_ops.matmul(matrix, matrix)\n matrix_4 = math_ops.matmul(matrix_2, matrix_2)\n tmp = matrix_4 + b[3] * matrix_2 + b[1] * ident\n matrix_u = math_ops.matmul(matrix, tmp)\n matrix_v = b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident\n return matrix_u, matrix_v\n\n\ndef _matrix_exp_pade7(matrix):\n \"\"\"7th-order Pade approximant for matrix exponential.\"\"\"\n b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0, 56.0]\n b = [constant_op.constant(x, matrix.dtype) for x in b]\n ident = linalg_ops.eye(\n array_ops.shape(matrix)[-2],\n batch_shape=array_ops.shape(matrix)[:-2],\n dtype=matrix.dtype)\n matrix_2 = math_ops.matmul(matrix, matrix)\n matrix_4 = math_ops.matmul(matrix_2, matrix_2)\n matrix_6 = math_ops.matmul(matrix_4, matrix_2)\n tmp = matrix_6 + b[5] * matrix_4 + 
b[3] * matrix_2 + b[1] * ident\n matrix_u = math_ops.matmul(matrix, tmp)\n matrix_v = b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident\n return matrix_u, matrix_v\n\n\ndef _matrix_exp_pade9(matrix):\n \"\"\"9th-order Pade approximant for matrix exponential.\"\"\"\n b = [\n 17643225600.0, 8821612800.0, 2075673600.0, 302702400.0, 30270240.0,\n 2162160.0, 110880.0, 3960.0, 90.0\n ]\n b = [constant_op.constant(x, matrix.dtype) for x in b]\n ident = linalg_ops.eye(\n array_ops.shape(matrix)[-2],\n batch_shape=array_ops.shape(matrix)[:-2],\n dtype=matrix.dtype)\n matrix_2 = math_ops.matmul(matrix, matrix)\n matrix_4 = math_ops.matmul(matrix_2, matrix_2)\n matrix_6 = math_ops.matmul(matrix_4, matrix_2)\n matrix_8 = math_ops.matmul(matrix_6, matrix_2)\n tmp = (\n matrix_8 + b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 +\n b[1] * ident)\n matrix_u = math_ops.matmul(matrix, tmp)\n matrix_v = (\n b[8] * matrix_8 + b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 +\n b[0] * ident)\n return matrix_u, matrix_v\n\n\ndef _matrix_exp_pade13(matrix):\n \"\"\"13th-order Pade approximant for matrix exponential.\"\"\"\n b = [\n 64764752532480000.0, 32382376266240000.0, 7771770303897600.0,\n 1187353796428800.0, 129060195264000.0, 10559470521600.0, 670442572800.0,\n 33522128640.0, 1323241920.0, 40840800.0, 960960.0, 16380.0, 182.0\n ]\n b = [constant_op.constant(x, matrix.dtype) for x in b]\n ident = linalg_ops.eye(\n array_ops.shape(matrix)[-2],\n batch_shape=array_ops.shape(matrix)[:-2],\n dtype=matrix.dtype)\n matrix_2 = math_ops.matmul(matrix, matrix)\n matrix_4 = math_ops.matmul(matrix_2, matrix_2)\n matrix_6 = math_ops.matmul(matrix_4, matrix_2)\n tmp_u = (\n math_ops.matmul(matrix_6, matrix_6 + b[11] * matrix_4 + b[9] * matrix_2) +\n b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident)\n matrix_u = math_ops.matmul(matrix, tmp_u)\n tmp_v = b[12] * matrix_6 + b[10] * matrix_4 + b[8] * matrix_2\n matrix_v = (\n math_ops.matmul(matrix_6, 
tmp_v) + b[6] * matrix_6 + b[4] * matrix_4 +\n b[2] * matrix_2 + b[0] * ident)\n return matrix_u, matrix_v\n\n\n@tf_export('linalg.expm')\ndef matrix_exponential(input, name=None): # pylint: disable=redefined-builtin\n r\"\"\"Computes the matrix exponential of one or more square matrices.\n\n exp(A) = \\sum_{n=0}^\\infty A^n/n!\n\n The exponential is computed using a combination of the scaling and squaring\n method and the Pade approximation. Details can be found in:\n Nicholas J. Higham, \"The scaling and squaring method for the matrix\n exponential revisited,\" SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005.\n\n The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\n form square matrices. The output is a tensor of the same shape as the input\n containing the exponential for all input submatrices `[..., :, :]`.\n\n Args:\n input: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, or\n `complex128` with shape `[..., M, M]`.\n name: A name to give this `Op` (optional).\n\n Returns:\n the matrix exponential of the input.\n\n Raises:\n ValueError: An unsupported type is provided as input.\n\n @compatibility(scipy)\n Equivalent to scipy.linalg.expm\n @end_compatibility\n \"\"\"\n with ops.name_scope(name, 'matrix_exponential', [input]):\n matrix = ops.convert_to_tensor(input, name='input')\n if matrix.shape[-2:] == [0, 0]:\n return matrix\n batch_shape = matrix.shape[:-2]\n if not batch_shape.is_fully_defined():\n batch_shape = array_ops.shape(matrix)[:-2]\n\n # reshaping the batch makes the where statements work better\n matrix = array_ops.reshape(\n matrix, array_ops.concat(([-1], array_ops.shape(matrix)[-2:]), axis=0))\n l1_norm = math_ops.reduce_max(\n math_ops.reduce_sum(\n math_ops.abs(matrix),\n axis=array_ops.size(array_ops.shape(matrix)) - 2),\n axis=-1)[..., array_ops.newaxis, array_ops.newaxis]\n const = lambda x: constant_op.constant(x, l1_norm.dtype)\n\n def _nest_where(vals, cases):\n assert len(vals) == len(cases) - 
1\n if len(vals) == 1:\n return array_ops.where_v2(\n math_ops.less(l1_norm, const(vals[0])), cases[0], cases[1])\n else:\n return array_ops.where_v2(\n math_ops.less(l1_norm, const(vals[0])), cases[0],\n _nest_where(vals[1:], cases[1:]))\n\n if matrix.dtype in [dtypes.float16, dtypes.float32, dtypes.complex64]:\n maxnorm = const(3.925724783138660)\n squarings = math_ops.maximum(\n math_ops.floor(\n math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)\n u3, v3 = _matrix_exp_pade3(matrix)\n u5, v5 = _matrix_exp_pade5(matrix)\n u7, v7 = _matrix_exp_pade7(\n matrix /\n math_ops.cast(math_ops.pow(const(2.0), squarings), matrix.dtype))\n conds = (4.258730016922831e-001, 1.880152677804762e+000)\n u = _nest_where(conds, (u3, u5, u7))\n v = _nest_where(conds, (v3, v5, v7))\n elif matrix.dtype in [dtypes.float64, dtypes.complex128]:\n maxnorm = const(5.371920351148152)\n squarings = math_ops.maximum(\n math_ops.floor(\n math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)\n u3, v3 = _matrix_exp_pade3(matrix)\n u5, v5 = _matrix_exp_pade5(matrix)\n u7, v7 = _matrix_exp_pade7(matrix)\n u9, v9 = _matrix_exp_pade9(matrix)\n u13, v13 = _matrix_exp_pade13(\n matrix /\n math_ops.cast(math_ops.pow(const(2.0), squarings), matrix.dtype))\n conds = (1.495585217958292e-002, 2.539398330063230e-001,\n 9.504178996162932e-001, 2.097847961257068e+000)\n u = _nest_where(conds, (u3, u5, u7, u9, u13))\n v = _nest_where(conds, (v3, v5, v7, v9, v13))\n else:\n raise ValueError('tf.linalg.expm does not support matrices of type %s' %\n matrix.dtype)\n numer = u + v\n denom = -u + v\n result = linalg_ops.matrix_solve(denom, numer)\n max_squarings = math_ops.reduce_max(squarings)\n\n i = const(0.0)\n c = lambda i, r: math_ops.less(i, max_squarings)\n\n def b(i, r):\n return i + 1, array_ops.where_v2(\n math_ops.less(i, squarings), math_ops.matmul(r, r), r)\n\n _, result = control_flow_ops.while_loop(c, b, [i, result])\n if not matrix.shape.is_fully_defined():\n return 
array_ops.reshape(\n result,\n array_ops.concat((batch_shape, array_ops.shape(result)[-2:]), axis=0))\n return array_ops.reshape(result, batch_shape.concatenate(result.shape[-2:]))\n\n\n@tf_export('linalg.tridiagonal_solve')\ndef tridiagonal_solve(diagonals,\n rhs,\n diagonals_format='compact',\n transpose_rhs=False,\n conjugate_rhs=False,\n name=None,\n partial_pivoting=True):\n r\"\"\"Solves tridiagonal systems of equations.\n\n The input can be supplied in various formats: `matrix`, `sequence` and\n `compact`, specified by the `diagonals_format` arg.\n\n In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with\n two inner-most dimensions representing the square tridiagonal matrices.\n Elements outside of the three diagonals will be ignored.\n\n In `sequence` format, `diagonals` are supplied as a tuple or list of three\n tensors of shapes `[..., N]`, `[..., M]`, `[..., N]` representing\n superdiagonals, diagonals, and subdiagonals, respectively. `N` can be either\n `M-1` or `M`; in the latter case, the last element of superdiagonal and the\n first element of subdiagonal will be ignored.\n\n In `compact` format the three diagonals are brought together into one tensor\n of shape `[..., 3, M]`, with last two dimensions containing superdiagonals,\n diagonals, and subdiagonals, in order. Similarly to `sequence` format,\n elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.\n\n The `compact` format is recommended as the one with best performance. 
In case\n you need to cast a tensor into a compact format manually, use `tf.gather_nd`.\n An example for a tensor of shape [m, m]:\n\n ```python\n rhs = tf.constant([...])\n matrix = tf.constant([[...]])\n m = matrix.shape[0]\n dummy_idx = [0, 0] # An arbitrary element to use as a dummy\n indices = [[[i, i + 1] for i in range(m - 1)] + [dummy_idx], # Superdiagonal\n [[i, i] for i in range(m)], # Diagonal\n [dummy_idx] + [[i + 1, i] for i in range(m - 1)]] # Subdiagonal\n diagonals=tf.gather_nd(matrix, indices)\n x = tf.linalg.tridiagonal_solve(diagonals, rhs)\n ```\n\n Regardless of the `diagonals_format`, `rhs` is a tensor of shape `[..., M]` or\n `[..., M, K]`. The latter allows to simultaneously solve K systems with the\n same left-hand sides and K different right-hand sides. If `transpose_rhs`\n is set to `True` the expected shape is `[..., M]` or `[..., K, M]`.\n\n The batch dimensions, denoted as `...`, must be the same in `diagonals` and\n `rhs`.\n\n The output is a tensor of the same shape as `rhs`: either `[..., M]` or\n `[..., M, K]`.\n\n The op isn't guaranteed to raise an error if the input matrix is not\n invertible. `tf.debugging.check_numerics` can be applied to the output to\n detect invertibility problems.\n\n **Note**: with large batch sizes, the computation on the GPU may be slow, if\n either `partial_pivoting=True` or there are multiple right-hand sides\n (`K > 1`). If this issue arises, consider if it's possible to disable pivoting\n and have `K = 1`, or, alternatively, consider using CPU.\n\n On CPU, solution is computed via Gaussian elimination with or without partial\n pivoting, depending on `partial_pivoting` parameter. On GPU, Nvidia's cuSPARSE\n library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv\n\n Args:\n diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The\n shape depends of `diagonals_format`, see description above. 
Must be\n `float32`, `float64`, `complex64`, or `complex128`.\n rhs: A `Tensor` of shape [..., M] or [..., M, K] and with the same dtype as\n `diagonals`. Note that if the shape of `rhs` and/or `diags` isn't known\n statically, `rhs` will be treated as a matrix rather than a vector.\n diagonals_format: one of `matrix`, `sequence`, or `compact`. Default is\n `compact`.\n transpose_rhs: If `True`, `rhs` is transposed before solving (has no effect\n if the shape of rhs is [..., M]).\n conjugate_rhs: If `True`, `rhs` is conjugated before solving.\n name: A name to give this `Op` (optional).\n partial_pivoting: whether to perform partial pivoting. `True` by default.\n Partial pivoting makes the procedure more stable, but slower. Partial\n pivoting is unnecessary in some cases, including diagonally dominant and\n symmetric positive definite matrices (see e.g. theorem 9.12 in [1]).\n\n Returns:\n A `Tensor` of shape [..., M] or [..., M, K] containing the solutions.\n\n Raises:\n ValueError: An unsupported type is provided as input, or when the input\n tensors have incorrect shapes.\n UnimplementedError: Whenever `partial_pivoting` is true and the backend is\n XLA.\n\n [1] Nicholas J. Higham (2002). Accuracy and Stability of Numerical Algorithms:\n Second Edition. SIAM. p. 175. 
ISBN 978-0-89871-802-7.\n\n \"\"\"\n if diagonals_format == 'compact':\n return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,\n conjugate_rhs, partial_pivoting,\n name)\n\n if diagonals_format == 'sequence':\n if not isinstance(diagonals, (tuple, list)) or len(diagonals) != 3:\n raise ValueError('Expected diagonals to be a sequence of length 3.')\n\n superdiag, maindiag, subdiag = diagonals\n if (not subdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1]) or\n not superdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1])):\n raise ValueError(\n 'Tensors representing the three diagonals must have the same shape,'\n 'except for the last dimension, got {}, {}, {}'.format(\n subdiag.shape, maindiag.shape, superdiag.shape))\n\n m = tensor_shape.dimension_value(maindiag.shape[-1])\n\n def pad_if_necessary(t, name, last_dim_padding):\n n = tensor_shape.dimension_value(t.shape[-1])\n if not n or n == m:\n return t\n if n == m - 1:\n paddings = ([[0, 0] for _ in range(len(t.shape) - 1)] +\n [last_dim_padding])\n return array_ops.pad(t, paddings)\n raise ValueError('Expected {} to be have length {} or {}, got {}.'.format(\n name, m, m - 1, n))\n\n subdiag = pad_if_necessary(subdiag, 'subdiagonal', [1, 0])\n superdiag = pad_if_necessary(superdiag, 'superdiagonal', [0, 1])\n\n diagonals = array_ops.stack((superdiag, maindiag, subdiag), axis=-2)\n return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,\n conjugate_rhs, partial_pivoting,\n name)\n\n if diagonals_format == 'matrix':\n m1 = tensor_shape.dimension_value(diagonals.shape[-1])\n m2 = tensor_shape.dimension_value(diagonals.shape[-2])\n if m1 and m2 and m1 != m2:\n raise ValueError(\n 'Expected last two dimensions of diagonals to be same, got {} and {}'\n .format(m1, m2))\n m = m1 or m2\n diagonals = array_ops.matrix_diag_part(\n diagonals, k=(-1, 1), padding_value=0., align='LEFT_RIGHT')\n return _tridiagonal_solve_compact_format(\n diagonals, rhs, transpose_rhs, conjugate_rhs, 
partial_pivoting, name)\n\n raise ValueError('Unrecognized diagonals_format: {}'.format(diagonals_format))\n\n\ndef _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,\n conjugate_rhs, partial_pivoting, name):\n \"\"\"Helper function used after the input has been cast to compact form.\"\"\"\n diags_rank, rhs_rank = diagonals.shape.rank, rhs.shape.rank\n\n # If we know the rank of the diagonal tensor, do some static checking.\n if diags_rank:\n if diags_rank < 2:\n raise ValueError(\n 'Expected diagonals to have rank at least 2, got {}'.format(\n diags_rank))\n if rhs_rank and rhs_rank != diags_rank and rhs_rank != diags_rank - 1:\n raise ValueError('Expected the rank of rhs to be {} or {}, got {}'.format(\n diags_rank - 1, diags_rank, rhs_rank))\n if (rhs_rank and not diagonals.shape[:-2].is_compatible_with(\n rhs.shape[:diags_rank - 2])):\n raise ValueError('Batch shapes {} and {} are incompatible'.format(\n diagonals.shape[:-2], rhs.shape[:diags_rank - 2]))\n\n if diagonals.shape[-2] and diagonals.shape[-2] != 3:\n raise ValueError('Expected 3 diagonals got {}'.format(diagonals.shape[-2]))\n\n def check_num_lhs_matches_num_rhs():\n if (diagonals.shape[-1] and rhs.shape[-2] and\n diagonals.shape[-1] != rhs.shape[-2]):\n raise ValueError('Expected number of left-hand sided and right-hand '\n 'sides to be equal, got {} and {}'.format(\n diagonals.shape[-1], rhs.shape[-2]))\n\n if rhs_rank and diags_rank and rhs_rank == diags_rank - 1:\n # Rhs provided as a vector, ignoring transpose_rhs\n if conjugate_rhs:\n rhs = math_ops.conj(rhs)\n rhs = array_ops.expand_dims(rhs, -1)\n check_num_lhs_matches_num_rhs()\n return array_ops.squeeze(\n linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting, name),\n -1)\n\n if transpose_rhs:\n rhs = array_ops.matrix_transpose(rhs, conjugate=conjugate_rhs)\n elif conjugate_rhs:\n rhs = math_ops.conj(rhs)\n\n check_num_lhs_matches_num_rhs()\n return linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting, 
name)\n\n\n@tf_export('linalg.tridiagonal_matmul')\ndef tridiagonal_matmul(diagonals, rhs, diagonals_format='compact', name=None):\n r\"\"\"Multiplies tridiagonal matrix by matrix.\n\n `diagonals` is representation of 3-diagonal NxN matrix, which depends on\n `diagonals_format`.\n\n In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with\n two inner-most dimensions representing the square tridiagonal matrices.\n Elements outside of the three diagonals will be ignored.\n\n If `sequence` format, `diagonals` is list or tuple of three tensors:\n `[superdiag, maindiag, subdiag]`, each having shape [..., M]. Last element\n of `superdiag` first element of `subdiag` are ignored.\n\n In `compact` format the three diagonals are brought together into one tensor\n of shape `[..., 3, M]`, with last two dimensions containing superdiagonals,\n diagonals, and subdiagonals, in order. Similarly to `sequence` format,\n elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.\n\n The `sequence` format is recommended as the one with the best performance.\n\n `rhs` is matrix to the right of multiplication. It has shape `[..., M, N]`.\n\n Example:\n\n ```python\n superdiag = tf.constant([-1, -1, 0], dtype=tf.float64)\n maindiag = tf.constant([2, 2, 2], dtype=tf.float64)\n subdiag = tf.constant([0, -1, -1], dtype=tf.float64)\n diagonals = [superdiag, maindiag, subdiag]\n rhs = tf.constant([[1, 1], [1, 1], [1, 1]], dtype=tf.float64)\n x = tf.linalg.tridiagonal_matmul(diagonals, rhs, diagonals_format='sequence')\n ```\n\n Args:\n diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The\n shape depends of `diagonals_format`, see description above. Must be\n `float32`, `float64`, `complex64`, or `complex128`.\n rhs: A `Tensor` of shape [..., M, N] and with the same dtype as `diagonals`.\n diagonals_format: one of `sequence`, or `compact`. 
Default is `compact`.\n name: A name to give this `Op` (optional).\n\n Returns:\n A `Tensor` of shape [..., M, N] containing the result of multiplication.\n\n Raises:\n ValueError: An unsupported type is provided as input, or when the input\n tensors have incorrect shapes.\n \"\"\"\n if diagonals_format == 'compact':\n superdiag = diagonals[..., 0, :]\n maindiag = diagonals[..., 1, :]\n subdiag = diagonals[..., 2, :]\n elif diagonals_format == 'sequence':\n superdiag, maindiag, subdiag = diagonals\n elif diagonals_format == 'matrix':\n m1 = tensor_shape.dimension_value(diagonals.shape[-1])\n m2 = tensor_shape.dimension_value(diagonals.shape[-2])\n if m1 and m2 and m1 != m2:\n raise ValueError(\n 'Expected last two dimensions of diagonals to be same, got {} and {}'\n .format(m1, m2))\n diags = array_ops.matrix_diag_part(\n diagonals, k=(-1, 1), padding_value=0., align='LEFT_RIGHT')\n superdiag = diags[..., 0, :]\n maindiag = diags[..., 1, :]\n subdiag = diags[..., 2, :]\n else:\n raise ValueError('Unrecognized diagonals_format: %s' % diagonals_format)\n\n # C++ backend requires matrices.\n # Converting 1-dimensional vectors to matrices with 1 row.\n superdiag = array_ops.expand_dims(superdiag, -2)\n maindiag = array_ops.expand_dims(maindiag, -2)\n subdiag = array_ops.expand_dims(subdiag, -2)\n\n return linalg_ops.tridiagonal_mat_mul(superdiag, maindiag, subdiag, rhs, name)\n\n\ndef _maybe_validate_matrix(a, validate_args):\n \"\"\"Checks that input is a `float` matrix.\"\"\"\n assertions = []\n if not a.dtype.is_floating:\n raise TypeError('Input `a` must have `float`-like `dtype` '\n '(saw {}).'.format(a.dtype.name))\n if a.shape is not None and a.shape.rank is not None:\n if a.shape.rank < 2:\n raise ValueError('Input `a` must have at least 2 dimensions '\n '(saw: {}).'.format(a.shape.rank))\n elif validate_args:\n assertions.append(\n check_ops.assert_rank_at_least(\n a, rank=2, message='Input `a` must have at least 2 dimensions.'))\n return 
assertions\n\n\n@tf_export('linalg.matrix_rank')\ndef matrix_rank(a, tol=None, validate_args=False, name=None):\n \"\"\"Compute the matrix rank of one or more matrices.\n\n Arguments:\n a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be\n pseudo-inverted.\n tol: Threshold below which the singular value is counted as 'zero'.\n Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`).\n validate_args: When `True`, additional assertions might be embedded in the\n graph.\n Default value: `False` (i.e., no graph assertions are added).\n name: Python `str` prefixed to ops created by this function.\n Default value: 'matrix_rank'.\n\n Returns:\n matrix_rank: (Batch of) `int32` scalars representing the number of non-zero\n singular values.\n \"\"\"\n with ops.name_scope(name or 'matrix_rank'):\n a = ops.convert_to_tensor(a, dtype_hint=dtypes.float32, name='a')\n assertions = _maybe_validate_matrix(a, validate_args)\n if assertions:\n with ops.control_dependencies(assertions):\n a = array_ops.identity(a)\n s = svd(a, compute_uv=False)\n if tol is None:\n if (a.shape[-2:]).is_fully_defined():\n m = np.max(a.shape[-2:].as_list())\n else:\n m = math_ops.reduce_max(array_ops.shape(a)[-2:])\n eps = np.finfo(a.dtype.as_numpy_dtype).eps\n tol = (\n eps * math_ops.cast(m, a.dtype) *\n math_ops.reduce_max(s, axis=-1, keepdims=True))\n return math_ops.reduce_sum(math_ops.cast(s > tol, dtypes.int32), axis=-1)\n\n\n@tf_export('linalg.pinv')\ndef pinv(a, rcond=None, validate_args=False, name=None):\n \"\"\"Compute the Moore-Penrose pseudo-inverse of one or more matrices.\n\n Calculate the [generalized inverse of a matrix](\n https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse) using its\n singular-value decomposition (SVD) and including all large singular values.\n\n The pseudo-inverse of a matrix `A`, is defined as: 'the matrix that 'solves'\n [the least-squares problem] `A @ x = b`,' i.e., if `x_hat` is a solution, then\n `A_pinv` is the 
matrix such that `x_hat = A_pinv @ b`. It can be shown that if\n `U @ Sigma @ V.T = A` is the singular value decomposition of `A`, then\n `A_pinv = V @ inv(Sigma) U^T`. [(Strang, 1980)][1]\n\n This function is analogous to [`numpy.linalg.pinv`](\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.pinv.html).\n It differs only in default value of `rcond`. In `numpy.linalg.pinv`, the\n default `rcond` is `1e-15`. Here the default is\n `10. * max(num_rows, num_cols) * np.finfo(dtype).eps`.\n\n Args:\n a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be\n pseudo-inverted.\n rcond: `Tensor` of small singular value cutoffs. Singular values smaller\n (in modulus) than `rcond` * largest_singular_value (again, in modulus) are\n set to zero. Must broadcast against `tf.shape(a)[:-2]`.\n Default value: `10. * max(num_rows, num_cols) * np.finfo(a.dtype).eps`.\n validate_args: When `True`, additional assertions might be embedded in the\n graph.\n Default value: `False` (i.e., no graph assertions are added).\n name: Python `str` prefixed to ops created by this function.\n Default value: 'pinv'.\n\n Returns:\n a_pinv: (Batch of) pseudo-inverse of input `a`. Has same shape as `a` except\n rightmost two dimensions are transposed.\n\n Raises:\n TypeError: if input `a` does not have `float`-like `dtype`.\n ValueError: if input `a` has fewer than 2 dimensions.\n\n #### Examples\n\n ```python\n import tensorflow as tf\n import tensorflow_probability as tfp\n\n a = tf.constant([[1., 0.4, 0.5],\n [0.4, 0.2, 0.25],\n [0.5, 0.25, 0.35]])\n tf.matmul(tf.linalg..pinv(a), a)\n # ==> array([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]], dtype=float32)\n\n a = tf.constant([[1., 0.4, 0.5, 1.],\n [0.4, 0.2, 0.25, 2.],\n [0.5, 0.25, 0.35, 3.]])\n tf.matmul(tf.linalg..pinv(a), a)\n # ==> array([[ 0.76, 0.37, 0.21, -0.02],\n [ 0.37, 0.43, -0.33, 0.02],\n [ 0.21, -0.33, 0.81, 0.01],\n [-0.02, 0.02, 0.01, 1. ]], dtype=float32)\n ```\n\n #### References\n\n [1]: G. 
Strang. 'Linear Algebra and Its Applications, 2nd Ed.' Academic Press,\n Inc., 1980, pp. 139-142.\n \"\"\"\n with ops.name_scope(name or 'pinv'):\n a = ops.convert_to_tensor(a, name='a')\n\n assertions = _maybe_validate_matrix(a, validate_args)\n if assertions:\n with ops.control_dependencies(assertions):\n a = array_ops.identity(a)\n\n dtype = a.dtype.as_numpy_dtype\n\n if rcond is None:\n\n def get_dim_size(dim):\n dim_val = tensor_shape.dimension_value(a.shape[dim])\n if dim_val is not None:\n return dim_val\n return array_ops.shape(a)[dim]\n\n num_rows = get_dim_size(-2)\n num_cols = get_dim_size(-1)\n if isinstance(num_rows, int) and isinstance(num_cols, int):\n max_rows_cols = float(max(num_rows, num_cols))\n else:\n max_rows_cols = math_ops.cast(\n math_ops.maximum(num_rows, num_cols), dtype)\n rcond = 10. * max_rows_cols * np.finfo(dtype).eps\n\n rcond = ops.convert_to_tensor(rcond, dtype=dtype, name='rcond')\n\n # Calculate pseudo inverse via SVD.\n # Note: if a is Hermitian then u == v. (We might observe additional\n # performance by explicitly setting `v = u` in such cases.)\n [\n singular_values, # Sigma\n left_singular_vectors, # U\n right_singular_vectors, # V\n ] = svd(\n a, full_matrices=False, compute_uv=True)\n\n # Saturate small singular values to inf. This has the effect of make\n # `1. 
/ s = 0.` while not resulting in `NaN` gradients.\n cutoff = rcond * math_ops.reduce_max(singular_values, axis=-1)\n singular_values = array_ops.where_v2(\n singular_values > array_ops.expand_dims_v2(cutoff, -1), singular_values,\n np.array(np.inf, dtype))\n\n # By the definition of the SVD, `a == u @ s @ v^H`, and the pseudo-inverse\n # is defined as `pinv(a) == v @ inv(s) @ u^H`.\n a_pinv = math_ops.matmul(\n right_singular_vectors / array_ops.expand_dims_v2(singular_values, -2),\n left_singular_vectors,\n adjoint_b=True)\n\n if a.shape is not None and a.shape.rank is not None:\n a_pinv.set_shape(a.shape[:-2].concatenate([a.shape[-1], a.shape[-2]]))\n\n return a_pinv\n\n\n@tf_export('linalg.lu_solve')\ndef lu_solve(lower_upper, perm, rhs, validate_args=False, name=None):\n \"\"\"Solves systems of linear eqns `A X = RHS`, given LU factorizations.\n\n Note: this function does not verify the implied matrix is actually invertible\n nor is this condition checked even when `validate_args=True`.\n\n Args:\n lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,\n matmul(L, U)) = X` then `lower_upper = L + U - eye`.\n perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) =\n X` then `perm = argmax(P)`.\n rhs: Matrix-shaped float `Tensor` representing targets for which to solve;\n `A X = RHS`. To handle vector cases, use: `lu_solve(..., rhs[...,\n tf.newaxis])[..., 0]`.\n validate_args: Python `bool` indicating whether arguments should be checked\n for correctness. 
Note: this function does not verify the implied matrix is\n actually invertible, even when `validate_args=True`.\n Default value: `False` (i.e., don't validate arguments).\n name: Python `str` name given to ops managed by this object.\n Default value: `None` (i.e., 'lu_solve').\n\n Returns:\n x: The `X` in `A @ X = RHS`.\n\n #### Examples\n\n ```python\n import numpy as np\n import tensorflow as tf\n import tensorflow_probability as tfp\n\n x = [[[1., 2],\n [3, 4]],\n [[7, 8],\n [3, 4]]]\n inv_x = tf.linalg.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2))\n tf.assert_near(tf.matrix_inverse(x), inv_x)\n # ==> True\n ```\n\n \"\"\"\n\n with ops.name_scope(name or 'lu_solve'):\n lower_upper = ops.convert_to_tensor(\n lower_upper, dtype_hint=dtypes.float32, name='lower_upper')\n perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')\n rhs = ops.convert_to_tensor(rhs, dtype_hint=lower_upper.dtype, name='rhs')\n\n assertions = _lu_solve_assertions(lower_upper, perm, rhs, validate_args)\n if assertions:\n with ops.control_dependencies(assertions):\n lower_upper = array_ops.identity(lower_upper)\n perm = array_ops.identity(perm)\n rhs = array_ops.identity(rhs)\n\n if (rhs.shape.rank == 2 and perm.shape.rank == 1):\n # Both rhs and perm have scalar batch_shape.\n permuted_rhs = array_ops.gather(rhs, perm, axis=-2)\n else:\n # Either rhs or perm have non-scalar batch_shape or we can't determine\n # this information statically.\n rhs_shape = array_ops.shape(rhs)\n broadcast_batch_shape = array_ops.broadcast_dynamic_shape(\n rhs_shape[:-2],\n array_ops.shape(perm)[:-1])\n d, m = rhs_shape[-2], rhs_shape[-1]\n rhs_broadcast_shape = array_ops.concat([broadcast_batch_shape, [d, m]],\n axis=0)\n\n # Tile out rhs.\n broadcast_rhs = array_ops.broadcast_to(rhs, rhs_broadcast_shape)\n broadcast_rhs = array_ops.reshape(broadcast_rhs, [-1, d, m])\n\n # Tile out perm and add batch indices.\n broadcast_perm = array_ops.broadcast_to(perm, rhs_broadcast_shape[:-1])\n 
broadcast_perm = array_ops.reshape(broadcast_perm, [-1, d])\n broadcast_batch_size = math_ops.reduce_prod(broadcast_batch_shape)\n broadcast_batch_indices = array_ops.broadcast_to(\n math_ops.range(broadcast_batch_size)[:, array_ops.newaxis],\n [broadcast_batch_size, d])\n broadcast_perm = array_ops.stack(\n [broadcast_batch_indices, broadcast_perm], axis=-1)\n\n permuted_rhs = array_ops.gather_nd(broadcast_rhs, broadcast_perm)\n permuted_rhs = array_ops.reshape(permuted_rhs, rhs_broadcast_shape)\n\n lower = set_diag(\n band_part(lower_upper, num_lower=-1, num_upper=0),\n array_ops.ones(\n array_ops.shape(lower_upper)[:-1], dtype=lower_upper.dtype))\n return triangular_solve(\n lower_upper, # Only upper is accessed.\n triangular_solve(lower, permuted_rhs),\n lower=False)\n\n\n@tf_export('linalg.lu_matrix_inverse')\ndef lu_matrix_inverse(lower_upper, perm, validate_args=False, name=None):\n \"\"\"Computes the inverse given the LU decomposition(s) of one or more matrices.\n\n This op is conceptually identical to,\n\n ```python\n inv_X = tf.lu_matrix_inverse(*tf.linalg.lu(X))\n tf.assert_near(tf.matrix_inverse(X), inv_X)\n # ==> True\n ```\n\n Note: this function does not verify the implied matrix is actually invertible\n nor is this condition checked even when `validate_args=True`.\n\n Args:\n lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,\n matmul(L, U)) = X` then `lower_upper = L + U - eye`.\n perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) =\n X` then `perm = argmax(P)`.\n validate_args: Python `bool` indicating whether arguments should be checked\n for correctness. 
Note: this function does not verify the implied matrix is\n actually invertible, even when `validate_args=True`.\n Default value: `False` (i.e., don't validate arguments).\n name: Python `str` name given to ops managed by this object.\n Default value: `None` (i.e., 'lu_matrix_inverse').\n\n Returns:\n inv_x: The matrix_inv, i.e.,\n `tf.matrix_inverse(tf.linalg.lu_reconstruct(lu, perm))`.\n\n #### Examples\n\n ```python\n import numpy as np\n import tensorflow as tf\n import tensorflow_probability as tfp\n\n x = [[[3., 4], [1, 2]],\n [[7., 8], [3, 4]]]\n inv_x = tf.linalg.lu_matrix_inverse(*tf.linalg.lu(x))\n tf.assert_near(tf.matrix_inverse(x), inv_x)\n # ==> True\n ```\n\n \"\"\"\n\n with ops.name_scope(name or 'lu_matrix_inverse'):\n lower_upper = ops.convert_to_tensor(\n lower_upper, dtype_hint=dtypes.float32, name='lower_upper')\n perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')\n assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)\n if assertions:\n with ops.control_dependencies(assertions):\n lower_upper = array_ops.identity(lower_upper)\n perm = array_ops.identity(perm)\n shape = array_ops.shape(lower_upper)\n return lu_solve(\n lower_upper,\n perm,\n rhs=eye(shape[-1], batch_shape=shape[:-2], dtype=lower_upper.dtype),\n validate_args=False)\n\n\n@tf_export('linalg.lu_reconstruct')\ndef lu_reconstruct(lower_upper, perm, validate_args=False, name=None):\n \"\"\"The reconstruct one or more matrices from their LU decomposition(s).\n\n Args:\n lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,\n matmul(L, U)) = X` then `lower_upper = L + U - eye`.\n perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) =\n X` then `perm = argmax(P)`.\n validate_args: Python `bool` indicating whether arguments should be checked\n for correctness.\n Default value: `False` (i.e., don't validate arguments).\n name: Python `str` name given to ops managed by this object.\n Default value: `None` (i.e., 
'lu_reconstruct').\n\n Returns:\n x: The original input to `tf.linalg.lu`, i.e., `x` as in,\n `lu_reconstruct(*tf.linalg.lu(x))`.\n\n #### Examples\n\n ```python\n import numpy as np\n import tensorflow as tf\n import tensorflow_probability as tfp\n\n x = [[[3., 4], [1, 2]],\n [[7., 8], [3, 4]]]\n x_reconstructed = tf.linalg.lu_reconstruct(*tf.linalg.lu(x))\n tf.assert_near(x, x_reconstructed)\n # ==> True\n ```\n\n \"\"\"\n with ops.name_scope(name or 'lu_reconstruct'):\n lower_upper = ops.convert_to_tensor(\n lower_upper, dtype_hint=dtypes.float32, name='lower_upper')\n perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')\n\n assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)\n if assertions:\n with ops.control_dependencies(assertions):\n lower_upper = array_ops.identity(lower_upper)\n perm = array_ops.identity(perm)\n\n shape = array_ops.shape(lower_upper)\n\n lower = set_diag(\n band_part(lower_upper, num_lower=-1, num_upper=0),\n array_ops.ones(shape[:-1], dtype=lower_upper.dtype))\n upper = band_part(lower_upper, num_lower=0, num_upper=-1)\n x = math_ops.matmul(lower, upper)\n\n if (lower_upper.shape is None or lower_upper.shape.rank is None or\n lower_upper.shape.rank != 2):\n # We either don't know the batch rank or there are >0 batch dims.\n batch_size = math_ops.reduce_prod(shape[:-2])\n d = shape[-1]\n x = array_ops.reshape(x, [batch_size, d, d])\n perm = array_ops.reshape(perm, [batch_size, d])\n perm = map_fn.map_fn(array_ops.invert_permutation, perm)\n batch_indices = array_ops.broadcast_to(\n math_ops.range(batch_size)[:, array_ops.newaxis], [batch_size, d])\n x = array_ops.gather_nd(x, array_ops.stack([batch_indices, perm],\n axis=-1))\n x = array_ops.reshape(x, shape)\n else:\n x = array_ops.gather(x, array_ops.invert_permutation(perm))\n\n x.set_shape(lower_upper.shape)\n return x\n\n\ndef lu_reconstruct_assertions(lower_upper, perm, validate_args):\n \"\"\"Returns list of assertions related to 
`lu_reconstruct` assumptions.\"\"\"\n assertions = []\n\n message = 'Input `lower_upper` must have at least 2 dimensions.'\n if lower_upper.shape.rank is not None and lower_upper.shape.rank < 2:\n raise ValueError(message)\n elif validate_args:\n assertions.append(\n check_ops.assert_rank_at_least_v2(lower_upper, rank=2, message=message))\n\n message = '`rank(lower_upper)` must equal `rank(perm) + 1`'\n if lower_upper.shape.rank is not None and perm.shape.rank is not None:\n if lower_upper.shape.rank != perm.shape.rank + 1:\n raise ValueError(message)\n elif validate_args:\n assertions.append(\n check_ops.assert_rank(\n lower_upper, rank=array_ops.rank(perm) + 1, message=message))\n\n message = '`lower_upper` must be square.'\n if lower_upper.shape[:-2].is_fully_defined():\n if lower_upper.shape[-2] != lower_upper.shape[-1]:\n raise ValueError(message)\n elif validate_args:\n m, n = array_ops.split(\n array_ops.shape(lower_upper)[-2:], num_or_size_splits=2)\n assertions.append(check_ops.assert_equal(m, n, message=message))\n\n return assertions\n\n\ndef _lu_solve_assertions(lower_upper, perm, rhs, validate_args):\n \"\"\"Returns list of assertions related to `lu_solve` assumptions.\"\"\"\n assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)\n\n message = 'Input `rhs` must have at least 2 dimensions.'\n if rhs.shape.ndims is not None:\n if rhs.shape.ndims < 2:\n raise ValueError(message)\n elif validate_args:\n assertions.append(\n check_ops.assert_rank_at_least(rhs, rank=2, message=message))\n\n message = '`lower_upper.shape[-1]` must equal `rhs.shape[-1]`.'\n if (lower_upper.shape[-1] is not None and rhs.shape[-2] is not None):\n if lower_upper.shape[-1] != rhs.shape[-2]:\n raise ValueError(message)\n elif validate_args:\n assertions.append(\n check_ops.assert_equal(\n array_ops.shape(lower_upper)[-1],\n array_ops.shape(rhs)[-2],\n message=message))\n\n return assertions\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Creates TOCO options to process a model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\nimport traceback\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom tensorflow.lite.testing import zip_test_utils\n\n\ndef toco_options(data_types,\n input_arrays,\n output_arrays,\n shapes,\n extra_toco_options=None):\n \"\"\"Create TOCO options to process a model.\n\n Args:\n data_types: input and inference types used by TOCO.\n input_arrays: names of the input tensors\n output_arrays: name of the output tensors\n shapes: shapes of the input tensors\n extra_toco_options: additional toco options\n\n Returns:\n the options in a string.\n \"\"\"\n if extra_toco_options is None:\n extra_toco_options = zip_test_utils.ExtraTocoOptions()\n\n shape_str = \":\".join([\",\".join(str(y) for y in x) for x in shapes if x])\n inference_type = \"FLOAT\"\n # TODO(ahentz): if we get multi-input quantization to work we need this\n # to change\n if data_types[0] == \"QUANTIZED_UINT8\":\n inference_type = \"QUANTIZED_UINT8\"\n s = (\" --input_data_types=%s\" % \",\".join(data_types) +\n \" --inference_type=%s\" % inference_type +\n \" --input_format=TENSORFLOW_GRAPHDEF\" + \" 
--output_format=TFLITE\" +\n \" --input_arrays=%s\" % \",\".join(input_arrays) +\n \" --output_arrays=%s\" % \",\".join(output_arrays))\n if shape_str:\n s += (\" --input_shapes=%s\" % shape_str)\n if extra_toco_options.drop_control_dependency:\n s += \" --drop_control_dependency\"\n if extra_toco_options.allow_custom_ops:\n s += \" --allow_custom_ops\"\n if extra_toco_options.rnn_states:\n s += (\" --rnn_states='\" + extra_toco_options.rnn_states + \"'\")\n if extra_toco_options.split_tflite_lstm_inputs is not None:\n if extra_toco_options.split_tflite_lstm_inputs:\n s += \" --split_tflite_lstm_inputs=true\"\n else:\n s += \" --split_tflite_lstm_inputs=false\"\n return s\n\n\ndef toco_convert(options, graph_def, input_tensors, output_tensors, **kwargs):\n \"\"\"Convert a model's graph def into a tflite model.\n\n NOTE: this currently shells out to the toco binary, but we would like\n convert to Python API tooling in the future.\n\n Args:\n options: An Options instance.\n graph_def: A GraphDef object.\n input_tensors: List of input tensor tuples `(name, shape, type)`.\n output_tensors: List of output tensors (names).\n **kwargs: Extra options to be passed.\n\n Returns:\n output tflite model, log_txt from conversion\n or None, log_txt if it did not convert properly.\n \"\"\"\n # Convert ophint ops if presented.\n graph_def = tf.compat.v1.lite.experimental.convert_op_hints_to_stubs(\n graph_def=graph_def)\n graph_def_str = graph_def.SerializeToString()\n\n extra_toco_options = kwargs.get(\"extra_toco_options\",\n zip_test_utils.ExtraTocoOptions())\n test_params = kwargs.get(\"test_params\", {})\n input_arrays = [x[0] for x in input_tensors]\n data_types = [zip_test_utils.TF_TYPE_INFO[x[2]][1] for x in input_tensors]\n\n if test_params.get(\"fully_quantize\", False):\n # Read the input range for the representative dataset from parameters.\n min_value, max_value = test_params.get(\"input_range\", (-1, 1))\n\n with tempfile.NamedTemporaryFile() as graphdef_file:\n 
graphdef_file.write(graph_def_str)\n graphdef_file.flush()\n\n input_shapes = zip_test_utils.get_input_shapes_map(input_tensors)\n converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(\n graphdef_file.name, input_arrays, output_tensors, input_shapes)\n\n # TODO(b/145313371): Evaluate should we make it work with the new\n # converter.\n # Note: Currently this line is a non-functional change because the new\n # converter is disabled by default. Since this code path doesn't work\n # with new converter yet, it's explicitly disabled for easier testing.\n converter.experimental_new_converter = False\n\n def representative_dataset(input_tensors):\n calibration_inputs = []\n for _, shape, _ in input_tensors:\n if shape:\n dims = [dim.value for dim in shape.dims]\n calibration_inputs.append(\n np.random.uniform(min_value, max_value,\n tuple(dims)).astype(np.float32))\n return calibration_inputs\n\n def representative_dataset_gen():\n for _ in range(100):\n yield representative_dataset(input_tensors)\n\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS_INT8\n ]\n converter.representative_dataset = representative_dataset_gen\n if extra_toco_options.inference_input_type:\n converter.inference_input_type = (\n extra_toco_options.inference_input_type)\n if extra_toco_options.inference_output_type:\n converter.inference_output_type = (\n extra_toco_options.inference_output_type)\n else:\n converter.inference_output_type = tf.int8\n\n try:\n tflite_model = converter.convert()\n return tflite_model, \"\"\n except Exception as e:\n log = \"{0}\\n{1}\".format(str(e), traceback.format_exc())\n return None, log\n\n else:\n opts = toco_options(\n data_types=data_types,\n input_arrays=input_arrays,\n shapes=[x[1] for x in input_tensors],\n output_arrays=output_tensors,\n extra_toco_options=extra_toco_options)\n\n with tempfile.NamedTemporaryFile() as graphdef_file, \\\n tempfile.NamedTemporaryFile() as output_file, \\\n 
tempfile.NamedTemporaryFile(\"w+\") as stdout_file:\n graphdef_file.write(graph_def_str)\n graphdef_file.flush()\n\n # TODO(aselle): Switch this to subprocess at some point.\n if options.run_with_flex:\n opts += \" --enable_select_tf_ops --force_select_tf_ops\"\n cmd = (\"%s --input_file=%s --output_file=%s %s > %s 2>&1\" %\n (options.toco, graphdef_file.name, output_file.name, opts,\n stdout_file.name))\n exit_code = os.system(cmd)\n log = (\n cmd + \"exited with code %d\" % exit_code + \"\\n------------------\\n\" +\n stdout_file.read())\n return (None if exit_code != 0 else output_file.read()), log\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.ops.tf.gather_nd.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nimport numpy as np\n\nfrom tensorflow.python.client import session\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\nclass GatherNdTest(test.TestCase):\n\n def _testSimpleDtype(self, dtype):\n with self.cached_session(use_gpu=True):\n params = constant_op.constant(np.array([8, 1, 2, 3, 7, 5], dtype=dtype))\n indices = constant_op.constant([[4], [4], [0]])\n gather_nd_t = array_ops.gather_nd(params, indices)\n gather_nd_val = self.evaluate(gather_nd_t)\n\n self.assertAllEqual(np.array([7, 7, 8], dtype=dtype), gather_nd_val)\n self.assertEqual([3], gather_nd_t.get_shape())\n\n def testSimpleDtype(self):\n 
self._testSimpleDtype(np.float32)\n self._testSimpleDtype(np.float64)\n self._testSimpleDtype(np.int32)\n self._testSimpleDtype(np.int64)\n self._testSimpleDtype(np.complex64)\n self._testSimpleDtype(np.complex128)\n self._testSimpleDtype(\"|S\") # byte strings in python2 + 3\n\n @test_util.run_deprecated_v1\n @test_util.disable_xla(\"b/123337890\") # Error messages differ\n def testEmptyIndicesAndParamsOKButJustEmptyParamsFails(self):\n with self.session(use_gpu=True):\n params = np.ones((3, 3), dtype=np.float32)\n\n indices_empty = np.empty((0, 2), dtype=np.int32)\n gather_nd_ok_t = array_ops.gather_nd(params, indices_empty)\n gather_nd_ok_val = self.evaluate(gather_nd_ok_t)\n self.assertEqual([0], gather_nd_ok_t.get_shape())\n self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)\n\n indices_empty = np.empty((0, 1), dtype=np.int32)\n gather_nd_ok_t = array_ops.gather_nd(params, indices_empty)\n gather_nd_ok_val = self.evaluate(gather_nd_ok_t)\n self.assertEqual([0, 3], gather_nd_ok_t.get_shape())\n self.assertAllClose(np.empty((0, 3), dtype=np.float32), gather_nd_ok_val)\n\n params_empty = np.empty((0, 3), dtype=np.float32)\n indices_empty = np.empty((0, 2), dtype=np.int32)\n gather_nd_ok_t = array_ops.gather_nd(params_empty, indices_empty)\n gather_nd_ok_val = self.evaluate(gather_nd_ok_t)\n self.assertEqual([0], gather_nd_ok_t.get_shape())\n self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)\n\n params_empty = np.empty((0, 3), dtype=np.float32)\n indices_nonempty = np.zeros((1, 2), dtype=np.int32)\n gather_nd_break_t = array_ops.gather_nd(params_empty, indices_nonempty)\n with self.assertRaisesOpError(\n r\"Requested more than 0 entries, but params is empty.\"):\n self.evaluate(gather_nd_break_t)\n self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)\n\n def testIndexScalar(self):\n with self.session(use_gpu=True):\n params = np.array(\n [[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], 
dtype=np.float32).T\n indices = constant_op.constant([4, 1])\n gather_nd_t = array_ops.gather_nd(params, indices)\n gather_nd_val = self.evaluate(gather_nd_t)\n self.assertEqual([], gather_nd_t.get_shape())\n self.assertAllEqual(np.array(7), gather_nd_val)\n\n def testParamsRankLargerThanIndexIndexScalarSlices(self):\n with self.session(use_gpu=True):\n params = np.array(\n [[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T\n indices = constant_op.constant([4])\n gather_nd_t = array_ops.gather_nd(params, indices)\n gather_nd_val = self.evaluate(gather_nd_t)\n self.assertEqual([2], gather_nd_t.get_shape())\n self.assertAllEqual(np.array([-7, 7]), gather_nd_val)\n\n def testParamsRankLargerThanIndexSlices(self):\n with self.session(use_gpu=True):\n params = np.array(\n [[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T\n indices = constant_op.constant([[4], [4], [0]])\n gather_nd_t = array_ops.gather_nd(params, indices)\n gather_nd_val = self.evaluate(gather_nd_t)\n\n self.assertEqual([3, 2], gather_nd_t.get_shape())\n self.assertAllEqual(np.array([[-7, 7], [-7, 7], [-8, 8]]), gather_nd_val)\n\n def testHigherRankParamsLargerThanIndexSlices(self):\n with self.session(use_gpu=True):\n params = np.array(\n [[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],\n [[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],\n dtype=np.float32).T\n params_t = constant_op.constant(params)\n indices = constant_op.constant([[4], [4], [0]])\n gather_nd_t = array_ops.gather_nd(params_t, indices)\n gather_nd_val = self.evaluate(gather_nd_t)\n\n self.assertEqual([3, 2, 2], gather_nd_t.get_shape())\n self.assertAllEqual(params[[4, 4, 0]], gather_nd_val)\n\n def testEmptyIndicesLastRankMeansCopyEntireTensor(self):\n with self.session(use_gpu=True):\n params = np.array(\n [[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],\n [[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],\n dtype=np.float32).T\n params_t = constant_op.constant(params)\n indices 
= constant_op.constant(\n [[], []], dtype=dtypes.int32) # Size (2, 0)\n gather_nd_t = array_ops.gather_nd(params_t, indices)\n gather_nd_val = self.evaluate(gather_nd_t)\n\n self.assertEqual([2, 6, 2, 2], gather_nd_t.get_shape())\n self.assertAllEqual(\n np.vstack((params[np.newaxis, :], params[np.newaxis, :])),\n gather_nd_val)\n\n def testHigherRankParamsAndIndicesLargerThanIndexSlices(self):\n with self.session(use_gpu=True):\n params = np.array(\n [[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],\n [[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],\n dtype=np.float32).T\n params_t = constant_op.constant(params)\n indices = constant_op.constant([[[3], [2], [1]], [[4], [4], [0]]])\n gather_nd_t = array_ops.gather_nd(params_t, indices)\n gather_nd_val = self.evaluate(gather_nd_t)\n\n self.assertEqual([2, 3, 2, 2], gather_nd_t.get_shape())\n self.assertAllEqual(params[[3, 2, 1, 4, 4, 0]].reshape(2, 3, 2, 2),\n gather_nd_val)\n\n def testHigherRankParams(self):\n with self.session(use_gpu=True):\n shape = (10, 20, 5, 1, 17)\n params = np.random.rand(*shape)\n indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T\n gather_nd_t = array_ops.gather_nd(params, indices)\n gather_nd_val = self.evaluate(gather_nd_t)\n\n expected = params[tuple(indices.T)]\n self.assertAllEqual(expected, gather_nd_val)\n self.assertEqual([2000], gather_nd_t.get_shape())\n\n def testHigherRankParamsAndIndices(self):\n with self.session(use_gpu=True):\n shape = (10, 20, 5, 1, 17)\n params = np.random.rand(*shape)\n indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T\n indices_reshaped = indices.reshape([10, 10, 20, 5])\n gather_nd_t = array_ops.gather_nd(params, indices_reshaped)\n gather_nd_val = self.evaluate(gather_nd_t)\n\n expected = params[tuple(indices.T)]\n self.assertAllEqual(expected.reshape([10, 10, 20]), gather_nd_val)\n self.assertEqual([10, 10, 20], gather_nd_t.get_shape())\n\n def assertIndexedSlices(self, t):\n 
self.assertIsInstance(t, ops.IndexedSlices)\n\n @test_util.run_deprecated_v1\n def testUnknownIndices(self):\n params = constant_op.constant([[0, 1, 2]])\n indices = array_ops.placeholder(dtypes.int32)\n gather_nd_t = array_ops.gather_nd(params, indices)\n shape = gather_nd_t.get_shape()\n self.assertEqual(None, shape.ndims)\n self.assertEqual(None, tensor_shape.dimension_value(shape[0]))\n\n @test_util.run_deprecated_v1\n @test_util.disable_xla(\"XLA does not have assertions in kernels.\")\n def testBadIndicesCPU(self):\n with self.session(use_gpu=False):\n params = [0, 1, 2]\n indices = [[[0], [7]]] # Make this one higher rank\n gather_nd = array_ops.gather_nd(params, indices)\n with self.assertRaisesOpError(\n r\"indices\\[0,1\\] = \\[7\\] does not index into param shape \\[3\\]\"):\n self.evaluate(gather_nd)\n\n def _disabledTestBadIndicesGPU(self):\n # TODO disabled due to different behavior on GPU and CPU\n # On GPU the bad indices do not raise error but fetch 0 values\n if not test.is_gpu_available():\n return\n with self.session(use_gpu=True):\n params = [0, 1, 2]\n indices = [[[0], [7]]] # Make this one higher rank\n gather_nd = array_ops.gather_nd(params, indices)\n with self.assertRaisesOpError(\n r\"indices\\[0,1\\] = \\[7\\] does not index into param shape \\[3\\]\"):\n self.evaluate(gather_nd)\n\n @test_util.run_deprecated_v1\n @test_util.disable_xla(\"XLA does not have assertions in kernels.\")\n def testBadIndicesWithSlicesCPU(self):\n with self.session(use_gpu=False):\n params = [[0, 1, 2]]\n indices = [[[0], [0], [1]]] # Make this one higher rank\n gather_nd = array_ops.gather_nd(params, indices)\n with self.assertRaisesOpError(\n r\"indices\\[0,2\\] = \\[1\\] does not index into param shape \\[1,3\\]\"):\n self.evaluate(gather_nd)\n\n def _disabledTestBadIndicesWithSlicesGPU(self):\n # TODO disabled due to different behavior on GPU and CPU\n # On GPU the bad indices do not raise error but fetch 0 values\n if not test.is_gpu_available():\n 
return\n with self.session(use_gpu=True):\n params = [[0, 1, 2]]\n indices = [[[0], [0], [1]]] # Make this one higher rank\n gather_nd = array_ops.gather_nd(params, indices)\n with self.assertRaisesOpError(\n r\"indices\\[0,2\\] = \\[1\\] does not index into param shape \\[1,3\\]\"):\n self.evaluate(gather_nd)\n\n @test_util.run_deprecated_v1\n def testGradientsRank2Elements(self):\n indices = constant_op.constant([[0, 0], [1, 1]], dtype=dtypes.int32)\n inputs = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)\n outputs = array_ops.gather_nd(inputs, indices)\n\n grad_vals = constant_op.constant([1, 2], dtype=dtypes.float64)\n grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]\n expected_grads = np.array([[1, 0], [0, 2]], dtype=np.float64)\n with self.session(use_gpu=True):\n assert np.array_equal(expected_grads, self.evaluate(grads))\n\n @test_util.run_deprecated_v1\n def testGradientsRank2Slices(self):\n indices = constant_op.constant([[1], [0]], dtype=dtypes.int32)\n inputs = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)\n outputs = array_ops.gather_nd(inputs, indices)\n\n grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)\n grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]\n expected_grads = np.array([[3, 4], [1, 2]], dtype=np.float64)\n with self.session(use_gpu=True):\n self.assertIndexedSlices(grads)\n self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads).eval())\n\n @test_util.run_deprecated_v1\n def testGradientsRank3Elements(self):\n indices = constant_op.constant(\n [[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=dtypes.int32)\n inputs = constant_op.constant(\n [[[1, 3], [5, 7]], [[2, 4], [6, 8]]], dtype=dtypes.float64)\n outputs = array_ops.gather_nd(inputs, indices)\n\n grad_vals = constant_op.constant(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtypes.float64)\n grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]\n expected_grads = 
np.array(\n [[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)\n with self.session(use_gpu=True):\n self.assertAllEqual(expected_grads, self.evaluate(grads))\n\n @test_util.run_deprecated_v1\n def testGradientsRank7Elements(self):\n # Shape [1,1,2,1,1,2,2]\n indices = constant_op.constant(\n [[[\n [[[[0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0]]]],\n [[[[0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1]]]]\n ]]],\n dtype=dtypes.int32)\n inputs = constant_op.constant(\n [[[\n [[[[1, 3], [5, 7]]]],\n [[[[2, 4], [6, 8]]]]\n ]]], dtype=dtypes.float64)\n outputs = array_ops.gather_nd(inputs, indices)\n\n grad_vals = constant_op.constant(\n [[[\n [[[[1, 2], [3, 4]]]],\n [[[[5, 6], [7, 8]]]]\n ]]], dtype=dtypes.float64)\n grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]\n expected_grads = np.array(\n [[[\n [[[[5, 6], [1, 2]]]],\n [[[[3, 4], [7, 8]]]]\n ]]], dtype=np.float64)\n with self.session(use_gpu=True):\n self.assertAllEqual(expected_grads, self.evaluate(grads))\n\n @test_util.run_deprecated_v1\n def testGradientsInt64Indices(self):\n indices = constant_op.constant(\n [[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=dtypes.int64)\n inputs = constant_op.constant(\n [[[1, 3], [5, 7]], [[2, 4], [6, 8]]], dtype=dtypes.float64)\n outputs = array_ops.gather_nd(inputs, indices)\n\n grad_vals = constant_op.constant(\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtypes.float64)\n grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]\n expected_grads = np.array(\n [[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)\n with self.session(use_gpu=True):\n self.assertAllEqual(expected_grads, self.evaluate(grads))\n\n @test_util.run_deprecated_v1\n def testGradientsRank2SlicesWithEmptySpace(self):\n indices = constant_op.constant([[2], [0], [5]], dtype=dtypes.int32)\n inputs = constant_op.constant(\n [[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9],\n [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9],\n [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 
4, 5, 6, 7, 8, 9]],\n dtype=dtypes.float64)\n outputs = array_ops.gather_nd(inputs, indices)\n grad_vals = constant_op.constant(\n [[1, 1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2, 2, 2, 2],\n [3, 3, 3, 3, 3, 3, 3, 3, 3]],\n dtype=dtypes.float64)\n grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]\n expected_grads = np.array(\n [[2, 2, 2, 2, 2, 2, 2, 2, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0], [3, 3, 3, 3, 3, 3, 3, 3, 3]],\n dtype=np.float64)\n with self.session(use_gpu=True):\n self.assertIndexedSlices(grads)\n self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads).eval())\n\n @test_util.run_v1_only(\"RefVariable is not supported in v2\")\n def testGatherNdRefVariable(self):\n with self.cached_session():\n v = variables.RefVariable(constant_op.constant([[1, 2], [3, 4], [5, 6]]))\n self.evaluate(variables.global_variables_initializer())\n gather = array_ops.gather_nd(v, [[0, 1], [2, 0]])\n if not context.executing_eagerly(): # .op doesn't make sense in Eager\n self.assertEqual(\"GatherNd\", gather.op.name)\n self.assertAllEqual([2, 5], gather)\n\n @test_util.run_in_graph_and_eager_modes\n def testGatherNdResourceVariable(self):\n with self.cached_session():\n v = resource_variable_ops.ResourceVariable(\n constant_op.constant([[1, 2], [3, 4], [5, 6]]))\n self.evaluate(variables.global_variables_initializer())\n gather = array_ops.gather_nd(v, [[0, 1], [2, 0]])\n if not context.executing_eagerly(): # .op doesn't make sense in Eager\n self.assertEqual(\"ResourceGatherNd\", gather.op.inputs[0].op.type)\n self.assertAllEqual([2, 5], gather)\n\n\nclass GatherNdOpBenchmark(test.Benchmark):\n\n def benchmark_gather_nd_op(self):\n shape = (100, 47, 18, 170, 13)\n np.random.seed(127)\n params = np.random.rand(*shape)\n indices = np.vstack([np.random.randint(0, s, size=10000) for s in shape]).T\n\n with session.Session():\n t_params = variables.Variable(params)\n 
t_indices = variables.Variable(indices)\n gather_op = array_ops.gather_nd(t_params, t_indices)\n variables.global_variables_initializer().run()\n for _ in range(10):\n self.evaluate(gather_op)\n t1 = time.time()\n for _ in range(1000):\n self.evaluate(gather_op)\n t2 = time.time()\n self.report_benchmark(iters=1000, wall_time=(t2 - t1) / 1000.0)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implementation of Cluster Resolvers for Cloud TPUs.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport re\n\nfrom tensorflow.python.distribute.cluster_resolver import cluster_resolver\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.tpu import tpu_system_metadata as tpu_system_metadata_lib\nfrom tensorflow.python.training import server_lib\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util.tf_export import tf_export\n\ntry:\n from cloud_tpu_client import client # pylint: disable=g-import-not-at-top\nexcept ImportError:\n logging.debug(\n 'Falling back to TensorFlow client; we recommended you install the Cloud '\n 'TPU client directly with pip install cloud-tpu-client.')\n from tensorflow.python.tpu.client import client\n\ndef is_running_in_gce():\n return True\n\n\n_TPU_DEVICE_REGEX = re.compile(\n r'.*task:(?P<host_id>\\d+)/.*device:TPU:(?P<core_id>\\d+)$')\n_TPU_CONN_RETRIES = 120\nDeviceDetails = collections.namedtuple(\n 'DeviceDetails', ['device_map', 'total_cores'])\n\n\n@tf_export('distribute.cluster_resolver.TPUClusterResolver')\nclass 
TPUClusterResolver(cluster_resolver.ClusterResolver):\n \"\"\"Cluster Resolver for Google Cloud TPUs.\n\n This is an implementation of cluster resolvers for the Google Cloud TPU\n service. As Cloud TPUs are in alpha, you will need to specify a API definition\n file for this to consume, in addition to a list of Cloud TPUs in your Google\n Cloud Platform project.\n\n TPUClusterResolver supports the following distinct environments:\n Google Compute Engine\n Google Kubernetes Engine\n Google internal\n \"\"\"\n\n @staticmethod\n def _get_device_dict_and_cores(devices):\n \"\"\"Returns a dict of hosts to cores and total cores given devices names.\n\n Returns a namedtuple with two attributes:\n device_map: A map of host_ids to a list of core_ids.\n total_cores: The total number of cores within the TPU system.\n\n Args:\n devices: A list of devices returned by session.list_devices()\n \"\"\"\n device_map = collections.defaultdict(list)\n num_cores = 0\n for device in devices:\n match = _TPU_DEVICE_REGEX.match(device.name)\n if match:\n host_id = match.group('host_id')\n core_id = match.group('core_id')\n device_map[host_id].append(core_id)\n num_cores += 1\n return DeviceDetails(device_map, num_cores)\n\n @staticmethod\n def _verify_and_return_same_core_count(device_dict):\n \"\"\"Verifies that every device in device_dict has the same # of cores.\"\"\"\n num_cores_per_host_set = (\n {len(core_ids) for core_ids in device_dict.values()})\n if len(num_cores_per_host_set) != 1:\n raise RuntimeError('TPU cores on each device is not the same. This '\n 'should never happen. 
Devices: {}'.format(device_dict))\n return num_cores_per_host_set.pop()\n\n def __init__(self,\n tpu=None,\n zone=None,\n project=None,\n job_name='worker',\n coordinator_name=None,\n coordinator_address=None,\n credentials='default',\n service=None,\n discovery_url=None):\n \"\"\"Creates a new TPUClusterResolver object.\n\n The ClusterResolver will then use the parameters to query the Cloud TPU APIs\n for the IP addresses and ports of each Cloud TPU listed.\n\n Args:\n tpu: A string corresponding to the TPU to use. If the string is an empty\n string, the string 'local', or a string that begins with 'grpc://', then\n it is assumed to not correspond with a Cloud TPU and will instead be\n passed as the session master and no ClusterSpec propagation will be\n done. In the future, this may also support a list of strings when\n multiple Cloud TPUs are used.\n zone: Zone where the TPUs are located. If omitted or empty, we will assume\n that the zone of the TPU is the same as the zone of the GCE VM, which we\n will try to discover from the GCE metadata service.\n project: Name of the GCP project containing Cloud TPUs. If omitted or\n empty, we will try to discover the project name of the GCE VM from the\n GCE metadata service.\n job_name: Name of the TensorFlow job the TPUs belong to.\n coordinator_name: The name to use for the coordinator. Set to None if the\n coordinator should not be included in the computed ClusterSpec.\n coordinator_address: The address of the coordinator (typically an ip:port\n pair). If set to None, a TF server will be started. If coordinator_name\n is None, a TF server will not be started even if coordinator_address is\n None.\n credentials: GCE Credentials. If None, then we use default credentials\n from the oauth2client\n service: The GCE API object returned by the googleapiclient.discovery\n function. 
If you specify a custom service object, then the credentials\n parameter will be ignored.\n discovery_url: A URL template that points to the location of the discovery\n service. It should have two parameters {api} and {apiVersion} that when\n filled in produce an absolute URL to the discovery document for that\n service. The environment variable 'TPU_API_DISCOVERY_URL' will override\n this.\n\n Raises:\n ImportError: If the googleapiclient is not installed.\n ValueError: If no TPUs are specified.\n RuntimeError: If an empty TPU name is specified and this is running in a\n Google Cloud environment.\n \"\"\"\n\n self._cloud_tpu_client = client.Client(\n tpu=tpu,\n zone=zone,\n project=project,\n credentials=credentials,\n service=service,\n discovery_url=discovery_url)\n\n self._tpu = self._cloud_tpu_client.name()\n # By default the task_type is 'worker` and the task_id is 0 (which is the\n # first worker in the task).\n self.task_type = job_name\n self.task_id = 0\n self._coordinator_name = coordinator_name\n if (coordinator_name and not coordinator_address):\n self._start_local_server()\n else:\n self._coordinator_address = coordinator_address\n\n def __enter__(self):\n self._cloud_tpu_client.enter()\n\n def __exit__(self, type, value, traceback): # pylint: disable=redefined-builtin\n self._cloud_tpu_client.exit(type, value, traceback)\n\n def master(self, task_type=None, task_id=None, rpc_layer=None):\n \"\"\"Get the Master string to be used for the session.\n\n In the normal case, this returns the grpc path (grpc://1.2.3.4:8470) of\n first instance in the ClusterSpec returned by the cluster_spec function.\n\n If a non-TPU name is used when constructing a TPUClusterResolver, that will\n be returned instead (e.g. 
If the tpus argument's value when constructing\n this TPUClusterResolver was 'grpc://10.240.1.2:8470',\n 'grpc://10.240.1.2:8470' will be returned).\n\n Args:\n task_type: (Optional, string) The type of the TensorFlow task of the\n master.\n task_id: (Optional, integer) The index of the TensorFlow task of the\n master.\n rpc_layer: (Optional, string) The RPC protocol TensorFlow should use to\n communicate with TPUs.\n\n Returns:\n string, the connection string to use when creating a session.\n\n Raises:\n ValueError: If none of the TPUs specified exists.\n \"\"\"\n\n cluster_spec = self.cluster_spec()\n if task_type is not None and task_id is not None:\n # task_type and task_id is from the function parameter\n master = cluster_spec.task_address(task_type, task_id)\n elif self.task_type is not None and self.task_id is not None:\n # task_type and task_id is from the object\n master = cluster_spec.task_address(self.task_type, self.task_id)\n else:\n # by default we take the first item in the cluster with the right name\n job_tasks = cluster_spec.job_tasks(self.task_type)\n if not job_tasks:\n raise ValueError('No TPUs with the specified names exist.')\n master = job_tasks[0]\n return cluster_resolver.format_master_url(master, 'grpc')\n\n def get_master(self):\n return self.master()\n\n def get_job_name(self):\n return self.task_type\n\n def get_tpu_system_metadata(self):\n \"\"\"Returns the metadata of the TPU system.\n\n Users can call this method to get some facts of the TPU system, like\n total number of cores, number of TPU workers and the devices. 
E.g.\n ```python\n\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')\n tpu_system_medata = resolver.get_tpu_system_metadata()\n num_hosts = tpu_system_medata.num_hosts\n ```\n\n Returns:\n A `tf.tpu.experimental.TPUSystemMetadata` object.\n \"\"\"\n cluster_spec = self.cluster_spec()\n cluster_def = cluster_spec.as_cluster_def() if cluster_spec else None\n tpu_system_metadata = (\n tpu_system_metadata_lib._query_tpu_system_metadata( # pylint: disable=protected-access\n self.master(),\n cluster_def=cluster_def,\n query_topology=False))\n\n return tpu_system_metadata\n\n def cluster_spec(self):\n \"\"\"Returns a ClusterSpec object based on the latest TPU information.\n\n We retrieve the information from the GCE APIs every time this method is\n called.\n\n Returns:\n A ClusterSpec containing host information returned from Cloud TPUs,\n or None.\n\n Raises:\n RuntimeError: If the provided TPU is not healthy.\n \"\"\"\n ############################################################################\n # There are 5 potential cases this code must handle:\n # 1. [Normal case.] We should resolve the TPU name to a set of tasks, and\n # a. Create a ClusterSpec that includes the coordinator job\n # b. Create a ClusterSpec without the coordinator job.\n # 2. [GKE / No API Access.] We should not resolve the TPU name to a set of\n # tasks and\n # a. Create a ClusterSpec with the coordinator\n # b. 
Create a ClusterSpec without the coordinator\n ############################################################################\n\n network_endpoints = self._cloud_tpu_client.network_endpoints()\n worker_list = [\n '%s:%s' % (endpoint['ipAddress'], endpoint['port'])\n for endpoint in network_endpoints\n ]\n cluster_spec = {self.task_type: worker_list}\n if self._coordinator_address:\n # {1, 2}.a\n cluster_spec[self._coordinator_name] = [self._coordinator_address]\n\n return server_lib.ClusterSpec(cluster_spec)\n\n def num_accelerators(self,\n task_type=None,\n task_id=None,\n config_proto=None):\n \"\"\"Returns the number of TPU cores per worker.\n\n Connects to the master and list all the devices present in the master,\n and counts them up. Also verifies that the device counts per host in the\n cluster is the same before returning the number of TPU cores per host.\n\n Args:\n task_type: Unused.\n task_id: Unused.\n config_proto: Used to create a connection to a TPU master in order to\n retrieve the system metadata.\n\n Raises:\n RuntimeError: If we cannot talk to a TPU worker after retrying or if the\n number of TPU devices per host is different.\n \"\"\"\n retry_count = 1\n # TODO(b/120564445): Replace with standard library for retries.\n while True:\n try:\n device_details = TPUClusterResolver._get_device_dict_and_cores(\n cluster_resolver.get_accelerator_devices(\n self.master(), config_proto=config_proto))\n break\n except errors.DeadlineExceededError:\n error_message = ('Failed to connect to master. The TPU might not be '\n 'ready (e.g. 
still scheduling) or the master '\n 'address is incorrect: got (%s)' % self.master())\n if retry_count <= _TPU_CONN_RETRIES:\n logging.warning(error_message)\n logging.warning('Retrying (%d/%d)...', retry_count, _TPU_CONN_RETRIES)\n retry_count += 1\n else:\n raise RuntimeError(error_message)\n\n if device_details.total_cores:\n return {'TPU': TPUClusterResolver._verify_and_return_same_core_count(\n device_details.device_map)}\n return {'TPU': 0}\n\n @property\n def environment(self):\n \"\"\"Returns the current environment which TensorFlow is running in.\"\"\"\n return self._environment\n\n def _start_local_server(self):\n address = compat.as_text(self._cloud_tpu_client.get_local_ip())\n self._server = server_lib.Server({'local': ['0.0.0.0:0']},\n protocol='grpc',\n config=None,\n start=True)\n # self._server.target is of the form: grpc://ipaddress:port\n target = compat.as_bytes(self._server.target)\n splits = target.split(compat.as_bytes(':'))\n assert len(splits) == 3, self._server.target\n assert splits[0] == compat.as_bytes('grpc'), self._server.target\n self._coordinator_port = compat.as_text(splits[2])\n self._coordinator_address = '%s:%s' % (\n address, compat.as_text(self._coordinator_port))\n\n def __deepcopy__(self, memo):\n # TODO(b/73668574): Remove this once RunConfig avoids performing deepcopy.\n return self\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for converter module.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.autograph.core import converter\nfrom tensorflow.python.autograph.core import converter_testing\nfrom tensorflow.python.autograph.pyct import anno\nfrom tensorflow.python.autograph.pyct import loader\nfrom tensorflow.python.autograph.pyct import parser\nfrom tensorflow.python.autograph.pyct import templates\nfrom tensorflow.python.platform import test\n\n\nclass TestConverter(converter.Base):\n pass\n\n\nclass ConversionOptionsTest(converter_testing.TestCase):\n\n def test_to_ast(self):\n opts = converter.ConversionOptions()\n opts_ast = opts.to_ast()\n\n template = '''\n def test_fn():\n return opts_ast\n '''\n opts_packed = templates.replace(template, opts_ast=opts_ast)\n\n reparsed, _, _ = loader.load_ast(opts_packed)\n reparsed.__dict__['ag__'] = self.make_fake_mod(\n 'fake_ag', converter.ConversionOptions, converter.Feature)\n\n reparsed_opts = reparsed.test_fn()\n\n self.assertEqual(opts.recursive, reparsed_opts.recursive)\n self.assertEqual(opts.user_requested, False)\n self.assertEqual(\n opts.internal_convert_user_code,\n reparsed_opts.internal_convert_user_code)\n 
self.assertEqual(opts.optional_features, reparsed_opts.optional_features)\n\n\nclass ConverterBaseTest(converter_testing.TestCase):\n\n def test_get_definition_directive_basic(self):\n\n directive_key = object\n\n def test_fn():\n a = 1\n return a\n\n ns = {}\n node, ctx = self.prepare(test_fn, ns)\n symbol_a = node.body[1].value\n defs, = anno.getanno(symbol_a, anno.Static.ORIG_DEFINITIONS)\n defs.directives[directive_key] = {\n 'test_arg': parser.parse_expression('foo'),\n 'other_arg': parser.parse_expression('bar'),\n }\n c = TestConverter(ctx)\n value = c.get_definition_directive(symbol_a, directive_key, 'test_arg',\n None)\n self.assertEqual(value.id, 'foo')\n\n def test_get_definition_directive_default(self):\n\n directive_key = object\n\n def test_fn():\n a = 1\n return a\n\n ns = {}\n node, ctx = self.prepare(test_fn, ns)\n symbol_a = node.body[1].value\n c = TestConverter(ctx)\n value = c.get_definition_directive(symbol_a, directive_key, 'test_arg',\n parser.parse_expression('default'))\n self.assertEqual(value.id, 'default')\n\n def test_get_definition_directive_multiple_consistent(self):\n\n directive_key = object\n\n def test_fn():\n a = 1\n if a:\n a = 2\n return a\n\n ns = {}\n node, ctx = self.prepare(test_fn, ns)\n symbol_a = node.body[2].value\n defs = anno.getanno(symbol_a, anno.Static.ORIG_DEFINITIONS)\n defs[0].directives[directive_key] = {\n 'test_arg': parser.parse_expression('foo'),\n 'other_arg': parser.parse_expression('bar'),\n }\n defs[1].directives[directive_key] = {\n 'test_arg': parser.parse_expression('foo'),\n 'other_arg': parser.parse_expression('baz'),\n }\n c = TestConverter(ctx)\n value = c.get_definition_directive(symbol_a, directive_key, 'test_arg',\n None)\n self.assertEqual(value.id, 'foo')\n\n def test_get_definition_directive_multiple_inconsistent(self):\n\n directive_key = object\n\n def test_fn():\n a = 1\n if a:\n a = 2\n return a\n\n ns = {}\n node, ctx = self.prepare(test_fn, ns)\n symbol_a = node.body[2].value\n defs 
= anno.getanno(symbol_a, anno.Static.ORIG_DEFINITIONS)\n defs[0].directives[directive_key] = {\n 'test_arg': parser.parse_expression('foo'),\n }\n defs[1].directives[directive_key] = {\n 'test_arg': parser.parse_expression('bar'),\n }\n c = TestConverter(ctx)\n with self.assertRaises(ValueError):\n c.get_definition_directive(symbol_a, directive_key, 'test_arg', None)\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test configs for resize_bilinear.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.lite.testing.zip_test_utils import create_tensor_data\nfrom tensorflow.lite.testing.zip_test_utils import make_zip_of_tests\nfrom tensorflow.lite.testing.zip_test_utils import register_make_test_function\n\n\n@register_make_test_function()\ndef make_resize_bilinear_tests(options):\n \"\"\"Make a set of tests to do resize_bilinear.\"\"\"\n\n test_parameters = [{\n \"dtype\": [tf.float32, tf.int32],\n \"input_shape\": [[1, 3, 4, 3], [1, 10, 2, 1]],\n \"size\": [[1, 1], [4, 3], [2, 2], [5, 6]],\n \"align_corners\": [None, True, False],\n \"half_pixel_centers\": [False],\n \"fully_quantize\": [False]\n }, {\n \"dtype\": [tf.float32],\n \"input_shape\": [[1, 3, 4, 3], [1, 10, 2, 1]],\n \"size\": [[1, 1], [4, 3], [2, 2], [5, 6]],\n \"align_corners\": [None, True, False],\n \"half_pixel_centers\": [False],\n \"fully_quantize\": [True]\n }, {\n \"dtype\": [tf.float32],\n \"input_shape\": [[1, 16, 24, 3], [1, 12, 18, 3]],\n \"size\": [[8, 12], [12, 18]],\n \"align_corners\": [None, True, False],\n \"half_pixel_centers\": [False],\n \"fully_quantize\": [True]\n }, {\n 
\"dtype\": [tf.float32],\n \"input_shape\": [[1, 16, 24, 3], [1, 12, 18, 3]],\n \"size\": [[8, 12]],\n \"align_corners\": [None, False],\n \"half_pixel_centers\": [True],\n \"fully_quantize\": [True]\n }, {\n \"dtype\": [tf.float32, tf.int32],\n \"input_shape\": [[1, 3, 4, 3], [1, 10, 2, 1]],\n \"size\": [[1, 1], [4, 3], [2, 2], [5, 6]],\n \"align_corners\": [None, False],\n \"half_pixel_centers\": [True],\n \"fully_quantize\": [False]\n }]\n\n def build_graph(parameters):\n input_tensor = tf.compat.v1.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.compat.v1.image.resize_bilinear(\n input_tensor,\n size=parameters[\"size\"],\n align_corners=parameters[\"align_corners\"],\n half_pixel_centers=parameters[\"half_pixel_centers\"])\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(\n parameters[\"dtype\"],\n parameters[\"input_shape\"],\n min_value=-1,\n max_value=1)\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(options, test_parameters, build_graph, build_inputs)\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Classes and functions that help to inspect Python source w.r.t. TF graphs.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\nimport re\nimport zipfile\n\nimport absl\nimport numpy as np\n\nfrom tensorflow.python.debug.lib import profiling\n\n\n_TENSORFLOW_BASEDIR = os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.dirname(\n os.path.normpath(os.path.abspath(__file__))))))\n\n_ABSL_BASEDIR = os.path.dirname(absl.__file__)\n\n\nUNCOMPILED_SOURCE_SUFFIXES = (\".py\")\nCOMPILED_SOURCE_SUFFIXES = (\".pyc\", \".pyo\")\n\n\ndef _norm_abs_path(file_path):\n return os.path.normpath(os.path.abspath(file_path))\n\n\ndef is_extension_uncompiled_python_source(file_path):\n _, extension = os.path.splitext(file_path)\n return extension.lower() in UNCOMPILED_SOURCE_SUFFIXES\n\n\ndef is_extension_compiled_python_source(file_path):\n _, extension = os.path.splitext(file_path)\n return extension.lower() in COMPILED_SOURCE_SUFFIXES\n\n\ndef _convert_watch_key_to_tensor_name(watch_key):\n return watch_key[:watch_key.rfind(\":\")]\n\n\ndef guess_is_tensorflow_py_library(py_file_path):\n \"\"\"Guess whether a Python source file is a part of the tensorflow library.\n\n Special 
cases:\n 1) Returns False for unit-test files in the library (*_test.py),\n 2) Returns False for files under python/debug/examples.\n\n Args:\n py_file_path: full path of the Python source file in question.\n\n Returns:\n (`bool`) Whether the file is a part of the tensorflow library.\n\n Raises:\n ValueError: if the extension name of py_file_path does not indicate a Python\n source file (compiled or uncompiled).\n \"\"\"\n if (not is_extension_uncompiled_python_source(py_file_path) and\n not is_extension_compiled_python_source(py_file_path)):\n raise ValueError(\n \"Input file path (%s) is not a Python source file.\" % py_file_path)\n py_file_path = _norm_abs_path(py_file_path)\n\n return ((py_file_path.startswith(_TENSORFLOW_BASEDIR) or\n py_file_path.startswith(_ABSL_BASEDIR)) and\n not py_file_path.endswith(\"_test.py\") and\n (os.path.normpath(\"tensorflow/python/debug/examples\") not in\n os.path.normpath(py_file_path)))\n\n\ndef load_source(source_file_path):\n \"\"\"Load the content of a Python source code file.\n\n This function covers the following case:\n 1. source_file_path points to an existing Python (.py) file on the\n file system.\n 2. source_file_path is a path within a .par file (i.e., a zip-compressed,\n self-contained Python executable).\n\n Args:\n source_file_path: Path to the Python source file to read.\n\n Returns:\n A length-2 tuple:\n - Lines of the source file, as a `list` of `str`s.\n - The width of the string needed to show the line number in the file.\n This is calculated based on the number of lines in the source file.\n\n Raises:\n IOError: if loading is unsuccessful.\n \"\"\"\n if os.path.isfile(source_file_path):\n with open(source_file_path, \"rb\") as f:\n source_text = f.read().decode(\"utf-8\")\n source_lines = source_text.split(\"\\n\")\n else:\n # One possible reason why the file doesn't exist is that it's a path\n # inside a .par file. 
Try that possibility.\n source_lines = _try_load_par_source(source_file_path)\n if source_lines is None:\n raise IOError(\n \"Source path neither exists nor can be loaded as a .par file: %s\" %\n source_file_path)\n line_num_width = int(np.ceil(np.log10(len(source_lines)))) + 3\n return source_lines, line_num_width\n\n\ndef _try_load_par_source(source_file_path):\n \"\"\"Try loading the source code inside a .par file.\n\n A .par file is a zip-compressed, self-contained Python executable.\n It contains the content of individual Python source files that can\n be read only through extracting from the zip file.\n\n Args:\n source_file_path: The full path to the file inside the .par file. This\n path should include the path to the .par file itself, followed by the\n intra-par path, e.g.,\n \"/tmp/my_executable.par/org-tensorflow/tensorflow/python/foo/bar.py\".\n\n Returns:\n If successful, lines of the source file as a `list` of `str`s.\n Else, `None`.\n \"\"\"\n prefix_path = source_file_path\n while True:\n prefix_path, basename = os.path.split(prefix_path)\n if not basename:\n break\n suffix_path = os.path.normpath(\n os.path.relpath(source_file_path, start=prefix_path))\n if prefix_path.endswith(\".par\") and os.path.isfile(prefix_path):\n with zipfile.ZipFile(prefix_path) as z:\n norm_names = [os.path.normpath(name) for name in z.namelist()]\n if suffix_path in norm_names:\n with z.open(z.namelist()[norm_names.index(suffix_path)]) as zf:\n source_text = zf.read().decode(\"utf-8\")\n return source_text.split(\"\\n\")\n\n\ndef annotate_source(dump,\n source_file_path,\n do_dumped_tensors=False,\n file_stack_top=False,\n min_line=None,\n max_line=None):\n \"\"\"Annotate a Python source file with a list of ops created at each line.\n\n (The annotation doesn't change the source file itself.)\n\n Args:\n dump: (`DebugDumpDir`) A `DebugDumpDir` object of which the Python graph\n has been loaded.\n source_file_path: (`str`) Path to the source file being annotated.\n 
do_dumped_tensors: (`str`) Whether dumped Tensors, instead of ops are to be\n used to annotate the source file.\n file_stack_top: (`bool`) Whether only the top stack trace in the\n specified source file is to be annotated.\n min_line: (`None` or `int`) The 1-based line to start annotate the source\n file from (inclusive).\n max_line: (`None` or `int`) The 1-based line number to end the annotation\n at (exclusive).\n\n Returns:\n A `dict` mapping 1-based line number to a list of op name(s) created at\n that line, or tensor names if `do_dumped_tensors` is True.\n\n Raises:\n ValueError: If the dump object does not have a Python graph set.\n \"\"\"\n\n py_graph = dump.python_graph\n if not py_graph:\n raise ValueError(\"Cannot perform source annotation due to a lack of set \"\n \"Python graph in the dump object\")\n\n source_file_path = _norm_abs_path(source_file_path)\n\n line_to_op_names = {}\n for op in py_graph.get_operations():\n for file_path, line_number, _, _ in reversed(dump.node_traceback(op.name)):\n if (min_line is not None and line_number < min_line or\n max_line is not None and line_number >= max_line):\n continue\n\n if _norm_abs_path(file_path) != source_file_path:\n continue\n\n if do_dumped_tensors:\n watch_keys = dump.debug_watch_keys(op.name)\n # Convert watch keys to unique Tensor names.\n items_to_append = list(\n set(map(_convert_watch_key_to_tensor_name, watch_keys)))\n else:\n items_to_append = [op.name]\n\n if line_number in line_to_op_names:\n line_to_op_names[line_number].extend(items_to_append)\n else:\n line_to_op_names[line_number] = items_to_append\n\n if file_stack_top:\n break\n\n return line_to_op_names\n\n\ndef list_source_files_against_dump(dump,\n path_regex_whitelist=None,\n node_name_regex_whitelist=None):\n \"\"\"Generate a list of source files with information regarding ops and tensors.\n\n Args:\n dump: (`DebugDumpDir`) A `DebugDumpDir` object of which the Python graph\n has been loaded.\n path_regex_whitelist: A 
regular-expression filter for source file path.\n node_name_regex_whitelist: A regular-expression filter for node names.\n\n Returns:\n A list of tuples regarding the Python source files involved in constructing\n the ops and tensors contained in `dump`. Each tuple is:\n (source_file_path, is_tf_library, num_nodes, num_tensors, num_dumps,\n first_line)\n\n is_tf_library: (`bool`) A guess of whether the file belongs to the\n TensorFlow Python library.\n num_nodes: How many nodes were created by lines of this source file.\n These include nodes with dumps and those without.\n num_tensors: How many Tensors were created by lines of this source file.\n These include Tensors with dumps and those without.\n num_dumps: How many debug Tensor dumps were from nodes (and Tensors)\n that were created by this source file.\n first_line: The first line number (1-based) that created any nodes or\n Tensors in this source file.\n\n The list is sorted by ascending order of source_file_path.\n\n Raises:\n ValueError: If the dump object does not have a Python graph set.\n \"\"\"\n\n py_graph = dump.python_graph\n if not py_graph:\n raise ValueError(\"Cannot generate source list due to a lack of set \"\n \"Python graph in the dump object\")\n\n path_to_node_names = collections.defaultdict(set)\n path_to_tensor_names = collections.defaultdict(set)\n path_to_first_line = {}\n tensor_name_to_num_dumps = {}\n\n path_regex = (re.compile(path_regex_whitelist)\n if path_regex_whitelist else None)\n node_name_regex = (re.compile(node_name_regex_whitelist)\n if node_name_regex_whitelist else None)\n\n to_skip_file_paths = set()\n for op in py_graph.get_operations():\n if node_name_regex and not node_name_regex.match(op.name):\n continue\n\n for file_path, line_number, _, _ in dump.node_traceback(op.name):\n file_path = _norm_abs_path(file_path)\n if (file_path in to_skip_file_paths or\n path_regex and not path_regex.match(file_path) or\n not os.path.isfile(file_path)):\n 
to_skip_file_paths.add(file_path)\n continue\n\n path_to_node_names[file_path].add(op.name)\n if file_path in path_to_first_line:\n if path_to_first_line[file_path] > line_number:\n path_to_first_line[file_path] = line_number\n else:\n path_to_first_line[file_path] = line_number\n\n for output_tensor in op.outputs:\n tensor_name = output_tensor.name\n path_to_tensor_names[file_path].add(tensor_name)\n\n watch_keys = dump.debug_watch_keys(op.name)\n for watch_key in watch_keys:\n node_name, output_slot, debug_op = watch_key.split(\":\")\n tensor_name = \"%s:%s\" % (node_name, output_slot)\n if tensor_name not in tensor_name_to_num_dumps:\n tensor_name_to_num_dumps[tensor_name] = len(\n dump.get_tensors(node_name, int(output_slot), debug_op))\n\n path_to_num_dumps = {}\n for path in path_to_tensor_names:\n path_to_num_dumps[path] = sum(\n tensor_name_to_num_dumps.get(tensor_name, 0)\n for tensor_name in path_to_tensor_names[path])\n\n output = []\n for file_path in path_to_node_names:\n output.append((\n file_path,\n guess_is_tensorflow_py_library(file_path),\n len(path_to_node_names.get(file_path, {})),\n len(path_to_tensor_names.get(file_path, {})),\n path_to_num_dumps.get(file_path, 0),\n path_to_first_line[file_path]))\n\n return sorted(output, key=lambda x: x[0])\n\n\ndef annotate_source_against_profile(profile_data,\n source_file_path,\n node_name_filter=None,\n op_type_filter=None,\n min_line=None,\n max_line=None):\n \"\"\"Annotate a Python source file with profiling information at each line.\n\n (The annotation doesn't change the source file itself.)\n\n Args:\n profile_data: (`list` of `ProfileDatum`) A list of `ProfileDatum`.\n source_file_path: (`str`) Path to the source file being annotated.\n node_name_filter: Regular expression to filter by node name.\n op_type_filter: Regular expression to filter by op type.\n min_line: (`None` or `int`) The 1-based line to start annotate the source\n file from (inclusive).\n max_line: (`None` or `int`) The 1-based 
line number to end the annotation\n at (exclusive).\n\n Returns:\n A `dict` mapping 1-based line number to a the namedtuple\n `profiling.LineOrFuncProfileSummary`.\n \"\"\"\n\n source_file_path = _norm_abs_path(source_file_path)\n\n node_name_regex = re.compile(node_name_filter) if node_name_filter else None\n op_type_regex = re.compile(op_type_filter) if op_type_filter else None\n\n line_to_profile_summary = {}\n for profile_datum in profile_data:\n if not profile_datum.file_path:\n continue\n\n if _norm_abs_path(profile_datum.file_path) != source_file_path:\n continue\n\n if (min_line is not None and profile_datum.line_number < min_line or\n max_line is not None and profile_datum.line_number >= max_line):\n continue\n\n if (node_name_regex and\n not node_name_regex.match(profile_datum.node_exec_stats.node_name)):\n continue\n\n if op_type_regex and not op_type_regex.match(profile_datum.op_type):\n continue\n\n if profile_datum.line_number not in line_to_profile_summary:\n line_to_profile_summary[profile_datum.line_number] = (\n profiling.AggregateProfile(profile_datum))\n else:\n line_to_profile_summary[profile_datum.line_number].add(profile_datum)\n\n return line_to_profile_summary\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for sync_replicas_optimizer.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.framework.test_util import create_local_cluster\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import adam\nfrom tensorflow.python.training import gradient_descent\nfrom tensorflow.python.training import training\n\n\n# Creates the workers and return their sessions, graphs, train_ops.\ndef get_workers(num_workers, replicas_to_aggregate, workers):\n sessions = []\n graphs = []\n train_ops = []\n for worker_id in range(num_workers):\n graph = ops.Graph()\n is_chief = (worker_id == 0)\n with graph.as_default():\n with ops.device(\"/job:ps/task:0\"):\n global_step = variables.VariableV1(\n 0, name=\"global_step\", trainable=False)\n var_0 = variables.VariableV1(0.0, name=\"v0\")\n with ops.device(\"/job:ps/task:1\"):\n var_1 = variables.VariableV1(1.0, name=\"v1\")\n var_sparse = variables.VariableV1([[3.0], [4.0]], name=\"v_sparse\")\n\n with 
ops.device(\"/job:worker/task:\" + str(worker_id)):\n grads_0 = constant_op.constant(0.1 + worker_id * 0.2)\n grads_1 = constant_op.constant(0.9 + worker_id * 0.2)\n # This is to test against sparse gradients.\n grads_sparse = ops.IndexedSlices(\n constant_op.constant(\n [0.1 + worker_id * 0.2], shape=[1, 1]),\n constant_op.constant([1]),\n constant_op.constant([2, 1]))\n sgd_opt = gradient_descent.GradientDescentOptimizer(2.0)\n sync_rep_opt = training.SyncReplicasOptimizer(\n sgd_opt,\n replicas_to_aggregate=replicas_to_aggregate,\n total_num_replicas=num_workers)\n train_op = [\n sync_rep_opt.apply_gradients(\n zip([grads_0, grads_1, grads_sparse],\n [var_0, var_1, var_sparse]),\n global_step=global_step)\n ]\n sync_replicas_hook = sync_rep_opt.make_session_run_hook(\n is_chief, num_tokens=num_workers)\n\n # Creates MonitoredSession\n session = training.MonitoredTrainingSession(\n master=workers[worker_id].target,\n is_chief=is_chief,\n hooks=[sync_replicas_hook])\n\n sessions.append(session)\n graphs.append(graph)\n train_ops.append(train_op)\n\n return sessions, graphs, train_ops\n\n\nclass SyncReplicasOptimizerTest(test.TestCase):\n\n def _run(self, train_op, sess):\n sess.run(train_op)\n\n @test_util.run_v1_only(\"b/120545219\")\n def test2Workers(self):\n num_workers = 2\n replicas_to_aggregate = 2\n num_ps = 2\n workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)\n\n # Creates and returns all the workers.\n sessions, graphs, train_ops = get_workers(num_workers,\n replicas_to_aggregate, workers)\n\n # Chief should have already initialized all the variables.\n var_0_g_0 = graphs[0].get_tensor_by_name(\"v0:0\")\n var_1_g_0 = graphs[0].get_tensor_by_name(\"v1:0\")\n local_step_0 = graphs[0].get_tensor_by_name(\"sync_rep_local_step:0\")\n self.assertAllEqual(0.0, sessions[0].run(var_0_g_0))\n self.assertAllEqual(1.0, sessions[0].run(var_1_g_0))\n self.assertAllEqual(0, sessions[0].run(local_step_0))\n\n # Will just use session 1 to verify 
all the variables later.\n var_0_g_1 = graphs[1].get_tensor_by_name(\"v0:0\")\n var_1_g_1 = graphs[1].get_tensor_by_name(\"v1:0\")\n var_sparse_g_1 = graphs[1].get_tensor_by_name(\"v_sparse:0\")\n local_step_1 = graphs[1].get_tensor_by_name(\"sync_rep_local_step:0\")\n global_step = graphs[1].get_tensor_by_name(\"global_step:0\")\n\n # The steps should also be initialized.\n self.assertAllEqual(0, sessions[1].run(global_step))\n self.assertAllEqual(0, sessions[1].run(local_step_1))\n self.assertAllClose([[3.0], [4.0]], sessions[1].run(var_sparse_g_1))\n\n # We have initial tokens in the queue so we can call this one by one. After\n # the first step, this will no longer work as there will be no more extra\n # tokens in the queue.\n sessions[0].run(train_ops[0])\n sessions[1].run(train_ops[1])\n\n # The global step should have been updated and the variables should now have\n # the new values after the average of the gradients are applied.\n while sessions[1].run(global_step) != 1:\n time.sleep(0.01)\n\n self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1))\n self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1))\n self.assertAllClose([[3.0], [4.0 - (0.1 + 0.3) / 2 * 2.0]],\n sessions[1].run(var_sparse_g_1))\n\n # The local step for both workers should still be 0 because the initial\n # tokens in the token queue are 0s. This means that the following\n # computation of the gradients will be wasted as local_step is smaller than\n # the current global step. However, this only happens once when the system\n # just starts and this is necessary to make the system robust for the case\n # when chief gets restarted by errors/preemption/...\n self.assertAllEqual(0, sessions[0].run(local_step_0))\n self.assertAllEqual(0, sessions[1].run(local_step_1))\n\n sessions[0].run(train_ops[0])\n sessions[1].run(train_ops[1])\n # Although the global step should still be 1 as explained above, the local\n # step should now be updated to 1. 
The variables are still the same.\n self.assertAllEqual(1, sessions[1].run(global_step))\n self.assertAllEqual(1, sessions[0].run(local_step_0))\n self.assertAllEqual(1, sessions[1].run(local_step_1))\n self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1))\n self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1))\n\n # At this step, the token queue is empty. So the 2 workers need to work\n # together to proceed.\n threads = []\n threads.append(\n self.checkedThread(\n target=self._run, args=(train_ops[0], sessions[0])))\n threads.append(\n self.checkedThread(\n target=self._run, args=(train_ops[1], sessions[1])))\n\n # The two workers starts to execute the train op.\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n # The global step should now be 2 and the gradients should have been\n # applied twice.\n self.assertAllEqual(2, sessions[1].run(global_step))\n self.assertAllClose(0 - 2 * (0.1 + 0.3) / 2 * 2.0,\n sessions[1].run(var_0_g_1))\n self.assertAllClose(1 - 2 * (0.9 + 1.1) / 2 * 2.0,\n sessions[1].run(var_1_g_1))\n\n # 3 workers and one of them is backup.\n @test_util.run_v1_only(\"b/120545219\")\n def test3Workers1Backup(self):\n num_workers = 3\n replicas_to_aggregate = 2\n num_ps = 2\n workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)\n\n # Creates and returns all the workers.\n sessions, graphs, train_ops = get_workers(num_workers,\n replicas_to_aggregate, workers)\n\n # Chief should have already initialized all the variables.\n var_0_g_1 = graphs[1].get_tensor_by_name(\"v0:0\")\n var_1_g_1 = graphs[1].get_tensor_by_name(\"v1:0\")\n local_step_1 = graphs[1].get_tensor_by_name(\"sync_rep_local_step:0\")\n global_step = graphs[1].get_tensor_by_name(\"global_step:0\")\n\n # The steps should also be initialized.\n self.assertAllEqual(0, sessions[1].run(global_step))\n self.assertAllEqual(0, sessions[1].run(local_step_1))\n\n # We have initial tokens in the 
queue so we can call this one by one. After\n # the token queue becomes empty, they should be called concurrently.\n # Here worker 0 and worker 2 finished first.\n sessions[0].run(train_ops[0])\n sessions[2].run(train_ops[2])\n\n # The global step should have been updated since we only need to collect 2\n # gradients. The variables should now have the new values after the average\n # of the gradients from worker 0/2 are applied.\n while sessions[1].run(global_step) != 1:\n time.sleep(0.01)\n\n self.assertAllEqual(1, sessions[1].run(global_step))\n self.assertAllClose(0 - (0.1 + 0.5) / 2 * 2.0, sessions[1].run(var_0_g_1))\n self.assertAllClose(1 - (0.9 + 1.3) / 2 * 2.0, sessions[1].run(var_1_g_1))\n\n # Worker 1 finished later and its gradients will now be dropped as it is\n # stale.\n sessions[1].run(train_ops[1])\n\n # As shown in the previous test, the local_step for all workers should be\n # still 0 so their next computation will also be dropped.\n sessions[0].run(train_ops[0])\n sessions[1].run(train_ops[1])\n sessions[2].run(train_ops[2])\n\n # Although the global step should still be 1 as explained above, the local\n # step should now be updated to 1. 
Just check worker 1 as an example.\n self.assertAllEqual(1, sessions[1].run(global_step))\n self.assertAllEqual(1, sessions[1].run(local_step_1))\n\n thread_0 = self.checkedThread(\n target=self._run, args=(train_ops[0], sessions[0]))\n thread_1 = self.checkedThread(\n target=self._run, args=(train_ops[1], sessions[1]))\n\n # Lets worker 0 execute first.\n # It will wait as we need 2 workers to finish this step and the global step\n # should be still 1.\n thread_0.start()\n self.assertAllEqual(1, sessions[1].run(global_step))\n\n # Starts worker 1.\n thread_1.start()\n thread_1.join()\n thread_0.join()\n\n # The global step should now be 2 and the gradients should have been\n # applied again.\n self.assertAllEqual(2, sessions[1].run(global_step))\n self.assertAllClose(-0.6 - (0.1 + 0.3) / 2 * 2.0,\n sessions[1].run(var_0_g_1))\n self.assertAllClose(-1.2 - (0.9 + 1.1) / 2 * 2.0,\n sessions[1].run(var_1_g_1))\n\n\nclass SyncReplicasOptimizerHookTest(test.TestCase):\n\n def testErrorIfUsedBeforeMinimizeCalled(self):\n opt = training.SyncReplicasOptimizer(\n opt=gradient_descent.GradientDescentOptimizer(1.0),\n replicas_to_aggregate=1,\n total_num_replicas=1)\n hook = opt.make_session_run_hook(True)\n with self.assertRaisesRegexp(ValueError,\n \"apply_gradient should be called\"):\n hook.begin()\n\n @test_util.run_v1_only(\"b/120545219\")\n def testCanCreatedBeforeMinimizeCalled(self):\n \"\"\"This behavior is required to be integrated with Estimators.\"\"\"\n opt = training.SyncReplicasOptimizer(\n opt=gradient_descent.GradientDescentOptimizer(1.0),\n replicas_to_aggregate=1,\n total_num_replicas=1)\n hook = opt.make_session_run_hook(True)\n v = variables.VariableV1([0.])\n global_step = variables.VariableV1(0, name=\"global_step\", trainable=False)\n opt.minimize(v, global_step=global_step)\n hook.begin()\n\n @test_util.run_v1_only(\"b/120545219\")\n def testFetchVariableList(self):\n opt = training.SyncReplicasOptimizer(\n opt=adam.AdamOptimizer(0.01),\n 
replicas_to_aggregate=1,\n total_num_replicas=1)\n v = variables.VariableV1([0.], name=\"fetch_variable_test\")\n global_step = variables.VariableV1(0, name=\"global_step\", trainable=False)\n opt.minimize(v, global_step=global_step)\n opt_variables = opt.variables()\n beta1_power, beta2_power = opt._opt._get_beta_accumulators()\n self.assertIn(beta1_power, opt_variables)\n self.assertIn(beta2_power, opt_variables)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras' base preprocessing layer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python import keras\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.keras.engine import base_preprocessing_layer\nfrom tensorflow.python.keras.engine import base_preprocessing_layer_v1\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import compat\n\n\n# Define a test-only implementation of CombinerPreprocessingLayer to validate\n# its correctness directly.\nclass AddingPreprocessingLayer(\n base_preprocessing_layer.CombinerPreprocessingLayer):\n _SUM_NAME = \"sum\"\n\n def __init__(self, **kwargs):\n super(AddingPreprocessingLayer, self).__init__(\n 
combiner=self.AddingCombiner(), **kwargs)\n\n def build(self, input_shape):\n super(AddingPreprocessingLayer, self).build(input_shape)\n self._sum = self._add_state_variable(\n name=self._SUM_NAME,\n shape=(1,),\n dtype=dtypes.float32,\n initializer=init_ops.zeros_initializer)\n\n def set_total(self, sum_value):\n \"\"\"This is an example of how a subclass would implement a direct setter.\n\n These methods should generally just create a dict mapping the correct names\n to the relevant passed values, and call self._set_state_variables() with the\n dict of data.\n\n Args:\n sum_value: The total to set.\n \"\"\"\n self._set_state_variables({self._SUM_NAME: [sum_value]})\n\n def call(self, inputs):\n return inputs + self._sum\n\n # Define a Combiner for this layer class.\n class AddingCombiner(base_preprocessing_layer.Combiner):\n\n def compute(self, batch_values, accumulator=None):\n \"\"\"Compute a step in this computation, returning a new accumulator.\"\"\"\n new_accumulator = 0 if batch_values is None else np.sum(batch_values)\n if accumulator is None:\n return new_accumulator\n else:\n return self.merge([accumulator, new_accumulator])\n\n def merge(self, accumulators):\n \"\"\"Merge several accumulators to a single accumulator.\"\"\"\n # Combine accumulators and return the result.\n result = accumulators[0]\n for accumulator in accumulators[1:]:\n result = np.sum([np.sum(result), np.sum(accumulator)])\n return result\n\n def extract(self, accumulator):\n \"\"\"Convert an accumulator into a dict of output values.\"\"\"\n # We have to add an additional dimension here because the weight shape\n # is (1,) not None.\n return {AddingPreprocessingLayer._SUM_NAME: [accumulator]}\n\n def restore(self, output):\n \"\"\"Create an accumulator based on 'output'.\"\"\"\n # There is no special internal state here, so we just return the relevant\n # internal value. 
We take the [0] value here because the weight itself\n # is of the shape (1,) and we want the scalar contained inside it.\n return output[AddingPreprocessingLayer._SUM_NAME][0]\n\n def serialize(self, accumulator):\n \"\"\"Serialize an accumulator for a remote call.\"\"\"\n return compat.as_bytes(json.dumps(accumulator))\n\n def deserialize(self, encoded_accumulator):\n \"\"\"Deserialize an accumulator received from 'serialize()'.\"\"\"\n return json.loads(compat.as_text(encoded_accumulator))\n\n\nclass AddingPreprocessingLayerV1(\n AddingPreprocessingLayer,\n base_preprocessing_layer_v1.CombinerPreprocessingLayer):\n pass\n\n\ndef get_layer():\n if context.executing_eagerly():\n return AddingPreprocessingLayer()\n else:\n return AddingPreprocessingLayerV1()\n\n\n@keras_parameterized.run_all_keras_modes\nclass PreprocessingLayerTest(keras_parameterized.TestCase):\n\n def test_adapt_list_fails(self):\n \"\"\"Test that non-Dataset/Numpy inputs cause a reasonable error.\"\"\"\n input_dataset = [1, 2, 3, 4, 5]\n\n layer = get_layer()\n with self.assertRaisesRegex(ValueError, \"requires a\"):\n layer.adapt(input_dataset)\n\n def test_adapt_infinite_dataset_fails(self):\n \"\"\"Test that preproc layers fail if an infinite dataset is passed.\"\"\"\n input_dataset = dataset_ops.Dataset.from_tensor_slices(\n np.array([[1], [2], [3], [4], [5], [0]])).repeat()\n\n layer = get_layer()\n with self.assertRaisesRegex(ValueError, \".*infinite number of elements.*\"):\n layer.adapt(input_dataset)\n\n def test_pre_build_injected_update_with_no_build_fails(self):\n \"\"\"Test external update injection before build() is called fails.\"\"\"\n input_dataset = np.array([1, 2, 3, 4, 5])\n\n layer = get_layer()\n combiner = layer._combiner\n updates = combiner.extract(combiner.compute(input_dataset))\n\n with self.assertRaisesRegex(RuntimeError, \".*called after build.*\"):\n layer._set_state_variables(updates)\n\n def test_setter_update(self):\n \"\"\"Test the prototyped setter 
method.\"\"\"\n input_data = keras.Input(shape=(1,))\n layer = get_layer()\n output = layer(input_data)\n model = keras.Model(input_data, output)\n model._run_eagerly = testing_utils.should_run_eagerly()\n\n layer.set_total(15)\n\n self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))\n\n def test_pre_build_adapt_update_numpy(self):\n \"\"\"Test that preproc layers can adapt() before build() is called.\"\"\"\n input_dataset = np.array([1, 2, 3, 4, 5])\n\n layer = get_layer()\n layer.adapt(input_dataset)\n\n input_data = keras.Input(shape=(1,))\n output = layer(input_data)\n model = keras.Model(input_data, output)\n model._run_eagerly = testing_utils.should_run_eagerly()\n\n self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))\n\n def test_post_build_adapt_update_numpy(self):\n \"\"\"Test that preproc layers can adapt() after build() is called.\"\"\"\n input_dataset = np.array([1, 2, 3, 4, 5])\n\n input_data = keras.Input(shape=(1,))\n layer = get_layer()\n output = layer(input_data)\n model = keras.Model(input_data, output)\n model._run_eagerly = testing_utils.should_run_eagerly()\n\n layer.adapt(input_dataset)\n\n self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))\n\n def test_pre_build_injected_update(self):\n \"\"\"Test external update injection before build() is called.\"\"\"\n input_dataset = np.array([1, 2, 3, 4, 5])\n\n layer = get_layer()\n combiner = layer._combiner\n updates = combiner.extract(combiner.compute(input_dataset))\n\n layer.build((1,))\n layer._set_state_variables(updates)\n\n input_data = keras.Input(shape=(1,))\n output = layer(input_data)\n model = keras.Model(input_data, output)\n model._run_eagerly = testing_utils.should_run_eagerly()\n\n self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))\n\n def test_post_build_injected_update(self):\n \"\"\"Test external update injection after build() is called.\"\"\"\n input_dataset = np.array([1, 2, 3, 4, 5])\n input_data = 
keras.Input(shape=(1,))\n layer = get_layer()\n output = layer(input_data)\n model = keras.Model(input_data, output)\n model._run_eagerly = testing_utils.should_run_eagerly()\n\n combiner = layer._combiner\n updates = combiner.extract(combiner.compute(input_dataset))\n layer._set_state_variables(updates)\n\n self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))\n\n def test_pre_build_adapt_update_dataset(self):\n \"\"\"Test that preproc layers can adapt() before build() is called.\"\"\"\n input_dataset = dataset_ops.Dataset.from_tensor_slices(\n np.array([[1], [2], [3], [4], [5], [0]]))\n\n layer = get_layer()\n layer.adapt(input_dataset)\n\n input_data = keras.Input(shape=(1,))\n output = layer(input_data)\n model = keras.Model(input_data, output)\n model._run_eagerly = testing_utils.should_run_eagerly()\n\n self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))\n\n def test_post_build_adapt_update_dataset(self):\n \"\"\"Test that preproc layers can adapt() after build() is called.\"\"\"\n input_dataset = dataset_ops.Dataset.from_tensor_slices(\n np.array([[1], [2], [3], [4], [5], [0]]))\n\n input_data = keras.Input(shape=(1,))\n layer = get_layer()\n output = layer(input_data)\n model = keras.Model(input_data, output)\n model._run_eagerly = testing_utils.should_run_eagerly()\n\n layer.adapt(input_dataset)\n\n self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))\n\n def test_further_tuning(self):\n \"\"\"Test that models can be tuned with multiple calls to 'adapt'.\"\"\"\n\n input_dataset = np.array([1, 2, 3, 4, 5])\n\n layer = get_layer()\n layer.adapt(input_dataset)\n\n input_data = keras.Input(shape=(1,))\n output = layer(input_data)\n model = keras.Model(input_data, output)\n model._run_eagerly = testing_utils.should_run_eagerly()\n\n self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))\n\n layer.adapt(np.array([1, 2]), reset_state=False)\n self.assertAllEqual([[19], [20], [21]], model.predict([1., 
2., 3.]))\n\n def test_further_tuning_post_injection(self):\n \"\"\"Test that models can be tuned with multiple calls to 'adapt'.\"\"\"\n\n input_dataset = np.array([1, 2, 3, 4, 5])\n\n layer = get_layer()\n\n input_data = keras.Input(shape=(1,))\n output = layer(input_data)\n model = keras.Model(input_data, output)\n model._run_eagerly = testing_utils.should_run_eagerly()\n\n combiner = layer._combiner\n updates = combiner.extract(combiner.compute(input_dataset))\n layer._set_state_variables(updates)\n self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))\n\n layer.adapt(np.array([1, 2]), reset_state=False)\n self.assertAllEqual([[19], [20], [21]], model.predict([1., 2., 3.]))\n\n def test_weight_based_state_transfer(self):\n \"\"\"Test that preproc layers can transfer state via get/set weights..\"\"\"\n\n def get_model():\n input_data = keras.Input(shape=(1,))\n layer = get_layer()\n output = layer(input_data)\n model = keras.Model(input_data, output)\n model._run_eagerly = testing_utils.should_run_eagerly()\n return (model, layer)\n\n input_dataset = np.array([1, 2, 3, 4, 5])\n model, layer = get_model()\n layer.adapt(input_dataset)\n self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))\n\n # Create a new model and verify it has no state carryover.\n weights = model.get_weights()\n model_2, _ = get_model()\n self.assertAllEqual([[1], [2], [3]], model_2.predict([1., 2., 3.]))\n\n # Transfer state from model to model_2 via get/set weights.\n model_2.set_weights(weights)\n self.assertAllEqual([[16], [17], [18]], model_2.predict([1., 2., 3.]))\n\n def test_weight_based_state_transfer_with_further_tuning(self):\n \"\"\"Test that transferred state can be used to further tune a model..\"\"\"\n\n def get_model():\n input_data = keras.Input(shape=(1,))\n layer = get_layer()\n output = layer(input_data)\n model = keras.Model(input_data, output)\n model._run_eagerly = testing_utils.should_run_eagerly()\n return (model, layer)\n\n 
input_dataset = np.array([1, 2, 3, 4, 5])\n model, layer = get_model()\n layer.adapt(input_dataset)\n self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))\n\n # Transfer state from model to model_2 via get/set weights.\n weights = model.get_weights()\n model_2, layer_2 = get_model()\n model_2.set_weights(weights)\n\n # Further adapt this layer based on the transferred weights.\n layer_2.adapt(np.array([1, 2]), reset_state=False)\n self.assertAllEqual([[19], [20], [21]], model_2.predict([1., 2., 3.]))\n\n\n@keras_parameterized.run_all_keras_modes\nclass ConvertToListTest(keras_parameterized.TestCase):\n\n # Note: We need the inputs to be lambdas below to avoid some strangeness with\n # TF1.x graph mode - specifically, if the inputs are created outside the test\n # function body, the graph inside the test body will not contain the tensors\n # that were created in the parameters.\n @parameterized.named_parameters(\n {\n \"testcase_name\": \"ndarray\",\n \"inputs\": lambda: np.array([[1, 2, 3], [4, 5, 6]]),\n \"expected\": [[1, 2, 3], [4, 5, 6]]\n }, {\n \"testcase_name\": \"list\",\n \"inputs\": lambda: [[1, 2, 3], [4, 5, 6]],\n \"expected\": [[1, 2, 3], [4, 5, 6]]\n }, {\n \"testcase_name\": \"tensor\",\n \"inputs\": lambda: constant_op.constant([[1, 2, 3], [4, 5, 6]]),\n \"expected\": [[1, 2, 3], [4, 5, 6]]\n }, {\n \"testcase_name\":\n \"ragged_tensor\",\n \"inputs\":\n lambda: ragged_factory_ops.constant([[1, 2, 3, 4], [4, 5, 6]]),\n \"expected\": [[1, 2, 3, 4], [4, 5, 6]]\n }, {\n \"testcase_name\": \"sparse_tensor\",\n \"inputs\": lambda: sparse_ops.from_dense([[1, 2, 0, 4], [4, 5, 6, 0]]),\n \"expected\": [[1, 2, -1, 4], [4, 5, 6, -1]]\n })\n def test_conversion(self, inputs, expected):\n values = base_preprocessing_layer.convert_to_list(inputs())\n self.assertAllEqual(expected, values)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tf.data.Dataset.map()`.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nfrom collections import namedtuple\nimport threading\nimport time\nimport warnings\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.data.experimental.ops import threading_options\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import combinations\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import map_fn\nfrom tensorflow.python.ops import 
math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import script_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.ops.ragged import ragged_concat_ops\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.platform import test\n\n\ndef _test_combinations_with_mode_v1(mode):\n\n def new_map_fn(dataset, *args, **kwargs):\n return dataset.map(*args, **kwargs)\n\n def legacy_map_fn(dataset, *args, **kwargs):\n return dataset.map_with_legacy_function(*args, **kwargs)\n\n new_map_combinations = combinations.combine(\n tf_api_version=1,\n mode=mode,\n apply_map=combinations.NamedObject(\"map_fn\", new_map_fn))\n\n legacy_map_combinations = combinations.combine(\n tf_api_version=1,\n mode=mode,\n apply_map=combinations.NamedObject(\"legacy_map_fn\", legacy_map_fn))\n\n return new_map_combinations + legacy_map_combinations\n\n\ndef _test_combinations_with_mode_v2(mode):\n\n def new_map_fn(dataset, *args, **kwargs):\n return dataset.map(*args, **kwargs)\n\n return combinations.combine(\n tf_api_version=2,\n mode=mode,\n apply_map=combinations.NamedObject(\"map_fn\", new_map_fn))\n\n\ndef _test_combinations_with_mode(mode):\n return _test_combinations_with_mode_v1(\n mode) + _test_combinations_with_mode_v2(mode)\n\n\ndef _test_combinations():\n return _test_combinations_with_mode(\"eager\") + _test_combinations_with_mode(\n \"graph\")\n\n\ndef _short_circuit_test_cases():\n cases = [\n (\"Identity\", None, lambda x: x),\n (\"Replicate\", None, lambda x: (x, x)),\n (\"Swap\", (None, None), lambda x, y: (y, x)),\n (\"Project\", (None, None), lambda x, y: x)\n ]\n\n def reduce_fn(x, y):\n name, structure, fn = y\n return x + combinations.combine(\n 
structure=structure, fn=combinations.NamedObject(name, fn))\n\n return functools.reduce(reduce_fn, cases, [])\n\n\ndef _make_coordinated_sloppy_dataset(apply_map, num_elements,\n num_parallel_calls):\n \"\"\"Produces a dataset iterator and events to control the order of elements.\n\n Args:\n apply_map: method that applies the `map` transformation\n num_elements: the number of input elements\n num_parallel_calls: the degree of map parallelism\n\n Returns:\n A dataset iterator (represented as `get_next` op) and events that can be\n used to control the order of output elements.\n \"\"\"\n\n # Set up threading events used to sequence when items are produced that\n # are subsequently interleaved. These events allow us to deterministically\n # simulate slowdowns and force sloppiness.\n coordination_events = {i: threading.Event() for i in range(num_elements)}\n\n def map_py_fn(x):\n coordination_events[x].wait()\n coordination_events[x].clear()\n return x * x\n\n def fn(x):\n return script_ops.py_func(map_py_fn, [x], x.dtype)\n\n options = dataset_ops.Options()\n options.experimental_deterministic = False\n dataset = dataset_ops.Dataset.range(num_elements)\n dataset = apply_map(dataset, fn, num_parallel_calls).with_options(options)\n return dataset, coordination_events\n\n\nclass Foo(object):\n \"\"\"Dummy class used for invalid return value tests.\"\"\"\n\n def __init__(self):\n pass\n\n\nclass MapTest(test_base.DatasetTestBase, parameterized.TestCase):\n\n def _map_dataset_factory(self, components, apply_map, count):\n\n def _map_fn(x, y, z):\n return math_ops.square(x), math_ops.square(y), math_ops.square(z)\n\n dataset = dataset_ops.Dataset.from_tensor_slices(components)\n dataset = apply_map(dataset, _map_fn).repeat(count)\n self.assertEqual(\n [c.shape[1:] for c in components],\n [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])\n return dataset\n\n @combinations.generate(_test_combinations())\n def testMapDataset(self, apply_map):\n \"\"\"Test an 
dataset that maps a TF function across its input elements.\"\"\"\n # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->\n # RepeatDataset(count).\n components = (np.arange(7),\n np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],\n np.array(37.0) * np.arange(7))\n\n # Test single-threaded access to the iterator.\n get_next = self.getNext(\n self._map_dataset_factory(components, apply_map, count=14))\n for _ in range(14):\n for i in range(7):\n result = self.evaluate(get_next())\n for component, result_component in zip(components, result):\n self.assertAllEqual(component[i]**2, result_component)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n # TODO(b/117581999): add eager coverage\n @combinations.generate(_test_combinations_with_mode(\"graph\"))\n def testMapDatasetMultiThreaded(self, apply_map):\n # Test multi-threaded access to the same iterator.\n components = (np.arange(7),\n np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],\n np.array(37.0) * np.arange(7))\n get_next = self.getNext(\n self._map_dataset_factory(components, apply_map, count=18))\n results = []\n with self.cached_session() as sess:\n def iterator_thread():\n while True:\n try:\n results.append(sess.run(get_next()))\n except errors.OutOfRangeError:\n return\n threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n # `results` will contain the same elements components**2\n # repeated 18 times, but in a non-deterministic order. 
Sort the\n # results, and assert that each element of components**2 is\n # produced 18 times.\n results.sort(key=lambda x: x[0])\n for i in range(7):\n for j in range(18):\n for component, result_component in zip(components,\n results[i * 18 + j]):\n self.assertAllEqual(component[i]**2, result_component)\n\n def _parallel_map_dataset_factory(self, components, apply_map, count,\n num_parallel_calls, buffer_size):\n\n def _map_fn(x, y, z):\n return math_ops.square(x), math_ops.square(y), math_ops.square(z)\n\n dataset = dataset_ops.Dataset.from_tensor_slices(components)\n dataset = apply_map(dataset, _map_fn, num_parallel_calls=num_parallel_calls)\n dataset = dataset.prefetch(buffer_size).repeat(count)\n\n self.assertEqual(\n [c.shape[1:] for c in components],\n [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])\n return dataset\n\n @combinations.generate(\n combinations.times(\n _test_combinations(),\n combinations.combine(num_parallel_calls=1, buffer_size=1) +\n combinations.combine(num_parallel_calls=1, buffer_size=2) +\n combinations.combine(num_parallel_calls=2, buffer_size=2) +\n combinations.combine(num_parallel_calls=2, buffer_size=4) +\n combinations.combine(num_parallel_calls=8, buffer_size=8) +\n combinations.combine(num_parallel_calls=8, buffer_size=16)))\n def testParallelMapDataset(self, apply_map, num_parallel_calls, buffer_size):\n \"\"\"Test an dataset that maps a TF function across its input elements.\"\"\"\n\n # The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->\n # RepeatDataset(count).\n components = (np.arange(7),\n np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],\n np.array(37.0) * np.arange(7))\n # Test single-threaded access to the iterator.\n get_next = self.getNext(\n self._parallel_map_dataset_factory(components, apply_map, 14,\n num_parallel_calls, buffer_size))\n for _ in range(14):\n for i in range(7):\n result = self.evaluate(get_next())\n for component, result_component in zip(components, 
result):\n self.assertAllEqual(component[i]**2, result_component)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n # TODO(b/117581999): add eager coverage\n @combinations.generate(\n combinations.times(\n _test_combinations_with_mode(\"graph\"),\n combinations.combine(num_parallel_calls=1, buffer_size=1) +\n combinations.combine(num_parallel_calls=1, buffer_size=2) +\n combinations.combine(num_parallel_calls=2, buffer_size=2) +\n combinations.combine(num_parallel_calls=2, buffer_size=4) +\n combinations.combine(num_parallel_calls=8, buffer_size=8) +\n combinations.combine(num_parallel_calls=8, buffer_size=16)))\n def testParallelMapDatasetMultiThreaded(self, apply_map, num_parallel_calls,\n buffer_size):\n\n # Test multi-threaded access to the same iterator.\n components = (np.arange(7),\n np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],\n np.array(37.0) * np.arange(7))\n get_next = self.getNext(\n self._parallel_map_dataset_factory(components, apply_map, 18,\n num_parallel_calls, buffer_size))\n results = []\n with self.cached_session() as sess:\n\n def iterator_thread():\n while True:\n try:\n results.append(sess.run(get_next()))\n except errors.OutOfRangeError:\n return\n\n threads = [self.checkedThread(target=iterator_thread) for _ in range(64)]\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n # `results` will contain the same elements components**2\n # repeated 18 times, but in a non-deterministic order. 
Sort the\n # results, and assert that each element of components**2 is\n # produced 18 times.\n results.sort(key=lambda x: x[0])\n for i in range(7):\n for j in range(18):\n for component, result_component in zip(components,\n results[i * 18 + j]):\n self.assertAllEqual(component[i]**2, result_component)\n\n @combinations.generate(_test_combinations())\n def testImplicitDisposeParallelMapDataset(self, apply_map):\n # Tests whether a parallel map dataset will be cleaned up correctly when\n # the pipeline does not run it until exhaustion.\n # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->\n # RepeatDataset(1000).\n components = (np.arange(1000),\n np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],\n np.array(37.0) * np.arange(1000))\n\n dataset = self._parallel_map_dataset_factory(components, apply_map, 1000,\n 100, 100)\n # NOTE(mrry): Also test that the prefetching thread is cancelled correctly.\n dataset = dataset.prefetch(100)\n get_next = self.getNext(dataset)\n\n for _ in range(3):\n self.evaluate(get_next())\n\n @combinations.generate(_test_combinations())\n def testParallelMapUnspecifiedOutputSize(self, apply_map):\n components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)\n\n dataset = dataset_ops.Dataset.from_tensor_slices(components)\n dataset = apply_map(\n dataset,\n lambda x: array_ops.check_numerics(x, \"message\"),\n num_parallel_calls=2)\n get_next = self.getNext(dataset)\n\n for _ in range(3):\n self.evaluate(get_next())\n\n @combinations.generate(_test_combinations())\n def testParallelMapError(self, apply_map):\n components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)\n\n dataset = dataset_ops.Dataset.from_tensor_slices(components)\n dataset = apply_map(\n dataset,\n lambda x: array_ops.check_numerics(x, \"message\"),\n num_parallel_calls=2)\n get_next = self.getNext(dataset)\n\n for _ in range(3):\n self.evaluate(get_next())\n # The 4th element is NaN, so `array_ops.check_numerics()` should fail.\n with 
self.assertRaises(errors.InvalidArgumentError):\n self.evaluate(get_next())\n self.evaluate(get_next())\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n @combinations.generate(_test_combinations())\n def testPrefetchError(self, apply_map):\n components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)\n\n dataset = dataset_ops.Dataset.from_tensor_slices(components)\n dataset = apply_map(\n dataset, lambda x: array_ops.check_numerics(x, \"message\")).prefetch(2)\n get_next = self.getNext(dataset)\n\n for _ in range(3):\n self.evaluate(get_next())\n # The 4th element is NaN, so `array_ops.check_numerics()` should fail.\n with self.assertRaises(errors.InvalidArgumentError):\n self.evaluate(get_next())\n self.evaluate(get_next())\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n @combinations.generate(_test_combinations())\n def testCaptureIterator(self, apply_map):\n\n def _build_ds(iterator):\n\n def _map_fn(x):\n get_next = iterator.get_next()\n return x * get_next\n\n return apply_map(dataset_ops.Dataset.range(10), _map_fn)\n\n def _build_graph():\n if context.executing_eagerly():\n captured_iterator = iter(dataset_ops.Dataset.range(10))\n else:\n captured_iterator = dataset_ops.make_initializable_iterator(\n dataset_ops.Dataset.range(10))\n ds = _build_ds(captured_iterator)\n return captured_iterator, ds\n\n captured_iter, ds = _build_graph()\n if not context.executing_eagerly():\n self.evaluate(captured_iter.initializer)\n get_next = self.getNext(ds, requires_initialization=True)\n for i in range(10):\n self.assertEqual(i * i, self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n @combinations.generate(_test_combinations())\n def testCaptureHashTable(self, apply_map):\n # NOTE(mrry): We must use the V2 variants of `HashTable`\n # etc. 
because these produce a `tf.resource`-typed output that is\n # compatible with the in-graph function implementation.\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(keys, values), default_val)\n\n input_sentences = dataset_ops.Dataset.from_tensor_slices(\n [\"brain brain tank salad surgery\", \"surgery brain\"])\n\n dataset = apply_map(input_sentences,\n lambda x: string_ops.string_split([x]).values)\n dataset = apply_map(dataset, table.lookup)\n\n get_next = self.getNext(dataset, requires_initialization=True)\n\n self.evaluate(table.initializer)\n self.evaluate(get_next())\n self.evaluate(get_next())\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n # TODO(b/123904513)\n @combinations.generate(_test_combinations_with_mode_v1(\"graph\"))\n def testCaptureQueue(self, apply_map):\n elements = np.random.randint(100, size=[200])\n queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])\n enqueue_op = queue.enqueue_many(elements)\n close_op = queue.close()\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(-1)\n dataset = apply_map(dataset, lambda _: queue.dequeue())\n\n get_next = self.getNext(dataset, requires_initialization=True)\n self.evaluate(enqueue_op)\n self.evaluate(close_op)\n\n for element in elements:\n self.assertEqual(element, self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n # TODO(b/117581999): Possible deadlock in eager mode, debug.\n @combinations.generate(_test_combinations_with_mode_v1(\"graph\"))\n def testCaptureSameResourceMultipleTimes(self, apply_map):\n elements = np.random.randint(100, size=[200])\n queue = data_flow_ops.FIFOQueue(\n 200, dtypes.int64, shapes=[], shared_name=\"shared_queue\")\n queue_2 = data_flow_ops.FIFOQueue(\n 200, dtypes.int64, shapes=[], 
shared_name=\"shared_queue\")\n\n enqueue_op = queue.enqueue_many(elements)\n close_op = queue.close()\n\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(-1)\n dataset = apply_map(dataset, lambda _: (queue.dequeue(), queue_2.dequeue()))\n\n self.evaluate(enqueue_op)\n self.evaluate(close_op)\n get_next = self.getNext(dataset, requires_initialization=True)\n for i in range(100):\n self.assertCountEqual([elements[i * 2], elements[i * 2 + 1]],\n self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n @combinations.generate(_test_combinations())\n def testSeededStatefulOperatorIsProperlyStateful(self, apply_map):\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)\n fn = lambda _: random_ops.random_uniform((), seed=11)\n dataset = apply_map(dataset, fn).batch(2)\n\n get_next = self.getNext(dataset, requires_initialization=True)\n random_values = []\n with self.assertRaises(errors.OutOfRangeError):\n while True:\n random_values.extend(self.evaluate(get_next()))\n self.assertLen(random_values, 10)\n self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)\n\n get_next = self.getNext(dataset, requires_initialization=True)\n random_values_2 = []\n with self.assertRaises(errors.OutOfRangeError):\n while True:\n random_values_2.extend(self.evaluate(get_next()))\n\n # Randomness is repeatable given same seed\n self.assertAllClose(random_values, random_values_2)\n\n @combinations.generate(_test_combinations())\n def testStatefulMapKeepsStateAcrossIterators(self, apply_map):\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)\n fn = lambda _: random_ops.random_uniform((), seed=11)\n dataset = apply_map(dataset, fn).repeat(1000).batch(10)\n\n get_next = self.getNext(dataset)\n random_values = self.evaluate(get_next())\n\n # Assert that one of the next 99 batches yielded by the iterator is\n # different from the first.\n i = 0\n while i < 99:\n if np.any(random_values != self.evaluate(get_next())):\n 
break\n i += 1\n self.assertLess(i, 99)\n\n @combinations.generate(_test_combinations())\n def testStatefulOperationInShortCircuit(self, apply_map):\n counter_var = variable_scope.get_variable(\n \"counter\", (), dtypes.int32, use_resource=True)\n\n def increment_fn(x):\n counter_var.assign_add(1)\n return x\n\n dataset = dataset_ops.Dataset.range(10)\n dataset = apply_map(dataset, increment_fn)\n\n get_next = self.getNext(dataset, requires_initialization=True)\n\n self.evaluate(counter_var.initializer)\n for i in range(10):\n self.assertEqual(i, self.evaluate(counter_var))\n self.assertEqual(i, self.evaluate(get_next()))\n self.assertEqual(10, self.evaluate(counter_var))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n self.assertEqual(10, self.evaluate(counter_var))\n\n @combinations.generate(_test_combinations())\n def testMapDict(self, apply_map):\n dataset = dataset_ops.Dataset.range(10)\n dataset = apply_map(dataset, lambda x: {\"foo\": x * 2, \"bar\": x**2})\n dataset = apply_map(dataset, lambda d: d[\"foo\"] + d[\"bar\"])\n self.assertDatasetProduces(\n dataset, expected_output=[i * 2 + i**2 for i in range(10)])\n\n @combinations.generate(_test_combinations())\n def testMapNamedtuple(self, apply_map):\n # construct dataset of tuples\n labels = dataset_ops.Dataset.range(10)\n images = apply_map(labels, lambda l: -l)\n dataset_tuple = dataset_ops.Dataset.zip((labels, images))\n\n # convert dataset of tuples to dataset of namedtuples\n example = namedtuple(\"Example\", [\"label\", \"image\"])\n dataset_namedtuple = apply_map(dataset_tuple, example)\n\n def preprocess_tuple(label, image):\n image = 2 * image\n return label, image\n\n def preprocess_namedtuple(example):\n return example._replace(image=2 * example.image)\n\n # preprocess both datasets\n dataset_tuple = apply_map(dataset_tuple, preprocess_tuple)\n dataset_namedtuple = apply_map(dataset_namedtuple, preprocess_namedtuple)\n\n next_tuple = self.getNext(dataset_tuple)\n 
next_namedtuple = self.getNext(dataset_namedtuple)\n\n # make sure both datasets contain the same data\n for i in range(10):\n tuple_, namedtuple_ = self.evaluate([next_tuple(), next_namedtuple()])\n self.assertEqual(tuple_, namedtuple_)\n self.assertEqual(tuple_, (i, -2 * i))\n\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_namedtuple())\n\n @combinations.generate(_test_combinations())\n def testUseStepContainerInMap(self, apply_map):\n row = np.arange(6)\n dataset = dataset_ops.Dataset.from_tensors(row)\n dataset = apply_map(dataset,\n lambda elems: map_fn.map_fn(lambda x: x * x, elems))\n self.assertDatasetProduces(dataset, expected_output=[row**2])\n\n @combinations.generate(_test_combinations())\n def testCaseAndCondInMap(self, apply_map):\n\n def control_map_fn(x, y):\n\n def multiply():\n return x * 2\n\n def divide():\n return x // 2\n\n def defaults_two():\n return control_flow_ops.cond(\n math_ops.equal(math_ops.mod(x, 2), 0),\n multiply,\n divide,\n name=\"cond_mult\")\n\n pred_fn_pairs = [\n (math_ops.logical_or(math_ops.equal(y, 2),\n math_ops.equal(y, 3)), defaults_two),\n ]\n\n return control_flow_ops.case(\n pred_fn_pairs, default=multiply, exclusive=True)\n\n def build_dataset(row, num):\n dataset = dataset_ops.Dataset.from_tensor_slices(row)\n return apply_map(dataset, lambda x: control_map_fn(x, num))\n\n row = np.arange(6)\n for num in [2, 3, 4]:\n get_next = self.getNext(build_dataset(row, num))\n for i in range(6):\n self.assertEqual(\n (i // 2 if i % 2 else i * 2) if (num == 2 or num == 3) else i * 2,\n self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n @combinations.generate(_test_combinations())\n def testCaseInWhileInMap(self, apply_map):\n\n def control_map_fn(x, y):\n\n def multiply():\n return x * 2\n\n def divide():\n return x // 2\n\n pred_fn_pairs = [\n (math_ops.logical_or(math_ops.equal(y, 2),\n math_ops.equal(y, 3)), divide),\n ]\n\n return 
control_flow_ops.case(\n pred_fn_pairs, default=multiply, exclusive=True)\n\n def build_dataset(row, num):\n dataset = dataset_ops.Dataset.from_tensors(row)\n return apply_map(\n dataset,\n lambda elems: map_fn.map_fn(lambda x: control_map_fn(x, num), elems))\n\n row = np.arange(6)\n for num in [2, 3, 4]:\n get_next = self.getNext(build_dataset(row, num))\n self.assertAllEqual(\n [x // 2 if (num == 2 or num == 3) else x * 2 for x in row],\n self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n @combinations.generate(_test_combinations())\n def testCaseAndCondInWhileInMap(self, apply_map):\n\n def control_map_fn(x, y):\n\n def multiply():\n return x * 2\n\n def divide():\n return x // 2\n\n def defaults_two():\n return control_flow_ops.cond(\n math_ops.equal(math_ops.mod(x, 2), 0),\n multiply,\n divide,\n name=\"cond_mult\")\n\n pred_fn_pairs = [\n (math_ops.logical_or(math_ops.equal(y, 2),\n math_ops.equal(y, 3)), defaults_two),\n ]\n\n return control_flow_ops.case(\n pred_fn_pairs, default=multiply, exclusive=True)\n\n row = np.arange(6)\n num = 2\n dataset = dataset_ops.Dataset.from_tensors(row)\n dataset = apply_map(\n dataset,\n lambda elems: map_fn.map_fn(lambda x: control_map_fn(x, num), elems))\n get_next = self.getNext(dataset)\n\n self.assertAllEqual([(x // 2 if x % 2 else x * 2) if\n (num == 2 or num == 3) else x * 2 for x in row],\n self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n @combinations.generate(_test_combinations())\n def testNestedListMapDataset(self, apply_map):\n dataset = dataset_ops.Dataset.from_tensors([0, 1, 2]).repeat(10)\n dataset = apply_map(dataset, lambda a: ([a[1], a[0] + a[2]], a[1]))\n expected_output = [(np.array([1, 2]), 1)] * 10\n self.assertDatasetProduces(dataset, expected_output=expected_output)\n\n @combinations.generate(\n combinations.times(_test_combinations(),\n combinations.combine(buffer_size=[1, 2, 3, 
4])))\n def testPrefetch(self, apply_map, buffer_size):\n # We will use this event to test that `_map_py_func()` has been invoked a\n # certain number of times (6 times, to be exact) after consuming fewer\n # elements from the iterator.\n ev = threading.Event()\n\n set_event_during_invocation = 5\n\n def _map_py_func(x):\n if x == set_event_during_invocation:\n ev.set()\n return x * x\n\n def _map_fn(x):\n return script_ops.py_func(_map_py_func, [x], x.dtype)\n\n # We can indirectly observe that varying the buffer size has the intended\n # effect by observing when `ev` is set (on the 6th invocation of\n # `_map_py_func()`).\n # NOTE(mrry): We do not test with `buffer_size ==\n # set_event_during_invocation`, because we must consume at least one element\n # to start the prefetching.\n dataset = dataset_ops.Dataset.range(100)\n dataset = apply_map(dataset, _map_fn).prefetch(buffer_size)\n get_next = self.getNext(dataset)\n\n event_will_be_set_after_consuming = (\n set_event_during_invocation - buffer_size + 1)\n\n ev.clear()\n for i in range(event_will_be_set_after_consuming):\n self.assertFalse(ev.is_set())\n self.assertEqual(i * i, self.evaluate(get_next()))\n ev.wait()\n for i in range(event_will_be_set_after_consuming, 100):\n self.assertEqual(i * i, self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n @combinations.generate(_test_combinations())\n def testReturnList(self, apply_map):\n dataset = dataset_ops.Dataset.range(10)\n dataset = apply_map(dataset, lambda x: [x, constant_op.constant(37.0)])\n self.assertDatasetProduces(\n dataset, expected_output=[(i, 37.0) for i in range(10)])\n\n @combinations.generate(_test_combinations())\n def testMultiOutputPyFunc(self, apply_map):\n # The `tf.py_func()` op returns a list of tensors for its outputs.\n def _map_fn(x_tensor):\n def _map_py_func(x):\n return x, np.array(37.0, dtype=np.float64)\n return script_ops.py_func(\n _map_py_func, [x_tensor], [dtypes.int64, 
dtypes.float64])\n\n dataset = dataset_ops.Dataset.range(10)\n dataset = apply_map(dataset, _map_fn)\n self.assertDatasetProduces(\n dataset, expected_output=[(i, 37.0) for i in range(10)])\n\n @combinations.generate(_test_combinations())\n def testSparse(self, apply_map):\n\n def _sparse(i):\n return sparse_tensor.SparseTensorValue(\n indices=np.array([[0, 0]]),\n values=(i * np.array([1])),\n dense_shape=np.array([1, 1]))\n\n dataset = dataset_ops.Dataset.range(10)\n dataset = apply_map(dataset, _sparse)\n self.assertDatasetProduces(\n dataset, expected_output=[_sparse(i) for i in range(10)])\n\n @combinations.generate(_test_combinations())\n def testSparseChain(self, apply_map):\n\n def _sparse(i):\n return sparse_tensor.SparseTensorValue(\n indices=np.array([[0, 0]]),\n values=(i * np.array([1])),\n dense_shape=np.array([1, 1]))\n\n def _check(i):\n self.assertTrue(sparse_tensor.is_sparse(i))\n return sparse_ops.sparse_concat(0, [i, i])\n\n dataset = dataset_ops.Dataset.range(10)\n dataset = apply_map(dataset, _sparse)\n dataset = apply_map(dataset, _check)\n\n self.assertDatasetProduces(\n dataset,\n expected_output=[self.evaluate(_check(_sparse(i))) for i in range(10)])\n\n @combinations.generate(_test_combinations_with_mode(\"eager\"))\n def testSparseMapShapeInference(self, apply_map):\n row_lengths = np.random.randint(0, 4, size=128)\n values = np.ones(np.sum(row_lengths))\n sparse = ragged_tensor.RaggedTensor.from_row_lengths(\n values, row_lengths).to_sparse()\n dataset = dataset_ops.Dataset.from_tensor_slices(sparse)\n dataset = dataset.batch(32, drop_remainder=True)\n dataset = apply_map(dataset, lambda x: x)\n self.assertEqual((32, 3), dataset.element_spec.shape)\n\n @combinations.generate(_test_combinations_with_mode(\"eager\"))\n def testSparseMapShapeInferencePartial(self, apply_map):\n row_lengths = np.random.randint(0, 4, size=128)\n values = np.ones(np.sum(row_lengths))\n sparse = ragged_tensor.RaggedTensor.from_row_lengths(\n values, 
row_lengths).to_sparse()\n dataset = dataset_ops.Dataset.from_tensor_slices(sparse)\n dataset = dataset.batch(32, drop_remainder=False)\n dataset = apply_map(dataset, lambda x: x)\n self.assertEqual([None, 3], dataset.element_spec.shape.as_list())\n\n @combinations.generate(_test_combinations())\n def testTensorArray(self, apply_map):\n\n def _tensor_array(i):\n i = math_ops.cast(i, dtypes.int32)\n return (\n tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)\n .unstack(math_ops.range(i, dtype=dtypes.int32)))\n\n dataset = dataset_ops.Dataset.range(10)\n dataset = apply_map(dataset, _tensor_array)\n self.assertDatasetProduces(\n dataset, expected_output=[list(range(i)) for i in range(10)])\n\n @combinations.generate(_test_combinations())\n def testTensorArrayChain(self, apply_map):\n\n def _tensor_array(i):\n i = math_ops.cast(i, dtypes.int32)\n return (\n tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)\n .unstack(math_ops.range(i, dtype=dtypes.int32)))\n\n def _check(x):\n self.assertIsInstance(x, tensor_array_ops.TensorArray)\n return x.identity()\n\n dataset = dataset_ops.Dataset.range(10)\n dataset = apply_map(dataset, _tensor_array)\n dataset = apply_map(dataset, _check)\n\n self.assertDatasetProduces(\n dataset,\n expected_output=[list(range(i)) for i in range(10)])\n\n @combinations.generate(_test_combinations())\n def testRagged(self, apply_map):\n\n def _ragged(i):\n return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])\n\n dataset = dataset_ops.Dataset.range(5)\n dataset = apply_map(dataset, _ragged)\n self.assertDatasetProduces(\n dataset,\n expected_output=[ragged_factory_ops.constant([[i]]) for i in range(5)])\n\n @combinations.generate(_test_combinations())\n def testRaggedChain(self, apply_map):\n\n def _ragged(i):\n return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])\n\n def _concat(i):\n self.assertTrue(ragged_tensor.is_ragged(i))\n return ragged_concat_ops.concat([i, i], 0)\n\n dataset = 
dataset_ops.Dataset.range(10)\n dataset = apply_map(dataset, _ragged)\n dataset = apply_map(dataset, _concat)\n\n self.assertDatasetProduces(\n dataset,\n expected_output=[\n self.evaluate(_concat(ragged_factory_ops.constant([[i]])))\n for i in range(10)\n ])\n\n # TODO(b/123904513)\n @combinations.generate(_test_combinations_with_mode_v1(\"graph\"))\n def testParallelMapOutOfRangeError(self, apply_map):\n\n def raising_py_func(i):\n if i == 100:\n raise StopIteration()\n else:\n return i\n\n dataset = dataset_ops.Dataset.range(105)\n dataset = apply_map(\n dataset,\n lambda x: script_ops.py_func(raising_py_func, [x], dtypes.int64),\n num_parallel_calls=2)\n get_next = self.getNext(dataset)\n for i in range(100):\n self.assertEqual(i, self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n @combinations.generate(_test_combinations())\n def testConstantOutput(self, apply_map):\n dataset = dataset_ops.Dataset.range(10)\n dataset = apply_map(dataset, lambda x: [x, \"hello\", 10])\n self.assertDatasetProduces(dataset, [(i, b\"hello\", 10) for i in range(10)])\n\n @combinations.generate(_test_combinations())\n def testWarnOnLookupTable(self, apply_map):\n\n def collecting_function(x):\n _ = lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer([\"a\"], [1.]), 0.0, name=\"t1\")\n return x\n\n warnings.simplefilter(\"always\")\n with warnings.catch_warnings(record=True) as w:\n dataset = dataset_ops.Dataset.range(10)\n _ = apply_map(dataset, collecting_function)\n # NOTE(mrry): Python 3 prints other warnings in addition to the one we are\n # testing, so we search for the expected warning.\n self.assertGreaterEqual(len(w), 1)\n found_warning = False\n for warning in w:\n if (\"Creating resources inside a function passed to Dataset.map() is \"\n \"not supported.\" in str(warning)):\n found_warning = True\n break\n self.assertTrue(found_warning)\n\n @combinations.generate(test_base.default_test_combinations())\n def 
testWarnOnSeedFromOuterGraph(self):\n with ops.Graph().as_default() as g:\n g.seed = 10\n warnings.simplefilter(\"always\")\n\n def _check_warning(caught_warnings, expected_result):\n found_warning = False\n for warning in caught_warnings:\n if (\"Explicitly set the seed in the function if this is not the \"\n \"intended behavior\" in str(warning)):\n found_warning = True\n break\n self.assertEqual(found_warning, expected_result)\n\n # map_fun doesn't use seed, so no warning is generated.\n with warnings.catch_warnings(record=True) as w:\n _ = dataset_ops.Dataset.range(10).map(math_ops.square)\n _check_warning(w, False)\n\n def random_func(x):\n x = math_ops.add(x, 1)\n random_ops.random_shuffle([x, math_ops.square(x)])\n return x\n\n with warnings.catch_warnings(record=True) as w:\n _ = dataset_ops.Dataset.range(10).map(random_func)\n _check_warning(w, True)\n\n def random_func_seeded(x):\n ops.get_default_graph().seed = None\n random_ops.random_shuffle(x)\n return x\n\n with warnings.catch_warnings(record=True) as w:\n _ = dataset_ops.Dataset.range(10).batch(2).map(random_func_seeded)\n _check_warning(w, False)\n\n with warnings.catch_warnings(record=True) as w:\n _ = dataset_ops.Dataset.range(10).batch(2).map(\n lambda x: random_ops.random_shuffle(x, seed=37))\n _check_warning(w, False)\n\n @combinations.generate(_test_combinations())\n def testNestedDatasetMap(self, apply_map):\n dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])\n dataset = apply_map(dataset, dataset_ops.Dataset.from_tensor_slices)\n dataset = apply_map(dataset, lambda ds: ds.batch(3)).flat_map(lambda x: x)\n\n self.assertDatasetProduces(dataset, expected_output=[[1.0, 2.0, 3.0]])\n\n @combinations.generate(_test_combinations())\n def testReturnValueError(self, apply_map):\n dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])\n with self.assertRaisesRegexp(\n TypeError, r\"Unsupported return value from function passed to \"\n r\"Dataset.map\\(\\)\"):\n _ = apply_map(dataset, 
lambda x: Foo)\n\n @combinations.generate(test_base.default_test_combinations())\n def testBrokenFunctionErrorOnInitialization(self):\n dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0, 3.0])\n\n def broken_function(_):\n \"\"\"A function deliberately designed to fail on instantiation.\"\"\"\n value = []\n tensor_value = attr_value_pb2.AttrValue()\n tensor_value.tensor.CopyFrom(\n tensor_util.make_tensor_proto(\n value, dtype=dtypes.float32, shape=[0], verify_shape=False))\n dtype_value = attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum)\n\n # Create a \"Const\" op with a `tf.float32` value and a `tf.int32` type.\n const_tensor = ops.get_default_graph().create_op(\n \"Const\", [], [dtypes.int32],\n attrs={\n \"value\": tensor_value,\n \"dtype\": dtype_value\n },\n name=\"BrokenConst\").outputs[0]\n return const_tensor\n\n dataset = dataset.map(broken_function)\n self.assertDatasetProduces(\n dataset, expected_error=(errors.InvalidArgumentError, \"BrokenConst\"))\n\n @combinations.generate(\n combinations.times(\n _test_combinations_with_mode(\"graph\"),\n combinations.combine(num_parallel_calls=[None, 12])))\n def testNoInterOpParallelism(self, apply_map, num_parallel_calls):\n dataset = dataset_ops.Dataset.from_tensors(0)\n\n def _get_tid():\n return np.int64(threading.current_thread().ident)\n\n def _map_fn(_):\n tids = []\n for _ in range(10):\n tids.append(script_ops.py_func(_get_tid, [], dtypes.int64))\n return tids\n\n dataset = apply_map(dataset, _map_fn)\n dataset._variant_tensor.op._set_attr(\"use_inter_op_parallelism\",\n attr_value_pb2.AttrValue(b=False))\n get_next = self.getNext(dataset)\n\n tids = self.evaluate(get_next())\n self.assertTrue(all(tids[0] == tid for tid in tids))\n\n @combinations.generate(\n combinations.times(_test_combinations(), _short_circuit_test_cases(),\n combinations.combine(num_parallel_calls=[None, 12])))\n def testShortCircuit(self, apply_map, structure, fn, num_parallel_calls):\n dataset = 
self.structuredDataset(structure).repeat()\n dataset = apply_map(dataset, fn, num_parallel_calls=num_parallel_calls)\n get_next = self.getNext(dataset)\n\n if isinstance(structure, tuple):\n expected = fn(*self.evaluate(self.structuredElement(structure)))\n else:\n expected = fn(self.evaluate(self.structuredElement(structure)))\n self.assertEqual(expected, self.evaluate(get_next()))\n\n @combinations.generate(\n combinations.times(_test_combinations(),\n combinations.combine(num_parallel_calls=[None, 12])))\n def testShortCircuitCapturedInput(self, apply_map, num_parallel_calls):\n captured_t = variables.Variable(42)\n dataset = self.structuredDataset(None).repeat()\n dataset = apply_map(\n dataset, lambda x: captured_t, num_parallel_calls=num_parallel_calls)\n self.evaluate(variables.global_variables_initializer())\n get_next = self.getNext(dataset, requires_initialization=True)\n\n self.assertEqual(42, self.evaluate(get_next()))\n\n @combinations.generate(\n combinations.times(\n _test_combinations(),\n combinations.combine(num_elements=1, num_parallel_calls=1) +\n combinations.combine(num_elements=10, num_parallel_calls=1) +\n combinations.combine(num_elements=10, num_parallel_calls=10) +\n combinations.combine(num_elements=100, num_parallel_calls=1) +\n combinations.combine(num_elements=100, num_parallel_calls=10) +\n combinations.combine(num_elements=100, num_parallel_calls=100)))\n def testSloppyInterleaveInOrder(self, apply_map, num_elements,\n num_parallel_calls):\n dataset, coordination_events = _make_coordinated_sloppy_dataset(\n apply_map, num_elements, num_parallel_calls)\n options = dataset_ops.Options()\n options.experimental_threading = threading_options.ThreadingOptions()\n options.experimental_threading.private_threadpool_size = (\n num_parallel_calls + 1)\n dataset = dataset.with_options(options)\n get_next = self.getNext(dataset, requires_initialization=True)\n for i in range(num_elements):\n coordination_events[i].set()\n self.assertEqual(i * i, 
self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n @combinations.generate(\n combinations.times(\n _test_combinations(),\n combinations.combine(num_elements=10, num_parallel_calls=10) +\n combinations.combine(num_elements=100, num_parallel_calls=10) +\n combinations.combine(num_elements=100, num_parallel_calls=100)))\n def testSloppyInterleaveOutOfOrder(self, apply_map, num_elements,\n num_parallel_calls):\n dataset, coordination_events = _make_coordinated_sloppy_dataset(\n apply_map, num_elements, num_parallel_calls)\n options = dataset_ops.Options()\n options.experimental_threading = threading_options.ThreadingOptions()\n options.experimental_threading.private_threadpool_size = (\n num_parallel_calls + 1)\n dataset = dataset.with_options(options)\n\n get_next = self.getNext(dataset, requires_initialization=True)\n\n elements = [x for x in range(num_elements)]\n for i in [1, 4, 7]:\n elements[i], elements[i + 1] = elements[i + 1], elements[i]\n\n for element in elements:\n coordination_events[element].set()\n self.assertEqual(element * element, self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n @combinations.generate(\n combinations.combine(\n tf_api_version=2,\n mode=[\"eager\", \"graph\"],\n num_parallel_calls=[None, 12]))\n def testPreserveCardinality(self, num_parallel_calls):\n\n def py_fn(_):\n raise StopIteration()\n\n dataset = dataset_ops.Dataset.from_tensors(0).map(\n lambda x: script_ops.py_func(py_fn, [x], dtypes.int64),\n num_parallel_calls=num_parallel_calls)\n get_next = self.getNext(dataset)\n with self.assertRaises(errors.InvalidArgumentError):\n self.evaluate(get_next())\n\n @combinations.generate(_test_combinations_with_mode(\"graph\"))\n def testCollectionCopy(self, apply_map):\n w = variable_scope.get_variable(\"w\", [])\n self.assertIn(w, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))\n\n def func(x):\n self.assertIn(w, 
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))\n return x\n\n dataset = dataset_ops.Dataset.from_tensors(constant_op.constant(1.0))\n _ = apply_map(dataset, func)\n\n @combinations.generate(\n combinations.times(\n _test_combinations_with_mode_v1(\"graph\"),\n combinations.combine(num_parallel_calls=[None, 12])))\n def testMapCancellation(self, apply_map, num_parallel_calls):\n # Checks that a cancellation of is threaded through to map transformation.\n queue = data_flow_ops.FIFOQueue(10, dtypes.int32, ())\n\n def fn(_):\n return queue.dequeue()\n\n dataset = dataset_ops.Dataset.range(1)\n dataset = apply_map(dataset, fn, num_parallel_calls=num_parallel_calls)\n get_next = self.getNext(dataset, requires_initialization=True)\n\n with self.cached_session() as sess:\n thread = self.checkedThread(self.assert_op_cancelled, args=(get_next(),))\n thread.start()\n time.sleep(0.2)\n sess.close()\n thread.join()\n\n\n # TODO(b/126553094): map doesnt work with variable defined inside function in\n # eager mode, possible Graph tensors leak out of the function building context\n # from function graph in eager mode as variables are created in init_scope.\n @combinations.generate(test_base.graph_only_combinations())\n def testCreateVariableInsideFunctionWithGetter(self):\n\n def func(_):\n with variable_scope.variable_scope(\n \"variable\", reuse=variable_scope.AUTO_REUSE):\n counter_var = variable_scope.get_variable(\n \"counter\", (), dtypes.int32, use_resource=True)\n return counter_var.assign_add(1)\n\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)\n\n if hasattr(dataset, \"map_with_legacy_function\"):\n # NOTE: In the legacy function, resource is captured by value.\n with self.assertRaisesWithPredicateMatch(\n AttributeError, \"'Tensor' object has no attribute 'assign_add'\"):\n dataset.map_with_legacy_function(func)\n\n dataset = dataset.map(func)\n self.evaluate(variables.global_variables_initializer())\n\n get_next = self.getNext(dataset, 
requires_initialization=True)\n\n for i in range(10):\n self.assertEqual(i + 1, self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n @combinations.generate(_test_combinations())\n def testCaptureVariable(self, apply_map):\n counter_var = variable_scope.get_variable(\n \"counter\", (), dtypes.int32, use_resource=True)\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)\n dataset = apply_map(dataset, lambda _: counter_var.assign_add(1))\n get_next = self.getNext(dataset, requires_initialization=True)\n\n self.evaluate(counter_var.initializer)\n\n for i in range(10):\n self.assertEqual(i, self.evaluate(counter_var))\n self.assertEqual(i + 1, self.evaluate(get_next()))\n self.assertEqual(10, self.evaluate(counter_var))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n self.assertEqual(10, self.evaluate(counter_var))\n\n @combinations.generate(_test_combinations_with_mode_v1(\"graph\"))\n def testCaptureUninitializedVariableError(self, apply_map):\n counter_var = variable_scope.get_variable(\n \"counter\", (), dtypes.int32, use_resource=True)\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)\n dataset = apply_map(dataset, lambda _: counter_var.assign_add(1))\n\n get_next = self.getNext(dataset, requires_initialization=True)\n with self.assertRaises(errors.NotFoundError):\n self.evaluate(get_next())\n\n # TODO(b/121264236): add eager mode coverage when we have multi-device setup.\n @combinations.generate(_test_combinations_with_mode_v1(\"graph\"))\n def testCaptureConstantsWithConflictingDevices(self, apply_map):\n config = config_pb2.ConfigProto(device_count={\"CPU\": 3})\n with self.cached_session(config=config):\n with ops.device(\"/device:CPU:0\"):\n a = constant_op.constant(3.0)\n with ops.device(\"/device:CPU:1\"):\n b = constant_op.constant(5.0)\n\n def func(_):\n return math_ops.add(a, b)\n\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)\n dataset = 
apply_map(dataset, func)\n expected_output = [8.0] * 10\n self.assertDatasetProduces(dataset, expected_output=expected_output)\n\n # TODO(b/121264236): add eager mode coverage when we have multi-device setup.\n @combinations.generate(_test_combinations_with_mode_v1(\"graph\"))\n def testReferenceVariablesWithMultipleDevices(self, apply_map):\n config = config_pb2.ConfigProto(device_count={\"CPU\": 3})\n with self.cached_session(config=config):\n\n def func(_):\n with ops.device(\"/device:CPU:0\"):\n a = variables.VariableV1(3.0)\n with ops.device(\"/device:CPU:1\"):\n b = variables.VariableV1(5.0)\n return math_ops.add(a, b)\n\n # NOTE: Use the legacy function implementation as eager function will\n # convert RefVariables to ResourceVariables.\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)\n dataset = apply_map(dataset, func)\n self.evaluate(variables.global_variables_initializer())\n expected_output = [8.0] * 10\n self.assertDatasetProduces(\n dataset,\n expected_output=expected_output,\n requires_initialization=True)\n\n # TODO(b/121264236): add eager mode coverage when we have multi-device setup.\n @combinations.generate(_test_combinations_with_mode_v1(\"graph\"))\n def testResourceVariablesWithMultipleDevices(self, apply_map):\n config = config_pb2.ConfigProto(device_count={\"CPU\": 3})\n\n def func(_):\n with variable_scope.variable_scope(\n \"variable\", reuse=variable_scope.AUTO_REUSE):\n with ops.device(\"/device:CPU:0\"):\n a_var = variable_scope.get_variable(\n \"a\", (), dtypes.int32, use_resource=True)\n a_var = math_ops.add(a_var, 1)\n with ops.device(\"/device:CPU:1\"):\n b_var = variable_scope.get_variable(\n \"b\", (), dtypes.int32, use_resource=True)\n return math_ops.add(a_var, b_var)\n\n g = ops.Graph()\n with self.session(config=config, graph=g):\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)\n dataset = apply_map(dataset, func)\n self.evaluate(variables.global_variables_initializer())\n expected_output = [1] * 10\n 
self.assertDatasetProduces(\n dataset,\n expected_output=expected_output,\n requires_initialization=True)\n\n @combinations.generate(\n combinations.times(\n _test_combinations(),\n combinations.combine(\n local_determinism=[None, True, False],\n global_determinism=[True, False])))\n def testDeterminismConfiguration(self, apply_map, local_determinism,\n global_determinism):\n expect_determinism = local_determinism or (local_determinism is None and\n global_determinism)\n elements = list(range(1000))\n\n def dataset_fn(delay_ms):\n\n def sleep(x):\n time.sleep(delay_ms / 1000)\n return x\n\n def map_function(x):\n if math_ops.equal(x, 0):\n return script_ops.py_func(sleep, [x], x.dtype)\n else:\n return x\n\n dataset = dataset_ops.Dataset.from_tensor_slices(elements)\n dataset = apply_map(\n dataset,\n map_function,\n num_parallel_calls=2,\n deterministic=local_determinism)\n opts = dataset_ops.Options()\n opts.experimental_deterministic = global_determinism\n dataset = dataset.with_options(opts)\n return dataset\n\n self.checkDeterminism(\n dataset_fn, expect_determinism, expected_elements=elements)\n\n @combinations.generate(_test_combinations())\n def testNoneComponent(self, apply_map):\n dataset = dataset_ops.Dataset.from_tensors((42, None))\n\n def map_function(x, y):\n if y is None:\n return x / 2\n return x\n\n dataset = apply_map(dataset, map_function)\n self.assertDatasetProduces(dataset, expected_output=[21])\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `experimental_slack` option.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import multi_device_iterator_ops\nfrom tensorflow.python.framework import combinations\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.platform import test\n\n\nclass PrefetchWithSlackTest(test_base.DatasetTestBase, parameterized.TestCase):\n\n # TODO(b/121264236)\n @combinations.generate(\n combinations.combine(tf_api_version=[1], mode=[\"graph\"]))\n def testPrefetchWithSlackOption(self):\n \"\"\"Determines slack_period based on num devices attached to iterator.\"\"\"\n dataset = dataset_ops.Dataset.range(10)\n dataset = dataset.prefetch(1)\n options = dataset_ops.Options()\n options.experimental_slack = True\n dataset = dataset.with_options(options)\n multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(\n dataset, [\"/cpu:1\", \"/cpu:2\"])\n dataset = multi_device_iterator._dataset # pylint: 
disable=protected-access\n self.assertIn(\"slack\", dataset.options()._graph_rewrites())\n self.assertIn(\"slack:slack_period:2\",\n dataset.options()._graph_rewrite_configs())\n\n config = config_pb2.ConfigProto(device_count={\"CPU\": 3})\n with self.test_session(config=config):\n self.evaluate(multi_device_iterator.initializer)\n for i in range(0, 10, 2):\n elem_on_1, elem_on_2 = multi_device_iterator.get_next()\n self.assertEqual(i, self.evaluate(elem_on_1))\n self.assertEqual(i + 1, self.evaluate(elem_on_2))\n with self.assertRaises(errors.OutOfRangeError):\n elem_on_1, elem_on_2 = multi_device_iterator.get_next()\n self.evaluate(elem_on_1)\n self.evaluate(elem_on_2)\n\n @combinations.generate(test_base.default_test_combinations())\n def testPrefetchWithSlackOptionWithoutIterator(self):\n \"\"\"Defaults to slack period of 1 without iterator.\"\"\"\n dataset = dataset_ops.Dataset.range(10)\n dataset = dataset.prefetch(1)\n options = dataset_ops.Options()\n options.experimental_slack = True\n dataset = dataset.with_options(options)\n self.assertIn(\"slack\", dataset.options()._graph_rewrites())\n self.assertIn(\"slack:slack_period:1\",\n dataset.options()._graph_rewrite_configs())\n self.assertDatasetProduces(dataset, range(10))\n\n @combinations.generate(test_base.default_test_combinations())\n def testWithPassthroughDataset(self):\n \"\"\"Should still work with a passthrough dataset after prefetch().\"\"\"\n dataset = dataset_ops.Dataset.range(10)\n dataset = dataset.prefetch(1)\n dataset = dataset.map(lambda x: x + 1)\n options = dataset_ops.Options()\n options.experimental_slack = True\n dataset = dataset.with_options(options)\n self.assertDatasetProduces(dataset, range(1, 11))\n\n @combinations.generate(test_base.default_test_combinations())\n def testErrorWithoutPrefetch(self):\n \"\"\"The rewrite fails if there is no prefetch() in the pipeline.\"\"\"\n dataset = dataset_ops.Dataset.range(10)\n options = dataset_ops.Options()\n options.experimental_slack = 
True\n dataset = dataset.with_options(options)\n with self.assertRaises(errors.InvalidArgumentError):\n get_next = self.getNext(dataset)\n self.evaluate(get_next())\n\n @combinations.generate(test_base.default_test_combinations())\n def testErrorWithInvalidDataset(self):\n \"\"\"With a nested dataset op after prefetch, the rewrite should fail.\"\"\"\n dataset = dataset_ops.Dataset.range(10)\n dataset = dataset.prefetch(1)\n dataset = dataset.flat_map(dataset_ops.Dataset.from_tensors)\n options = dataset_ops.Options()\n options.experimental_slack = True\n dataset = dataset.with_options(options)\n with self.assertRaises(errors.InvalidArgumentError):\n get_next = self.getNext(dataset)\n self.evaluate(get_next())\n\n\nif __name__ == \"__main__\":\n ops.enable_eager_execution(\n config=config_pb2.ConfigProto(device_count={\"CPU\": 3}))\n test.main()\n",
"# Lint as: python2, python3\n# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Calibrator.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\nfrom six.moves import range\n\nfrom tensorflow.lite.python import lite_constants as constants\nfrom tensorflow.lite.python.optimize import calibrator as _calibrator\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import resource_loader\nfrom tensorflow.python.platform import test\n\n\nclass CalibratorTest(test_util.TensorFlowTestCase, parameterized.TestCase):\n\n def test_calibration_with_quantization(self):\n model_path = resource_loader.get_path_to_datafile(\n 'test_data/mobilenet_like_model.bin')\n float_model = open(model_path, 'rb').read()\n quantizer = _calibrator.Calibrator(float_model)\n\n # Input generator for the model.\n def input_gen():\n for _ in range(10):\n yield [np.ones(shape=(1, 5, 5, 3), dtype=np.float32)]\n\n quantized_model = quantizer.calibrate_and_quantize(input_gen,\n constants.FLOAT,\n constants.FLOAT, False)\n self.assertIsNotNone(quantized_model)\n\n def test_calibration_with_quantization_allow_float(self):\n model_path = resource_loader.get_path_to_datafile(\n 
'test_data/mobilenet_like_model.bin')\n float_model = open(model_path, 'rb').read()\n quantizer = _calibrator.Calibrator(float_model)\n\n # Input generator for the model.\n def input_gen():\n for _ in range(10):\n yield [np.ones(shape=(1, 5, 5, 3), dtype=np.float32)]\n\n quantized_model = quantizer.calibrate_and_quantize(input_gen,\n constants.FLOAT,\n constants.FLOAT, True)\n self.assertIsNotNone(quantized_model)\n\n def test_calibration_with_quantization_single_op(self):\n model_path = resource_loader.get_path_to_datafile(\n 'test_data/mobilenet_like_model.bin')\n float_model = open(model_path, 'rb').read()\n quantizer = _calibrator.Calibrator(float_model)\n\n # Input generator for the model.\n def input_gen():\n for _ in range(10):\n yield [np.ones(shape=(1, 5, 5, 3), dtype=np.float32)]\n\n quantized_model = quantizer.calibrate_and_quantize_single(\n input_gen, constants.FLOAT, constants.FLOAT, True, 'conv2d_8/BiasAdd')\n self.assertIsNotNone(quantized_model)\n\n def test_calibration_with_quantization_multiple_inputs(self):\n # Load multi add model from test data.\n # This model has 4 inputs of size (1, 8, 8, 3).\n model_path = resource_loader.get_path_to_datafile(\n '../../testdata/multi_add.bin')\n float_model = open(model_path, 'rb').read()\n quantizer = _calibrator.Calibrator(float_model)\n\n # Input generator for the model.\n def input_gen():\n for _ in range(10):\n yield [np.ones(shape=(1, 8, 8, 3), dtype=np.float32) for _ in range(4)]\n\n quantized_model = quantizer.calibrate_and_quantize(input_gen,\n constants.FLOAT,\n constants.FLOAT, False)\n self.assertIsNotNone(quantized_model)\n\n def test_invalid_model_buffer(self):\n float_model = b'\\0' * 100\n with self.assertRaisesRegex(ValueError, 'Failed to parse the model'):\n _calibrator.Calibrator(float_model)\n\n # TODO(fengliuai): enable mlir quantizer\n def test_empty_calibrator_gen(self):\n model_path = resource_loader.get_path_to_datafile(\n 'test_data/mobilenet_like_model.bin')\n float_model = 
open(model_path, 'rb').read()\n quantizer = _calibrator.Calibrator(float_model)\n\n def empty_input_gen():\n for i in ():\n yield i\n\n with self.assertRaises(RuntimeError):\n quantizer.calibrate_and_quantize(empty_input_gen, constants.FLOAT,\n constants.FLOAT, False)\n\n def test_invalid_shape_calibrator_gen(self):\n model_path = resource_loader.get_path_to_datafile(\n 'test_data/mobilenet_like_model.bin')\n float_model = open(model_path, 'rb').read()\n quantizer = _calibrator.Calibrator(float_model)\n\n # Input generator with incorrect shape.\n def input_gen():\n for _ in range(10):\n yield [np.ones(shape=(1, 2, 2, 3), dtype=np.float32)]\n\n with self.assertRaisesRegex(ValueError, 'Size mismatch'):\n quantizer.calibrate_and_quantize(input_gen, constants.FLOAT,\n constants.FLOAT, False, False)\n\n def test_invalid_type_calibrator_gen(self):\n model_path = resource_loader.get_path_to_datafile(\n 'test_data/mobilenet_like_model.bin')\n float_model = open(model_path, 'rb').read()\n quantizer = _calibrator.Calibrator(float_model)\n\n # Input generator with incorrect type.\n def input_gen():\n for _ in range(10):\n yield [np.ones(shape=(1, 5, 5, 3), dtype=np.int32)]\n\n with self.assertRaises(ValueError):\n quantizer.calibrate_and_quantize(input_gen, constants.FLOAT,\n constants.FLOAT, False)\n\n def test_calibration(self):\n model_path = resource_loader.get_path_to_datafile(\n 'test_data/mobilenet_like_model.bin')\n float_model = open(model_path, 'rb').read()\n quantizer = _calibrator.Calibrator(float_model)\n\n # Input generator for the model.\n def input_gen():\n for _ in range(10):\n yield [np.ones(shape=(1, 5, 5, 3), dtype=np.float32)]\n\n quantized_model = quantizer.calibrate(input_gen)\n self.assertIsNotNone(quantized_model)\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"This module customizes `test_combinations` for `tf.distribute.Strategy`.\n\nAdditionally it provides `generate()`, `combine()` and `times()` with\n`tf.distribute.Strategy` customizations as a default.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport types\nimport unittest\n\nimport six\n\nfrom tensorflow.python.distribute import multi_process_runner\nfrom tensorflow.python.distribute import multi_worker_test_base\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import combinations as framework_combinations\nfrom tensorflow.python.framework import test_combinations as combinations_lib\nfrom tensorflow.python.platform import flags\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util import tf_inspect\n\nFLAGS = flags.FLAGS\n\n\n# TODO(rchao): Rename `distribution` parameter to `strategy` or\n# `distribute_strategy` in all tests.\nclass DistributionParameter(combinations_lib.ParameterModifier):\n \"\"\"Transforms arguments of type `NamedDistribution`.\n\n Convert all arguments of type `NamedDistribution` to the value of their\n `strategy` property.\n \"\"\"\n\n def modified_arguments(self, kwargs, requested_parameters):\n 
del requested_parameters\n distribution_arguments = {}\n for k, v in kwargs.items():\n if isinstance(v, NamedDistribution):\n distribution_arguments[k] = v.strategy\n return distribution_arguments\n\n\nclass ClusterParameters(combinations_lib.ParameterModifier):\n \"\"\"Adds cluster parameters if a `NamedDistribution` has it.\n\n It needs to be before DistributionParameter.\n \"\"\"\n\n def modified_arguments(self, kwargs, requested_parameters):\n strategy = None\n for _, v in kwargs.items():\n if isinstance(v, NamedDistribution):\n if strategy is not None and _num_total_workers(v.has_chief,\n v.num_workers) > 1:\n raise ValueError(\"Only support one NamedDistribution for multi worker\"\n \"tests.\")\n strategy = v\n # Always set cluster parameters if they're requested. So that generate()\n # works when there's no startegy in the combinations.\n update = {}\n if \"has_chief\" in requested_parameters:\n update[\"has_chief\"] = strategy.has_chief if strategy else False\n if \"num_workers\" in requested_parameters:\n update[\"num_workers\"] = strategy.num_workers if strategy else 1\n return update\n\n\nclass NamedGPUCombination(combinations_lib.TestCombination):\n \"\"\"Enable tests to request GPU hardware and skip non-GPU combinations.\n\n This class expects test_combinations to be generated with `NamedDistribution`\n wrapping instances of `tf.distribute.Strategy`.\n\n Optionally, the `required_gpus` argument is supported. 
GPU hardware is\n required, if its value is `True` or > 0.\n\n Attributes:\n GPU_TEST: The environment is considered to have GPU hardware available if\n the name of the program contains \"test_gpu\".\n \"\"\"\n\n GPU_TEST = \"test_gpu\" in sys.argv[0]\n\n def should_execute_combination(self, kwargs):\n distributions = [\n v for v in kwargs.values() if isinstance(v, NamedDistribution)\n ]\n required_gpus = kwargs.get(\"required_gpus\", None)\n\n if distributions and required_gpus:\n raise ValueError(\"Do not use `required_gpus` and arguments of type \"\n \"NamedDistribution together.\")\n\n number_of_required_gpus = max([required_gpus or 0] +\n [d.required_gpus or 0 for d in distributions])\n\n if not number_of_required_gpus and GPUCombination.GPU_TEST:\n return (False, \"Test that doesn't require GPUs.\")\n elif (number_of_required_gpus > 0\n and context.num_gpus() < number_of_required_gpus):\n return (False, (\"Only {} of {} required GPUs are available.\".format(\n context.num_gpus(), number_of_required_gpus)))\n else:\n return (True, None)\n\n def parameter_modifiers(self):\n return [combinations_lib.OptionalParameter(\"required_gpus\")]\n\n\nclass GPUCombination(NamedGPUCombination):\n \"\"\"NamedGPUCombination that passes `tf.distribute.Strategy` to the tests.\"\"\"\n\n def parameter_modifiers(self):\n return [\n ClusterParameters(),\n DistributionParameter(),\n ] + NamedGPUCombination.parameter_modifiers(self)\n\n\nclass NamedTPUCombination(combinations_lib.TestCombination):\n \"\"\"Allow to request TPU hardware and skip non-TPU combinations.\n\n This class expects test_combinations to be generated with `NamedDistribution`\n wrapping instances of `tf.distribute.Strategy`.\n\n Optionally, the `required_tpus` parameter is supported. TPU hardware is\n required, if its argument is `True` or > 0.\n\n Optionally, the `use_cloud_tpu` parameter is supported. 
If TPU hardware is\n required by `required_tpus`, it specifically must be a Cloud TPU (specified\n with `--tpu`) if `use_cloud_tpu` is `True`.\n\n Attributes:\n TPU_TEST: The environment is considered to have TPU hardware available if\n the name of the program contains \"test_tpu\".\n \"\"\"\n\n TPU_TEST = \"test_tpu\" in sys.argv[0]\n\n def should_execute_combination(self, kwargs):\n distributions = [\n v for v in kwargs.values() if isinstance(v, NamedDistribution)\n ]\n # TODO(isaprykin): Migrate all tests away from using 'required_tpu' in favor\n # of 'required_tpus'.\n if \"required_tpus\" in kwargs and \"required_tpu\" in kwargs:\n raise ValueError(\"Do not use `required_tpu`. Both `required_tpus` and \"\n \"`required_tpu` were specified.\")\n required_tpus = kwargs.get(\"required_tpus\", None) or kwargs.get(\n \"required_tpu\", None)\n\n if distributions and required_tpus:\n raise ValueError(\"Do not use `required_tpus` and arguments of type \"\n \"NamedDistribution together.\")\n\n # TODO(isaprykin): Add support for a particular number of TPUs. 
Right now\n # it's binary.\n number_of_required_tpus = max([required_tpus or 0] +\n [d.required_tpu or 0 for d in distributions])\n use_cloud_tpu = any([kwargs.get(\"use_cloud_tpu\")] +\n [d.use_cloud_tpu for d in distributions])\n tpu = hasattr(FLAGS, \"tpu\") and FLAGS.tpu or \"\"\n\n if not number_of_required_tpus and TPUCombination.TPU_TEST:\n return (False, \"Test that doesn't require TPUs.\")\n if number_of_required_tpus and not TPUCombination.TPU_TEST:\n return (False, \"Test requires a TPU, but it's not available.\")\n if use_cloud_tpu and not tpu:\n return (False, \"Test requires a Cloud TPU, but none specified.\")\n if not use_cloud_tpu and tpu:\n return (False, \"Test requires local TPU, but Cloud TPU specified.\")\n return (True, None)\n\n def parameter_modifiers(self):\n return [\n combinations_lib.OptionalParameter(\"required_tpus\"),\n combinations_lib.OptionalParameter(\"required_tpu\"),\n combinations_lib.OptionalParameter(\"use_cloud_tpu\"),\n ]\n\n\nclass TPUCombination(NamedTPUCombination):\n \"\"\"NamedTPUCombination that passes `tf.distribute.Strategy` to the tests.\"\"\"\n\n def parameter_modifiers(self):\n return [\n ClusterParameters(),\n DistributionParameter(),\n ] + NamedTPUCombination.parameter_modifiers(self)\n\n\nclass NamedDistribution(object):\n \"\"\"Wraps a `tf.distribute.Strategy` and adds a name for test titles.\"\"\"\n\n def __init__(self,\n name,\n distribution_fn,\n required_gpus=None,\n required_tpu=False,\n use_cloud_tpu=False,\n has_chief=False,\n num_workers=1):\n \"\"\"Initialize NamedDistribution.\n\n Args:\n name: Name that will be a part of the name of the test case.\n distribution_fn: A callable that creates a `tf.distribute.Strategy`.\n required_gpus: The number of GPUs that the strategy requires.\n required_tpu: Whether the strategy requires TPU.\n use_cloud_tpu: Whether the strategy requires cloud TPU.\n has_chief: Whether the strategy requires a chief worker.\n num_workers: The number of workers that the strategy 
requires.\n \"\"\"\n object.__init__(self)\n self._name = name\n self._distribution_fn = distribution_fn\n self.required_gpus = required_gpus\n self.required_tpu = required_tpu\n self.use_cloud_tpu = use_cloud_tpu\n self.has_chief = has_chief\n self.num_workers = num_workers\n\n @property\n def strategy(self):\n return self._distribution_fn()\n\n def __repr__(self):\n return self._name\n\n\ndef concat(*combined):\n \"\"\"Concats combinations.\"\"\"\n result = []\n for one in combined:\n result += one\n return result\n\n\ndef generate(combinations, test_combinations=()):\n # pylint: disable=g-doc-args,g-doc-return-or-yield\n \"\"\"Distributed adapter of `framework.combinations_lib.generate`.\n\n All tests with distributed strategy should use this one instead of\n `framework.test_combinations.generate`. This function has support of strategy\n combinations, GPU/TPU and multi worker support.\n\n See `framework.test_combinations_lib.generate` for usage.\n \"\"\"\n # pylint: enable=g-doc-args,g-doc-return-or-yield\n default_combinations = (\n framework_combinations.EagerGraphCombination(),\n framework_combinations.TFVersionCombination(),\n GPUCombination(),\n TPUCombination(),\n )\n # We apply our own decoration to handle multi worker tests before applying\n # framework.test_combinations.generate. 
The order is important since we need\n # framework.test_combinations.generate to apply all parameter modifiers first.\n combination_decorator = combinations_lib.generate(\n combinations, test_combinations=default_combinations + test_combinations)\n\n def decorator(test_method_or_class):\n if isinstance(test_method_or_class, type):\n # If it's a test class.\n class_object = test_method_or_class\n # Decorate each test method with _multi_worker_test.\n for name, test_method in six.iteritems(class_object.__dict__.copy()):\n if (name.startswith(unittest.TestLoader.testMethodPrefix) and\n isinstance(test_method, types.FunctionType)):\n setattr(class_object, name, _multi_worker_test(test_method))\n return combination_decorator(class_object)\n else:\n return combination_decorator(_multi_worker_test(test_method_or_class))\n\n return decorator\n\n\ncombine = combinations_lib.combine\ntimes = combinations_lib.times\nNamedObject = combinations_lib.NamedObject\n\n\ndef main():\n \"\"\"Tests must call this main().\"\"\"\n return multi_process_runner.test_main()\n\n\n# Identifies whether we're in the main process or worker processes.\n# `_multi_worker_test` decoration behaves differently in the main processs and\n# the worker processes. See the documentation of _multi_worker_test for detail.\n_running_in_worker = False\n\n\ndef _test_runner(test_id):\n \"\"\"Executes the test with the given test_id.\n\n This is a simple wrapper around TestRunner to be used with\n multi_process_runner. Similar to test.main(), but it executes only one test\n specified by test_id and returns whether the test succeeds. 
If the test fails,\n the function prints failures and errors to stdout.\n\n Args:\n test_id: TestCase.id()\n\n Returns:\n A boolean indicates whether the test succeeds.\n \"\"\"\n global _running_in_worker\n # No need to restore the value of _running_in_worker since it should always be\n # True in worker processes.\n _running_in_worker = True\n test = unittest.defaultTestLoader.loadTestsFromName(test_id)\n runner = unittest.TextTestRunner()\n result = runner.run(test)\n # Print failures and errors to stdout and multi_process_runner will collect\n # them and stream back to the main process.\n for _, msg in result.failures + result.errors:\n print(msg)\n # Return expected failures as failures, so that the main process can get\n # them and fail as expected.\n if result.expectedFailures:\n return False\n return result.wasSuccessful()\n\n\ndef _multi_worker_test(test_method):\n \"\"\"Decorate test_method so that it runs in each worker.\n\n We use `multi_process_runner` to simulate multiple workers. Since we run the\n this function in the main process and all worker processes, this decoration\n behaves differently in the main process and worker procssses. In the main\n process, it spawns subprocesses and runs the test on each of them; in a worker\n process, it executes test in the same way as a normal test, e.g.\n setUp()/tearDown() are called before/after the test.\n\n Args:\n test_method: a function which must be a test method.\n\n Returns:\n Decorated `test_method`. Note that the decorated function has additional\n arguments.\n \"\"\"\n\n def decorator(self, has_chief, num_workers, **kwargs):\n if _num_total_workers(has_chief, num_workers) == 1 or _running_in_worker:\n # We're in worker process or the test is for single worker. Either case we\n # execute the test method directly instead of spawning subprocesses.\n test_method(self, **kwargs)\n return\n\n # We're in the main process. We spawn subprocesses and run the *test* on\n # each of them. 
Note that we're not directly executing test_method passed to\n # _multi_worker_test, because we need setUp()/tearDown() to be called and\n # all the decorations on the test method. The conceptual call stack is:\n # [main process]test.main()\n # [main process]test_runner.run(test)\n # [main process]wrapper by combinations.generate()\n # [main process]_multi_worker_test.decorator()\n # # A sub process goes through the same code path as the main\n # # process.\n # [sub process]_test_runner()\n # [sub process]test_runner.run(test)\n # [sub process]wrapper by combinations.generate()\n # [sub process]_multi_worker_test.decorator()\n # # _running_in_worker is True\n # [sub process]test_method()\n test_id = self.id()\n cluster_spec = multi_worker_test_base.create_cluster_spec(\n has_chief=has_chief, num_workers=num_workers, num_ps=0, has_eval=False)\n result = multi_process_runner.run(\n _test_runner, cluster_spec, args=(test_id,))\n for was_successful in result.return_value:\n if not was_successful:\n raise AssertionError(\"some worker failed, see logs for details\")\n\n argspec = tf_inspect.getfullargspec(test_method)\n decorator_args = (argspec.args or []) + [\"has_chief\", \"num_workers\"]\n decorator_argspec = argspec._replace(args=decorator_args)\n return tf_decorator.make_decorator(\n test_method, decorator, decorator_argspec=decorator_argspec)\n\n\ndef _num_total_workers(has_chief, num_workers):\n \"\"\"Returns the number of workers including the chief.\"\"\"\n if has_chief:\n return num_workers + 1\n return num_workers\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tf.data.experimental.cardinality()`.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.data.experimental.ops import cardinality\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import combinations\nfrom tensorflow.python.platform import test\n\n\ndef _test_combinations():\n # pylint: disable=g-long-lambda\n cases = [\n (\"Batch1\",\n lambda: dataset_ops.Dataset.range(5).batch(2, drop_remainder=True), 2),\n (\"Batch2\",\n lambda: dataset_ops.Dataset.range(5).batch(2, drop_remainder=False), 3),\n (\"Batch3\",\n lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).batch(2),\n cardinality.UNKNOWN),\n (\"Batch4\", lambda: dataset_ops.Dataset.range(5).repeat().batch(2),\n cardinality.INFINITE),\n (\"Cache1\", lambda: dataset_ops.Dataset.range(5).cache(), 5),\n (\"Cache2\", lambda: dataset_ops.Dataset.range(5).cache(\"foo\"), 5),\n (\"Concatenate1\", lambda: dataset_ops.Dataset.range(5).concatenate(\n dataset_ops.Dataset.range(5)), 10),\n (\"Concatenate2\",\n lambda: dataset_ops.Dataset.range(5).filter(lambda 
_: True).concatenate(\n dataset_ops.Dataset.range(5)), cardinality.UNKNOWN),\n (\"Concatenate3\", lambda: dataset_ops.Dataset.range(5).repeat().\n concatenate(dataset_ops.Dataset.range(5)), cardinality.INFINITE),\n (\"Concatenate4\", lambda: dataset_ops.Dataset.range(5).concatenate(\n dataset_ops.Dataset.range(5).filter(lambda _: True)),\n cardinality.UNKNOWN),\n (\"Concatenate5\",\n lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).concatenate(\n dataset_ops.Dataset.range(5).filter(lambda _: True)),\n cardinality.UNKNOWN),\n (\"Concatenate6\", lambda: dataset_ops.Dataset.range(5).repeat().\n concatenate(dataset_ops.Dataset.range(5).filter(lambda _: True)),\n cardinality.INFINITE),\n (\"Concatenate7\", lambda: dataset_ops.Dataset.range(5).concatenate(\n dataset_ops.Dataset.range(5).repeat()), cardinality.INFINITE),\n (\"Concatenate8\",\n lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).concatenate(\n dataset_ops.Dataset.range(5).repeat()), cardinality.INFINITE),\n (\"Concatenate9\",\n lambda: dataset_ops.Dataset.range(5).repeat().concatenate(\n dataset_ops.Dataset.range(5).repeat()), cardinality.INFINITE),\n (\"FlatMap\", lambda: dataset_ops.Dataset.range(5).flat_map(\n lambda _: dataset_ops.Dataset.from_tensors(0)), cardinality.UNKNOWN),\n (\"Filter\", lambda: dataset_ops.Dataset.range(5).filter(lambda _: True),\n cardinality.UNKNOWN),\n (\"FromTensors1\", lambda: dataset_ops.Dataset.from_tensors(0), 1),\n (\"FromTensors2\", lambda: dataset_ops.Dataset.from_tensors((0, 1)), 1),\n (\"FromTensorSlices1\",\n lambda: dataset_ops.Dataset.from_tensor_slices([0, 0, 0]), 3),\n (\"FromTensorSlices2\",\n lambda: dataset_ops.Dataset.from_tensor_slices(([0, 0, 0], [1, 1, 1])),\n 3),\n (\"Interleave1\", lambda: dataset_ops.Dataset.range(5).interleave(\n lambda _: dataset_ops.Dataset.from_tensors(0), cycle_length=1),\n cardinality.UNKNOWN),\n (\"Interleave2\", lambda: dataset_ops.Dataset.range(5).interleave(\n lambda _: 
dataset_ops.Dataset.from_tensors(0),\n cycle_length=1,\n num_parallel_calls=1), cardinality.UNKNOWN),\n (\"Map1\", lambda: dataset_ops.Dataset.range(5).map(lambda x: x), 5),\n (\"Map2\", lambda: dataset_ops.Dataset.range(5).map(\n lambda x: x, num_parallel_calls=1), 5),\n (\"PaddedBatch1\", lambda: dataset_ops.Dataset.range(5).padded_batch(\n 2, [], drop_remainder=True), 2),\n (\"PaddedBatch2\", lambda: dataset_ops.Dataset.range(5).padded_batch(\n 2, [], drop_remainder=False), 3),\n (\"PaddedBatch3\", lambda: dataset_ops.Dataset.range(5).filter(\n lambda _: True).padded_batch(2, []), cardinality.UNKNOWN),\n (\"PaddedBatch4\",\n lambda: dataset_ops.Dataset.range(5).repeat().padded_batch(2, []),\n cardinality.INFINITE),\n (\"Prefetch\", lambda: dataset_ops.Dataset.range(5).prefetch(buffer_size=1),\n 5),\n (\"Range1\", lambda: dataset_ops.Dataset.range(0), 0),\n (\"Range2\", lambda: dataset_ops.Dataset.range(5), 5),\n (\"Range3\", lambda: dataset_ops.Dataset.range(5, 10), 5),\n (\"Range4\", lambda: dataset_ops.Dataset.range(10, 5), 0),\n (\"Range5\", lambda: dataset_ops.Dataset.range(5, 10, 2), 3),\n (\"Range6\", lambda: dataset_ops.Dataset.range(10, 5, -2), 3),\n (\"Repeat1\", lambda: dataset_ops.Dataset.range(0).repeat(0), 0),\n (\"Repeat2\", lambda: dataset_ops.Dataset.range(1).repeat(0), 0),\n (\"Repeat3\", lambda: dataset_ops.Dataset.range(0).repeat(5), 0),\n (\"Repeat4\", lambda: dataset_ops.Dataset.range(1).repeat(5), 5),\n (\"Repeat5\", lambda: dataset_ops.Dataset.range(0).repeat(), 0),\n (\"Repeat6\", lambda: dataset_ops.Dataset.range(1).repeat(),\n cardinality.INFINITE),\n (\"Shuffle\", lambda: dataset_ops.Dataset.range(5).shuffle(buffer_size=1),\n 5),\n (\"Shard1\", lambda: dataset_ops.Dataset.range(5).shard(2, 0), 3),\n (\"Shard2\", lambda: dataset_ops.Dataset.range(5).shard(8, 7), 0),\n (\"Shard3\",\n lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).shard(2, 0),\n cardinality.UNKNOWN),\n (\"Shard4\", lambda: 
dataset_ops.Dataset.range(5).repeat().shard(2, 0),\n cardinality.INFINITE),\n (\"Skip1\", lambda: dataset_ops.Dataset.range(5).skip(2), 3),\n (\"Skip2\", lambda: dataset_ops.Dataset.range(5).skip(8), 0),\n (\"Skip3\",\n lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).skip(2),\n cardinality.UNKNOWN),\n (\"Skip4\", lambda: dataset_ops.Dataset.range(5).repeat().skip(2),\n cardinality.INFINITE),\n (\"Take1\", lambda: dataset_ops.Dataset.range(5).take(2), 2),\n (\"Take2\", lambda: dataset_ops.Dataset.range(5).take(8), 5),\n (\"Take3\",\n lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).take(2),\n cardinality.UNKNOWN),\n (\"Take4\", lambda: dataset_ops.Dataset.range(5).repeat().take(2), 2),\n (\"Window1\", lambda: dataset_ops.Dataset.range(5).window(\n size=2, shift=2, drop_remainder=True), 2),\n (\"Window2\", lambda: dataset_ops.Dataset.range(5).window(\n size=2, shift=2, drop_remainder=False), 3),\n (\"Zip1\", lambda: dataset_ops.Dataset.zip(dataset_ops.Dataset.range(5)),\n 5),\n (\"Zip2\", lambda: dataset_ops.Dataset.zip(\n (dataset_ops.Dataset.range(5), dataset_ops.Dataset.range(3))), 3),\n (\"Zip3\", lambda: dataset_ops.Dataset.zip((dataset_ops.Dataset.range(\n 5), dataset_ops.Dataset.range(3).repeat())), 5),\n (\"Zip4\", lambda: dataset_ops.Dataset.zip((dataset_ops.Dataset.range(\n 5).repeat(), dataset_ops.Dataset.range(3).repeat())),\n cardinality.INFINITE),\n (\"Zip5\", lambda: dataset_ops.Dataset.zip((dataset_ops.Dataset.range(\n 5), dataset_ops.Dataset.range(3).filter(lambda _: True))),\n cardinality.UNKNOWN),\n ]\n\n def reduce_fn(x, y):\n name, dataset_fn, expected_result = y\n return x + combinations.combine(\n dataset_fn=combinations.NamedObject(name, dataset_fn),\n expected_result=expected_result)\n\n return functools.reduce(reduce_fn, cases, [])\n\n\nclass CardinalityTest(test_base.DatasetTestBase, parameterized.TestCase):\n \"\"\"Tests for `tf.data.experimental.cardinality()`.\"\"\"\n\n @combinations.generate(\n 
combinations.times(test_base.default_test_combinations(),\n _test_combinations()))\n def testCardinality(self, dataset_fn, expected_result):\n self.assertEqual(\n self.evaluate(cardinality.cardinality(dataset_fn())), expected_result)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_grad\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import nest\n\n\n_COS_DERIVATIVES = [math_ops.cos,\n lambda x: -math_ops.sin(x),\n lambda x: -math_ops.cos(x),\n math_ops.sin,\n 
math_ops.cos]\n\n\nclass FunctionGradientsTest(test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super(FunctionGradientsTest, self).setUp()\n cpus = config.list_physical_devices('CPU')\n # Set 4 virtual CPUs\n config.set_logical_device_configuration(cpus[0], [\n context.LogicalDeviceConfiguration(),\n context.LogicalDeviceConfiguration(),\n context.LogicalDeviceConfiguration(),\n context.LogicalDeviceConfiguration()\n ])\n\n def testGraphModeWithGradients(self):\n v = resource_variable_ops.ResourceVariable(1.0, name='v')\n\n @def_function.function\n def step():\n def inner():\n return v * v\n\n return backprop.implicit_grad(inner)()[0][0]\n\n self.assertAllEqual(step(), 2.0)\n\n def testGraphGradientVariable(self):\n with ops.Graph().as_default(), self.cached_session():\n v = variables.Variable(1.0)\n\n @def_function.function\n def f():\n return 2.0 * v\n\n node = f()\n grads, = gradients_impl.gradients(node, v)\n v.initializer.run()\n self.assertAllEqual(grads.eval(), 2.0)\n self.assertEqual(grads.shape, v.shape)\n\n def testSymbolicHigherOrder(self):\n @def_function.function\n def f(x, order):\n y = def_function.function(lambda: math_ops.cos(x))()\n for _ in range(order):\n y, = gradients_impl.gradients(y, [x])\n return y\n for order, expected in enumerate(_COS_DERIVATIVES):\n self.assertAllClose(\n expected(constant_op.constant(1.)),\n f(constant_op.constant(1.), order))\n\n @parameterized.parameters([dict(persistent=True),\n dict(persistent=False)])\n def testSymbolicHigherOrderUnderTape(self, persistent):\n @def_function.function\n def f(x, order):\n with backprop.GradientTape(persistent=persistent) as tape:\n tape.watch(x)\n # Note that having a tape active, even if we don't use it, forces us\n # down a different function call path. 
Symbolic gradients should work\n # here too; correctness of tape gradients are tested elsewhere.\n y = def_function.function(lambda: math_ops.cos(x))()\n tape_dy = tape.gradient(y, x)\n for _ in range(order):\n y, = gradients_impl.gradients(y, [x])\n if order > 0:\n y1 = tape_dy\n for _ in range(order - 1):\n y1, = gradients_impl.gradients(y1, [x])\n else:\n y1 = y\n return y, y1\n for order, expected_f in enumerate(_COS_DERIVATIVES):\n expected = self.evaluate(expected_f(constant_op.constant(1.)))\n self.assertAllClose(\n (expected, expected),\n f(constant_op.constant(1.), order))\n\n def testIteratedGradientsNested(self):\n\n def _grad(f):\n def _grad_function(primal):\n with backprop.GradientTape() as tape:\n tape.watch(primal)\n primal_out = f(primal)\n return tape.gradient(primal_out, primal)\n return _grad_function\n\n @def_function.function\n def _forward(x):\n return math_ops.cos(x)\n\n f = _forward\n traced_f = def_function.function(f)\n one = constant_op.constant(1.)\n for expected in _COS_DERIVATIVES:\n self.assertAllClose(expected(one), f(one))\n self.assertAllClose(expected(one), traced_f(one))\n self.assertAllClose(expected(one), def_function.function(f)(one))\n f = _grad(f)\n traced_f = def_function.function(_grad(traced_f))\n\n def testIteratedGradientsNestedWithVariable(self):\n\n def _grad(f):\n def _grad_function():\n with backprop.GradientTape() as tape:\n primal_out = f()\n g, = tape.gradient(primal_out, tape.watched_variables())\n return g\n return _grad_function\n\n v = variables.Variable(2.)\n\n @def_function.function\n def _forward():\n return math_ops.cos(v)\n\n f = _forward\n\n two = constant_op.constant(2.)\n\n for expected in _COS_DERIVATIVES:\n self.assertAllClose(expected(two), f())\n self.assertAllClose(expected(two), def_function.function(f)())\n f = _grad(f)\n\n def testIteratedGradientsPersistent(self):\n\n @def_function.function\n def _forward(z):\n return math_ops.cos(z)\n\n f = _forward\n with 
backprop.GradientTape(persistent=True) as tape:\n start = constant_op.constant(1.)\n tape.watch(start)\n x = f(start)\n for expected in _COS_DERIVATIVES:\n self.assertAllClose(expected(start), x)\n x = tape.gradient(x, start)\n\n def testHigherOrderWithVariable(self):\n\n v = variables.Variable(1.)\n\n @def_function.function\n def _forward():\n return math_ops.cos(v)\n\n f = _forward\n with backprop.GradientTape(persistent=True) as tape:\n x = f()\n for expected in _COS_DERIVATIVES:\n self.assertAllClose(expected(constant_op.constant(1.)), x)\n x, = tape.gradient(x, tape.watched_variables())\n\n def testGradientsChained(self):\n\n @def_function.function\n def _forward(z):\n return math_ops.cos(z)\n\n f = _forward\n x = constant_op.constant(1.)\n with backprop.GradientTape() as t:\n t.watch(x)\n y = f(x)\n with backprop.GradientTape() as tt:\n doutputs = constant_op.constant(2.)\n tt.watch(doutputs)\n g = t.gradient(y, x, doutputs)\n self.assertAllClose(-2. * math_ops.sin(x), g)\n gg = tt.gradient(g, doutputs)\n # We're taking gradients with respect to doutputs, which is just a linear\n # function of the gradient.\n self.assertAllClose(-math_ops.sin(x), gg)\n\n def testSymGradGatherNd(self):\n with ops.Graph().as_default(), self.cached_session():\n\n @def_function.function\n def f(x):\n return array_ops.gather_nd(x, [[0]])\n\n c = constant_op.constant([[2.]])\n f_c = f(c)\n g, = gradients_impl.gradients(f_c, c)\n self.assertAllEqual(self.evaluate(g).values, [[1.0]])\n\n def testNoSymGradNestedDefun(self):\n\n @def_function.function\n def outer():\n\n @def_function.function\n def f(x):\n return array_ops.gather_nd(x, [[0]])\n\n c = constant_op.constant([[2.]])\n f_c = f(c)\n g, = gradients_impl.gradients(f_c, c)\n self.assertIsInstance(g, ops.IndexedSlices)\n\n outer()\n\n def testGraphFunctionWithGradients(self):\n v = resource_variable_ops.ResourceVariable(1.0, name='v')\n\n @def_function.function\n def step():\n def inner():\n return v * v\n\n return 
backprop.implicit_grad(inner)()[0][0]\n\n step_op = step.get_concrete_function()\n self.assertEqual(step_op.output_dtypes, dtypes.float32)\n self.assertEqual(step_op.output_shapes, tensor_shape.TensorShape([]))\n self.assertAllEqual(step_op(), 2.0)\n\n @test_util.run_in_graph_and_eager_modes()\n def testDefunCondGradient(self):\n\n @def_function.function\n def f(x):\n return control_flow_ops.cond(x > 0.5, lambda: 2 * x, lambda: 3 * x)\n\n with backprop.GradientTape() as t:\n x = constant_op.constant(1.0)\n t.watch(x)\n y = f(x)\n self.assertAllEqual(self.evaluate(t.gradient(y, x)), 2.0)\n\n @test_util.run_in_graph_and_eager_modes()\n def testGraphLoopGradient(self):\n\n @def_function.function\n def f(x):\n return control_flow_ops.while_loop(lambda _, i: i < 2,\n lambda x, i: (2*x, i + 1),\n [x, 0])[0]\n\n with backprop.GradientTape() as t:\n x = constant_op.constant(1.0)\n t.watch(x)\n y = f(x)\n self.assertAllEqual(self.evaluate(t.gradient(y, x)), 4.0)\n\n def testGraphLoopGradientInsideSession(self):\n with ops.Graph().as_default():\n n = constant_op.constant(2.0)\n x = array_ops.placeholder(dtypes.float32, shape=None)\n\n @def_function.function\n def f():\n c = lambda n: n < 10\n b = lambda n: n * x\n return control_flow_ops.while_loop(c, b, [n],\n [tensor_shape.unknown_shape()])\n\n l = f()\n dx = gradients_impl.gradients(l, [x])[0]\n\n with self.cached_session():\n self.assertEqual(dx.eval(feed_dict={x: 2.0}), 24.0)\n\n def testDefunDifferentiable(self):\n v = resource_variable_ops.ResourceVariable(1.0)\n\n @def_function.function\n def f():\n return v * v\n\n self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)\n\n def testDefunCanBeDifferentiatedTwice(self):\n v = resource_variable_ops.ResourceVariable(1.0)\n\n @def_function.function\n def f():\n return v * v\n\n self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)\n # Ensure that v is watched again.\n self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)\n\n def 
testSymbolicGradientVariableNoneNotZerosLike(self):\n with ops.Graph().as_default():\n v = variables.Variable(1.0)\n\n @def_function.function\n def f(x, v):\n v.read_value()\n return x * x\n\n x = constant_op.constant(1.0)\n l = f(x, v)\n _, dv = gradients_impl.gradients(l, [x, v])\n with self.cached_session():\n v.initializer.run()\n self.assertEqual(dv, None)\n\n def testDefunCallBackprop(self):\n\n @def_function.function\n def f(x):\n return math_ops.add(x, x)\n\n @def_function.function\n def g(x):\n return backprop.gradients_function(f, [0])(x)[0]\n\n self.assertAllEqual(2, g(constant_op.constant(2.)))\n\n @test_util.run_v1_only('b/120545219')\n def testGraphModeEagerGradError(self):\n with context.graph_mode():\n def f():\n x = variable_scope.get_variable(\n 'v', initializer=constant_op.constant(1.0))\n return x * constant_op.constant(2.0)\n\n with self.assertRaisesRegexp(ValueError,\n 'No trainable variables were accessed'):\n backprop.implicit_val_and_grad(f)()\n\n def testDefunCallBackpropUsingSameObjectForMultipleArguments(self):\n\n @def_function.function\n def g(x):\n return backprop.gradients_function(math_ops.multiply, [0, 1])(x, x)\n\n def np_g(x):\n return [d.numpy() for d in g(x)]\n\n x = constant_op.constant(1.)\n self.assertAllEqual([1., 1.], np_g(x))\n self.assertAllEqual([1., 1.], np_g(1.))\n\n def testGradientTensorConversionWithDefun(self):\n three = resource_variable_ops.ResourceVariable(3.0, name='v')\n\n @def_function.function\n def f(x):\n return math_ops.add(x, three)\n\n def g(x):\n return f(x)\n\n g = backprop.implicit_grad(g)(constant_op.constant(1.0))[0][0]\n self.assertAllEqual(g, 1.0)\n\n def testGradient(self):\n matmul = def_function.function(math_ops.matmul)\n\n def sq(x):\n return matmul(x, x, transpose_a=True)\n\n t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n grad_t, = backprop.gradients_function(sq, [0])(t)\n self.assertAllEqual(grad_t, [[6, 6], [14, 14]])\n\n def testGradientInFunction(self):\n\n 
@def_function.function\n def f(x):\n return backprop.gradients_function(lambda y: y * y, [0])(x)[0]\n\n self.assertAllEqual(f(constant_op.constant(1.0)), 2.0)\n\n def testGradientOfGatherWithDefun(self):\n v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])\n\n def sum_gather():\n return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))\n\n grad_fn = backprop.implicit_grad(sum_gather)\n gradient = grad_fn()\n defun_grad_fn = backprop.implicit_grad(def_function.function(sum_gather))\n defun_gradient = defun_grad_fn()\n self.assertEqual(len(gradient), len(defun_gradient))\n\n gradient = gradient[0][0]\n defun_gradient = defun_gradient[0][0]\n self.assertAllEqual(gradient.values, defun_gradient.values)\n self.assertAllEqual(gradient.indices, defun_gradient.indices)\n self.assertAllEqual(gradient.dense_shape, defun_gradient.dense_shape)\n\n def testDifferentiableFunctionNoneOutputs(self):\n\n @def_function.function\n def my_function(x):\n return x, None\n\n def wrapper(x):\n return my_function(x)[0]\n\n g = backprop.gradients_function(wrapper, [0])(constant_op.constant(0.0))\n self.assertAllEqual(g[0], 1.)\n\n @def_function.function\n def foo(a):\n return None, a * a\n\n x = constant_op.constant(5.0)\n with backprop.GradientTape() as tp:\n tp.watch(x)\n none, r = foo(x)\n g = tp.gradient(r, x)\n\n self.assertIs(none, None)\n self.assertAllEqual(r, 25.0)\n self.assertAllEqual(g, 2 * 5.0)\n\n @test_util.run_in_graph_and_eager_modes\n def testNestedDifferentiableFunction(self):\n @def_function.function\n def inner_fn(a, b):\n return a * math_ops.add(a, b)\n\n @def_function.function\n def outer_fn(x):\n return inner_fn(x, 1.0)\n\n x = constant_op.constant(5.0)\n with backprop.GradientTape() as tp:\n tp.watch(x)\n result = outer_fn(x)\n grad = tp.gradient(result, x)\n\n self.assertAllEqual(grad, 2 * 5.0 + 1.0)\n\n @test_util.run_in_graph_and_eager_modes\n def testDeeplyNestedDifferentiableFunction(self):\n @def_function.function\n def inner_inner_fn(a, b):\n return 
math_ops.add(a, b)\n\n @def_function.function\n def inner_fn(a, b):\n return inner_inner_fn(a, b)\n\n @def_function.function\n def middle_fn(a, b):\n return a * inner_fn(a, b)\n\n @def_function.function\n def outer_fn(x):\n return middle_fn(x, 1.0)\n\n x = constant_op.constant(5.0)\n with backprop.GradientTape() as tp:\n tp.watch(x)\n result = outer_fn(x)\n grad = tp.gradient(result, x)\n\n self.assertAllEqual(grad, 2 * 5.0 + 1.0)\n\n @test_util.run_in_graph_and_eager_modes\n def testDeeplyNestedDifferentiableFunctionWithMultipleGradCalls(self):\n @def_function.function\n def inner_fn(a, b):\n return math_ops.add(a, b)\n\n @def_function.function\n def middle_fn(a, b):\n return math_ops.mul(a, inner_fn(a, b))\n\n @def_function.function\n def outer_fn(x):\n return middle_fn(x, 3.0)\n\n x = constant_op.constant(5.0)\n self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0))\n\n with backprop.GradientTape() as tp:\n tp.watch(x)\n result = outer_fn(x)\n grad = tp.gradient(result, x)\n\n self.assertAllEqual(grad, 2 * 5.0 + 3.0)\n self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0))\n self.assertAllEqual(middle_fn(3.0, x), 3.0 * (3.0 + 5.0))\n\n with backprop.GradientTape() as tp:\n tp.watch(x)\n result = outer_fn(x)\n grad = tp.gradient(result, x)\n\n self.assertAllEqual(grad, 2 * 5.0 + 3.0)\n\n y = constant_op.constant(4.0)\n with backprop.GradientTape() as tp:\n tp.watch(y)\n result = outer_fn(y)\n grad = tp.gradient(result, y)\n\n self.assertAllEqual(grad, 2 * 4.0 + 3.0)\n\n with backprop.GradientTape() as tp:\n tp.watch(y)\n result = inner_fn(y, y)\n grad = tp.gradient(result, y)\n\n self.assertAllEqual(grad, 2.0)\n\n @test_util.run_in_graph_and_eager_modes\n def testDeeplyNestedDifferentiableFunctionGradientTapeInDefun(self):\n @def_function.function\n def inner_inner_fn(a, b):\n return math_ops.add(a, b)\n\n @def_function.function\n def inner_fn(a, b):\n return inner_inner_fn(a, b)\n\n @def_function.function\n def middle_fn(a, b):\n return a * inner_fn(a, b)\n\n 
@def_function.function\n def outer_fn(x):\n with backprop.GradientTape() as tp:\n tp.watch(x)\n result = middle_fn(x, 1.0)\n grad = tp.gradient(result, x)\n return grad\n\n x = constant_op.constant(5.0)\n grad = outer_fn(x)\n self.assertAllEqual(grad, 2 * 5.0 + 1.0)\n\n @test_util.run_in_graph_and_eager_modes\n def testDeeplyNestedDifferentiableFunctionGradientTapeInNestedDefun(self):\n @def_function.function\n def inner_inner_fn(a, b):\n return math_ops.add(a, b)\n\n @def_function.function\n def inner_fn(a, b):\n return inner_inner_fn(a, b)\n\n @def_function.function\n def middle_fn(a, b):\n return a * inner_fn(a, b)\n\n @def_function.function\n def almost_outer_fn(x):\n with backprop.GradientTape() as tp:\n tp.watch(x)\n result = middle_fn(x, 1.0)\n grad = tp.gradient(result, x)\n return grad\n\n @def_function.function\n def outer_fn(x):\n return almost_outer_fn(x)\n\n x = constant_op.constant(5.0)\n grad = outer_fn(x)\n self.assertAllEqual(grad, 2 * 5.0 + 1.0)\n\n @test_util.run_in_graph_and_eager_modes\n def testDeeplyNestedDifferentiableFunctionGradientTapeInMultNestedDefun(self):\n @def_function.function\n def inner_inner_fn(a, b):\n return math_ops.add(a, b)\n\n @def_function.function\n def inner_fn(a, b):\n return inner_inner_fn(a, b)\n\n @def_function.function\n def middle_fn(a, b):\n return a * inner_fn(a, b)\n\n @def_function.function\n def almost_outer_fn(x):\n with backprop.GradientTape() as tp:\n tp.watch(x)\n result = middle_fn(x, 1.0)\n grad = tp.gradient(result, x)\n return grad\n\n @def_function.function\n def outer_fn(x):\n return almost_outer_fn(x)\n\n @def_function.function\n def outer_outer_fn(x):\n return outer_fn(x)\n\n x = constant_op.constant(5.0)\n grad = outer_outer_fn(x)\n self.assertAllEqual(grad, 2 * 5.0 + 1.0)\n\n @test_util.run_in_graph_and_eager_modes\n def testDeeplyNestedDifferentiableFunctionTFGradientInDefun(self):\n @def_function.function\n def inner_inner_fn(a, b):\n return math_ops.add(a, b)\n\n @def_function.function\n def 
inner_fn(a, b):\n return inner_inner_fn(a, b)\n\n @def_function.function\n def middle_fn(a, b):\n return a * inner_fn(a, b)\n\n @def_function.function\n def outer_fn(x):\n result = middle_fn(x, 1.0)\n return gradients_impl.gradients(result, [x])[0]\n\n x = constant_op.constant(5.0)\n grad = outer_fn(x)\n self.assertAllEqual(grad, 2 * 5.0 + 1.0)\n\n @test_util.run_in_graph_and_eager_modes\n def testDeeplyNestedDifferentiableFunctionTFGradientInNestedDefun(self):\n @def_function.function\n def inner_inner_fn(a, b):\n return math_ops.add(a, b)\n\n @def_function.function\n def inner_fn(a, b):\n return inner_inner_fn(a, b)\n\n @def_function.function\n def middle_fn(a, b):\n return a * inner_fn(a, b)\n\n @def_function.function\n def almost_outer_fn(x):\n result = middle_fn(x, 1.0)\n return gradients_impl.gradients(result, [x])[0]\n\n @def_function.function\n def outer_fn(x):\n return almost_outer_fn(x)\n\n x = constant_op.constant(5.0)\n grad = outer_fn(x)\n self.assertAllEqual(grad, 2 * 5.0 + 1.0)\n\n @test_util.run_in_graph_and_eager_modes\n def testDeeplyNestedDifferentiableFunctionTFGradientInMultNestedDefun(self):\n @def_function.function\n def inner_inner_fn(a, b):\n return math_ops.add(a, b)\n\n @def_function.function\n def inner_fn(a, b):\n return inner_inner_fn(a, b)\n\n @def_function.function\n def middle_fn(a, b):\n return a * inner_fn(a, b)\n\n @def_function.function\n def almost_outer_fn(x):\n result = middle_fn(x, 1.0)\n return gradients_impl.gradients(result, [x])[0]\n\n @def_function.function\n def outer_fn(x):\n return almost_outer_fn(x)\n\n @def_function.function\n def outer_outer_fn(x):\n return outer_fn(x)\n\n x = constant_op.constant(5.0)\n grad = outer_outer_fn(x)\n self.assertAllEqual(grad, 2 * 5.0 + 1.0)\n\n def testDeeplyNestedDifferentiableFunctionWithVariable(self):\n var = variables.Variable(constant_op.constant(1.0))\n\n @def_function.function\n def inner_fn(a, b):\n return math_ops.add(a, b)\n\n @def_function.function\n def middle_fn(a, 
b):\n return a * inner_fn(a, b)\n\n @def_function.function\n def outer_fn(x):\n return middle_fn(x, var)\n\n x = constant_op.constant(5.0)\n with backprop.GradientTape() as tp:\n tp.watch(x)\n result = outer_fn(x)\n grad = tp.gradient(result, x)\n\n self.assertAllEqual(grad, 2 * 5.0 + 1.0)\n\n def testDeeplyNestedDifferentiableFunctionWithVariableMultipleGradCalls(self):\n v = variables.Variable(constant_op.constant(3.0))\n\n @def_function.function\n def inner_fn(a, b):\n return math_ops.add(a, b)\n\n @def_function.function\n def middle_fn(a, b):\n return math_ops.mul(a, inner_fn(a, b))\n\n @def_function.function\n def outer_fn(x):\n return middle_fn(x, v)\n\n x = constant_op.constant(5.0)\n self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0))\n\n with backprop.GradientTape() as tp:\n tp.watch(x)\n result = outer_fn(x)\n grad = tp.gradient(result, x)\n\n self.assertAllEqual(grad, 2 * 5.0 + 3.0)\n self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0))\n self.assertAllEqual(middle_fn(v, x), 3.0 * (3.0 + 5.0))\n\n with backprop.GradientTape() as tp:\n tp.watch(x)\n result = outer_fn(x)\n grad = tp.gradient(result, x)\n\n self.assertAllEqual(grad, 2 * 5.0 + 3.0)\n\n y = constant_op.constant(4.0)\n with backprop.GradientTape() as tp:\n tp.watch(y)\n result = outer_fn(y)\n grad = tp.gradient(result, y)\n\n self.assertAllEqual(grad, 2 * 4.0 + 3.0)\n\n v.assign(constant_op.constant(1.5))\n with backprop.GradientTape() as tp:\n tp.watch(y)\n result = outer_fn(y)\n grad = tp.gradient(result, y)\n\n self.assertAllEqual(grad, 2 * 4.0 + 1.5)\n\n with backprop.GradientTape() as tp:\n tp.watch(y)\n result = inner_fn(y, v)\n grad = tp.gradient(result, y)\n\n self.assertAllEqual(grad, 1.0)\n\n def testDeeplyNestedDifferentiableFunctionWithVariableMultipleTFGrads(self):\n with context.graph_mode(), self.cached_session():\n v = resource_variable_ops.ResourceVariable(3.0)\n v.initializer.run()\n\n @def_function.function\n def inner_fn(a, b):\n return math_ops.add(a, b)\n\n 
@def_function.function\n def middle_fn(a, b):\n return math_ops.mul(a, inner_fn(a, b))\n\n @def_function.function\n def outer_fn(x):\n return middle_fn(x, v)\n\n x = constant_op.constant(5.0)\n self.assertAllEqual(outer_fn(x).eval(), 5.0 * (5.0 + 3.0))\n\n grad, = gradients_impl.gradients(outer_fn(x), x)\n\n self.assertAllEqual(grad, 2 * 5.0 + 3.0)\n self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0))\n self.assertAllEqual(middle_fn(v, x), 3.0 * (3.0 + 5.0))\n\n grad, = gradients_impl.gradients(outer_fn(x), x)\n\n self.assertAllEqual(grad, 2 * 5.0 + 3.0)\n\n y = constant_op.constant(4.0)\n grad, = gradients_impl.gradients(outer_fn(y), y)\n self.assertAllEqual(grad, 2 * 4.0 + 3.0)\n\n self.evaluate(v.assign(constant_op.constant(1.5)))\n grad, = gradients_impl.gradients(outer_fn(y), y)\n\n self.assertAllEqual(grad, 2 * 4.0 + 1.5)\n\n grad, = gradients_impl.gradients(inner_fn(y, v), y)\n self.assertAllEqual(grad, 1.0)\n\n def testNestedDifferentiableFunctionNoneOutputs(self):\n @def_function.function\n def foo(a, b):\n return None, a * math_ops.add(a, b), None, 2*a\n\n @def_function.function\n def bar(x):\n return foo(x, 1.0)\n\n x = constant_op.constant(5.0)\n with backprop.GradientTape(persistent=True) as tp:\n tp.watch(x)\n none1, r1, none2, r2 = bar(x)\n g1 = tp.gradient(r1, x)\n g2 = tp.gradient(r2, x)\n\n self.assertAllEqual(r1, 30.0)\n self.assertAllEqual(r2, 10.0)\n self.assertIs(none1, None)\n self.assertIs(none2, None)\n self.assertAllEqual(g1, 2 * 5.0 + 1.0)\n self.assertAllEqual(g2, 2.0)\n\n def testGradientWithKeywordArguments(self):\n matmul = def_function.function(math_ops.matmul)\n\n def sq(x):\n return matmul(a=x, b=x, transpose_a=True)\n\n t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n grad_t, = backprop.gradients_function(sq, [0])(t)\n self.assertAllEqual(grad_t, [[6, 6], [14, 14]])\n\n with backprop.GradientTape(persistent=True) as tape:\n tape.watch(t)\n one = matmul(t, b=t, transpose_a=True)\n two = matmul(b=t, a=t, transpose_a=True)\n 
three = matmul(a=t, b=t, transpose_a=True)\n\n for output in [one, two, three]:\n self.assertAllEqual(tape.gradient(output, t), [[6, 6], [14, 14]])\n\n def testGradientInFunctionWithKeywordArguments(self):\n\n @def_function.function\n def f(x):\n return backprop.gradients_function(lambda y: y * y, [0])(x)[0]\n\n self.assertAllEqual(f(x=constant_op.constant(1.0)), 2.0)\n\n def testFunctionHasNoSecondOrderGradient(self):\n\n # This test needs nn_grad imported. We could just disable the lint error,\n # but this way if the test is deleted we'll know the import isn't needed.\n _ = nn_grad\n\n v = variables.Variable(1.)\n\n @def_function.function\n def f(labels, logits):\n return def_function.function(\n nn_ops.sparse_softmax_cross_entropy_with_logits)(\n labels=labels, logits=logits + v)\n\n @def_function.function\n def f_grad():\n with backprop.GradientTape() as tape:\n logits = constant_op.constant([1., 2.])\n tape.watch(logits)\n out = f(constant_op.constant(1), logits)\n return tape.gradient(out, logits)\n # Mainly we want to check that the function builds despite\n # sparse_softmax_cross_entropy_with_logits not having a second-order\n # gradient defined.\n self.assertAllEqual([2], f_grad().shape)\n\n @test_util.run_in_graph_and_eager_modes\n def testBackwardNone(self):\n model = variables.Variable(1.0, name='model')\n count = variables.Variable(0)\n\n @function.defun\n def forward_pass(value):\n count.assign_add(1)\n residuals = value - model\n loss = 0.5 * math_ops.reduce_mean(math_ops.pow(residuals, 2))\n # Note: count is an integer, so its doutput will be None\n return loss, count\n\n def reduce_fn(x):\n if context.executing_eagerly():\n with backprop.GradientTape() as t:\n loss, count = forward_pass(x)\n return t.gradient(loss, model), count\n loss, count = forward_pass(x)\n grad_only = gradients_impl.gradients(loss, model)\n return grad_only, count\n\n g, _ = reduce_fn(constant_op.constant([7.0]))\n\n self.evaluate(variables.global_variables_initializer())\n 
self.assertAllEqual(nest.flatten(self.evaluate(g)), [-6.0])\n\n\nif __name__ == '__main__':\n ops.enable_eager_execution()\n test.main()\n",
"# Lint as: python2, python3\n# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Visitor restricting traversal to only the public tensorflow API.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\n\nimport six\n\nfrom tensorflow.python.util import tf_inspect\n\n\nclass PublicAPIVisitor(object):\n \"\"\"Visitor to use with `traverse` to visit exactly the public TF API.\"\"\"\n\n def __init__(self, visitor):\n \"\"\"Constructor.\n\n `visitor` should be a callable suitable as a visitor for `traverse`. It will\n be called only for members of the public TensorFlow API.\n\n Args:\n visitor: A visitor to call for the public API.\n \"\"\"\n self._visitor = visitor\n self._root_name = 'tf'\n\n # Modules/classes we want to suppress entirely.\n self._private_map = {\n 'tf': [\n 'compiler',\n 'core',\n 'python',\n ],\n # Some implementations have this internal module that we shouldn't\n # expose.\n 'tf.flags': ['cpp_flags'],\n }\n\n # Modules/classes we do not want to descend into if we hit them. 
Usually,\n # system modules exposed through platforms for compatibility reasons.\n # Each entry maps a module path to a name to ignore in traversal.\n self._do_not_descend_map = {\n 'tf': [\n 'examples',\n 'flags', # Don't add flags\n # TODO(drpng): This can be removed once sealed off.\n 'platform',\n # TODO(drpng): This can be removed once sealed.\n 'pywrap_tensorflow',\n # TODO(drpng): This can be removed once sealed.\n 'user_ops',\n 'tools',\n 'tensorboard',\n ],\n\n ## Everything below here is legitimate.\n # It'll stay, but it's not officially part of the API.\n 'tf.app': ['flags'],\n # Imported for compatibility between py2/3.\n 'tf.test': ['mock'],\n }\n\n @property\n def private_map(self):\n \"\"\"A map from parents to symbols that should not be included at all.\n\n This map can be edited, but it should not be edited once traversal has\n begun.\n\n Returns:\n The map marking symbols to not include.\n \"\"\"\n return self._private_map\n\n @property\n def do_not_descend_map(self):\n \"\"\"A map from parents to symbols that should not be descended into.\n\n This map can be edited, but it should not be edited once traversal has\n begun.\n\n Returns:\n The map marking symbols to not explore.\n \"\"\"\n return self._do_not_descend_map\n\n def set_root_name(self, root_name):\n \"\"\"Override the default root name of 'tf'.\"\"\"\n self._root_name = root_name\n\n def _is_private(self, path, name, obj=None):\n \"\"\"Return whether a name is private.\"\"\"\n # TODO(wicke): Find out what names to exclude.\n del obj # Unused.\n return ((path in self._private_map and name in self._private_map[path]) or\n (six.ensure_str(name).startswith('_') and\n not re.match('__.*__$', six.ensure_str(name)) or\n name in ['__base__', '__class__']))\n\n def _do_not_descend(self, path, name):\n \"\"\"Safely queries if a specific fully qualified name should be excluded.\"\"\"\n return (path in self._do_not_descend_map and\n name in self._do_not_descend_map[path])\n\n def __call__(self, 
path, parent, children):\n \"\"\"Visitor interface, see `traverse` for details.\"\"\"\n\n # Avoid long waits in cases of pretty unambiguous failure.\n if tf_inspect.ismodule(parent) and len(\n six.ensure_str(path).split('.')) > 10:\n raise RuntimeError('Modules nested too deep:\\n%s.%s\\n\\nThis is likely a '\n 'problem with an accidental public import.' %\n (self._root_name, path))\n\n # Includes self._root_name\n full_path = '.'.join([self._root_name, path]) if path else self._root_name\n\n # Remove things that are not visible.\n for name, child in list(children):\n if self._is_private(full_path, name, child):\n children.remove((name, child))\n\n self._visitor(path, parent, children)\n\n # Remove things that are visible, but which should not be descended into.\n for name, child in list(children):\n if self._do_not_descend(full_path, name):\n children.remove((name, child))\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for training routines.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import iterator_ops\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.keras import combinations\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.keras import layers as layers_module\nfrom tensorflow.python.keras import losses\nfrom tensorflow.python.keras import metrics as metrics_module\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.keras.engine import input_layer\nfrom tensorflow.python.keras.engine import training\nfrom tensorflow.python.keras.engine import training_generator\nfrom tensorflow.python.keras.optimizer_v2 import rmsprop\nfrom tensorflow.python.keras.utils import data_utils\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import nest\n\n\ndef custom_generator(mode=2):\n batch_size = 10\n num_samples = 50\n arr_data = np.random.random((num_samples, 2))\n arr_labels = np.random.random((num_samples, 4))\n arr_weights = 
np.random.random((num_samples,))\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n x = arr_data[start: end]\n y = arr_labels[start: end]\n w = arr_weights[start: end]\n if mode == 1:\n yield x\n elif mode == 2:\n yield x, y\n else:\n yield x, y, w\n\n\ndef custom_generator_changing_batch_size(mode=2):\n batch_size = 10\n cur_batch_size = 11\n num_samples = 50\n arr_data = np.random.random((num_samples, 2))\n arr_labels = np.random.random((num_samples, 4))\n arr_weights = np.random.random((num_samples,))\n i = 0\n while True:\n if cur_batch_size > 1:\n cur_batch_size -= 1\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + cur_batch_size\n x = arr_data[start: end]\n y = arr_labels[start: end]\n w = arr_weights[start: end]\n if mode == 1:\n yield x\n elif mode == 2:\n yield x, y\n else:\n yield x, y, w\n\ncustom_generator_threads = data_utils.threadsafe_generator(custom_generator)\n\n\nclass TestGeneratorMethods(keras_parameterized.TestCase):\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n @data_utils.dont_use_multiprocessing_pool\n def test_fit_generator_method(self):\n model = testing_utils.get_small_mlp(\n num_hidden=3, num_classes=4, input_dim=2)\n model.compile(\n loss='mse',\n optimizer=rmsprop.RMSprop(1e-3),\n metrics=['mae', metrics_module.CategoricalAccuracy()])\n\n model.fit_generator(custom_generator_threads(),\n steps_per_epoch=5,\n epochs=1,\n verbose=1,\n max_queue_size=10,\n workers=4,\n use_multiprocessing=True)\n model.fit_generator(custom_generator(),\n steps_per_epoch=5,\n epochs=1,\n verbose=1,\n max_queue_size=10,\n use_multiprocessing=False)\n model.fit_generator(custom_generator(),\n steps_per_epoch=5,\n epochs=1,\n verbose=1,\n max_queue_size=10,\n use_multiprocessing=False,\n validation_data=custom_generator(),\n validation_steps=10)\n model.fit_generator(custom_generator(),\n 
steps_per_epoch=5,\n validation_data=custom_generator(),\n validation_steps=1,\n workers=0)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n @data_utils.dont_use_multiprocessing_pool\n def test_evaluate_generator_method(self):\n model = testing_utils.get_small_mlp(\n num_hidden=3, num_classes=4, input_dim=2)\n model.compile(\n loss='mse',\n optimizer=rmsprop.RMSprop(1e-3),\n metrics=['mae', metrics_module.CategoricalAccuracy()],\n run_eagerly=testing_utils.should_run_eagerly())\n\n model.evaluate_generator(custom_generator_threads(),\n steps=5,\n max_queue_size=10,\n workers=2,\n verbose=1,\n use_multiprocessing=True)\n model.evaluate_generator(custom_generator(),\n steps=5,\n max_queue_size=10,\n use_multiprocessing=False)\n model.evaluate_generator(custom_generator(),\n steps=5,\n max_queue_size=10,\n use_multiprocessing=False,\n workers=0)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n @data_utils.dont_use_multiprocessing_pool\n def test_predict_generator_method(self):\n model = testing_utils.get_small_mlp(\n num_hidden=3, num_classes=4, input_dim=2)\n model.run_eagerly = testing_utils.should_run_eagerly()\n\n model.predict_generator(custom_generator_threads(),\n steps=5,\n max_queue_size=10,\n workers=2,\n use_multiprocessing=True)\n model.predict_generator(custom_generator(),\n steps=5,\n max_queue_size=10,\n use_multiprocessing=False)\n model.predict_generator(custom_generator(),\n steps=5,\n max_queue_size=10,\n workers=0)\n # Test generator with just inputs (no targets)\n model.predict_generator(custom_generator_threads(mode=1),\n steps=5,\n max_queue_size=10,\n workers=2,\n use_multiprocessing=True)\n model.predict_generator(custom_generator(mode=1),\n steps=5,\n max_queue_size=10,\n use_multiprocessing=False)\n model.predict_generator(custom_generator(mode=1),\n steps=5,\n max_queue_size=10,\n workers=0)\n\n @keras_parameterized.run_with_all_model_types\n 
@keras_parameterized.run_all_keras_modes\n def test_generator_methods_with_sample_weights(self):\n model = testing_utils.get_small_mlp(\n num_hidden=3, num_classes=4, input_dim=2)\n model.compile(\n loss='mse',\n optimizer=rmsprop.RMSprop(1e-3),\n metrics=['mae', metrics_module.CategoricalAccuracy()],\n run_eagerly=testing_utils.should_run_eagerly())\n\n model.fit_generator(custom_generator(mode=3),\n steps_per_epoch=5,\n epochs=1,\n verbose=1,\n max_queue_size=10,\n use_multiprocessing=False)\n model.fit_generator(custom_generator(mode=3),\n steps_per_epoch=5,\n epochs=1,\n verbose=1,\n max_queue_size=10,\n use_multiprocessing=False,\n validation_data=custom_generator(mode=3),\n validation_steps=10)\n model.predict_generator(custom_generator(mode=3),\n steps=5,\n max_queue_size=10,\n use_multiprocessing=False)\n model.evaluate_generator(custom_generator(mode=3),\n steps=5,\n max_queue_size=10,\n use_multiprocessing=False)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n def test_generator_methods_invalid_use_case(self):\n def invalid_generator():\n while 1:\n yield (0, 0, 0, 0)\n\n model = testing_utils.get_small_mlp(\n num_hidden=3, num_classes=4, input_dim=2)\n model.compile(\n loss='mse',\n optimizer=rmsprop.RMSprop(1e-3),\n run_eagerly=testing_utils.should_run_eagerly())\n\n with self.assertRaises(ValueError):\n model.fit_generator(invalid_generator(),\n steps_per_epoch=5,\n epochs=1,\n verbose=1,\n max_queue_size=10,\n use_multiprocessing=False)\n with self.assertRaises(ValueError):\n model.fit_generator(custom_generator(),\n steps_per_epoch=5,\n epochs=1,\n verbose=1,\n max_queue_size=10,\n use_multiprocessing=False,\n validation_data=invalid_generator(),\n validation_steps=10)\n with self.assertRaises(ValueError):\n model.predict_generator(invalid_generator(),\n steps=5,\n max_queue_size=10,\n use_multiprocessing=False)\n with self.assertRaises(ValueError):\n model.evaluate_generator(invalid_generator(),\n 
steps=5,\n max_queue_size=10,\n use_multiprocessing=False)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n def test_generator_input_to_fit_eval_predict(self):\n val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)\n\n def ones_generator():\n while True:\n yield np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)\n\n model = testing_utils.get_small_mlp(\n num_hidden=10, num_classes=1, input_dim=10)\n\n model.compile(\n rmsprop.RMSprop(0.001),\n 'binary_crossentropy',\n run_eagerly=testing_utils.should_run_eagerly())\n model.fit(\n ones_generator(),\n steps_per_epoch=2,\n validation_data=val_data,\n epochs=2)\n model.evaluate(ones_generator(), steps=2)\n model.predict(ones_generator(), steps=2)\n\n # Test with a changing batch size\n model = testing_utils.get_small_mlp(\n num_hidden=3, num_classes=4, input_dim=2)\n model.compile(\n loss='mse',\n optimizer=rmsprop.RMSprop(1e-3),\n metrics=['mae', metrics_module.CategoricalAccuracy()])\n model.fit_generator(custom_generator_changing_batch_size(),\n steps_per_epoch=5,\n epochs=1,\n verbose=1,\n max_queue_size=10,\n use_multiprocessing=False)\n model.fit_generator(custom_generator_changing_batch_size(),\n steps_per_epoch=5,\n epochs=1,\n verbose=1,\n max_queue_size=10,\n use_multiprocessing=False,\n validation_data=custom_generator_changing_batch_size(),\n validation_steps=10)\n\n model.fit(\n custom_generator_changing_batch_size(),\n steps_per_epoch=5,\n validation_data=custom_generator_changing_batch_size(),\n validation_steps=10,\n epochs=2)\n model.evaluate(custom_generator_changing_batch_size(), steps=5)\n model.predict(custom_generator_changing_batch_size(), steps=5)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n @data_utils.dont_use_multiprocessing_pool\n def test_generator_dynamic_shapes(self):\n\n x = [\n 'I think juice is great',\n 'unknown is the best language since slicedbread',\n 'a a a a 
a a a',\n 'matmul'\n 'Yaks are also quite nice',\n ]\n y = [1, 0, 0, 1, 1]\n\n vocab = {\n word: i + 1 for i, word in\n enumerate(\n sorted(set(itertools.chain(*[i.split() for i in x]))))\n }\n\n def data_gen(batch_size=2):\n np.random.seed(0)\n data = list(zip(x, y)) * 10\n np.random.shuffle(data)\n\n def pack_and_pad(queue):\n x = [[vocab[j] for j in i[0].split()] for i in queue]\n pad_len = max(len(i) for i in x)\n x = np.array([i + [0] * (pad_len - len(i)) for i in x])\n y = np.array([i[1] for i in queue])\n del queue[:]\n return x, y[:, np.newaxis]\n\n queue = []\n for i, element in enumerate(data):\n queue.append(element)\n if not (i + 1) % batch_size:\n yield pack_and_pad(queue)\n\n if queue:\n # Last partial batch\n yield pack_and_pad(queue)\n\n model = testing_utils.get_model_from_layers([\n layers_module.Embedding(input_dim=len(vocab) + 1, output_dim=4),\n layers_module.SimpleRNN(units=1),\n layers_module.Activation('sigmoid')\n ],\n input_shape=(None,))\n\n model.compile(loss=losses.binary_crossentropy, optimizer='sgd')\n model.fit(data_gen(), epochs=1, steps_per_epoch=5)\n\n\nclass TestGeneratorMethodsWithSequences(keras_parameterized.TestCase):\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n @data_utils.dont_use_multiprocessing_pool\n def test_training_with_sequences(self):\n\n class DummySequence(data_utils.Sequence):\n\n def __getitem__(self, idx):\n return np.zeros([10, 2]), np.ones([10, 4])\n\n def __len__(self):\n return 10\n\n model = testing_utils.get_small_mlp(\n num_hidden=3, num_classes=4, input_dim=2)\n model.compile(loss='mse', optimizer=rmsprop.RMSprop(1e-3))\n\n model.fit_generator(DummySequence(),\n steps_per_epoch=10,\n validation_data=custom_generator(),\n validation_steps=1,\n max_queue_size=10,\n workers=0,\n use_multiprocessing=True)\n model.fit_generator(DummySequence(),\n steps_per_epoch=10,\n validation_data=custom_generator(),\n validation_steps=1,\n max_queue_size=10,\n 
workers=0,\n use_multiprocessing=False)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n @data_utils.dont_use_multiprocessing_pool\n def test_sequence_input_to_fit_eval_predict(self):\n val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)\n\n class CustomSequence(data_utils.Sequence):\n\n def __getitem__(self, idx):\n return np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)\n\n def __len__(self):\n return 2\n\n class CustomSequenceChangingBatchSize(data_utils.Sequence):\n\n def __getitem__(self, idx):\n batch_size = 10 - idx\n return (np.ones([batch_size, 10], np.float32),\n np.ones([batch_size, 1], np.float32))\n\n def __len__(self):\n return 2\n\n model = testing_utils.get_small_mlp(\n num_hidden=10, num_classes=1, input_dim=10)\n\n model.compile(rmsprop.RMSprop(0.001), 'binary_crossentropy')\n model.fit(CustomSequence(), validation_data=val_data, epochs=2)\n model.evaluate(CustomSequence())\n model.predict(CustomSequence())\n\n with self.assertRaisesRegexp(ValueError, '`y` argument is not supported'):\n model.fit(CustomSequence(), y=np.ones([10, 1]))\n\n with self.assertRaisesRegexp(ValueError,\n '`sample_weight` argument is not supported'):\n model.fit(CustomSequence(), sample_weight=np.ones([10, 1]))\n\n model.compile(rmsprop.RMSprop(0.001), 'binary_crossentropy')\n model.fit(CustomSequenceChangingBatchSize(),\n validation_data=val_data, epochs=2)\n model.evaluate(CustomSequenceChangingBatchSize())\n model.predict(CustomSequenceChangingBatchSize())\n\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n def test_sequence_on_epoch_end(self):\n\n class MySequence(data_utils.Sequence):\n\n def __init__(self):\n self.epochs = 0\n\n def __getitem__(self, idx):\n return np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)\n\n def __len__(self):\n return 2\n\n def on_epoch_end(self):\n self.epochs += 1\n\n inputs = input_layer.Input(10)\n outputs = 
layers_module.Dense(1)(inputs)\n model = training.Model(inputs, outputs)\n model.compile('sgd', 'mse')\n my_seq = MySequence()\n model.fit(my_seq, epochs=2)\n self.assertEqual(my_seq.epochs, 2)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass TestConvertToGeneratorLike(test.TestCase, parameterized.TestCase):\n simple_inputs = (np.ones((10, 10)), np.ones((10, 1)))\n nested_inputs = ((np.ones((10, 10)), np.ones((10, 20))), (np.ones((10, 1)),\n np.ones((10, 3))))\n\n def _make_dataset(self, inputs, batches):\n return dataset_ops.DatasetV2.from_tensors(inputs).repeat(batches)\n\n def _make_iterator(self, inputs, batches):\n return dataset_ops.make_one_shot_iterator(\n self._make_dataset(inputs, batches))\n\n def _make_generator(self, inputs, batches):\n\n def _gen():\n for _ in range(batches):\n yield inputs\n\n return _gen()\n\n def _make_numpy(self, inputs, _):\n return inputs\n\n @parameterized.named_parameters(\n ('simple_dataset', _make_dataset, simple_inputs),\n ('simple_iterator', _make_iterator, simple_inputs),\n ('simple_generator', _make_generator, simple_inputs),\n ('simple_numpy', _make_numpy, simple_inputs),\n ('nested_dataset', _make_dataset, nested_inputs),\n ('nested_iterator', _make_iterator, nested_inputs),\n ('nested_generator', _make_generator, nested_inputs),\n ('nested_numpy', _make_numpy, nested_inputs))\n def test_convert_to_generator_like(self, input_fn, inputs):\n expected_batches = 5\n data = input_fn(self, inputs, expected_batches)\n\n # Dataset and Iterator not supported in Legacy Graph mode.\n if (not context.executing_eagerly() and\n isinstance(data, (dataset_ops.DatasetV2, iterator_ops.Iterator))):\n return\n\n generator, steps = training_generator.convert_to_generator_like(\n data, batch_size=2, steps_per_epoch=expected_batches)\n self.assertEqual(steps, expected_batches)\n\n for _ in range(expected_batches):\n outputs = next(generator)\n nest.assert_same_structure(outputs, inputs)\n\n\nif __name__ == 
'__main__':\n test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for warm_starting_util.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nimport six\n\nfrom tensorflow.python.feature_column import feature_column_lib as fc\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import checkpoint_utils\nfrom tensorflow.python.training import saver as saver_lib\nfrom tensorflow.python.training import warm_starting_util as ws_util\nfrom tensorflow.python.training.tracking import util as tracking_util\n\nones = init_ops.ones_initializer\nnorms = init_ops.truncated_normal_initializer\nrand = init_ops.random_uniform_initializer\nzeros = init_ops.zeros_initializer\n\n\nclass WarmStartingUtilTest(test.TestCase):\n\n def _write_vocab(self, string_values, file_name):\n vocab_file = os.path.join(self.get_temp_dir(), file_name)\n with open(vocab_file, \"w\") as f:\n f.write(\"\\n\".join(string_values))\n return vocab_file\n\n def 
_write_checkpoint(self, sess):\n self.evaluate(variables.global_variables_initializer())\n saver = saver_lib.Saver()\n ckpt_prefix = os.path.join(self.get_temp_dir(), \"model\")\n saver.save(sess, ckpt_prefix, global_step=0)\n\n def _create_prev_run_var(self,\n var_name,\n shape=None,\n initializer=None,\n partitioner=None):\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n var = variable_scope.get_variable(\n var_name,\n shape=shape,\n initializer=initializer,\n partitioner=partitioner)\n self._write_checkpoint(sess)\n if partitioner:\n self.assertTrue(isinstance(var, variables.PartitionedVariable))\n var = var._get_variable_list()\n return var, self.evaluate(var)\n\n def _create_prev_run_vars(self,\n var_names,\n shapes,\n initializers):\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n all_vars = []\n for var_name, shape, initializer in zip(var_names, shapes,\n initializers):\n all_vars.append(variable_scope.get_variable(\n var_name,\n shape=shape,\n initializer=initializer))\n self._write_checkpoint(sess)\n return [self.evaluate(var) for var in all_vars]\n\n def _create_dummy_inputs(self):\n return {\n \"sc_int\": array_ops.sparse_placeholder(dtypes.int32),\n \"sc_hash\": array_ops.sparse_placeholder(dtypes.string),\n \"sc_keys\": array_ops.sparse_placeholder(dtypes.string),\n \"sc_vocab\": array_ops.sparse_placeholder(dtypes.string),\n \"real\": array_ops.placeholder(dtypes.float32)\n }\n\n def _create_linear_model(self, feature_cols, partitioner):\n cols_to_vars = {}\n with variable_scope.variable_scope(\"\", partitioner=partitioner):\n # Create the variables.\n fc.linear_model(\n features=self._create_dummy_inputs(),\n feature_columns=feature_cols,\n units=1,\n cols_to_vars=cols_to_vars)\n # Return a dictionary mapping each column to its variable.\n return cols_to_vars\n\n def _assert_cols_to_vars(self, cols_to_vars, cols_to_expected_values, sess):\n for col, expected_values in 
six.iteritems(cols_to_expected_values):\n for i, var in enumerate(cols_to_vars[col]):\n self.assertAllClose(expected_values[i], var.eval(sess))\n\n def testWarmStartVar(self):\n _, prev_val = self._create_prev_run_var(\n \"fruit_weights\", initializer=[[0.5], [1.], [1.5], [2.]])\n\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n fruit_weights = variable_scope.get_variable(\n \"fruit_weights\", initializer=[[0.], [0.], [0.], [0.]])\n prev_tensor_name, var = ws_util._get_var_info(fruit_weights)\n checkpoint_utils.init_from_checkpoint(self.get_temp_dir(),\n {prev_tensor_name: var})\n self.evaluate(variables.global_variables_initializer())\n self.assertAllClose(prev_val, fruit_weights.eval(sess))\n\n def testWarmStartVarPrevVarPartitioned(self):\n _, weights = self._create_prev_run_var(\n \"fruit_weights\",\n shape=[4, 1],\n initializer=[[0.5], [1.], [1.5], [2.]],\n partitioner=lambda shape, dtype: [2, 1])\n prev_val = np.concatenate([weights[0], weights[1]], axis=0)\n\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n fruit_weights = variable_scope.get_variable(\n \"fruit_weights\", initializer=[[0.], [0.], [0.], [0.]])\n prev_tensor_name, var = ws_util._get_var_info(fruit_weights)\n checkpoint_utils.init_from_checkpoint(self.get_temp_dir(),\n {prev_tensor_name: var})\n self.evaluate(variables.global_variables_initializer())\n self.assertAllClose(prev_val, fruit_weights.eval(sess))\n\n def testWarmStartVarCurrentVarPartitioned(self):\n _, prev_val = self._create_prev_run_var(\n \"fruit_weights\", initializer=[[0.5], [1.], [1.5], [2.]])\n\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n fruit_weights = variable_scope.get_variable(\n \"fruit_weights\",\n shape=[4, 1],\n initializer=[[0.], [0.], [0.], [0.]],\n partitioner=lambda shape, dtype: [2, 1])\n self.assertTrue(\n isinstance(fruit_weights, variables.PartitionedVariable))\n prev_tensor_name, var = 
ws_util._get_var_info(fruit_weights)\n checkpoint_utils.init_from_checkpoint(self.get_temp_dir(),\n {prev_tensor_name: var})\n self.evaluate(variables.global_variables_initializer())\n fruit_weights = fruit_weights._get_variable_list()\n new_val = np.concatenate(\n [fruit_weights[0].eval(sess), fruit_weights[1].eval(sess)], axis=0)\n self.assertAllClose(prev_val, new_val)\n\n def testWarmStartVarBothVarsPartitioned(self):\n _, weights = self._create_prev_run_var(\n \"old_scope/fruit_weights\",\n shape=[4, 1],\n initializer=[[0.5], [1.], [1.5], [2.]],\n partitioner=lambda shape, dtype: [2, 1])\n prev_val = np.concatenate([weights[0], weights[1]], axis=0)\n # New session and new graph.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n fruit_weights = variable_scope.get_variable(\n \"new_scope/fruit_weights\",\n shape=[4, 1],\n initializer=[[0.], [0.], [0.], [0.]],\n partitioner=lambda shape, dtype: [2, 1])\n self.assertTrue(\n isinstance(fruit_weights, variables.PartitionedVariable))\n prev_tensor_name, var = ws_util._get_var_info(\n fruit_weights, prev_tensor_name=\"old_scope/fruit_weights\")\n checkpoint_utils.init_from_checkpoint(self.get_temp_dir(),\n {prev_tensor_name: var})\n self.evaluate(variables.global_variables_initializer())\n fruit_weights = fruit_weights._get_variable_list()\n new_val = np.concatenate(\n [fruit_weights[0].eval(sess), fruit_weights[1].eval(sess)], axis=0)\n self.assertAllClose(prev_val, new_val)\n\n def testWarmStartVarWithVocab(self):\n prev_vocab_path = self._write_vocab([\"apple\", \"banana\", \"guava\", \"orange\"],\n \"old_vocab\")\n self._create_prev_run_var(\n \"fruit_weights\", initializer=[[0.5], [1.], [1.5], [2.]])\n\n # New vocab with elements in reverse order and one new element.\n new_vocab_path = self._write_vocab(\n [\"orange\", \"guava\", \"banana\", \"apple\", \"raspberry\"], \"new_vocab\")\n # New session and new graph.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as 
sess:\n fruit_weights = variable_scope.get_variable(\n \"fruit_weights\", initializer=[[0.], [0.], [0.], [0.], [0.]])\n ws_util._warm_start_var_with_vocab(fruit_weights, new_vocab_path, 5,\n self.get_temp_dir(), prev_vocab_path)\n self.evaluate(variables.global_variables_initializer())\n self.assertAllClose([[2.], [1.5], [1.], [0.5], [0.]],\n fruit_weights.eval(sess))\n\n def testWarmStartVarWithColumnVocab(self):\n prev_vocab_path = self._write_vocab([\"apple\", \"orange\"], \"old_vocab\")\n self._create_prev_run_var(\n \"fruit_output_layer\",\n initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]])\n\n # New vocab with elements in reverse order and one new element.\n new_vocab_path = self._write_vocab([\"orange\", \"apple\", \"banana\"],\n \"new_vocab\")\n # New session and new graph.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n fruit_output_layer = variable_scope.get_variable(\n \"fruit_output_layer\",\n initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],\n [0., 0., 0.]])\n ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,\n current_vocab_size=3,\n prev_ckpt=self.get_temp_dir(),\n prev_vocab_path=prev_vocab_path,\n axis=1)\n self.evaluate(variables.global_variables_initializer())\n self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.], [1.2, 1.5, 0.],\n [2.3, 2., 0.]], fruit_output_layer.eval(sess))\n\n def testWarmStartVarWithVocabConstrainedOldVocabSize(self):\n prev_vocab_path = self._write_vocab([\"apple\", \"banana\", \"guava\", \"orange\"],\n \"old_vocab\")\n self._create_prev_run_var(\n \"fruit_weights\", initializer=[[0.5], [1.], [1.5], [2.]])\n\n # New vocab with elements in reverse order and one new element.\n new_vocab_path = self._write_vocab(\n [\"orange\", \"guava\", \"banana\", \"apple\", \"raspberry\"], \"new_vocab\")\n # New session and new graph.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n fruit_weights = variable_scope.get_variable(\n 
\"fruit_weights\", initializer=[[0.], [0.], [0.], [0.], [0.]])\n ws_util._warm_start_var_with_vocab(\n fruit_weights,\n new_vocab_path,\n 5,\n self.get_temp_dir(),\n prev_vocab_path,\n previous_vocab_size=2)\n self.evaluate(variables.global_variables_initializer())\n # Old vocabulary limited to ['apple', 'banana'].\n self.assertAllClose([[0.], [0.], [1.], [0.5], [0.]],\n fruit_weights.eval(sess))\n\n def testWarmStartVarWithVocabPrevVarPartitioned(self):\n prev_vocab_path = self._write_vocab([\"apple\", \"banana\", \"guava\", \"orange\"],\n \"old_vocab\")\n self._create_prev_run_var(\n \"fruit_weights\",\n shape=[4, 1],\n initializer=[[0.5], [1.], [1.5], [2.]],\n partitioner=lambda shape, dtype: [2, 1])\n\n # New vocab with elements in reverse order and one new element.\n new_vocab_path = self._write_vocab(\n [\"orange\", \"guava\", \"banana\", \"apple\", \"raspberry\"], \"new_vocab\")\n # New session and new graph.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n fruit_weights = variable_scope.get_variable(\n \"fruit_weights\", initializer=[[0.], [0.], [0.], [0.], [0.]])\n ws_util._warm_start_var_with_vocab(fruit_weights, new_vocab_path, 5,\n self.get_temp_dir(), prev_vocab_path)\n self.evaluate(variables.global_variables_initializer())\n self.assertAllClose([[2.], [1.5], [1.], [0.5], [0.]],\n fruit_weights.eval(sess))\n\n def testWarmStartVarWithColumnVocabPrevVarPartitioned(self):\n prev_vocab_path = self._write_vocab([\"apple\", \"orange\"], \"old_vocab\")\n self._create_prev_run_var(\n \"fruit_output_layer\",\n shape=[4, 2],\n initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]],\n partitioner=lambda shape, dtype: [2, 1])\n\n # New vocab with elements in reverse order and one new element.\n new_vocab_path = self._write_vocab([\"orange\", \"apple\", \"banana\"],\n \"new_vocab\")\n # New session and new graph.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n fruit_output_layer = 
variable_scope.get_variable(\n \"fruit_output_layer\",\n initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],\n [0., 0., 0.]])\n ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,\n current_vocab_size=3,\n prev_ckpt=self.get_temp_dir(),\n prev_vocab_path=prev_vocab_path,\n axis=1)\n self.evaluate(variables.global_variables_initializer())\n self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.], [1.2, 1.5, 0.],\n [2.3, 2., 0.]], fruit_output_layer.eval(sess))\n\n def testWarmStartVarWithVocabCurrentVarPartitioned(self):\n prev_vocab_path = self._write_vocab([\"apple\", \"banana\", \"guava\", \"orange\"],\n \"old_vocab\")\n self._create_prev_run_var(\n \"fruit_weights\", initializer=[[0.5], [1.], [1.5], [2.]])\n\n # New vocab with elements in reverse order and one new element.\n new_vocab_path = self._write_vocab(\n [\"orange\", \"guava\", \"banana\", \"apple\", \"raspberry\"], \"new_vocab\")\n # New session and new graph.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n fruit_weights = variable_scope.get_variable(\n \"fruit_weights\",\n shape=[6, 1],\n initializer=[[0.], [0.], [0.], [0.], [0.], [0.]],\n partitioner=lambda shape, dtype: [2, 1])\n ws_util._warm_start_var_with_vocab(\n fruit_weights,\n new_vocab_path,\n 5,\n self.get_temp_dir(),\n prev_vocab_path,\n current_oov_buckets=1)\n self.evaluate(variables.global_variables_initializer())\n self.assertTrue(\n isinstance(fruit_weights, variables.PartitionedVariable))\n fruit_weights_vars = fruit_weights._get_variable_list()\n self.assertAllClose([[2.], [1.5], [1.]],\n fruit_weights_vars[0].eval(sess))\n self.assertAllClose([[0.5], [0.], [0.]],\n fruit_weights_vars[1].eval(sess))\n\n def testWarmStartVarWithColumnVocabCurrentVarPartitioned(self):\n prev_vocab_path = self._write_vocab([\"apple\", \"orange\"], \"old_vocab\")\n self._create_prev_run_var(\n \"fruit_output_layer\",\n initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]])\n\n # New vocab with elements 
in reverse order and one new element.\n new_vocab_path = self._write_vocab([\"orange\", \"apple\", \"banana\"],\n \"new_vocab\")\n # New session and new graph.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n fruit_output_layer = variable_scope.get_variable(\n \"fruit_output_layer\",\n shape=[4, 3],\n initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],\n [0., 0., 0.]],\n partitioner=lambda shape, dtype: [2, 1])\n ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,\n current_vocab_size=3,\n prev_ckpt=self.get_temp_dir(),\n prev_vocab_path=prev_vocab_path,\n axis=1)\n self.evaluate(variables.global_variables_initializer())\n self.assertTrue(\n isinstance(fruit_output_layer, variables.PartitionedVariable))\n fruit_output_layer_vars = fruit_output_layer._get_variable_list()\n self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.]],\n fruit_output_layer_vars[0].eval(sess))\n self.assertAllClose([[1.2, 1.5, 0.], [2.3, 2., 0.]],\n fruit_output_layer_vars[1].eval(sess))\n\n def testWarmStartVarWithVocabBothVarsPartitioned(self):\n prev_vocab_path = self._write_vocab([\"apple\", \"banana\", \"guava\", \"orange\"],\n \"old_vocab\")\n self._create_prev_run_var(\n \"fruit_weights\",\n shape=[4, 1],\n initializer=[[0.5], [1.], [1.5], [2.]],\n partitioner=lambda shape, dtype: [2, 1])\n\n # New vocab with elements in reverse order and two new elements.\n new_vocab_path = self._write_vocab(\n [\"orange\", \"guava\", \"banana\", \"apple\", \"raspberry\",\n \"blueberry\"], \"new_vocab\")\n # New session and new graph.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n fruit_weights = variable_scope.get_variable(\n \"fruit_weights\",\n shape=[6, 1],\n initializer=[[0.], [0.], [0.], [0.], [0.], [0.]],\n partitioner=lambda shape, dtype: [2, 1])\n ws_util._warm_start_var_with_vocab(fruit_weights, new_vocab_path, 6,\n self.get_temp_dir(), prev_vocab_path)\n self.evaluate(variables.global_variables_initializer())\n 
self.assertTrue(\n isinstance(fruit_weights, variables.PartitionedVariable))\n fruit_weights_vars = fruit_weights._get_variable_list()\n self.assertAllClose([[2.], [1.5], [1.]],\n fruit_weights_vars[0].eval(sess))\n self.assertAllClose([[0.5], [0.], [0.]],\n fruit_weights_vars[1].eval(sess))\n\n def testWarmStartVarWithColumnVocabBothVarsPartitioned(self):\n prev_vocab_path = self._write_vocab([\"apple\", \"orange\"], \"old_vocab\")\n self._create_prev_run_var(\n \"fruit_output_layer\",\n shape=[4, 2],\n initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]],\n partitioner=lambda shape, dtype: [2, 1])\n\n # New vocab with elements in reverse order and one new element.\n new_vocab_path = self._write_vocab([\"orange\", \"apple\", \"banana\"],\n \"new_vocab\")\n # New session and new graph.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n fruit_output_layer = variable_scope.get_variable(\n \"fruit_output_layer\",\n shape=[4, 3],\n initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],\n [0., 0., 0.]],\n partitioner=lambda shape, dtype: [2, 1])\n ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,\n current_vocab_size=3,\n prev_ckpt=self.get_temp_dir(),\n prev_vocab_path=prev_vocab_path,\n axis=1)\n self.evaluate(variables.global_variables_initializer())\n self.assertTrue(\n isinstance(fruit_output_layer, variables.PartitionedVariable))\n fruit_output_layer_vars = fruit_output_layer._get_variable_list()\n self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.]],\n fruit_output_layer_vars[0].eval(sess))\n self.assertAllClose([[1.2, 1.5, 0.], [2.3, 2., 0.]],\n fruit_output_layer_vars[1].eval(sess))\n\n def testWarmStart_ListOfVariables(self):\n # Save checkpoint from which to warm-start.\n _, prev_int_val = self._create_prev_run_var(\"v1\", shape=[10, 1],\n initializer=ones())\n # Verify we initialized the values correctly.\n self.assertAllEqual(np.ones([10, 1]), prev_int_val)\n\n # New graph, new session with 
warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n # Initialize with zeros.\n var = variable_scope.get_variable(\n \"v1\",\n shape=[10, 1],\n initializer=zeros())\n ws_util.warm_start(self.get_temp_dir(), vars_to_warm_start=[var])\n self.evaluate(variables.global_variables_initializer())\n # Verify weights were correctly warm-started (init overridden to ones).\n self.assertAllEqual(var.eval(), prev_int_val)\n\n def testWarmStart_ListOfStrings(self):\n # Save checkpoint from which to warm-start.\n _, prev_int_val = self._create_prev_run_var(\"v1\", shape=[10, 1],\n initializer=ones())\n # Verify we initialized the values correctly.\n self.assertAllEqual(np.ones([10, 1]), prev_int_val)\n\n # New graph, new session with warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n # Initialize with zeros.\n var = variable_scope.get_variable(\n \"v1\",\n shape=[10, 1],\n initializer=zeros())\n ws_util.warm_start(self.get_temp_dir(), vars_to_warm_start=[\"v1\"])\n self.evaluate(variables.global_variables_initializer())\n # Verify weights were correctly warm-started (init overridden to ones).\n self.assertAllEqual(var.eval(), prev_int_val)\n\n def testWarmStart_ListOfRegexes(self):\n # Save checkpoint from which to warm-start.\n [prev_v1_val, prev_v1_momentum_val,\n prev_v2_val, _] = self._create_prev_run_vars(\n var_names=[\"v1\", \"v1/Momentum\", \"v2\", \"v2/Momentum\"],\n shapes=[[10, 1]] * 4,\n initializers=[ones()] * 4)\n\n # New graph, new session with warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n # Initialize with zeros.\n v1 = variable_scope.get_variable(\n \"v1\",\n shape=[10, 1],\n initializer=zeros())\n v1_momentum = variable_scope.get_variable(\n \"v1/Momentum\",\n shape=[10, 1],\n initializer=zeros())\n v2 = variable_scope.get_variable(\n \"v2\",\n shape=[10, 1],\n initializer=zeros())\n v2_momentum = variable_scope.get_variable(\n 
\"v2/Momentum\",\n shape=[10, 1],\n initializer=zeros())\n ws_util.warm_start(self.get_temp_dir(),\n # This warm-starts both v1 and v1/Momentum, but only\n # v2 (and not v2/Momentum).\n vars_to_warm_start=[\"v1\", \"v2[^/]\"])\n self.evaluate(variables.global_variables_initializer())\n # Verify the selection of weights were correctly warm-started (init\n # overridden to ones).\n self.assertAllEqual(v1.eval(), prev_v1_val)\n self.assertAllEqual(v1_momentum.eval(), prev_v1_momentum_val)\n self.assertAllEqual(v2.eval(), prev_v2_val)\n self.assertAllEqual(v2_momentum.eval(), np.zeros([10, 1]))\n\n def testWarmStart_SparseColumnIntegerized(self):\n # Create feature column.\n sc_int = fc.categorical_column_with_identity(\"sc_int\", num_buckets=10)\n\n # Save checkpoint from which to warm-start.\n _, prev_int_val = self._create_prev_run_var(\n \"linear_model/sc_int/weights\", shape=[10, 1], initializer=ones())\n # Verify we initialized the values correctly.\n self.assertAllEqual(np.ones([10, 1]), prev_int_val)\n\n partitioner = lambda shape, dtype: [1] * len(shape)\n # New graph, new session WITHOUT warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = self._create_linear_model([sc_int], partitioner)\n self.evaluate(variables.global_variables_initializer())\n # Without warm-starting, the weights should be initialized using default\n # initializer (which is init_ops.zeros_initializer).\n self._assert_cols_to_vars(cols_to_vars, {sc_int: [np.zeros([10, 1])]},\n sess)\n\n # New graph, new session with warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = self._create_linear_model([sc_int], partitioner)\n ws_util.warm_start(self.get_temp_dir(), vars_to_warm_start=\".*sc_int.*\")\n self.evaluate(variables.global_variables_initializer())\n # Verify weights were correctly warm-started.\n self._assert_cols_to_vars(cols_to_vars, {sc_int: [prev_int_val]}, sess)\n\n def 
testWarmStart_SparseColumnHashed(self):\n # Create feature column.\n sc_hash = fc.categorical_column_with_hash_bucket(\n \"sc_hash\", hash_bucket_size=15)\n\n # Save checkpoint from which to warm-start.\n _, prev_hash_val = self._create_prev_run_var(\n \"linear_model/sc_hash/weights\", shape=[15, 1], initializer=norms())\n\n partitioner = lambda shape, dtype: [1] * len(shape)\n # New graph, new session WITHOUT warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = self._create_linear_model([sc_hash], partitioner)\n self.evaluate(variables.global_variables_initializer())\n # Without warm-starting, the weights should be initialized using default\n # initializer (which is init_ops.zeros_initializer).\n self._assert_cols_to_vars(cols_to_vars, {sc_hash: [np.zeros([15, 1])]},\n sess)\n\n # New graph, new session with warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = self._create_linear_model([sc_hash], partitioner)\n ws_util.warm_start(\n self.get_temp_dir(), vars_to_warm_start=\".*sc_hash.*\")\n self.evaluate(variables.global_variables_initializer())\n # Verify weights were correctly warm-started.\n self._assert_cols_to_vars(cols_to_vars, {sc_hash: [prev_hash_val]},\n sess)\n\n def testWarmStart_SparseColumnVocabulary(self):\n # Create vocab for sparse column \"sc_vocab\".\n vocab_path = self._write_vocab([\"apple\", \"banana\", \"guava\", \"orange\"],\n \"vocab\")\n # Create feature column.\n sc_vocab = fc.categorical_column_with_vocabulary_file(\n \"sc_vocab\", vocabulary_file=vocab_path, vocabulary_size=4)\n\n # Save checkpoint from which to warm-start.\n _, prev_vocab_val = self._create_prev_run_var(\n \"linear_model/sc_vocab/weights\", shape=[4, 1], initializer=ones())\n\n partitioner = lambda shape, dtype: [1] * len(shape)\n # New graph, new session WITHOUT warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n 
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)\n self.evaluate(variables.global_variables_initializer())\n # Without warm-starting, the weights should be initialized using default\n # initializer (which is init_ops.zeros_initializer).\n self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [np.zeros([4, 1])]},\n sess)\n\n # New graph, new session with warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = self._create_linear_model([sc_vocab], partitioner)\n # Since old vocab is not explicitly set in WarmStartSettings, the old\n # vocab is assumed to be same as new vocab.\n ws_util.warm_start(\n self.get_temp_dir(), vars_to_warm_start=\".*sc_vocab.*\")\n self.evaluate(variables.global_variables_initializer())\n # Verify weights were correctly warm-started.\n self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [prev_vocab_val]},\n sess)\n\n def testWarmStart_ExplicitCheckpointFile(self):\n # Create vocab for sparse column \"sc_vocab\".\n vocab_path = self._write_vocab([\"apple\", \"banana\", \"guava\", \"orange\"],\n \"vocab\")\n # Create feature column.\n sc_vocab = fc.categorical_column_with_vocabulary_file(\n \"sc_vocab\", vocabulary_file=vocab_path, vocabulary_size=4)\n\n # Save checkpoint from which to warm-start.\n _, prev_vocab_val = self._create_prev_run_var(\n \"linear_model/sc_vocab/weights\", shape=[4, 1], initializer=ones())\n\n partitioner = lambda shape, dtype: [1] * len(shape)\n # New graph, new session WITHOUT warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = self._create_linear_model([sc_vocab], partitioner)\n self.evaluate(variables.global_variables_initializer())\n # Without warm-starting, the weights should be initialized using default\n # initializer (which is init_ops.zeros_initializer).\n self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [np.zeros([4, 1])]},\n sess)\n\n # New graph, new session with warm-starting.\n 
with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = self._create_linear_model([sc_vocab], partitioner)\n # Since old vocab is not explicitly set in WarmStartSettings, the old\n # vocab is assumed to be same as new vocab.\n ws_util.warm_start(\n # Explicitly provide the file prefix instead of just the dir.\n os.path.join(self.get_temp_dir(), \"model-0\"),\n vars_to_warm_start=\".*sc_vocab.*\")\n self.evaluate(variables.global_variables_initializer())\n # Verify weights were correctly warm-started.\n self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [prev_vocab_val]},\n sess)\n\n def testWarmStart_SparseColumnVocabularyConstrainedVocabSizes(self):\n # Create old vocabulary, and use a size smaller than the total number of\n # entries.\n old_vocab_path = self._write_vocab([\"apple\", \"guava\", \"banana\"],\n \"old_vocab\")\n old_vocab_size = 2 # ['apple', 'guava']\n\n # Create new vocab for sparse column \"sc_vocab\".\n current_vocab_path = self._write_vocab(\n [\"apple\", \"banana\", \"guava\", \"orange\"], \"current_vocab\")\n # Create feature column. 
Only use 2 of the actual entries, resulting in\n # ['apple', 'banana'] for the new vocabulary.\n sc_vocab = fc.categorical_column_with_vocabulary_file(\n \"sc_vocab\", vocabulary_file=current_vocab_path, vocabulary_size=2)\n\n # Save checkpoint from which to warm-start.\n self._create_prev_run_var(\n \"linear_model/sc_vocab/weights\", shape=[2, 1], initializer=ones())\n\n partitioner = lambda shape, dtype: [1] * len(shape)\n # New graph, new session WITHOUT warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = self._create_linear_model([sc_vocab], partitioner)\n self.evaluate(variables.global_variables_initializer())\n # Without warm-starting, the weights should be initialized using default\n # initializer (which is init_ops.zeros_initializer).\n self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [np.zeros([2, 1])]},\n sess)\n\n # New graph, new session with warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = self._create_linear_model([sc_vocab], partitioner)\n vocab_info = ws_util.VocabInfo(\n new_vocab=sc_vocab.vocabulary_file,\n new_vocab_size=sc_vocab.vocabulary_size,\n num_oov_buckets=sc_vocab.num_oov_buckets,\n old_vocab=old_vocab_path,\n old_vocab_size=old_vocab_size)\n ws_util.warm_start(\n ckpt_to_initialize_from=self.get_temp_dir(),\n vars_to_warm_start=\".*sc_vocab.*\",\n var_name_to_vocab_info={\n \"linear_model/sc_vocab/weights\": vocab_info\n })\n self.evaluate(variables.global_variables_initializer())\n # Verify weights were correctly warm-started. 
'banana' isn't in the\n # first two entries of the old vocabulary, so it's newly initialized.\n self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [[[1], [0]]]}, sess)\n\n def testWarmStart_BucketizedColumn(self):\n # Create feature column.\n real = fc.numeric_column(\"real\")\n real_bucket = fc.bucketized_column(real, boundaries=[0., 1., 2., 3.])\n\n # Save checkpoint from which to warm-start.\n _, prev_bucket_val = self._create_prev_run_var(\n \"linear_model/real_bucketized/weights\",\n shape=[5, 1],\n initializer=norms())\n\n partitioner = lambda shape, dtype: [1] * len(shape)\n # New graph, new session WITHOUT warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = self._create_linear_model([real_bucket], partitioner)\n self.evaluate(variables.global_variables_initializer())\n # Without warm-starting, the weights should be initialized using default\n # initializer (which is init_ops.zeros_initializer).\n self._assert_cols_to_vars(cols_to_vars,\n {real_bucket: [np.zeros([5, 1])]}, sess)\n\n # New graph, new session with warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = self._create_linear_model([real_bucket], partitioner)\n ws_util.warm_start(\n self.get_temp_dir(), vars_to_warm_start=\".*real_bucketized.*\")\n self.evaluate(variables.global_variables_initializer())\n # Verify weights were correctly warm-started.\n self._assert_cols_to_vars(cols_to_vars,\n {real_bucket: [prev_bucket_val]}, sess)\n\n def testWarmStart_MultipleCols(self):\n # Create vocab for sparse column \"sc_vocab\".\n vocab_path = self._write_vocab([\"apple\", \"banana\", \"guava\", \"orange\"],\n \"vocab\")\n\n # Create feature columns.\n sc_int = fc.categorical_column_with_identity(\"sc_int\", num_buckets=10)\n sc_hash = fc.categorical_column_with_hash_bucket(\n \"sc_hash\", hash_bucket_size=15)\n sc_keys = fc.categorical_column_with_vocabulary_list(\n \"sc_keys\", 
vocabulary_list=[\"a\", \"b\", \"c\", \"e\"])\n sc_vocab = fc.categorical_column_with_vocabulary_file(\n \"sc_vocab\", vocabulary_file=vocab_path, vocabulary_size=4)\n real = fc.numeric_column(\"real\")\n real_bucket = fc.bucketized_column(real, boundaries=[0., 1., 2., 3.])\n cross = fc.crossed_column([sc_keys, sc_vocab], hash_bucket_size=20)\n all_linear_cols = [sc_int, sc_hash, sc_keys, sc_vocab, real_bucket, cross]\n\n # Save checkpoint from which to warm-start. Also create a bias variable,\n # so we can check that it's also warm-started.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n sc_int_weights = variable_scope.get_variable(\n \"linear_model/sc_int/weights\", shape=[10, 1], initializer=ones())\n sc_hash_weights = variable_scope.get_variable(\n \"linear_model/sc_hash/weights\", shape=[15, 1], initializer=norms())\n sc_keys_weights = variable_scope.get_variable(\n \"linear_model/sc_keys/weights\", shape=[4, 1], initializer=rand())\n sc_vocab_weights = variable_scope.get_variable(\n \"linear_model/sc_vocab/weights\", shape=[4, 1], initializer=ones())\n real_bucket_weights = variable_scope.get_variable(\n \"linear_model/real_bucketized/weights\",\n shape=[5, 1],\n initializer=norms())\n cross_weights = variable_scope.get_variable(\n \"linear_model/sc_keys_X_sc_vocab/weights\",\n shape=[20, 1],\n initializer=rand())\n bias = variable_scope.get_variable(\n \"linear_model/bias_weights\",\n shape=[1],\n initializer=rand())\n self._write_checkpoint(sess)\n (prev_int_val, prev_hash_val, prev_keys_val, prev_vocab_val,\n prev_bucket_val, prev_cross_val, prev_bias_val) = sess.run([\n sc_int_weights, sc_hash_weights, sc_keys_weights, sc_vocab_weights,\n real_bucket_weights, cross_weights, bias\n ])\n\n partitioner = lambda shape, dtype: [1] * len(shape)\n # New graph, new session WITHOUT warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = self._create_linear_model(all_linear_cols, 
partitioner)\n self.evaluate(variables.global_variables_initializer())\n # Without warm-starting, all weights should be initialized using default\n # initializer (which is init_ops.zeros_initializer).\n self._assert_cols_to_vars(cols_to_vars, {\n sc_int: [np.zeros([10, 1])],\n sc_hash: [np.zeros([15, 1])],\n sc_keys: [np.zeros([4, 1])],\n sc_vocab: [np.zeros([4, 1])],\n real_bucket: [np.zeros([5, 1])],\n cross: [np.zeros([20, 1])],\n }, sess)\n\n # New graph, new session with warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = self._create_linear_model(all_linear_cols, partitioner)\n vocab_info = ws_util.VocabInfo(\n new_vocab=sc_vocab.vocabulary_file,\n new_vocab_size=sc_vocab.vocabulary_size,\n num_oov_buckets=sc_vocab.num_oov_buckets,\n old_vocab=vocab_path)\n ws_util.warm_start(\n self.get_temp_dir(),\n var_name_to_vocab_info={\n \"linear_model/sc_vocab/weights\": vocab_info\n })\n self.evaluate(variables.global_variables_initializer())\n # Verify weights were correctly warm-started.\n self._assert_cols_to_vars(cols_to_vars, {\n sc_int: [prev_int_val],\n sc_hash: [prev_hash_val],\n sc_keys: [prev_keys_val],\n sc_vocab: [prev_vocab_val],\n real_bucket: [prev_bucket_val],\n cross: [prev_cross_val],\n \"bias\": [prev_bias_val],\n }, sess)\n\n def testWarmStartMoreSettings(self):\n # Create old and new vocabs for sparse column \"sc_vocab\".\n prev_vocab_path = self._write_vocab([\"apple\", \"banana\", \"guava\", \"orange\"],\n \"old_vocab\")\n new_vocab_path = self._write_vocab(\n [\"orange\", \"guava\", \"banana\", \"apple\", \"raspberry\",\n \"blueberry\"], \"new_vocab\")\n # Create feature columns.\n sc_hash = fc.categorical_column_with_hash_bucket(\n \"sc_hash\", hash_bucket_size=15)\n sc_keys = fc.categorical_column_with_vocabulary_list(\n \"sc_keys\", vocabulary_list=[\"a\", \"b\", \"c\", \"e\"])\n sc_vocab = fc.categorical_column_with_vocabulary_file(\n \"sc_vocab\", vocabulary_file=new_vocab_path, 
vocabulary_size=6)\n all_linear_cols = [sc_hash, sc_keys, sc_vocab]\n\n # Save checkpoint from which to warm-start.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n variable_scope.get_variable(\n \"linear_model/sc_hash/weights\", shape=[15, 1], initializer=norms())\n sc_keys_weights = variable_scope.get_variable(\n \"some_other_name\", shape=[4, 1], initializer=rand())\n variable_scope.get_variable(\n \"linear_model/sc_vocab/weights\",\n initializer=[[0.5], [1.], [2.], [3.]])\n self._write_checkpoint(sess)\n prev_keys_val = self.evaluate(sc_keys_weights)\n\n def _partitioner(shape, dtype): # pylint:disable=unused-argument\n # Partition each var into 2 equal slices.\n partitions = [1] * len(shape)\n partitions[0] = min(2, shape.dims[0].value)\n return partitions\n\n # New graph, new session with warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = self._create_linear_model(all_linear_cols, _partitioner)\n vocab_info = ws_util.VocabInfo(\n new_vocab=sc_vocab.vocabulary_file,\n new_vocab_size=sc_vocab.vocabulary_size,\n num_oov_buckets=sc_vocab.num_oov_buckets,\n old_vocab=prev_vocab_path)\n ws_util.warm_start(\n self.get_temp_dir(),\n vars_to_warm_start=\".*(sc_keys|sc_vocab).*\",\n var_name_to_vocab_info={\n ws_util._infer_var_name(cols_to_vars[sc_vocab]): vocab_info\n },\n var_name_to_prev_var_name={\n ws_util._infer_var_name(cols_to_vars[sc_keys]):\n \"some_other_name\"\n })\n self.evaluate(variables.global_variables_initializer())\n # Verify weights were correctly warm-started. Var corresponding to\n # sc_hash should not be warm-started. 
Var corresponding to sc_vocab\n # should be correctly warm-started after vocab remapping.\n self._assert_cols_to_vars(cols_to_vars, {\n sc_keys:\n np.split(prev_keys_val, 2),\n sc_hash: [np.zeros([8, 1]), np.zeros([7, 1])],\n sc_vocab: [\n np.array([[3.], [2.], [1.]]),\n np.array([[0.5], [0.], [0.]])\n ]\n }, sess)\n\n def testWarmStartMoreSettingsNoPartitioning(self):\n # Create old and new vocabs for sparse column \"sc_vocab\".\n prev_vocab_path = self._write_vocab([\"apple\", \"banana\", \"guava\", \"orange\"],\n \"old_vocab\")\n new_vocab_path = self._write_vocab(\n [\"orange\", \"guava\", \"banana\", \"apple\", \"raspberry\",\n \"blueberry\"], \"new_vocab\")\n # Create feature columns.\n sc_hash = fc.categorical_column_with_hash_bucket(\n \"sc_hash\", hash_bucket_size=15)\n sc_keys = fc.categorical_column_with_vocabulary_list(\n \"sc_keys\", vocabulary_list=[\"a\", \"b\", \"c\", \"e\"])\n sc_vocab = fc.categorical_column_with_vocabulary_file(\n \"sc_vocab\", vocabulary_file=new_vocab_path, vocabulary_size=6)\n all_linear_cols = [sc_hash, sc_keys, sc_vocab]\n\n # Save checkpoint from which to warm-start.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n variable_scope.get_variable(\n \"linear_model/sc_hash/weights\", shape=[15, 1], initializer=norms())\n sc_keys_weights = variable_scope.get_variable(\n \"some_other_name\", shape=[4, 1], initializer=rand())\n variable_scope.get_variable(\n \"linear_model/sc_vocab/weights\",\n initializer=[[0.5], [1.], [2.], [3.]])\n self._write_checkpoint(sess)\n prev_keys_val = self.evaluate(sc_keys_weights)\n\n # New graph, new session with warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = self._create_linear_model(all_linear_cols,\n partitioner=None)\n vocab_info = ws_util.VocabInfo(\n new_vocab=sc_vocab.vocabulary_file,\n new_vocab_size=sc_vocab.vocabulary_size,\n num_oov_buckets=sc_vocab.num_oov_buckets,\n old_vocab=prev_vocab_path)\n 
ws_util.warm_start(\n self.get_temp_dir(),\n vars_to_warm_start=\".*(sc_keys|sc_vocab).*\",\n var_name_to_vocab_info={\n ws_util._infer_var_name(cols_to_vars[sc_vocab]): vocab_info\n },\n var_name_to_prev_var_name={\n ws_util._infer_var_name(cols_to_vars[sc_keys]):\n \"some_other_name\"\n })\n self.evaluate(variables.global_variables_initializer())\n # Verify weights were correctly warm-started. Var corresponding to\n # sc_hash should not be warm-started. Var corresponding to sc_vocab\n # should be correctly warm-started after vocab remapping.\n self._assert_cols_to_vars(cols_to_vars, {\n sc_keys: [prev_keys_val],\n sc_hash: [np.zeros([15, 1])],\n sc_vocab: [np.array([[3.], [2.], [1.], [0.5], [0.], [0.]])]\n }, sess)\n\n def testWarmStartVarsToWarmstartIsNone(self):\n # Create old and new vocabs for sparse column \"sc_vocab\".\n prev_vocab_path = self._write_vocab([\"apple\", \"banana\", \"guava\", \"orange\"],\n \"old_vocab\")\n new_vocab_path = self._write_vocab(\n [\"orange\", \"guava\", \"banana\", \"apple\", \"raspberry\",\n \"blueberry\"], \"new_vocab\")\n # Create feature columns.\n sc_hash = fc.categorical_column_with_hash_bucket(\n \"sc_hash\", hash_bucket_size=15)\n sc_keys = fc.categorical_column_with_vocabulary_list(\n \"sc_keys\", vocabulary_list=[\"a\", \"b\", \"c\", \"e\"])\n sc_vocab = fc.categorical_column_with_vocabulary_file(\n \"sc_vocab\", vocabulary_file=new_vocab_path, vocabulary_size=6)\n all_linear_cols = [sc_hash, sc_keys, sc_vocab]\n\n # Save checkpoint from which to warm-start.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n variable_scope.get_variable(\n \"linear_model/sc_hash/weights\", shape=[15, 1], initializer=norms())\n variable_scope.get_variable(\n \"some_other_name\", shape=[4, 1], initializer=rand())\n variable_scope.get_variable(\n \"linear_model/sc_vocab/weights\",\n initializer=[[0.5], [1.], [2.], [3.]])\n self._write_checkpoint(sess)\n\n def _partitioner(shape, dtype): # 
pylint:disable=unused-argument\n # Partition each var into 2 equal slices.\n partitions = [1] * len(shape)\n partitions[0] = min(2, shape.dims[0].value)\n return partitions\n\n # New graph, new session with warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = self._create_linear_model(all_linear_cols, _partitioner)\n vocab_info = ws_util.VocabInfo(\n new_vocab=sc_vocab.vocabulary_file,\n new_vocab_size=sc_vocab.vocabulary_size,\n num_oov_buckets=sc_vocab.num_oov_buckets,\n old_vocab=prev_vocab_path)\n ws_util.warm_start(\n self.get_temp_dir(),\n # The special value of None here will ensure that only the variable\n # specified in var_name_to_vocab_info (sc_vocab embedding) is\n # warm-started.\n vars_to_warm_start=None,\n var_name_to_vocab_info={\n ws_util._infer_var_name(cols_to_vars[sc_vocab]): vocab_info\n },\n # Even though this is provided, the None value for\n # vars_to_warm_start overrides the logic, and this will not be\n # warm-started.\n var_name_to_prev_var_name={\n ws_util._infer_var_name(cols_to_vars[sc_keys]):\n \"some_other_name\"\n })\n self.evaluate(variables.global_variables_initializer())\n # Verify weights were correctly warm-started. 
Var corresponding to\n # sc_vocab should be correctly warm-started after vocab remapping,\n # and neither of the other two should be warm-started..\n self._assert_cols_to_vars(cols_to_vars, {\n sc_keys: [np.zeros([2, 1]), np.zeros([2, 1])],\n sc_hash: [np.zeros([8, 1]), np.zeros([7, 1])],\n sc_vocab: [\n np.array([[3.], [2.], [1.]]),\n np.array([[0.5], [0.], [0.]])\n ]\n }, sess)\n\n def testWarmStartEmbeddingColumn(self):\n # Create old and new vocabs for embedding column \"sc_vocab\".\n prev_vocab_path = self._write_vocab([\"apple\", \"banana\", \"guava\", \"orange\"],\n \"old_vocab\")\n new_vocab_path = self._write_vocab(\n [\"orange\", \"guava\", \"banana\", \"apple\", \"raspberry\", \"blueberry\"],\n \"new_vocab\")\n\n # Save checkpoint from which to warm-start.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n variable_scope.get_variable(\n \"input_layer/sc_vocab_embedding/embedding_weights\",\n initializer=[[0.5, 0.4], [1., 1.1], [2., 2.2], [3., 3.3]])\n self._write_checkpoint(sess)\n\n def _partitioner(shape, dtype): # pylint:disable=unused-argument\n # Partition each var into 2 equal slices.\n partitions = [1] * len(shape)\n partitions[0] = min(2, shape.dims[0].value)\n return partitions\n\n # Create feature columns.\n sc_vocab = fc.categorical_column_with_vocabulary_file(\n \"sc_vocab\", vocabulary_file=new_vocab_path, vocabulary_size=6)\n emb_vocab_column = fc.embedding_column(\n categorical_column=sc_vocab,\n dimension=2)\n all_deep_cols = [emb_vocab_column]\n # New graph, new session with warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = {}\n with variable_scope.variable_scope(\"\", partitioner=_partitioner):\n # Create the variables.\n fc.input_layer(\n features=self._create_dummy_inputs(),\n feature_columns=all_deep_cols,\n cols_to_vars=cols_to_vars)\n vocab_info = ws_util.VocabInfo(\n new_vocab=sc_vocab.vocabulary_file,\n new_vocab_size=sc_vocab.vocabulary_size,\n 
num_oov_buckets=sc_vocab.num_oov_buckets,\n old_vocab=prev_vocab_path,\n # Can't use constant_initializer with load_and_remap. In practice,\n # use a truncated normal initializer.\n backup_initializer=init_ops.random_uniform_initializer(\n minval=0.42, maxval=0.42))\n ws_util.warm_start(\n self.get_temp_dir(),\n var_name_to_vocab_info={\n ws_util._infer_var_name(cols_to_vars[emb_vocab_column]):\n vocab_info\n })\n self.evaluate(variables.global_variables_initializer())\n # Verify weights were correctly warm-started. Var corresponding to\n # emb_vocab_column should be correctly warm-started after vocab\n # remapping. Missing values are filled in with the EmbeddingColumn's\n # initializer.\n self._assert_cols_to_vars(\n cols_to_vars, {\n emb_vocab_column: [\n np.array([[3., 3.3], [2., 2.2], [1., 1.1]]),\n np.array([[0.5, 0.4], [0.42, 0.42], [0.42, 0.42]])\n ]\n }, sess)\n\n def testWarmStartEmbeddingColumnLinearModel(self):\n # Create old and new vocabs for embedding column \"sc_vocab\".\n prev_vocab_path = self._write_vocab([\"apple\", \"banana\", \"guava\", \"orange\"],\n \"old_vocab\")\n new_vocab_path = self._write_vocab(\n [\"orange\", \"guava\", \"banana\", \"apple\", \"raspberry\", \"blueberry\"],\n \"new_vocab\")\n\n # Save checkpoint from which to warm-start.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n variable_scope.get_variable(\n \"linear_model/sc_vocab_embedding/embedding_weights\",\n initializer=[[0.5, 0.4], [1., 1.1], [2., 2.2], [3., 3.3]])\n variable_scope.get_variable(\n \"linear_model/sc_vocab_embedding/weights\",\n initializer=[[0.69], [0.71]])\n self._write_checkpoint(sess)\n\n def _partitioner(shape, dtype): # pylint:disable=unused-argument\n # Partition each var into 2 equal slices.\n partitions = [1] * len(shape)\n partitions[0] = min(2, shape.dims[0].value)\n return partitions\n\n # Create feature columns.\n sc_vocab = fc.categorical_column_with_vocabulary_file(\n \"sc_vocab\", vocabulary_file=new_vocab_path, 
vocabulary_size=6)\n emb_vocab = fc.embedding_column(\n categorical_column=sc_vocab,\n dimension=2)\n all_deep_cols = [emb_vocab]\n # New graph, new session with warm-starting.\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n cols_to_vars = {}\n with variable_scope.variable_scope(\"\", partitioner=_partitioner):\n # Create the variables.\n fc.linear_model(\n features=self._create_dummy_inputs(),\n feature_columns=all_deep_cols,\n cols_to_vars=cols_to_vars)\n\n # Construct the vocab_info for the embedding weight.\n vocab_info = ws_util.VocabInfo(\n new_vocab=sc_vocab.vocabulary_file,\n new_vocab_size=sc_vocab.vocabulary_size,\n num_oov_buckets=sc_vocab.num_oov_buckets,\n old_vocab=prev_vocab_path,\n # Can't use constant_initializer with load_and_remap. In practice,\n # use a truncated normal initializer.\n backup_initializer=init_ops.random_uniform_initializer(\n minval=0.42, maxval=0.42))\n ws_util.warm_start(\n self.get_temp_dir(),\n vars_to_warm_start=\".*sc_vocab.*\",\n var_name_to_vocab_info={\n \"linear_model/sc_vocab_embedding/embedding_weights\": vocab_info\n })\n self.evaluate(variables.global_variables_initializer())\n # Verify weights were correctly warm-started. 
Var corresponding to\n # emb_vocab should be correctly warm-started after vocab remapping.\n # Missing values are filled in with the EmbeddingColumn's initializer.\n self._assert_cols_to_vars(\n cols_to_vars,\n {\n emb_vocab: [\n # linear weights part 0.\n np.array([[0.69]]),\n # linear weights part 1.\n np.array([[0.71]]),\n # embedding_weights part 0.\n np.array([[3., 3.3], [2., 2.2], [1., 1.1]]),\n # embedding_weights part 1.\n np.array([[0.5, 0.4], [0.42, 0.42], [0.42, 0.42]])\n ]\n },\n sess)\n\n def testErrorConditions(self):\n x = variable_scope.get_variable(\n \"x\",\n shape=[4, 1],\n initializer=ones(),\n partitioner=lambda shape, dtype: [2, 1])\n\n # List of PartitionedVariable is invalid type when warm-starting with vocab.\n self.assertRaises(TypeError, ws_util._warm_start_var_with_vocab, [x],\n \"/tmp\", 5, \"/tmp\", \"/tmp\")\n\n # Unused variable names raises ValueError.\n with ops.Graph().as_default():\n with self.cached_session() as sess:\n x = variable_scope.get_variable(\n \"x\",\n shape=[4, 1],\n initializer=ones(),\n partitioner=lambda shape, dtype: [2, 1])\n self._write_checkpoint(sess)\n\n self.assertRaises(\n ValueError,\n ws_util.warm_start,\n self.get_temp_dir(),\n var_name_to_vocab_info={\"y\": ws_util.VocabInfo(\"\", 1, 0, \"\")})\n self.assertRaises(\n ValueError,\n ws_util.warm_start,\n self.get_temp_dir(),\n var_name_to_prev_var_name={\"y\": \"y2\"})\n\n def testWarmStartFromObjectBasedCheckpoint(self):\n prev_val = [[0.5], [1.], [1.5], [2.]]\n with ops.Graph().as_default() as g:\n with self.session(graph=g):\n prev_var = variable_scope.get_variable(\n \"fruit_weights\",\n initializer=prev_val)\n self.evaluate(variables.global_variables_initializer())\n # Save object-based checkpoint.\n tracking_util.Checkpoint(v=prev_var).save(\n os.path.join(self.get_temp_dir(), \"checkpoint\"))\n\n with ops.Graph().as_default() as g:\n with self.session(graph=g):\n fruit_weights = variable_scope.get_variable(\n \"fruit_weights\", initializer=[[0.], 
[0.], [0.], [0.]])\n ws_util.warm_start(self.get_temp_dir())\n self.evaluate(variables.global_variables_initializer())\n self.assertAllClose(prev_val, self.evaluate(fruit_weights))\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"General shape ops for frames.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.signal import util_ops\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef _infer_frame_shape(signal, frame_length, frame_step, pad_end, axis):\n \"\"\"Infers the shape of the return value of `frame`.\"\"\"\n frame_length = tensor_util.constant_value(frame_length)\n frame_step = tensor_util.constant_value(frame_step)\n axis = tensor_util.constant_value(axis)\n if signal.shape.ndims is None:\n return None\n if axis is None:\n return [None] * (signal.shape.ndims + 1)\n\n signal_shape = signal.shape.as_list()\n num_frames = None\n frame_axis = signal_shape[axis]\n outer_dimensions = signal_shape[:axis]\n inner_dimensions = signal_shape[axis:][1:]\n if signal_shape and frame_axis is not None:\n if frame_step is not None and pad_end:\n # Double negative is so that we round up.\n num_frames = max(0, -(-frame_axis // frame_step))\n elif frame_step is not None and frame_length is 
not None:\n assert not pad_end\n num_frames = max(\n 0, (frame_axis - frame_length + frame_step) // frame_step)\n return outer_dimensions + [num_frames, frame_length] + inner_dimensions\n\n\n@tf_export(\"signal.frame\")\ndef frame(signal, frame_length, frame_step, pad_end=False, pad_value=0, axis=-1,\n name=None):\n \"\"\"Expands `signal`'s `axis` dimension into frames of `frame_length`.\n\n Slides a window of size `frame_length` over `signal`'s `axis` dimension\n with a stride of `frame_step`, replacing the `axis` dimension with\n `[frames, frame_length]` frames.\n\n If `pad_end` is True, window positions that are past the end of the `axis`\n dimension are padded with `pad_value` until the window moves fully past the\n end of the dimension. Otherwise, only window positions that fully overlap the\n `axis` dimension are produced.\n\n For example:\n\n ```python\n # A batch size 3 tensor of 9152 audio samples.\n audio = tf.random.normal([3, 9152])\n\n # Compute overlapping frames of length 512 with a step of 180 (frames overlap\n # by 332 samples). By default, only 50 frames are generated since the last\n # 152 samples do not form a full frame.\n frames = tf.signal.frame(audio, 512, 180)\n frames.shape.assert_is_compatible_with([3, 50, 512])\n\n # When pad_end is enabled, the final frame is kept (padded with zeros).\n frames = tf.signal.frame(audio, 512, 180, pad_end=True)\n frames.shape.assert_is_compatible_with([3, 51, 512])\n ```\n\n Args:\n signal: A `[..., samples, ...]` `Tensor`. The rank and dimensions\n may be unknown. Rank must be at least 1.\n frame_length: The frame length in samples. An integer or scalar `Tensor`.\n frame_step: The frame hop size in samples. An integer or scalar `Tensor`.\n pad_end: Whether to pad the end of `signal` with `pad_value`.\n pad_value: An optional scalar `Tensor` to use where the input signal\n does not exist when `pad_end` is True.\n axis: A scalar integer `Tensor` indicating the axis to frame. Defaults to\n the last axis. 
Supports negative values for indexing from the end.\n name: An optional name for the operation.\n\n Returns:\n A `Tensor` of frames with shape `[..., frames, frame_length, ...]`.\n\n Raises:\n ValueError: If `frame_length`, `frame_step`, `pad_value`, or `axis` are not\n scalar.\n \"\"\"\n with ops.name_scope(name, \"frame\", [signal, frame_length, frame_step,\n pad_value]):\n signal = ops.convert_to_tensor(signal, name=\"signal\")\n frame_length = ops.convert_to_tensor(frame_length, name=\"frame_length\")\n frame_step = ops.convert_to_tensor(frame_step, name=\"frame_step\")\n axis = ops.convert_to_tensor(axis, name=\"axis\")\n\n signal.shape.with_rank_at_least(1)\n frame_length.shape.assert_has_rank(0)\n frame_step.shape.assert_has_rank(0)\n axis.shape.assert_has_rank(0)\n\n result_shape = _infer_frame_shape(signal, frame_length, frame_step, pad_end,\n axis)\n\n def maybe_constant(val):\n val_static = tensor_util.constant_value(val)\n return (val_static, True) if val_static is not None else (val, False)\n\n signal_shape, signal_shape_is_static = maybe_constant(\n array_ops.shape(signal))\n axis, axis_is_static = maybe_constant(axis)\n\n if signal_shape_is_static and axis_is_static:\n # Axis can be negative. Convert it to positive.\n axis = range(len(signal_shape))[axis]\n outer_dimensions, length_samples, inner_dimensions = np.split(\n signal_shape, indices_or_sections=[axis, axis + 1])\n length_samples = length_samples.item()\n else:\n signal_rank = array_ops.rank(signal)\n # Axis can be negative. 
Convert it to positive.\n axis = math_ops.range(signal_rank)[axis]\n outer_dimensions, length_samples, inner_dimensions = array_ops.split(\n signal_shape, [axis, 1, signal_rank - 1 - axis])\n length_samples = array_ops.reshape(length_samples, [])\n num_outer_dimensions = array_ops.size(outer_dimensions)\n num_inner_dimensions = array_ops.size(inner_dimensions)\n\n # If padding is requested, pad the input signal tensor with pad_value.\n if pad_end:\n pad_value = ops.convert_to_tensor(pad_value, signal.dtype)\n pad_value.shape.assert_has_rank(0)\n\n # Calculate number of frames, using double negatives to round up.\n num_frames = -(-length_samples // frame_step)\n\n # Pad the signal by up to frame_length samples based on how many samples\n # are remaining starting from last_frame_position.\n pad_samples = math_ops.maximum(\n 0, frame_length + frame_step * (num_frames - 1) - length_samples)\n\n # Pad the inner dimension of signal by pad_samples.\n paddings = array_ops.concat(\n [array_ops.zeros([num_outer_dimensions, 2], dtype=pad_samples.dtype),\n [[0, pad_samples]],\n array_ops.zeros([num_inner_dimensions, 2], dtype=pad_samples.dtype)],\n 0)\n signal = array_ops.pad(signal, paddings, constant_values=pad_value)\n\n signal_shape = array_ops.shape(signal)\n length_samples = signal_shape[axis]\n else:\n num_frames = math_ops.maximum(\n 0, 1 + (length_samples - frame_length) // frame_step)\n\n subframe_length, _ = maybe_constant(util_ops.gcd(frame_length, frame_step))\n subframes_per_frame = frame_length // subframe_length\n subframes_per_hop = frame_step // subframe_length\n num_subframes = length_samples // subframe_length\n\n slice_shape = array_ops.concat([outer_dimensions,\n [num_subframes * subframe_length],\n inner_dimensions], 0)\n subframe_shape = array_ops.concat([outer_dimensions,\n [num_subframes, subframe_length],\n inner_dimensions], 0)\n subframes = array_ops.reshape(array_ops.strided_slice(\n signal, array_ops.zeros_like(signal_shape),\n slice_shape), 
subframe_shape)\n\n # frame_selector is a [num_frames, subframes_per_frame] tensor\n # that indexes into the appropriate frame in subframes. For example:\n # [[0, 0, 0, 0], [2, 2, 2, 2], [4, 4, 4, 4]]\n frame_selector = array_ops.reshape(\n math_ops.range(num_frames) * subframes_per_hop, [num_frames, 1])\n\n # subframe_selector is a [num_frames, subframes_per_frame] tensor\n # that indexes into the appropriate subframe within a frame. For example:\n # [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]\n subframe_selector = array_ops.reshape(\n math_ops.range(subframes_per_frame), [1, subframes_per_frame])\n\n # Adding the 2 selector tensors together produces a [num_frames,\n # subframes_per_frame] tensor of indices to use with tf.gather to select\n # subframes from subframes. We then reshape the inner-most\n # subframes_per_frame dimension to stitch the subframes together into\n # frames. For example: [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7]].\n selector = frame_selector + subframe_selector\n\n frames = array_ops.reshape(\n array_ops.gather(subframes, selector, axis=axis),\n array_ops.concat([outer_dimensions, [num_frames, frame_length],\n inner_dimensions], 0))\n\n if result_shape:\n frames.set_shape(result_shape)\n return frames\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test configs for concat.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.lite.testing.zip_test_utils import create_tensor_data\nfrom tensorflow.lite.testing.zip_test_utils import make_zip_of_tests\nfrom tensorflow.lite.testing.zip_test_utils import register_make_test_function\n\n\n@register_make_test_function()\ndef make_concat_tests(options):\n \"\"\"Make a set of tests to do concatenation.\"\"\"\n\n test_parameters = [{\n \"base_shape\": [[1, 3, 4, 3], [3, 4]],\n \"num_tensors\": [1, 2, 3, 4, 5, 6],\n \"axis\": [0, 1, 2, 3, -3, -2, -1],\n \"type\": [tf.float32, tf.uint8, tf.int32, tf.int64],\n \"fully_quantize\": [False]\n }, {\n \"base_shape\": [[1, 3, 4, 3], [3, 4], [2, 3, 4, 3]],\n \"num_tensors\": [1, 2, 3, 4, 5, 6],\n \"axis\": [1, 2, 3, -3, -2, -1],\n \"type\": [tf.float32],\n \"fully_quantize\": [True]\n }]\n\n def get_shape(parameters, delta):\n \"\"\"Return a tweaked version of 'base_shape'.\"\"\"\n axis = parameters[\"axis\"]\n shape = parameters[\"base_shape\"][:]\n if axis < 0:\n axis += len(shape)\n if axis < len(shape):\n shape[axis] += delta\n return shape\n\n def build_graph(parameters):\n all_tensors = []\n for n in range(0, 
parameters[\"num_tensors\"]):\n input_tensor = tf.compat.v1.placeholder(\n dtype=parameters[\"type\"],\n name=(\"input%d\" % n),\n shape=get_shape(parameters, n))\n all_tensors.append(input_tensor)\n out = tf.concat(all_tensors, parameters[\"axis\"])\n return all_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n all_values = []\n for n in range(0, parameters[\"num_tensors\"]):\n input_values = create_tensor_data(\n parameters[\"type\"],\n get_shape(parameters, n),\n min_value=-1,\n max_value=1)\n all_values.append(input_values)\n return all_values, sess.run(\n outputs, feed_dict=dict(zip(inputs, all_values)))\n\n make_zip_of_tests(\n options,\n test_parameters,\n build_graph,\n build_inputs,\n expected_tf_failures=75)\n"
] | [
[
"tensorflow.python.util.tf_inspect.isclass",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.python.keras.utils.generic_utils.deserialize_keras_object",
"tensorflow.python.tf2.enabled",
"tensorflow.python.keras.utils.generic_utils.serialize_keras_object"
],
[
"tensorflow.python.ops.math_ops.log",
"tensorflow.python.ops.check_ops.assert_rank_at_least_v2",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.math_ops.reduce_max",
"tensorflow.python.ops.array_ops.invert_permutation",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.python.ops.linalg_ops.tridiagonal_mat_mul",
"tensorflow.python.ops.array_ops.gather_nd",
"tensorflow.python.ops.linalg_ops.tridiagonal_solve",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.ops.math_ops.less",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.math_ops.reduce_prod",
"numpy.finfo",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.framework.tensor_shape.dimension_value",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.check_ops.assert_rank_at_least",
"tensorflow.python.ops.array_ops.matrix_diag_part",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.array_ops.expand_dims_v2",
"tensorflow.python.ops.linalg_ops.matrix_solve",
"tensorflow.python.ops.gen_linalg_ops.cholesky",
"tensorflow.python.ops.array_ops.matrix_transpose",
"tensorflow.python.ops.check_ops.assert_equal",
"tensorflow.python.framework.ops.convert_to_tensor",
"numpy.array",
"tensorflow.python.ops.math_ops.conj",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.array_ops.broadcast_to",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.map_fn.map_fn",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.array_ops.pad",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.compat.v1.compat.v1.lite.TFLiteConverter.from_frozen_graph",
"tensorflow.lite.testing.zip_test_utils.ExtraTocoOptions",
"tensorflow.compat.v1.compat.v1.lite.experimental.convert_op_hints_to_stubs",
"tensorflow.lite.testing.zip_test_utils.get_input_shapes_map"
],
[
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.gather_nd",
"tensorflow.python.ops.gradients_impl.gradients",
"tensorflow.python.eager.context.executing_eagerly",
"numpy.random.randint",
"tensorflow.python.framework.test_util.disable_xla",
"tensorflow.python.framework.test_util.run_v1_only",
"tensorflow.python.platform.test.main",
"tensorflow.python.framework.tensor_shape.dimension_value",
"numpy.zeros",
"tensorflow.python.platform.test.is_gpu_available",
"tensorflow.python.client.session.Session",
"numpy.random.rand",
"tensorflow.python.framework.ops.convert_to_tensor",
"numpy.array",
"numpy.random.seed",
"numpy.empty",
"numpy.ones",
"tensorflow.python.ops.variables.global_variables_initializer",
"numpy.vstack",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.tpu.client.client.Client",
"tensorflow.python.platform.tf_logging.debug",
"tensorflow.python.util.compat.as_text",
"tensorflow.python.training.server_lib.ClusterSpec",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.distribute.cluster_resolver.cluster_resolver.format_master_url",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.training.server_lib.Server"
],
[
"tensorflow.python.autograph.core.converter.ConversionOptions",
"tensorflow.python.autograph.pyct.templates.replace",
"tensorflow.python.autograph.pyct.parser.parse_expression",
"tensorflow.python.platform.test.main",
"tensorflow.python.autograph.pyct.loader.load_ast",
"tensorflow.python.autograph.pyct.anno.getanno"
],
[
"tensorflow.lite.testing.zip_test_utils.make_zip_of_tests",
"tensorflow.compat.v1.compat.v1.image.resize_bilinear",
"tensorflow.lite.testing.zip_test_utils.create_tensor_data",
"tensorflow.compat.v1.compat.v1.placeholder",
"tensorflow.lite.testing.zip_test_utils.register_make_test_function"
],
[
"tensorflow.python.debug.lib.profiling.AggregateProfile"
],
[
"tensorflow.python.framework.test_util.run_v1_only",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.framework.test_util.create_local_cluster",
"tensorflow.python.ops.variables.VariableV1",
"tensorflow.python.platform.test.main",
"tensorflow.python.training.adam.AdamOptimizer",
"tensorflow.python.framework.ops.device",
"tensorflow.python.training.gradient_descent.GradientDescentOptimizer",
"tensorflow.python.training.training.SyncReplicasOptimizer",
"tensorflow.python.training.training.MonitoredTrainingSession",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.testing_utils.should_run_eagerly",
"tensorflow.python.keras.Input",
"tensorflow.python.keras.Model",
"tensorflow.python.util.compat.as_text",
"tensorflow.python.ops.sparse_ops.from_dense",
"tensorflow.python.ops.ragged.ragged_factory_ops.constant",
"tensorflow.python.platform.test.main",
"numpy.array",
"numpy.sum",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors",
"tensorflow.python.data.kernel_tests.test_base.default_test_combinations",
"tensorflow.python.framework.combinations.NamedObject",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.data.kernel_tests.test_base.graph_only_combinations",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"tensorflow.python.ops.ragged.ragged_concat_ops.concat",
"tensorflow.python.framework.ops.device",
"tensorflow.python.data.ops.dataset_ops.get_legacy_output_shapes",
"tensorflow.python.eager.context.executing_eagerly",
"numpy.random.randint",
"tensorflow.python.ops.random_ops.random_shuffle",
"tensorflow.python.ops.sparse_ops.sparse_concat",
"tensorflow.python.framework.ops.get_collection",
"numpy.arange",
"tensorflow.python.ops.string_ops.string_split",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_tensor",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.lookup_ops.KeyValueTensorInitializer",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.ops.variable_scope.variable_scope",
"numpy.diff",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.tensor_array_ops.TensorArray",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.array_ops.check_numerics",
"tensorflow.python.ops.script_ops.py_func",
"tensorflow.python.ops.ragged.ragged_factory_ops.constant",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_lengths",
"tensorflow.python.ops.ragged.ragged_tensor.is_ragged",
"tensorflow.python.ops.variables.VariableV1",
"tensorflow.python.framework.tensor_util.make_tensor_proto",
"tensorflow.core.framework.attr_value_pb2.AttrValue",
"numpy.array",
"tensorflow.python.ops.data_flow_ops.FIFOQueue",
"numpy.sum",
"tensorflow.python.framework.combinations.combine",
"tensorflow.python.ops.control_flow_ops.case",
"tensorflow.python.framework.sparse_tensor.is_sparse",
"tensorflow.python.data.experimental.ops.threading_options.ThreadingOptions",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.ops.map_fn.map_fn",
"tensorflow.python.ops.math_ops.mod",
"tensorflow.python.data.ops.dataset_ops.Options",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.data.ops.dataset_ops.Dataset.zip",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.ops.random_ops.random_uniform",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.data.kernel_tests.test_base.default_test_combinations",
"tensorflow.python.data.ops.dataset_ops.Options",
"tensorflow.python.platform.test.main",
"tensorflow.python.data.ops.multi_device_iterator_ops.MultiDeviceIterator",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.framework.combinations.combine"
],
[
"tensorflow.python.platform.resource_loader.get_path_to_datafile",
"tensorflow.python.platform.test.main",
"tensorflow.lite.python.optimize.calibrator.Calibrator",
"numpy.ones"
],
[
"tensorflow.python.framework.combinations.TFVersionCombination",
"tensorflow.python.eager.context.num_gpus",
"tensorflow.python.framework.test_combinations.generate",
"tensorflow.python.util.tf_inspect.getfullargspec",
"tensorflow.python.framework.combinations.EagerGraphCombination",
"tensorflow.python.distribute.multi_process_runner.run",
"tensorflow.python.framework.test_combinations.OptionalParameter",
"tensorflow.python.distribute.multi_process_runner.test_main",
"tensorflow.python.util.tf_decorator.make_decorator",
"tensorflow.python.distribute.multi_worker_test_base.create_cluster_spec"
],
[
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.data.kernel_tests.test_base.default_test_combinations",
"tensorflow.python.framework.combinations.NamedObject",
"tensorflow.python.platform.test.main",
"tensorflow.python.data.ops.dataset_ops.Dataset.range"
],
[
"tensorflow.python.framework.config.list_physical_devices",
"tensorflow.python.ops.math_ops.sin",
"tensorflow.python.framework.test_util.run_in_graph_and_eager_modes",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.gather_nd",
"tensorflow.python.eager.backprop.GradientTape",
"tensorflow.python.ops.gradients_impl.gradients",
"tensorflow.python.ops.control_flow_ops.cond",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.framework.ops.enable_eager_execution",
"tensorflow.python.framework.test_util.run_v1_only",
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.platform.test.main",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.eager.backprop.implicit_grad",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.ops.math_ops.pow",
"tensorflow.python.framework.tensor_shape.unknown_shape",
"tensorflow.python.eager.context.LogicalDeviceConfiguration",
"tensorflow.python.eager.backprop.implicit_val_and_grad",
"tensorflow.python.eager.backprop.gradients_function",
"tensorflow.python.ops.math_ops.cos",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.util.tf_inspect.ismodule"
],
[
"tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.keras.layers.SimpleRNN",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.layers.Activation",
"tensorflow.python.data.ops.dataset_ops.DatasetV2.from_tensors",
"tensorflow.python.keras.engine.training_generator.convert_to_generator_like",
"tensorflow.python.keras.testing_utils.get_small_mlp",
"tensorflow.python.platform.test.main",
"numpy.zeros",
"tensorflow.python.keras.keras_parameterized.run_all_keras_modes",
"tensorflow.python.keras.engine.input_layer.Input",
"numpy.array",
"tensorflow.python.keras.combinations.combine",
"tensorflow.python.keras.testing_utils.should_run_eagerly",
"tensorflow.python.util.nest.assert_same_structure",
"numpy.random.random",
"tensorflow.python.keras.utils.data_utils.threadsafe_generator",
"numpy.random.seed",
"tensorflow.python.keras.metrics.CategoricalAccuracy",
"numpy.random.shuffle",
"numpy.ones",
"tensorflow.python.keras.engine.training.Model"
],
[
"tensorflow.python.feature_column.feature_column_lib.categorical_column_with_hash_bucket",
"numpy.split",
"tensorflow.python.training.warm_starting_util._get_var_info",
"numpy.concatenate",
"tensorflow.python.ops.array_ops.sparse_placeholder",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.training.warm_starting_util.VocabInfo",
"tensorflow.python.ops.init_ops.random_uniform_initializer",
"tensorflow.python.feature_column.feature_column_lib.embedding_column",
"tensorflow.python.feature_column.feature_column_lib.crossed_column",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.variable_scope.variable_scope",
"numpy.zeros",
"tensorflow.python.feature_column.feature_column_lib.bucketized_column",
"numpy.array",
"tensorflow.python.feature_column.feature_column_lib.categorical_column_with_vocabulary_file",
"tensorflow.python.feature_column.feature_column_lib.numeric_column",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.variable_scope.get_variable",
"numpy.ones",
"tensorflow.python.training.warm_starting_util._infer_var_name",
"tensorflow.python.feature_column.feature_column_lib.categorical_column_with_identity",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.feature_column.feature_column_lib.categorical_column_with_vocabulary_list",
"tensorflow.python.training.tracking.util.Checkpoint",
"tensorflow.python.training.saver.Saver"
],
[
"tensorflow.python.ops.math_ops.range",
"numpy.split",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.signal.util_ops.gcd",
"tensorflow.python.ops.array_ops.split",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.array_ops.pad",
"tensorflow.python.ops.math_ops.maximum"
],
[
"tensorflow.lite.testing.zip_test_utils.register_make_test_function",
"tensorflow.compat.v1.concat",
"tensorflow.lite.testing.zip_test_utils.make_zip_of_tests"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zanzibar7/python-skyfield | [
"332038d49ea5814061336cd70cad1d819e630f2b"
] | [
"skyfield/tests/test_earth_satellites.py"
] | [
"# -*- coding: utf-8 -*-\n\nfrom numpy import array\nfrom skyfield import api\nfrom skyfield.api import EarthSatellite, load\nfrom skyfield.constants import AU_KM, AU_M\nfrom skyfield.sgp4lib import TEME_to_ITRF\nfrom skyfield.timelib import julian_date\n\nline1 = '1 25544U 98067A 18184.80969102 .00001614 00000-0 31745-4 0 9993'\nline2 = '2 25544 51.6414 295.8524 0003435 262.6267 204.2868 15.54005638121106'\n\n# Here are numbers from HORIZONS, which I copied into the test below:\n#\n#Ephemeris / WWW_USER Wed Jul 4 19:16:45 2018 Pasadena, USA / Horizons\n#...\n#2458303.500000000 = A.D. 2018-Jul-04 00:00:00.0000 TDB\n# X = 2.633404251158200E-05 Y = 1.015087620439817E-05 Z = 3.544778677556393E-05\n# VX=-1.751248694205384E-03 VY= 4.065407557020968E-03 VZ= 1.363540232307603E-04\n#2458304.500000000 = A.D. 2018-Jul-05 00:00:00.0000 TDB\n# X =-2.136440257814821E-05 Y =-2.084170814514480E-05 Z =-3.415494123796893E-05\n# VX= 2.143876266215405E-03 VY=-3.752167957502106E-03 VZ= 9.484159290242074E-04\n\n# TODO: try with array of dates\n\ndef test_iss_against_horizons():\n ts = api.load.timescale()\n s = EarthSatellite(line1, line2)\n\n hp = array([\n [2.633404251158200E-5, 1.015087620439817E-5, 3.544778677556393E-5],\n [-2.136440257814821E-5, -2.084170814514480E-5, -3.415494123796893E-5],\n ]).T\n hv = array([\n [-1.751248694205384E-3, 4.065407557020968E-3, 1.363540232307603E-4],\n [2.143876266215405E-3, -3.752167957502106E-3, 9.484159290242074E-4],\n ]).T\n\n two_meters = 2.0 / AU_M\n three_km_per_hour = 3.0 * 24.0 / AU_KM\n\n t = ts.tdb(2018, 7, 4)\n p = s.at(t)\n assert abs(p.position.au - hp[:,0]).max() < two_meters\n assert abs(p.velocity.au_per_d - hv[:,0]).max() < three_km_per_hour\n\n t = ts.tdb(2018, 7, [4, 5])\n p = s.at(t)\n assert abs(p.position.au - hp).max() < two_meters\n assert abs(p.velocity.au_per_d - hv).max() < three_km_per_hour\n\n# The following tests are based on the text of\n# 
http://www.celestrak.com/publications/AIAA/2006-6753/AIAA-2006-6753-Rev2.pdf\n\nappendix_c_example = \"\"\"\\\nTEME EXAMPLE\n1 00005U 58002B 00179.78495062 .00000023 00000-0 28098-4 0 4753\n2 00005 34.2682 348.7242 1859667 331.7664 19.3264 10.82419157413667\n\"\"\"\n\nfrom ..constants import DEG2RAD\n\narcminute = DEG2RAD / 60.0\narcsecond = arcminute / 60.0\nseconds_per_day = 86400.0\n\n# Note that the following test is based specifically on Revision 2 of\n# \"Revisiting Spacetrack Report #3\" AIAA 2006-6753 (earlier versions of\n# the PDF use different numbers):\n#\n# http://ww.celestrak.com/publications/AIAA/2006-6753/AIAA-2006-6753-Rev2.pdf\n\ndef test_appendix_c_conversion_from_TEME_to_ITRF():\n rTEME = array([5094.18016210, 6127.64465950, 6380.34453270])\n vTEME = array([-4.746131487, 0.785818041, 5.531931288])\n vTEME = vTEME * 24.0 * 60.0 * 60.0 # km/s to km/day\n\n jd_utc = julian_date(2004, 4, 6, 7, 51, 28.386)\n d_ut1 = -0.439961\n jd_ut1 = jd_utc + d_ut1 / 86400.0\n\n xp = -0.140682 * arcsecond\n yp = 0.333309 * arcsecond\n\n rITRF, vITRF = TEME_to_ITRF(jd_ut1, rTEME, vTEME, xp, yp)\n\n epsilon = 5e-8 # Why not 1e-8, which would match all of their digits?\n\n assert abs(-1033.47938300 - rITRF[0]) < epsilon\n assert abs(+7901.29527540 - rITRF[1]) < epsilon\n assert abs(+6380.35659580 - rITRF[2]) < epsilon\n\n vITRF_per_second = vITRF / seconds_per_day\n\n epsilon = 7e-8 # Why not 1e-9, which would match all of their digits?\n\n assert abs(-3.225636520 - vITRF_per_second[0]) < epsilon\n assert abs(-2.872451450 - vITRF_per_second[1]) < epsilon\n assert abs(+5.531924446 - vITRF_per_second[2]) < epsilon\n\ndef test_appendix_c_satellite():\n lines = appendix_c_example.splitlines()\n ts = api.load.timescale()\n sat = EarthSatellite(lines[1], lines[2], lines[0], ts)\n t = ts.tt_jd(sat.epoch.whole + 3.0, sat.epoch.tt_fraction)\n\n # First, a crucial sanity check (which is, technically, a test of\n # the `sgp4` package and not of Skyfield): are the right 
coordinates\n # being produced by our Python SGP4 propagator for this satellite?\n\n rTEME, vTEME, error = sat._position_and_velocity_TEME_km(t)\n\n # TODO: This used to be accurate to within 1e-8 but lost precision\n # with the move to SGP4 2.0. Is the difference an underlying change\n # in the algorithm and its results? Or something else?\n epsilon = 1e-4\n assert abs(-9060.47373569 - rTEME[0]) < epsilon\n assert abs(4658.70952502 - rTEME[1]) < epsilon\n assert abs(813.68673153 - rTEME[2]) < epsilon\n\n # TODO: Similar to the above, this used to be 1e-9.\n epsilon = 1e-8\n assert abs(-2.232832783 - vTEME[0]) < epsilon\n assert abs(-4.110453490 - vTEME[1]) < epsilon\n assert abs(-3.157345433 - vTEME[2]) < epsilon\n\ndef test_epoch_date():\n # Example from https://celestrak.com/columns/v04n03/\n s = appendix_c_example.replace('00179.78495062', '98001.00000000')\n lines = s.splitlines()\n sat = EarthSatellite(lines[1], lines[2], lines[0])\n assert sat.epoch.utc_jpl() == 'A.D. 1998-Jan-01 00:00:00.0000 UT'\n\ndef test_target_number():\n s = EarthSatellite(line1, line2)\n assert s.target == -125544\n\ndef test_is_sunlit():\n # Yes, a positionlib method; but it made sense to test it here.\n ts = api.load.timescale()\n t = ts.utc(2018, 7, 3, 0, range(0, 60, 10))\n s = EarthSatellite(line1, line2)\n eph = load('de421.bsp')\n expected = [True, False, False, False, True, True]\n assert list(s.at(t).is_sunlit(eph)) == expected\n\n # What if we observe from a topos rather than the geocenter?\n topos = api.Topos('40.8939 N', '83.8917 W')\n assert list((s - topos).at(t).is_sunlit(eph)) == expected\n\ndef test_is_venus_behind_earth():\n # Like the previous test: a satellite-focused positionlib method.\n # Just for fun, we ask whether the Sun is behind the earth, so this\n # measures the same celestial circumstance as the previous test.\n ts = api.load.timescale()\n t = ts.utc(2018, 7, 3, 0, range(0, 60, 10))\n s = EarthSatellite(line1, line2)\n eph = load('de421.bsp')\n 
expected = [False, True, True, True, False, False]\n p = (eph['earth'] + s).at(t).observe(eph['sun']).apparent()\n assert list(p.is_behind_earth()) == expected\n\ndef test_is_another_satellite_behind_earth():\n # See if the method works with a pure geometric difference.\n ts = api.load.timescale()\n t = ts.utc(2018, 7, 3, 0, range(0, 60, 10))\n s = EarthSatellite(line1, line2)\n # The \"other satellite\" is fictitious: the ISS offset by one day.\n s2 = EarthSatellite(line1.replace('184.80969102', '185.80969102'), line2)\n expected = [True, True, True, True, True, True]\n p = (s - s2).at(t)\n assert list(p.is_behind_earth()) == expected\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yamasampo/alignmentrs | [
"5f963d13ac2db72f4ef23b462de0836526f590b7"
] | [
"alignmentrs/aln/mixins/serde.py"
] | [
"from collections import OrderedDict, ChainMap\nfrom copy import deepcopy\nimport os\nimport json\nimport pickle\nimport re\nimport io\n\nimport pandas\n\nfrom libalignmentrs.alignment import SeqMatrix\nfrom libalignmentrs.readers import fasta_to_dict\nfrom alignmentrs.utils import to_intlist\n\n\n__all__ = [\n 'FastaSerdeMixin', 'DictSerdeMixin', 'JsonSerdeMixin', \n 'PickleSerdeMixin', 'CsvSerdeMixin', 'RecordsSerdeMixin',\n 'col_metadata_to_str', 'col_metadata_str_formatter',\n]\n\n\n_whitespace_regexp = re.compile(r'\\s+')\n_column_metadata_string_regexp = re.compile(r'meta\\|(\\S+)\\=(\\S+)')\n\n\nclass FastaSerdeMixin:\n \"\"\"Adds ability to read/write an Alignment object\n from a FASTA formatted file.\n \"\"\"\n @classmethod\n def from_fasta(\n cls, path, name=None, \n # parse_row_metadata=True,\n parse_description=True, \n # column_metadata_decoders=None,\n column_metadata_regexp='c\\|([A-Za-z0-9\\s\\.]+)=(\\[[A-Za-z0-9\\.\\s,\\\"\\']+\\])',\n column_index_regexp='ci\\|([A-Za-z0-9\\s\\.]+)=(\\[[A-Za-z0-9\\.\\s,\\\"\\']+\\])',\n store_history=True, **kwargs):\n \"\"\"Create an Alignment object from a FASTA-formatted file.\n\n Parameters\n ----------\n path : str\n Path to FASTA file.\n name : str, optional\n Name of the new alignment.\n (default is None, takes the name from the comments\n or uses the filename)\n parse_description : function, optional\n Function that takes a list of comment lines as input\n and outputs a dictionary that organizes comments into\n keys and values. 
(default is None, lines starting with \n a semicolon \";\" are ignored.)\n\n Returns\n -------\n Alignment\n Creates a new Alignment object based on the identifiers,\n descriptions, and sequences in the FASTA file.\n\n \"\"\"\n matrix, metadata = fasta_to_dict(path)\n row_meta, col_meta = None, None\n if parse_description:\n # Parses metadata['descriptions'] and removes parsed info\n offset = 0\n match_locations = []\n col_d = {}\n col_idx = None\n # Parses column index\n match = re.search(column_index_regexp, metadata['descriptions'][0])\n if match:\n key, value = match.groups()\n # Convert text into a list using eval\n try:\n value = cls._parse_str_to_list(value, 'infer')\n except SyntaxError:\n raise ValueError('Cannot construct Alignment from the given FASTA file: column index is malformed'.format(key))\n # Put key-value pair into the dictionary\n col_idx = value\n\n # Parses column metadata\n for match in re.finditer(column_metadata_regexp,\n metadata['descriptions'][0]):\n match_locations.append(match.span())\n key, value = match.groups()\n # Convert text into a list using eval\n try:\n value = cls._parse_str_to_list(value, 'infer')\n except SyntaxError:\n raise ValueError('Cannot construct Alignment from the given FASTA file: column metadata {} is malformed'.format(key))\n # Put key-value pair into the dictionary\n col_d[key] = value\n # Constructs column metadata DataFrame from dictionary and index\n if (col_idx is not None) or col_d:\n col_meta = pandas.DataFrame(col_d, index=col_idx)\n\n if name is None:\n name = os.path.basename(path)\n return cls(matrix, name,\n row_metadata=row_meta,\n # row_ids and row_descriptions are ignored\n # if row_meta is not None\n row_ids=metadata['ids'],\n row_descriptions=metadata['descriptions'],\n # col_meta is None unless parse_column_metadata is True\n col_metadata=col_meta,\n aln_metadata=metadata['comments'],\n store_history=store_history, **kwargs)\n\n def to_fasta(self, path=None, include_column_metadata=None, 
column_metadata_encoders=None, column_metadata_template='c|{}={}', **kwargs):\n \"\"\"Saves the alignment as a FASTA-formatted file.\n Some metadata may not be lost.\n\n Parameters\n ----------\n path : str, optional\n Path to save the alignment to.\n include_column_metadata : list of str, optional\n List of keys of columns in column metadata to include\n (default is None, information are not written as FASTA comments\n to ensure maximum compatibility)\n column_metadata_encoders : dict of callable, optional\n Dictionary of functions used to transform values of included\n column metadata.\n Keys are expected to match specified column names.\n (default is None, all included columns will be transformed using the\n `str` string constructor)\n\n \"\"\"\n # Default values if not specified\n if include_column_metadata is None:\n include_column_metadata = []\n if column_metadata_encoders is None:\n column_metadata_encoders = {}\n\n # Transform col metadata DataFrame into a stringed representation\n # of columns and values.\n col_meta_str = col_metadata_to_str(\n self.column_metadata, include_column_metadata,\n column_metadata_encoders, column_metadata_template\n )\n # Creates a generator that writes each entry as a string\n # in the FASTA format:\n # >{sid} {desc}\n # {seq}\n info_generator = (\n (vals[0], vals[1]['description'], self.data.data[i]) \n for i, vals in enumerate(self.row_metadata.iterrows())\n )\n fasta_str = '\\n'.join([\n self._fasta_entry_formatter(*params, col_meta_str)\n if i == 0 else\n self._fasta_entry_formatter(*params, '')\n for i, params in enumerate(info_generator)\n ])\n\n # Write the FASTA string to file\n if path is None:\n return fasta_str\n dirpath = os.path.dirname(os.path.abspath(path))\n if not os.path.isdir(dirpath):\n raise OSError('{} does not exist'.format(dirpath))\n with open(path, 'w') as writer:\n print(fasta_str, file=writer)\n\n @staticmethod\n def _parse_str_to_list(string: str, item_type: type = 'infer'):\n \"\"\" Returns a 
list by parsing a given string. The input string has to\n expressed as like Python list syntax.\n \n Parameters\n ----------\n string: str\n A string to be converted into a list. Format should be Python\n syntax of list object like \"[1, 2, 3]\". It has to starts with \"[\"\n and ends with \"]\" and items have to be separated by \",\".\n item_type: type (default: str)\n Type in which items in str-like list will be converted. For example,\n \"[1, 2, 3]\" and int are passed to string and item_type variables \n respectively, \"[1, 2, 3]\" will converted into [1, 2, 3] not\n [\"1\", \"2\", \"3\"].\n\n Return\n ------\n A list version of the input string.\n\n \"\"\"\n # Check if item_type variable is \"type\" type\n if item_type != 'infer' and not isinstance(item_type, type):\n raise TypeError('Invalid type: object constructor type should be '\\\n 'passed to \"item_type\" variable.')\n\n # Check if sring is str\n if not isinstance(string, str):\n raise TypeError('Invalid type: \"string\" variable has to be str type.')\n\n # Check string format\n if not string.startswith('['):\n raise SyntaxError(f'Invalid syntax for conversion to a list. '\\\n '{string} does not start with \"[\".')\n if not string.endswith(']'):\n raise SyntaxError(f'Invalid syntax for conversion to a list. '\\\n '{string} does not end with \"]\".')\n \n # Convert into a list\n if item_type == 'infer':\n out_l = []\n for item in string.split('[')[1].split(']')[0].split(','):\n try:\n dat = int(item)\n # e.g. int('1.1') gives \"ValueError: invalid literal for int() \n # with base 10: '1.1'\"\n except ValueError:\n dat = float(item)\n # e.g. 
float('a') gives \"ValueError: could not convert string \n # to float: 'a'\"\n except:\n dat = item\n\n out_l.append(dat)\n return out_l\n\n return [item_type(item) for item \n in string.split('[')[1].split(']')[0].split(',')]\n\n @staticmethod\n def _fasta_entry_formatter(sid, desc, seq, col_meta):\n # Formats the ID, description, stringed metadata, and sequence\n # to follow the FASTA format.\n # There are 4 possible scenarios, note that the identifier string\n # is always expected to have a non-empty value:\n # - all information exists\n # - col_meta is an empty string\n # - description is an empty string\n # - both col_meta and description are empty strings\n\n # Checks if ID is empty\n if len(sid) < 1:\n raise ValueError('Cannot create FASTA file: identifier string cannot be empty.')\n # Description is not empty\n if len(desc) > 0:\n if len(col_meta) > 0:\n return '>{} {} {}\\n{}'.format(sid, desc, col_meta, seq)\n return '>{} {}\\n{}'.format(sid, desc, seq)\n # Description is empty but column metadata is not empty\n if len(col_meta) > 0:\n return '>{} {}\\n{}'.format(sid, col_meta, seq)\n # Decription and column metadata are empty\n return '>{}\\n{}'.format(sid, seq)\n\n\nclass DictSerdeMixin:\n \"\"\"Adds ability to read/write an Alignment object from a dictionary.\n \"\"\"\n @classmethod\n def from_dict(cls, d, store_history=True, **kwargs):\n \"\"\"Creates an Alignment object from a dictionary.\n\n Parameters\n ----------\n d : dict\n Dictionary containing the alignment information and relevant\n metadata.\n\n Returns\n -------\n Alignment\n\n \"\"\"\n return cls(d['data'],\n name=d['name'],\n row_ids=d['row_metadata_index'],\n row_descriptions=d['row_metadata'],\n col_ids=d['column_metadata_index'],\n col_descriptions=d['column_metadata'],\n aln_metadata=d['alignment_metadata'],\n store_history=store_history,\n **kwargs)\n\n def to_dict(self, row_metadata=True, column_metadata=True):\n \"\"\"Returns the dictionary representation of the alignment.\n 
Contents of the dictionary use builtin types to maximize\n compatibility.\n\n Parameters\n ----------\n row_metadata : bool, optional\n Whether or not to include row metadata information. (default is True, row metadata is included)\n column_metadata : bool, optional\n Whether or not to include column metadata information. (default is True, column metadata is included)\n\n Returns\n -------\n dict\n\n \"\"\"\n d = {\n 'name': self.name,\n 'data': self.data.data,\n 'alignment_metadata': self.alignment_metadata,\n }\n if row_metadata:\n d['row_metadata'] = self.row_metadata.to_dict(orient='list')\n d['row_metadata_index'] = self.row_metadata.index.tolist()\n if column_metadata:\n d['column_metadata'] = self.column_metadata.to_dict(orient='list')\n d['column_metadata_index'] = self.column_metadata.index.tolist()\n return d\n\nclass JsonSerdeMixin(DictSerdeMixin):\n \"\"\"Adds ability to read/write an Alignment object from a JSON file.\n\n The underlying organization of the JSON encoding is based on the dictionary \n created using the DictSerdeMixin dictionary mixin.\n \"\"\"\n @classmethod\n def from_json(cls, path, store_history=True, **kwargs):\n \"\"\"Create an alignment from a JSON file.\n \n Parameters\n ----------\n path : io.IOBase or str\n File stream using a file handler or a string to the path.\n \n Returns\n -------\n Alignment\n\n \"\"\"\n if isinstance(path, io.IOBase):\n # json.load requires a file handler to read the file.\n # io.IOBase is the abstract base class for all kinds of\n # I/O file streaming.\n d = json.load(path)\n elif isinstance(path, str):\n # The user can also input the path where the json file is located.\n # To handle this, the path will have to be opened as a file handler\n with open(path, 'r') as reader:\n d = json.load(reader)\n # JSON structure is based on to_dict and so it is dependent\n # on DictSerdeMixin.\n return cls.from_dict(d, store_history=store_history, **kwargs)\n\n def to_json(self, path=None, row_metadata=True, 
column_metadata=True):\n \"\"\"Saves the alignment as a JSON file.\n\n Parameters\n ----------\n path : str, optional\n Path to save the alignment to.\n row_metadata : bool, optional\n Whether or not to include row metadata information. (default is True, row metadata is included)\n column_metadata : bool, optional\n Whether or not to include column metadata information. (default is True, column metadata is included)\n\n Returns\n -------\n str\n If path is None, returns the JSON-formatted text as a string.\n\n \"\"\"\n # to_json uses to_dict to transform the alignment data\n # into a representation that uses only builtins to maximize\n # compatibility.\n # The resulting dictionary is encoded into JSON.\n d = self.to_dict(\n row_metadata=row_metadata,\n column_metadata=column_metadata)\n json_str = json.dumps(d)\n # If the save path is not specified, the encoded JSON text\n # is returned as a string\n if path is None:\n return json_str\n # If the save path is specified, the JSON encoded text\n # is written as a text file.\n dirpath = os.path.dirname(os.path.abspath(path))\n if not os.path.isdir(dirpath):\n raise OSError('{} does not exist'.format(dirpath))\n with open(path, 'w') as writer:\n print(json_str, file=writer)\n\n\nclass PickleSerdeMixin:\n \"\"\"Adds ability to pickle/unpickle an Alignment object.\n \"\"\"\n @classmethod\n def from_pickle(cls, path, store_history=True, **kwargs):\n \"\"\"Converts a pickled alignment back into an Alignment object.\n\n Parameters\n ----------\n path : io.IOBase or str\n File handler for the pickled alignment or a string to the path.\n\n Returns\n -------\n Alignment\n\n \"\"\"\n if isinstance(path, io.IOBase):\n obj = pickle.load(path)\n elif isinstance(path, str):\n with open(path, 'rb') as reader:\n obj = pickle.load(reader)\n return obj\n\n def to_pickle(self, path=None, **kwargs):\n \"\"\"Pickles the current alignment.\n\n Parameters\n ----------\n path : str, optional\n Path to save the alignment to.\n\n Returns\n 
-------\n bytestring\n If path is None, returns the bytestring representation of the\n pickled alignment.\n\n \"\"\"\n # Pickles the alignment. Underlying methods that do the pickling are\n # __getstate__ and __setstate__.\n pickled = pickle.dumps(self)\n # If path is not provided, the bytestring of the pickle is returned.\n if path is None:\n return pickled\n # If path is provided, the pickle is written to file.\n dirpath = os.path.dirname(os.path.abspath(path))\n if not os.path.isdir(dirpath):\n raise OSError('{} does not exist'.format(dirpath))\n with open(path, 'wb') as writer:\n writer.write(pickled)\n\n def __getstate__(self):\n # This method gets called when the Alignment object\n # is being pickled.\n d = {k: v for k, v in self.__dict__.items() if k != 'data'}\n d['data'] = self.data.data\n return d\n\n def __setstate__(self, d):\n # This method gets called when the pickled object\n # is being unpickled back into an Alignment object.\n d['data'] = SeqMatrix(d['data'])\n self.__dict__ = d\n\n\nclass NexusSerdeMixin:\n pass\n\n\nclass PhylipSerdeMixin:\n pass\n\n\ndef col_metadata_to_str(column_metadata, included_keys, encoders=None, template='c|{}={}', index_template='ci|{}={}'):\n \"\"\"Transforms the column metadata DataFrame into a string representation.\n \n Parameters\n ----------\n column_metadata : pandas.DataFrame\n Column metadata\n included_keys : list of str\n List of column metadata column names that to be included in the\n string representation.\n encoders : dict of callable, optional\n Dictionary of functions used to transform column metadata values.\n Keys are expected to match the column names of the column metadata\n DataFrame. (default is None, all columns will be transformed using the\n `str` string constructor)\n template : str, optional\n Template used for formatting the string. 
Template should have 2\n slots for the key and the column value.\n \n Returns\n -------\n str\n Column metadata categories and values represented as a string.\n\n \"\"\"\n # Creates a tuple generator giving the filtered set of column metadata\n # Each tuple generated consists of the column name and the list of values\n # for that column. \n included_values = (\n (k, v) for k, v in column_metadata.to_dict(orient='list').items()\n if k in included_keys\n )\n if encoders is None:\n encoders = dict()\n # Creates a list of stringed column metadata where each string is the\n # contains the stringed data of a column metadata category (column)\n # The metadata column is transformed into a string by consuming\n # the `included_values` generator and calling `col_metadata_str_formatter`\n # for each item yielded.\n str_list = [\n col_metadata_str_formatter(\n k, v, encoders[k] if k in encoders.keys() else None, template)\n for k, v in included_values\n ]\n str_index = [col_metadata_str_formatter(\n 'index', column_metadata.index.tolist(),\n encoders['index'] if 'index' in encoders.keys() else None, \n index_template)\n ]\n # Each column's string representation is separated by a whitespace\n return ' '.join(str_index + str_list)\n\ndef col_metadata_str_formatter(key, value, encoder:callable=None, template='c|{}={}'):\n \"\"\"Returns the string representation of a column metadata category.\n \n Parameters\n ----------\n key : str\n Name of column metadata category (column name in the DataFrame).\n value : list\n Column metadata category values.\n encoder : callable\n Function used to transform the list of values into a string.\n template : str, optional\n Template used for formatting the string. 
Template should have 2\n slots for the key and the column value.\n\n Returns\n -------\n str\n String representation of the column metadata category and its values.\n\n \"\"\"\n if encoder is None:\n encoder = lambda x: _whitespace_regexp.sub('', str(x))\n return template.format(key, encoder(value))\n\ndef make_col_meta_dict(description, decoders):\n matches = _column_metadata_string_regexp.findall(description)\n return {\n k: (decoders[k](v) if k in decoders.keys() else eval(v))\n for k, v in matches\n }\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
lmmentel/mendeleev | [
"f6ac866c831e3041910ae97a7177301b1be5b3e7"
] | [
"mendeleev/vis/seaborn.py"
] | [
"from typing import Tuple\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef heatmap(\n elements: pd.DataFrame,\n prop: str,\n style: str = \"whitegrid\",\n figsize: Tuple[int] = (16, 10),\n cmap: str = \"RdBu_r\",\n lw: int = 1,\n output: str = None,\n **kwargs\n):\n \"\"\"\n Plot a heatmap of the given property\n\n Args:\n elements: DataFrame with data about elements\n prop : Name of the attribute of Element object that is available from the\n elements table\n style : Seaborn style option, default='whitegrid\n figsize : Size of the plot, default=(16, 10)\n cmap : Colormap to use, default='RdBu_r'\n lw : Seaborn heatmap `linewidths` argumentm default=1,\n see http://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.heatmap.html\n output : File name to save the plot, by default nothing is saved\n \"\"\"\n\n # add lanthanides and actinides\n\n keys = [\"period\", \"group_id\", prop]\n els = elements[keys].dropna()\n elements_rect = els.pivot(*keys)\n\n sns.set(font_scale=1.5, style=style, rc={\"figure.figsize\": figsize})\n mask = np.asarray(elements_rect.isnull())\n ax = sns.heatmap(elements_rect, cmap=cmap, mask=mask, linewidths=lw, **kwargs)\n n = len(ax.xaxis.get_ticklabels())\n ax.set_yticklabels(elements_rect.index[::-1], rotation=0)\n ax.set_xticklabels(list(range(1, n + 1)))\n ax.xaxis.tick_top()\n ax.xaxis.set_label_position(\"top\")\n ax.set_xlabel(\"Group\")\n ax.set_ylabel(\"Period\")\n if output is not None:\n plt.savefig(output)\n return ax\n"
] | [
[
"matplotlib.pyplot.savefig"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
baoy-nlp/DSS-VAE | [
"855f5722301b2d22aef622e7bb8fef74a759f9de",
"855f5722301b2d22aef622e7bb8fef74a759f9de",
"855f5722301b2d22aef622e7bb8fef74a759f9de"
] | [
"dss_vae/structs/distance_tree.py",
"dss_vae/utils/schedule_funs.py",
"dss_vae/utils/input_funcs.py"
] | [
"import re\n\nimport nltk\nimport numpy\n\nword_tags = ['CC', 'CD', 'DT', 'EX', 'FW', 'IN', 'JJ', 'JJR',\n 'JJS', 'LS', 'MD', 'NN', 'NNS', 'NNP', 'NNPS', 'PDT',\n 'POS', 'PRP', 'PRP$', 'RB', 'RBR', 'RBS', 'RP',\n 'SYM', 'TO', 'UH', 'VB', 'VBD', 'VBG', 'VBN', 'VBP',\n 'VBZ', 'WDT', 'WP', 'WP$', 'WRB']\ncurrency_tags_words = ['#', '$', 'C$', 'A$']\neclipse = ['*', '*?*', '0', '*T*', '*ICH*', '*U*', '*RNR*', '*EXP*', '*PPA*', '*NOT*']\npunctuation_tags = ['.', ',', ':', '-LRB-', '-RRB-', '\\'\\'', '``']\npunctuation_words = ['.', ',', ':', '-LRB-', '-RRB-', '\\'\\'', '``',\n '--', ';', '-', '?', '!', '...', '-LCB-',\n '-RCB-']\ndeleted_tags = ['TOP', '-NONE-', ',', ':', '``', '\\'\\'']\n\n\ndef process_arc(label):\n labels = label.split('+')\n new_arc = []\n for sub_labels in labels:\n if sub_labels == 'ADVP':\n sub_labels = 'PRT'\n new_arc.append(sub_labels)\n label = '+'.join(new_arc)\n return label\n\n\ndef process_none(tree):\n if isinstance(tree, nltk.Tree):\n label = tree.label()\n if label == '-NONE-':\n return None\n else:\n tr = []\n for node in tree:\n new_node = process_none(node)\n if new_node is not None:\n tr.append(new_node)\n if len(tr) == 0:\n return None\n else:\n return nltk.Tree(label, tr)\n else:\n return tree\n\n\ndef build_nltk_tree(depth, arc, tag, sen, arc_dict, tag_dict, stag_dict, stags=None):\n \"\"\"\n stags are the stanford predicted tags present in the train/valid/test files.\n \"\"\"\n assert len(sen) > 0\n assert len(depth) == len(sen) - 1, (\"%s_%s\" % (len(depth), len(sen)))\n if stags:\n assert len(stags) == len(tag)\n\n if len(sen) == 1:\n tag_list = str(tag_dict[tag[0]]).split('+')\n tag_list.reverse()\n # if stags, put the real stanford pos TAG for the word and leave the\n # unary chain on top.\n if stags is not None:\n assert len(stags) > 0\n tag_list.insert(0, str(stag_dict[stags[0]]))\n word = str(sen[0])\n for t in tag_list:\n word = nltk.Tree(t, [word])\n assert isinstance(word, nltk.Tree)\n return word\n else:\n idx = 
numpy.argmax(depth)\n node0 = build_nltk_tree(\n depth[:idx], arc[:idx], tag[:idx + 1], sen[:idx + 1],\n arc_dict, tag_dict, stag_dict, stags[:idx + 1] if stags else None)\n node1 = build_nltk_tree(\n depth[idx + 1:], arc[idx + 1:], tag[idx + 1:], sen[idx + 1:],\n arc_dict, tag_dict, stag_dict, stags[idx + 1:] if stags else None)\n\n if node0.label() != '<empty>' and node1.label() != '<empty>':\n tr = [node0, node1]\n elif node0.label() == '<empty>' and node1.label() != '<empty>':\n tr = [c for c in node0] + [node1]\n elif node0.label() != '<empty>' and node1.label() == '<empty>':\n tr = [node0] + [c for c in node1]\n elif node0.label() == '<empty>' and node1.label() == '<empty>':\n tr = [c for c in node0] + [c for c in node1]\n\n arc_list = str(arc_dict[arc[idx]]).split('+')\n arc_list.reverse()\n for a in arc_list:\n if isinstance(tr, nltk.Tree):\n tr = [tr]\n tr = nltk.Tree(a, tr)\n\n return tr\n\n\ndef mrg(tr):\n if isinstance(tr, str):\n return '( %s )' % tr\n # return tr + ' '\n else:\n s = '('\n for subtr in tr:\n s += mrg(subtr) + ' '\n s += ')'\n return s\n\n\ndef get_brackets(tree, start_idx=0, root=False):\n assert isinstance(tree, nltk.Tree)\n label = tree.label()\n label = label.replace('ADVP', 'PRT')\n\n brackets = set()\n if isinstance(tree[0], nltk.Tree):\n end_idx = start_idx\n for node in tree:\n node_brac, next_idx = get_brackets(node, end_idx)\n brackets.update(node_brac)\n end_idx = next_idx\n if not root:\n brackets.add((start_idx, end_idx, label))\n else:\n end_idx = start_idx + 1\n\n return brackets, end_idx\n\n\ndef normalize(x):\n return x / (sum(x) + 1e-8)\n\n\ndef tree2list(tree, parent_arc=[]):\n if isinstance(tree, nltk.Tree):\n label = tree.label()\n if isinstance(tree[0], nltk.Tree):\n label = re.split('-|=', tree.label())[0]\n root_arc_list = parent_arc + [label]\n root_arc = '+'.join(root_arc_list)\n if len(tree) == 1:\n root, arc, tag = tree2list(tree[0], parent_arc=root_arc_list)\n elif len(tree) == 2:\n c0, arc0, tag0 = 
tree2list(tree[0])\n c1, arc1, tag1 = tree2list(tree[1])\n root = [c0, c1]\n arc = arc0 + [root_arc] + arc1\n tag = tag0 + tag1\n else:\n c0, arc0, tag0 = tree2list(tree[0])\n c1, arc1, tag1 = tree2list(nltk.Tree('<empty>', tree[1:]))\n if bin == 0:\n root = [c0] + c1\n else:\n root = [c0, c1]\n arc = arc0 + [root_arc] + arc1\n tag = tag0 + tag1\n return root, arc, tag\n else:\n if len(parent_arc) == 1:\n parent_arc.insert(0, '<empty>')\n # parent_arc[-1] = '<POS>'\n del parent_arc[-1]\n return str(tree), [], ['+'.join(parent_arc)]\n\n\ndef get_distance(root):\n if isinstance(root, list):\n dist_list = []\n depth_list = []\n for child in root:\n dist, depth = get_distance(child)\n dist_list.append(dist)\n depth_list.append(depth)\n\n max_depth = max(depth_list)\n\n out = dist_list[0]\n for dist in dist_list[1:]:\n out.append(max_depth)\n out.extend(dist)\n return out, max_depth + 1\n else:\n return [], 1\n",
"# MIT License\n\n# Copyright (c) 2018 the NJUNLP groups.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Author baoyu.nlp \n# Time 2019-01-23 09:28\n\nimport numpy as np\nimport torch\n\n\ndef unk_replace(input_sequence, dropoutr, vocab):\n if dropoutr > 0.:\n prob = torch.rand(input_sequence.size())\n if torch.cuda.is_available():\n prob = prob.cuda()\n prob[(input_sequence.data - vocab.sos_id) * (input_sequence.data - vocab.pad_id) * (\n input_sequence.data - vocab.eos_id) == 0] = 1\n decoder_input_sequence = input_sequence.clone()\n decoder_input_sequence[prob < dropoutr] = vocab.unk_id\n return decoder_input_sequence\n return input_sequence\n\n\ndef kl_anneal_function(anneal_function, step, k, x0):\n if anneal_function == \"fixed\":\n return 1.0\n elif anneal_function == 'logistic':\n return float(1 / (1 + np.exp(-k * (step - x0))))\n elif anneal_function == 'sigmoid':\n return float(1 / (1 + np.exp(0.001 * (x0 - step))))\n elif anneal_function == 
'negative-sigmoid':\n return float(1 / (1 + np.exp(-0.001 * (x0 - step))))\n elif anneal_function == 'linear':\n return min(1, step / x0)\n\n\ndef wd_anneal_function(unk_max, anneal_function, step, k, x0):\n return unk_max * kl_anneal_function(anneal_function, step, k, x0)\n",
"# coding=utf-8\n\nimport torch\nfrom torch.autograd import Variable\n\n\ndef word2id(sents, vocab):\n if type(sents[0]) == list:\n return [[vocab[w] for w in s] for s in sents]\n else:\n return [vocab[w] for w in sents]\n\n\ndef id2word(sents, vocab):\n if type(sents[0]) == list:\n return [robust_id2word(s, vocab) for s in sents]\n else:\n return robust_id2word(sents, vocab)\n\n\ndef robust_id2word(sents, vocab):\n res = []\n for w in sents:\n if w == vocab.sos_id or w == vocab.pad_id:\n pass\n elif w == vocab.eos_id:\n break\n else:\n res.append(vocab.id2word[w])\n return res\n\n\ndef padding_input(sents, pad_token=\"<pad>\", tgt_len=-1):\n \"\"\"\n padding the token sequence to max length\n Args:\n sents:\n pad_token:\n tgt_len:\n\n Returns:\n\n \"\"\"\n if tgt_len == -1:\n tgt_len = max(len(s) for s in sents)\n batch_size = len(sents)\n seqs = []\n for i in range(batch_size):\n seqs.append(sents[i][0:tgt_len] + [pad_token] * (tgt_len - len(sents[i])))\n return seqs\n\n\ndef scaling_input(sent, tgt_len=-1, eos_token=\"</s>\", scale=0.0):\n \"\"\"\n scaling the token sequence to min{ max_tgt_len, (1+scale) * len(sent) }\n Args:\n sent:\n tgt_len:\n eos_token:\n scale:\n\n Returns:\n\n \"\"\"\n if scale <= 0:\n return sent\n else:\n pad_len = int(len(sent) * scale)\n if tgt_len != -1:\n # cur_dif = tgt_len - len(sent)\n # pad_len = pad_len if pad_len < cur_diff else cur_diff\n pad_len = tgt_len - len(sent)\n return sent + [eos_token] * pad_len\n\n\ndef to_input_variable(sequences, vocab, tgt_len=-1, training=True, append_boundary_sym=False,\n batch_first=False, shuffle=False):\n \"\"\"\n given a list of sequences,\n return a tensor of shape (max_sent_len, batch_size)\n \"\"\"\n from .tensor_ops import get_long_tensor\n if not isinstance(sequences[0], list):\n sequences = [sequences]\n if append_boundary_sym:\n sequences = [['<s>'] + seq + ['</s>'] for seq in sequences]\n\n pad_sents = padding_input(sequences, tgt_len=tgt_len)\n seqs = word2id(pad_sents, vocab)\n\n 
if not training:\n with torch.no_grad():\n seqs_var = Variable(get_long_tensor(seqs), requires_grad=False)\n else:\n seqs_var = Variable(get_long_tensor(seqs), requires_grad=False)\n if not batch_first:\n seqs_var = seqs_var.transpose(1, 0).contiguous()\n shuffle_dim = -1\n else:\n shuffle_dim = 0\n\n if shuffle:\n from .tensor_ops import shuffle_2d\n return shuffle_2d(inputs=seqs_var, dim=shuffle_dim)\n\n return seqs_var\n\n\ndef to_input_dict(examples,\n vocab,\n max_tgt_len=-1,\n training=True,\n src_append=True,\n tgt_append=True,\n use_tgt=False,\n use_tag=False,\n use_dst=False,\n shuffle_tgt=False,\n scale_to_tgt=0.0\n ):\n from .tensor_ops import get_float_tensor\n sources = [e.src for e in examples]\n sources_length = [len(c) for c in sources] if not src_append else [len(c) + 2 for c in sources]\n max_src_length = int(max(sources_length) * (1 + scale_to_tgt))\n batch_sources = to_input_variable(\n sequences=sources, vocab=vocab.src, tgt_len=max_src_length,\n training=training, append_boundary_sym=src_append, batch_first=True\n )\n if not use_tgt:\n return {\n \"src\": batch_sources,\n \"src_len\": sources_length,\n }\n\n targets = [e.tgt for e in examples]\n if max_tgt_len == -1:\n targets_length = [len(c) for c in targets] if not tgt_append else [len(c) + 2 for c in targets]\n max_tgt_length = max(targets_length)\n common_length = max(max_src_length, max_tgt_length)\n else:\n common_length = max_tgt_len\n if scale_to_tgt > 0.0:\n batch_sources = to_input_variable(\n sequences=sources, vocab=vocab.src, tgt_len=common_length,\n training=training, append_boundary_sym=src_append, batch_first=True\n )\n batch_targets = to_input_variable(\n sequences=targets, vocab=vocab.tgt, tgt_len=common_length,\n training=training, append_boundary_sym=tgt_append, batch_first=True\n )\n if use_dst:\n distances = [e.dst for e in examples]\n batch_distances = []\n for dst in distances:\n padded_dst = dst + [0] * (common_length - 1 - len(dst))\n 
batch_distances.append(padded_dst)\n batch_distances = get_float_tensor(batch_distances)\n else:\n batch_distances = None\n\n if use_tag:\n postags = [e.tag for e in examples]\n syntags = [e.arc for e in examples]\n batch_postags = to_input_variable(\n sequences=postags, vocab=vocab.arc, tgt_len=common_length,\n training=training, append_boundary_sym=True, batch_first=True\n )\n batch_syntags = to_input_variable(\n sequences=syntags, vocab=vocab.arc, tgt_len=common_length,\n training=training, append_boundary_sym=True, batch_first=True\n )\n else:\n batch_postags = None\n batch_syntags = None\n if not shuffle_tgt:\n return {\n \"src\": batch_sources,\n \"src_len\": sources_length,\n \"tgt\": batch_targets,\n \"dst\": batch_distances,\n \"tag\": batch_postags,\n \"arc\": batch_syntags,\n }\n else:\n targets = [e.tgt for e in examples]\n shuffle_targets, shuffle_positions = to_input_variable(sequences=targets, vocab=vocab.tgt,\n tgt_len=common_length,\n training=training, append_boundary_sym=tgt_append,\n batch_first=True,\n shuffle=True)\n\n return {\n \"src\": batch_sources,\n \"src_len\": sources_length,\n \"tgt\": batch_targets,\n \"s_tgt\": shuffle_targets,\n \"s_pos\": shuffle_positions,\n \"dst\": batch_distances,\n \"tag\": batch_postags,\n \"arc\": batch_syntags,\n }\n\n\ndef to_target_word(log_prob, vocab):\n _, word_ids = log_prob.sort(dim=-1, descending=True)\n word_ids = word_ids[:, :, 0].data.tolist()\n return [[[id2word(sents, vocab)], [-1]] for sents in word_ids]\n\n\ndef data_to_word(tensor, vocab):\n word_ids = tensor.squeeze(1).data.tolist()\n return [[[id2word(sents, vocab)], [-1]] for sents in word_ids]\n\n\ndef reverse_to_word(inputs, vocab, batch_first=True, use_bpe=True):\n def trim(s, t):\n sentence = []\n for w in s:\n if w == t:\n break\n sentence.append(w)\n return sentence\n\n def filter_special(tok):\n return tok not in (\"<s>\", \"<pad>\")\n\n if not batch_first:\n inputs.t_()\n\n with torch.cuda.device_of(inputs):\n input_list = 
inputs.tolist()\n\n process_ret = [id2word(ex, vocab) for ex in input_list] # denumericalize\n process_ret = [trim(ex, \"</s>\") for ex in process_ret] # trim past frst eos\n if use_bpe:\n process_ret = [\" \".join(filter(filter_special, ex)).replace(\"@@ \", \"\") for ex in process_ret]\n else:\n process_ret = [\" \".join(filter(filter_special, ex)) for ex in process_ret]\n ret = [[[r], [-1]] for r in process_ret]\n return ret\n"
] | [
[
"numpy.argmax"
],
[
"numpy.exp",
"torch.cuda.is_available"
],
[
"torch.no_grad",
"torch.cuda.device_of"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AlexLJC/MLTetris | [
"dc47321746bcd67989cb7190941827c317036d6b"
] | [
"python/TetrisDRL.py"
] | [
"# Import\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nfrom collections import deque\nimport random\n\nimport tetris as tetris\n\n\n# Hyperparameter \n\nnum_episodes = 500\nnum_exploration_episodes = 100\nmax_len_episode = 1000\nbatch_size = 40\nlearning_rate = 0.005\ngamma = 0.95\ninitial_epsilon = 1.0\nfinal_epsilon = 0.01\n\neps_decay = 0.995\neps_min = 0.01\n\n\nclass QNetwork(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.state_dim = 216\n self.action_dim = 36\n self.epsilon = 1.\n self.dense1 = tf.keras.layers.Dense(units=216, input_dim=216,activation=tf.nn.relu)\n self.dense2 = tf.keras.layers.Dense(units=116, activation=tf.nn.relu)\n self.dense3 = tf.keras.layers.Dense(units=36, activation=tf.nn.relu)\n self.dense4 = tf.keras.layers.Dense(units=self.action_dim)\n \n \n \n self.model = self.create_model()\n \n def call(self, inputs):\n x = self.dense1(inputs)\n x = self.dense2(x)\n x = self.dense3(x)\n return x\n \n def create_model(self):\n# model = tf.keras.Sequential([\n# Input((self.state_dim,)),\n# Dense(32, activation='relu'),\n# Dense(16, activation='relu'),\n# Dense(self.action_dim)\n# ])\n model = tf.keras.models.Sequential()\n model.add(self.dense1)\n model.add(self.dense2)\n model.add(self.dense3)\n model.add(self.dense4)\n model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(learning_rate))\n return model\n \n def predict(self, state):\n return self.model.predict(state)\n \n def get_action(self, state):\n state = np.reshape(state, [1, self.state_dim])\n self.epsilon *= eps_decay\n self.epsilon = max(self.epsilon, eps_min)\n q_value = self.predict(state)[0]\n if np.random.random() < self.epsilon:\n return random.randint(0, 35)\n \n \n return np.argmax(q_value)\n \n def train(self, states, targets):\n self.model.fit(states, targets, epochs=1, verbose=0)\nclass ReplayBuffer:\n def __init__(self, capacity=100000):\n self.buffer = deque(maxlen=capacity)\n \n def put(self, state, action, reward, 
next_state, done):\n self.buffer.append([state, action, reward, next_state, done])\n \n def sample(self):\n sample = random.sample(self.buffer, batch_size)\n states, actions, rewards, next_states, done = map(np.asarray, zip(*sample))\n states = np.array(states).reshape(batch_size, -1)\n next_states = np.array(next_states).reshape(batch_size, -1)\n return states, actions, rewards, next_states, done\n \n def size(self):\n return len(self.buffer)\n \nclass Agent:\n def __init__(self, env):\n self.env = env\n self.state_dim = 216\n self.action_dim = 36 # move 0 - 8 rotate 0 - 3 = 9*4\n\n self.model = QNetwork()\n self.target_model = QNetwork()\n self.target_update()\n\n self.buffer = ReplayBuffer()\n \n self.max_score_now = 0\n def target_update(self):\n weights = self.model.model.get_weights()\n self.target_model.model.set_weights(weights)\n \n def replay(self):\n for _ in range(10):\n states, actions, rewards, next_states, done = self.buffer.sample()\n targets = self.target_model.predict(states)\n next_q_values = self.target_model.predict(next_states).max(axis=1)\n #print(states, actions, rewards, next_states, done,next_q_values )\n targets[range(batch_size), actions] = rewards + (1-done) * next_q_values * gamma\n self.model.train(states, targets)\n \n def train(self, max_episodes=1000):\n print(\"Start.\")\n for ep in range(max_episodes):\n done, total_reward = False, 0\n state = self.env.reset()\n while not done:\n action = self.model.get_action(state)\n #print(type(action),action)\n next_state, reward, done, info = self.env.step_action(int(action))\n self.buffer.put(state, action, reward, next_state, done)\n total_reward += reward\n state = next_state\n \n \n \n if self.buffer.size() >= batch_size:\n self.replay()\n self.target_update()\n if self.env.score > self.max_score_now:\n self.max_score_now = self.env.score\n for i in range(len(self.env.pannel)):\n print(self.env.pannel[i])\n print(\"Total Steps\",self.env.total_steps,\"Score\",self.env.score,\"Max 
Score\",self.max_score_now)\n print('EP{} EpisodeReward={}'.format(ep, total_reward))\n print(\"===========================================================\")\n #wandb.log({'Reward': total_reward})\n\n\ndef main():\n \n env = tetris.Tertris(10,20)\n agent = Agent(env)\n agent.train(max_episodes=100000)\n\nif __name__ == \"__main__\":\n main()"
] | [
[
"numpy.random.random",
"numpy.reshape",
"tensorflow.keras.layers.Dense",
"numpy.argmax",
"tensorflow.keras.optimizers.Adam",
"numpy.array",
"tensorflow.keras.models.Sequential"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
rocketbot-cl/recognition | [
"cca8a87070ccaca3a26e37345c36ab1bf836e258",
"cca8a87070ccaca3a26e37345c36ab1bf836e258",
"cca8a87070ccaca3a26e37345c36ab1bf836e258",
"cca8a87070ccaca3a26e37345c36ab1bf836e258",
"cca8a87070ccaca3a26e37345c36ab1bf836e258",
"cca8a87070ccaca3a26e37345c36ab1bf836e258",
"cca8a87070ccaca3a26e37345c36ab1bf836e258"
] | [
"libs/imutils/feature/rootsift.py",
"libs/numpy/lib/ufunclike.py",
"libs/numpy/f2py/tests/test_regression.py",
"libs/numpy/core/tests/test_shape_base.py",
"libs/numpy/distutils/fcompiler/absoft.py",
"libs/numpy/fft/setup.py",
"libs/numpy/ma/tests/test_old_ma.py"
] | [
"# import the necessary packages\r\nfrom __future__ import absolute_import\r\nimport numpy as np\r\nimport cv2\r\nfrom ..convenience import is_cv2\r\n\r\nclass RootSIFT:\r\n\tdef __init__(self):\r\n\t\t# initialize the SIFT feature extractor for OpenCV 2.4\r\n\t\tif is_cv2():\r\n\t\t\tself.extractor = cv2.DescriptorExtractor_create(\"SIFT\")\r\n\r\n\t\t# otherwise initialize the SIFT feature extractor for OpenCV 3+\r\n\t\telse:\r\n\t\t\tself.extractor = cv2.xfeatures2d.SIFT_create()\r\n\r\n\tdef compute(self, image, kps, eps=1e-7):\r\n\t\t# compute SIFT descriptors\r\n\t\t(kps, descs) = self.extractor.compute(image, kps)\r\n\r\n\t\t# if there are no keypoints or descriptors, return an empty tuple\r\n\t\tif len(kps) == 0:\r\n\t\t\treturn ([], None)\r\n\r\n\t\t# apply the Hellinger kernel by first L1-normalizing and taking the\r\n\t\t# square-root\r\n\t\tdescs /= (descs.sum(axis=1, keepdims=True) + eps)\r\n\t\tdescs = np.sqrt(descs)\r\n\r\n\t\t# return a tuple of the keypoints and descriptors\r\n\t\treturn (kps, descs)",
"\"\"\"\r\nModule of functions that are like ufuncs in acting on arrays and optionally\r\nstoring results in an output array.\r\n\r\n\"\"\"\r\n__all__ = ['fix', 'isneginf', 'isposinf']\r\n\r\nimport numpy.core.numeric as nx\r\nfrom numpy.core.overrides import (\r\n array_function_dispatch, ARRAY_FUNCTION_ENABLED,\r\n)\r\nimport warnings\r\nimport functools\r\n\r\n\r\ndef _deprecate_out_named_y(f):\r\n \"\"\"\r\n Allow the out argument to be passed as the name `y` (deprecated)\r\n\r\n In future, this decorator should be removed.\r\n \"\"\"\r\n @functools.wraps(f)\r\n def func(x, out=None, **kwargs):\r\n if 'y' in kwargs:\r\n if 'out' in kwargs:\r\n raise TypeError(\r\n \"{} got multiple values for argument 'out'/'y'\"\r\n .format(f.__name__)\r\n )\r\n out = kwargs.pop('y')\r\n # NumPy 1.13.0, 2017-04-26\r\n warnings.warn(\r\n \"The name of the out argument to {} has changed from `y` to \"\r\n \"`out`, to match other ufuncs.\".format(f.__name__),\r\n DeprecationWarning, stacklevel=3)\r\n return f(x, out=out, **kwargs)\r\n\r\n return func\r\n\r\n\r\ndef _fix_out_named_y(f):\r\n \"\"\"\r\n Allow the out argument to be passed as the name `y` (deprecated)\r\n\r\n This decorator should only be used if _deprecate_out_named_y is used on\r\n a corresponding dispatcher function.\r\n \"\"\"\r\n @functools.wraps(f)\r\n def func(x, out=None, **kwargs):\r\n if 'y' in kwargs:\r\n # we already did error checking in _deprecate_out_named_y\r\n out = kwargs.pop('y')\r\n return f(x, out=out, **kwargs)\r\n\r\n return func\r\n\r\n\r\ndef _fix_and_maybe_deprecate_out_named_y(f):\r\n \"\"\"\r\n Use the appropriate decorator, depending upon if dispatching is being used.\r\n \"\"\"\r\n if ARRAY_FUNCTION_ENABLED:\r\n return _fix_out_named_y(f)\r\n else:\r\n return _deprecate_out_named_y(f)\r\n\r\n\r\n@_deprecate_out_named_y\r\ndef _dispatcher(x, out=None):\r\n return (x, out)\r\n\r\n\r\n@array_function_dispatch(_dispatcher, verify=False, 
module='numpy')\r\n@_fix_and_maybe_deprecate_out_named_y\r\ndef fix(x, out=None):\r\n \"\"\"\r\n Round to nearest integer towards zero.\r\n\r\n Round an array of floats element-wise to nearest integer towards zero.\r\n The rounded values are returned as floats.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n An array of floats to be rounded\r\n out : ndarray, optional\r\n A location into which the result is stored. If provided, it must have\r\n a shape that the input broadcasts to. If not provided or None, a\r\n freshly-allocated array is returned.\r\n\r\n Returns\r\n -------\r\n out : ndarray of floats\r\n A float array with the same dimensions as the input.\r\n If second argument is not supplied then a float array is returned\r\n with the rounded values.\r\n\r\n If a second argument is supplied the result is stored there.\r\n The return value `out` is then a reference to that array.\r\n\r\n See Also\r\n --------\r\n trunc, floor, ceil\r\n around : Round to given number of decimals\r\n\r\n Examples\r\n --------\r\n >>> np.fix(3.14)\r\n 3.0\r\n >>> np.fix(3)\r\n 3.0\r\n >>> np.fix([2.1, 2.9, -2.1, -2.9])\r\n array([ 2., 2., -2., -2.])\r\n\r\n \"\"\"\r\n # promote back to an array if flattened\r\n res = nx.asanyarray(nx.ceil(x, out=out))\r\n res = nx.floor(x, out=res, where=nx.greater_equal(x, 0))\r\n\r\n # when no out argument is passed and no subclasses are involved, flatten\r\n # scalars\r\n if out is None and type(res) is nx.ndarray:\r\n res = res[()]\r\n return res\r\n\r\n\r\n@array_function_dispatch(_dispatcher, verify=False, module='numpy')\r\n@_fix_and_maybe_deprecate_out_named_y\r\ndef isposinf(x, out=None):\r\n \"\"\"\r\n Test element-wise for positive infinity, return result as bool array.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n The input array.\r\n out : array_like, optional\r\n A location into which the result is stored. If provided, it must have a\r\n shape that the input broadcasts to. 
If not provided or None, a\r\n freshly-allocated boolean array is returned.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n A boolean array with the same dimensions as the input.\r\n If second argument is not supplied then a boolean array is returned\r\n with values True where the corresponding element of the input is\r\n positive infinity and values False where the element of the input is\r\n not positive infinity.\r\n\r\n If a second argument is supplied the result is stored there. If the\r\n type of that array is a numeric type the result is represented as zeros\r\n and ones, if the type is boolean then as False and True.\r\n The return value `out` is then a reference to that array.\r\n\r\n See Also\r\n --------\r\n isinf, isneginf, isfinite, isnan\r\n\r\n Notes\r\n -----\r\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic\r\n (IEEE 754).\r\n\r\n Errors result if the second argument is also supplied when x is a scalar\r\n input, if first and second arguments have different shapes, or if the\r\n first argument has complex values\r\n\r\n Examples\r\n --------\r\n >>> np.isposinf(np.PINF)\r\n True\r\n >>> np.isposinf(np.inf)\r\n True\r\n >>> np.isposinf(np.NINF)\r\n False\r\n >>> np.isposinf([-np.inf, 0., np.inf])\r\n array([False, False, True])\r\n\r\n >>> x = np.array([-np.inf, 0., np.inf])\r\n >>> y = np.array([2, 2, 2])\r\n >>> np.isposinf(x, y)\r\n array([0, 0, 1])\r\n >>> y\r\n array([0, 0, 1])\r\n\r\n \"\"\"\r\n is_inf = nx.isinf(x)\r\n try:\r\n signbit = ~nx.signbit(x)\r\n except TypeError as e:\r\n raise TypeError('This operation is not supported for complex values '\r\n 'because it would be ambiguous.') from e\r\n else:\r\n return nx.logical_and(is_inf, signbit, out)\r\n\r\n\r\n@array_function_dispatch(_dispatcher, verify=False, module='numpy')\r\n@_fix_and_maybe_deprecate_out_named_y\r\ndef isneginf(x, out=None):\r\n \"\"\"\r\n Test element-wise for negative infinity, return result as bool array.\r\n\r\n Parameters\r\n 
----------\r\n x : array_like\r\n The input array.\r\n out : array_like, optional\r\n A location into which the result is stored. If provided, it must have a\r\n shape that the input broadcasts to. If not provided or None, a\r\n freshly-allocated boolean array is returned.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n A boolean array with the same dimensions as the input.\r\n If second argument is not supplied then a numpy boolean array is\r\n returned with values True where the corresponding element of the\r\n input is negative infinity and values False where the element of\r\n the input is not negative infinity.\r\n\r\n If a second argument is supplied the result is stored there. If the\r\n type of that array is a numeric type the result is represented as\r\n zeros and ones, if the type is boolean then as False and True. The\r\n return value `out` is then a reference to that array.\r\n\r\n See Also\r\n --------\r\n isinf, isposinf, isnan, isfinite\r\n\r\n Notes\r\n -----\r\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic\r\n (IEEE 754).\r\n\r\n Errors result if the second argument is also supplied when x is a scalar\r\n input, if first and second arguments have different shapes, or if the\r\n first argument has complex values.\r\n\r\n Examples\r\n --------\r\n >>> np.isneginf(np.NINF)\r\n True\r\n >>> np.isneginf(np.inf)\r\n False\r\n >>> np.isneginf(np.PINF)\r\n False\r\n >>> np.isneginf([-np.inf, 0., np.inf])\r\n array([ True, False, False])\r\n\r\n >>> x = np.array([-np.inf, 0., np.inf])\r\n >>> y = np.array([2, 2, 2])\r\n >>> np.isneginf(x, y)\r\n array([1, 0, 0])\r\n >>> y\r\n array([1, 0, 0])\r\n\r\n \"\"\"\r\n is_inf = nx.isinf(x)\r\n try:\r\n signbit = nx.signbit(x)\r\n except TypeError as e:\r\n raise TypeError('This operation is not supported for complex values '\r\n 'because it would be ambiguous.') from e\r\n else:\r\n return nx.logical_and(is_inf, signbit, out)\r\n",
"import os\r\nimport pytest\r\n\r\nimport numpy as np\r\nfrom numpy.testing import assert_raises, assert_equal\r\n\r\nfrom . import util\r\n\r\n\r\ndef _path(*a):\r\n return os.path.join(*((os.path.dirname(__file__),) + a))\r\n\r\n\r\nclass TestIntentInOut(util.F2PyTest):\r\n # Check that intent(in out) translates as intent(inout)\r\n sources = [_path('src', 'regression', 'inout.f90')]\r\n\r\n @pytest.mark.slow\r\n def test_inout(self):\r\n # non-contiguous should raise error\r\n x = np.arange(6, dtype=np.float32)[::2]\r\n assert_raises(ValueError, self.module.foo, x)\r\n\r\n # check values with contiguous array\r\n x = np.arange(3, dtype=np.float32)\r\n self.module.foo(x)\r\n assert_equal(x, [3, 1, 2])\r\n",
"import pytest\r\nimport numpy as np\r\nfrom numpy.core import (\r\n array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,\r\n newaxis, concatenate, stack\r\n )\r\nfrom numpy.core.shape_base import (_block_dispatcher, _block_setup,\r\n _block_concatenate, _block_slicing)\r\nfrom numpy.testing import (\r\n assert_, assert_raises, assert_array_equal, assert_equal,\r\n assert_raises_regex, assert_warns\r\n )\r\n\r\n\r\nclass TestAtleast1d:\r\n def test_0D_array(self):\r\n a = array(1)\r\n b = array(2)\r\n res = [atleast_1d(a), atleast_1d(b)]\r\n desired = [array([1]), array([2])]\r\n assert_array_equal(res, desired)\r\n\r\n def test_1D_array(self):\r\n a = array([1, 2])\r\n b = array([2, 3])\r\n res = [atleast_1d(a), atleast_1d(b)]\r\n desired = [array([1, 2]), array([2, 3])]\r\n assert_array_equal(res, desired)\r\n\r\n def test_2D_array(self):\r\n a = array([[1, 2], [1, 2]])\r\n b = array([[2, 3], [2, 3]])\r\n res = [atleast_1d(a), atleast_1d(b)]\r\n desired = [a, b]\r\n assert_array_equal(res, desired)\r\n\r\n def test_3D_array(self):\r\n a = array([[1, 2], [1, 2]])\r\n b = array([[2, 3], [2, 3]])\r\n a = array([a, a])\r\n b = array([b, b])\r\n res = [atleast_1d(a), atleast_1d(b)]\r\n desired = [a, b]\r\n assert_array_equal(res, desired)\r\n\r\n def test_r1array(self):\r\n \"\"\" Test to make sure equivalent Travis O's r1array function\r\n \"\"\"\r\n assert_(atleast_1d(3).shape == (1,))\r\n assert_(atleast_1d(3j).shape == (1,))\r\n assert_(atleast_1d(3.0).shape == (1,))\r\n assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))\r\n\r\n\r\nclass TestAtleast2d:\r\n def test_0D_array(self):\r\n a = array(1)\r\n b = array(2)\r\n res = [atleast_2d(a), atleast_2d(b)]\r\n desired = [array([[1]]), array([[2]])]\r\n assert_array_equal(res, desired)\r\n\r\n def test_1D_array(self):\r\n a = array([1, 2])\r\n b = array([2, 3])\r\n res = [atleast_2d(a), atleast_2d(b)]\r\n desired = [array([[1, 2]]), array([[2, 3]])]\r\n assert_array_equal(res, desired)\r\n\r\n 
def test_2D_array(self):\r\n a = array([[1, 2], [1, 2]])\r\n b = array([[2, 3], [2, 3]])\r\n res = [atleast_2d(a), atleast_2d(b)]\r\n desired = [a, b]\r\n assert_array_equal(res, desired)\r\n\r\n def test_3D_array(self):\r\n a = array([[1, 2], [1, 2]])\r\n b = array([[2, 3], [2, 3]])\r\n a = array([a, a])\r\n b = array([b, b])\r\n res = [atleast_2d(a), atleast_2d(b)]\r\n desired = [a, b]\r\n assert_array_equal(res, desired)\r\n\r\n def test_r2array(self):\r\n \"\"\" Test to make sure equivalent Travis O's r2array function\r\n \"\"\"\r\n assert_(atleast_2d(3).shape == (1, 1))\r\n assert_(atleast_2d([3j, 1]).shape == (1, 2))\r\n assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))\r\n\r\n\r\nclass TestAtleast3d:\r\n def test_0D_array(self):\r\n a = array(1)\r\n b = array(2)\r\n res = [atleast_3d(a), atleast_3d(b)]\r\n desired = [array([[[1]]]), array([[[2]]])]\r\n assert_array_equal(res, desired)\r\n\r\n def test_1D_array(self):\r\n a = array([1, 2])\r\n b = array([2, 3])\r\n res = [atleast_3d(a), atleast_3d(b)]\r\n desired = [array([[[1], [2]]]), array([[[2], [3]]])]\r\n assert_array_equal(res, desired)\r\n\r\n def test_2D_array(self):\r\n a = array([[1, 2], [1, 2]])\r\n b = array([[2, 3], [2, 3]])\r\n res = [atleast_3d(a), atleast_3d(b)]\r\n desired = [a[:,:, newaxis], b[:,:, newaxis]]\r\n assert_array_equal(res, desired)\r\n\r\n def test_3D_array(self):\r\n a = array([[1, 2], [1, 2]])\r\n b = array([[2, 3], [2, 3]])\r\n a = array([a, a])\r\n b = array([b, b])\r\n res = [atleast_3d(a), atleast_3d(b)]\r\n desired = [a, b]\r\n assert_array_equal(res, desired)\r\n\r\n\r\nclass TestHstack:\r\n def test_non_iterable(self):\r\n assert_raises(TypeError, hstack, 1)\r\n\r\n def test_empty_input(self):\r\n assert_raises(ValueError, hstack, ())\r\n\r\n def test_0D_array(self):\r\n a = array(1)\r\n b = array(2)\r\n res = hstack([a, b])\r\n desired = array([1, 2])\r\n assert_array_equal(res, desired)\r\n\r\n def test_1D_array(self):\r\n a = 
array([1])\r\n b = array([2])\r\n res = hstack([a, b])\r\n desired = array([1, 2])\r\n assert_array_equal(res, desired)\r\n\r\n def test_2D_array(self):\r\n a = array([[1], [2]])\r\n b = array([[1], [2]])\r\n res = hstack([a, b])\r\n desired = array([[1, 1], [2, 2]])\r\n assert_array_equal(res, desired)\r\n\r\n def test_generator(self):\r\n with assert_warns(FutureWarning):\r\n hstack((np.arange(3) for _ in range(2)))\r\n with assert_warns(FutureWarning):\r\n hstack(map(lambda x: x, np.ones((3, 2))))\r\n\r\n\r\nclass TestVstack:\r\n def test_non_iterable(self):\r\n assert_raises(TypeError, vstack, 1)\r\n\r\n def test_empty_input(self):\r\n assert_raises(ValueError, vstack, ())\r\n\r\n def test_0D_array(self):\r\n a = array(1)\r\n b = array(2)\r\n res = vstack([a, b])\r\n desired = array([[1], [2]])\r\n assert_array_equal(res, desired)\r\n\r\n def test_1D_array(self):\r\n a = array([1])\r\n b = array([2])\r\n res = vstack([a, b])\r\n desired = array([[1], [2]])\r\n assert_array_equal(res, desired)\r\n\r\n def test_2D_array(self):\r\n a = array([[1], [2]])\r\n b = array([[1], [2]])\r\n res = vstack([a, b])\r\n desired = array([[1], [2], [1], [2]])\r\n assert_array_equal(res, desired)\r\n\r\n def test_2D_array2(self):\r\n a = array([1, 2])\r\n b = array([1, 2])\r\n res = vstack([a, b])\r\n desired = array([[1, 2], [1, 2]])\r\n assert_array_equal(res, desired)\r\n\r\n def test_generator(self):\r\n with assert_warns(FutureWarning):\r\n vstack((np.arange(3) for _ in range(2)))\r\n\r\n\r\nclass TestConcatenate:\r\n def test_returns_copy(self):\r\n a = np.eye(3)\r\n b = np.concatenate([a])\r\n b[0, 0] = 2\r\n assert b[0, 0] != a[0, 0]\r\n\r\n def test_exceptions(self):\r\n # test axis must be in bounds\r\n for ndim in [1, 2, 3]:\r\n a = np.ones((1,)*ndim)\r\n np.concatenate((a, a), axis=0) # OK\r\n assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim)\r\n assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1))\r\n\r\n # Scalars cannot be 
concatenated\r\n assert_raises(ValueError, concatenate, (0,))\r\n assert_raises(ValueError, concatenate, (np.array(0),))\r\n\r\n # dimensionality must match\r\n assert_raises_regex(\r\n ValueError,\r\n r\"all the input arrays must have same number of dimensions, but \"\r\n r\"the array at index 0 has 1 dimension\\(s\\) and the array at \"\r\n r\"index 1 has 2 dimension\\(s\\)\",\r\n np.concatenate, (np.zeros(1), np.zeros((1, 1))))\r\n\r\n # test shapes must match except for concatenation axis\r\n a = np.ones((1, 2, 3))\r\n b = np.ones((2, 2, 3))\r\n axis = list(range(3))\r\n for i in range(3):\r\n np.concatenate((a, b), axis=axis[0]) # OK\r\n assert_raises_regex(\r\n ValueError,\r\n \"all the input array dimensions for the concatenation axis \"\r\n \"must match exactly, but along dimension {}, the array at \"\r\n \"index 0 has size 1 and the array at index 1 has size 2\"\r\n .format(i),\r\n np.concatenate, (a, b), axis=axis[1])\r\n assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])\r\n a = np.moveaxis(a, -1, 0)\r\n b = np.moveaxis(b, -1, 0)\r\n axis.append(axis.pop(0))\r\n\r\n # No arrays to concatenate raises ValueError\r\n assert_raises(ValueError, concatenate, ())\r\n\r\n def test_concatenate_axis_None(self):\r\n a = np.arange(4, dtype=np.float64).reshape((2, 2))\r\n b = list(range(3))\r\n c = ['x']\r\n r = np.concatenate((a, a), axis=None)\r\n assert_equal(r.dtype, a.dtype)\r\n assert_equal(r.ndim, 1)\r\n r = np.concatenate((a, b), axis=None)\r\n assert_equal(r.size, a.size + len(b))\r\n assert_equal(r.dtype, a.dtype)\r\n r = np.concatenate((a, b, c), axis=None)\r\n d = array(['0.0', '1.0', '2.0', '3.0',\r\n '0', '1', '2', 'x'])\r\n assert_array_equal(r, d)\r\n\r\n out = np.zeros(a.size + len(b))\r\n r = np.concatenate((a, b), axis=None)\r\n rout = np.concatenate((a, b), axis=None, out=out)\r\n assert_(out is rout)\r\n assert_equal(r, rout)\r\n\r\n def test_large_concatenate_axis_None(self):\r\n # When no axis is given, concatenate uses flattened 
versions.\r\n # This also had a bug with many arrays (see gh-5979).\r\n x = np.arange(1, 100)\r\n r = np.concatenate(x, None)\r\n assert_array_equal(x, r)\r\n\r\n # This should probably be deprecated:\r\n r = np.concatenate(x, 100) # axis is >= MAXDIMS\r\n assert_array_equal(x, r)\r\n\r\n def test_concatenate(self):\r\n # Test concatenate function\r\n # One sequence returns unmodified (but as array)\r\n r4 = list(range(4))\r\n assert_array_equal(concatenate((r4,)), r4)\r\n # Any sequence\r\n assert_array_equal(concatenate((tuple(r4),)), r4)\r\n assert_array_equal(concatenate((array(r4),)), r4)\r\n # 1D default concatenation\r\n r3 = list(range(3))\r\n assert_array_equal(concatenate((r4, r3)), r4 + r3)\r\n # Mixed sequence types\r\n assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3)\r\n assert_array_equal(concatenate((array(r4), r3)), r4 + r3)\r\n # Explicit axis specification\r\n assert_array_equal(concatenate((r4, r3), 0), r4 + r3)\r\n # Including negative\r\n assert_array_equal(concatenate((r4, r3), -1), r4 + r3)\r\n # 2D\r\n a23 = array([[10, 11, 12], [13, 14, 15]])\r\n a13 = array([[0, 1, 2]])\r\n res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]])\r\n assert_array_equal(concatenate((a23, a13)), res)\r\n assert_array_equal(concatenate((a23, a13), 0), res)\r\n assert_array_equal(concatenate((a23.T, a13.T), 1), res.T)\r\n assert_array_equal(concatenate((a23.T, a13.T), -1), res.T)\r\n # Arrays much match shape\r\n assert_raises(ValueError, concatenate, (a23.T, a13.T), 0)\r\n # 3D\r\n res = arange(2 * 3 * 7).reshape((2, 3, 7))\r\n a0 = res[..., :4]\r\n a1 = res[..., 4:6]\r\n a2 = res[..., 6:]\r\n assert_array_equal(concatenate((a0, a1, a2), 2), res)\r\n assert_array_equal(concatenate((a0, a1, a2), -1), res)\r\n assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)\r\n\r\n out = res.copy()\r\n rout = concatenate((a0, a1, a2), 2, out=out)\r\n assert_(out is rout)\r\n assert_equal(res, rout)\r\n\r\n def test_bad_out_shape(self):\r\n a = array([1, 
2])\r\n b = array([3, 4])\r\n\r\n assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))\r\n assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1)))\r\n assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))\r\n concatenate((a, b), out=np.empty(4))\r\n\r\n def test_out_dtype(self):\r\n out = np.empty(4, np.float32)\r\n res = concatenate((array([1, 2]), array([3, 4])), out=out)\r\n assert_(out is res)\r\n\r\n out = np.empty(4, np.complex64)\r\n res = concatenate((array([0.1, 0.2]), array([0.3, 0.4])), out=out)\r\n assert_(out is res)\r\n\r\n # invalid cast\r\n out = np.empty(4, np.int32)\r\n assert_raises(TypeError, concatenate,\r\n (array([0.1, 0.2]), array([0.3, 0.4])), out=out)\r\n\r\n\r\ndef test_stack():\r\n # non-iterable input\r\n assert_raises(TypeError, stack, 1)\r\n\r\n # 0d input\r\n for input_ in [(1, 2, 3),\r\n [np.int32(1), np.int32(2), np.int32(3)],\r\n [np.array(1), np.array(2), np.array(3)]]:\r\n assert_array_equal(stack(input_), [1, 2, 3])\r\n # 1d input examples\r\n a = np.array([1, 2, 3])\r\n b = np.array([4, 5, 6])\r\n r1 = array([[1, 2, 3], [4, 5, 6]])\r\n assert_array_equal(np.stack((a, b)), r1)\r\n assert_array_equal(np.stack((a, b), axis=1), r1.T)\r\n # all input types\r\n assert_array_equal(np.stack(list([a, b])), r1)\r\n assert_array_equal(np.stack(array([a, b])), r1)\r\n # all shapes for 1d input\r\n arrays = [np.random.randn(3) for _ in range(10)]\r\n axes = [0, 1, -1, -2]\r\n expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)]\r\n for axis, expected_shape in zip(axes, expected_shapes):\r\n assert_equal(np.stack(arrays, axis).shape, expected_shape)\r\n assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2)\r\n assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3)\r\n # all shapes for 2d input\r\n arrays = [np.random.randn(3, 4) for _ in range(10)]\r\n axes = [0, 1, 2, -1, -2, -3]\r\n expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10),\r\n (3, 4, 10), (3, 10, 
4), (10, 3, 4)]\r\n for axis, expected_shape in zip(axes, expected_shapes):\r\n assert_equal(np.stack(arrays, axis).shape, expected_shape)\r\n # empty arrays\r\n assert_(stack([[], [], []]).shape == (3, 0))\r\n assert_(stack([[], [], []], axis=1).shape == (0, 3))\r\n # out\r\n out = np.zeros_like(r1)\r\n np.stack((a, b), out=out)\r\n assert_array_equal(out, r1)\r\n # edge cases\r\n assert_raises_regex(ValueError, 'need at least one array', stack, [])\r\n assert_raises_regex(ValueError, 'must have the same shape',\r\n stack, [1, np.arange(3)])\r\n assert_raises_regex(ValueError, 'must have the same shape',\r\n stack, [np.arange(3), 1])\r\n assert_raises_regex(ValueError, 'must have the same shape',\r\n stack, [np.arange(3), 1], axis=1)\r\n assert_raises_regex(ValueError, 'must have the same shape',\r\n stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)\r\n assert_raises_regex(ValueError, 'must have the same shape',\r\n stack, [np.arange(2), np.arange(3)])\r\n # generator is deprecated\r\n with assert_warns(FutureWarning):\r\n result = stack((x for x in range(3)))\r\n assert_array_equal(result, np.array([0, 1, 2]))\r\n\r\n\r\nclass TestBlock:\r\n @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing'])\r\n def block(self, request):\r\n # blocking small arrays and large arrays go through different paths.\r\n # the algorithm is triggered depending on the number of element\r\n # copies required.\r\n # We define a test fixture that forces most tests to go through\r\n # both code paths.\r\n # Ultimately, this should be removed if a single algorithm is found\r\n # to be faster for both small and large arrays.\r\n def _block_force_concatenate(arrays):\r\n arrays, list_ndim, result_ndim, _ = _block_setup(arrays)\r\n return _block_concatenate(arrays, list_ndim, result_ndim)\r\n\r\n def _block_force_slicing(arrays):\r\n arrays, list_ndim, result_ndim, _ = _block_setup(arrays)\r\n return _block_slicing(arrays, list_ndim, result_ndim)\r\n\r\n if request.param == 
'force_concatenate':\r\n return _block_force_concatenate\r\n elif request.param == 'force_slicing':\r\n return _block_force_slicing\r\n elif request.param == 'block':\r\n return block\r\n else:\r\n raise ValueError('Unknown blocking request. There is a typo in the tests.')\r\n\r\n def test_returns_copy(self, block):\r\n a = np.eye(3)\r\n b = block(a)\r\n b[0, 0] = 2\r\n assert b[0, 0] != a[0, 0]\r\n\r\n def test_block_total_size_estimate(self, block):\r\n _, _, _, total_size = _block_setup([1])\r\n assert total_size == 1\r\n\r\n _, _, _, total_size = _block_setup([[1]])\r\n assert total_size == 1\r\n\r\n _, _, _, total_size = _block_setup([[1, 1]])\r\n assert total_size == 2\r\n\r\n _, _, _, total_size = _block_setup([[1], [1]])\r\n assert total_size == 2\r\n\r\n _, _, _, total_size = _block_setup([[1, 2], [3, 4]])\r\n assert total_size == 4\r\n\r\n def test_block_simple_row_wise(self, block):\r\n a_2d = np.ones((2, 2))\r\n b_2d = 2 * a_2d\r\n desired = np.array([[1, 1, 2, 2],\r\n [1, 1, 2, 2]])\r\n result = block([a_2d, b_2d])\r\n assert_equal(desired, result)\r\n\r\n def test_block_simple_column_wise(self, block):\r\n a_2d = np.ones((2, 2))\r\n b_2d = 2 * a_2d\r\n expected = np.array([[1, 1],\r\n [1, 1],\r\n [2, 2],\r\n [2, 2]])\r\n result = block([[a_2d], [b_2d]])\r\n assert_equal(expected, result)\r\n\r\n def test_block_with_1d_arrays_row_wise(self, block):\r\n # # # 1-D vectors are treated as row arrays\r\n a = np.array([1, 2, 3])\r\n b = np.array([2, 3, 4])\r\n expected = np.array([1, 2, 3, 2, 3, 4])\r\n result = block([a, b])\r\n assert_equal(expected, result)\r\n\r\n def test_block_with_1d_arrays_multiple_rows(self, block):\r\n a = np.array([1, 2, 3])\r\n b = np.array([2, 3, 4])\r\n expected = np.array([[1, 2, 3, 2, 3, 4],\r\n [1, 2, 3, 2, 3, 4]])\r\n result = block([[a, b], [a, b]])\r\n assert_equal(expected, result)\r\n\r\n def test_block_with_1d_arrays_column_wise(self, block):\r\n # # # 1-D vectors are treated as row arrays\r\n a_1d = np.array([1, 2, 
3])\r\n b_1d = np.array([2, 3, 4])\r\n expected = np.array([[1, 2, 3],\r\n [2, 3, 4]])\r\n result = block([[a_1d], [b_1d]])\r\n assert_equal(expected, result)\r\n\r\n def test_block_mixed_1d_and_2d(self, block):\r\n a_2d = np.ones((2, 2))\r\n b_1d = np.array([2, 2])\r\n result = block([[a_2d], [b_1d]])\r\n expected = np.array([[1, 1],\r\n [1, 1],\r\n [2, 2]])\r\n assert_equal(expected, result)\r\n\r\n def test_block_complicated(self, block):\r\n # a bit more complicated\r\n one_2d = np.array([[1, 1, 1]])\r\n two_2d = np.array([[2, 2, 2]])\r\n three_2d = np.array([[3, 3, 3, 3, 3, 3]])\r\n four_1d = np.array([4, 4, 4, 4, 4, 4])\r\n five_0d = np.array(5)\r\n six_1d = np.array([6, 6, 6, 6, 6])\r\n zero_2d = np.zeros((2, 6))\r\n\r\n expected = np.array([[1, 1, 1, 2, 2, 2],\r\n [3, 3, 3, 3, 3, 3],\r\n [4, 4, 4, 4, 4, 4],\r\n [5, 6, 6, 6, 6, 6],\r\n [0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0]])\r\n\r\n result = block([[one_2d, two_2d],\r\n [three_2d],\r\n [four_1d],\r\n [five_0d, six_1d],\r\n [zero_2d]])\r\n assert_equal(result, expected)\r\n\r\n def test_nested(self, block):\r\n one = np.array([1, 1, 1])\r\n two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])\r\n three = np.array([3, 3, 3])\r\n four = np.array([4, 4, 4])\r\n five = np.array(5)\r\n six = np.array([6, 6, 6, 6, 6])\r\n zero = np.zeros((2, 6))\r\n\r\n result = block([\r\n [\r\n block([\r\n [one],\r\n [three],\r\n [four]\r\n ]),\r\n two\r\n ],\r\n [five, six],\r\n [zero]\r\n ])\r\n expected = np.array([[1, 1, 1, 2, 2, 2],\r\n [3, 3, 3, 2, 2, 2],\r\n [4, 4, 4, 2, 2, 2],\r\n [5, 6, 6, 6, 6, 6],\r\n [0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0]])\r\n\r\n assert_equal(result, expected)\r\n\r\n def test_3d(self, block):\r\n a000 = np.ones((2, 2, 2), int) * 1\r\n\r\n a100 = np.ones((3, 2, 2), int) * 2\r\n a010 = np.ones((2, 3, 2), int) * 3\r\n a001 = np.ones((2, 2, 3), int) * 4\r\n\r\n a011 = np.ones((2, 3, 3), int) * 5\r\n a101 = np.ones((3, 2, 3), int) * 6\r\n a110 = np.ones((3, 3, 2), int) * 7\r\n\r\n a111 = 
np.ones((3, 3, 3), int) * 8\r\n\r\n result = block([\r\n [\r\n [a000, a001],\r\n [a010, a011],\r\n ],\r\n [\r\n [a100, a101],\r\n [a110, a111],\r\n ]\r\n ])\r\n expected = array([[[1, 1, 4, 4, 4],\r\n [1, 1, 4, 4, 4],\r\n [3, 3, 5, 5, 5],\r\n [3, 3, 5, 5, 5],\r\n [3, 3, 5, 5, 5]],\r\n\r\n [[1, 1, 4, 4, 4],\r\n [1, 1, 4, 4, 4],\r\n [3, 3, 5, 5, 5],\r\n [3, 3, 5, 5, 5],\r\n [3, 3, 5, 5, 5]],\r\n\r\n [[2, 2, 6, 6, 6],\r\n [2, 2, 6, 6, 6],\r\n [7, 7, 8, 8, 8],\r\n [7, 7, 8, 8, 8],\r\n [7, 7, 8, 8, 8]],\r\n\r\n [[2, 2, 6, 6, 6],\r\n [2, 2, 6, 6, 6],\r\n [7, 7, 8, 8, 8],\r\n [7, 7, 8, 8, 8],\r\n [7, 7, 8, 8, 8]],\r\n\r\n [[2, 2, 6, 6, 6],\r\n [2, 2, 6, 6, 6],\r\n [7, 7, 8, 8, 8],\r\n [7, 7, 8, 8, 8],\r\n [7, 7, 8, 8, 8]]])\r\n\r\n assert_array_equal(result, expected)\r\n\r\n def test_block_with_mismatched_shape(self, block):\r\n a = np.array([0, 0])\r\n b = np.eye(2)\r\n assert_raises(ValueError, block, [a, b])\r\n assert_raises(ValueError, block, [b, a])\r\n\r\n to_block = [[np.ones((2,3)), np.ones((2,2))],\r\n [np.ones((2,2)), np.ones((2,2))]]\r\n assert_raises(ValueError, block, to_block)\r\n def test_no_lists(self, block):\r\n assert_equal(block(1), np.array(1))\r\n assert_equal(block(np.eye(3)), np.eye(3))\r\n\r\n def test_invalid_nesting(self, block):\r\n msg = 'depths are mismatched'\r\n assert_raises_regex(ValueError, msg, block, [1, [2]])\r\n assert_raises_regex(ValueError, msg, block, [1, []])\r\n assert_raises_regex(ValueError, msg, block, [[1], 2])\r\n assert_raises_regex(ValueError, msg, block, [[], 2])\r\n assert_raises_regex(ValueError, msg, block, [\r\n [[1], [2]],\r\n [[3, 4]],\r\n [5] # missing brackets\r\n ])\r\n\r\n def test_empty_lists(self, block):\r\n assert_raises_regex(ValueError, 'empty', block, [])\r\n assert_raises_regex(ValueError, 'empty', block, [[]])\r\n assert_raises_regex(ValueError, 'empty', block, [[1], []])\r\n\r\n def test_tuple(self, block):\r\n assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4]))\r\n 
assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)])\r\n\r\n def test_different_ndims(self, block):\r\n a = 1.\r\n b = 2 * np.ones((1, 2))\r\n c = 3 * np.ones((1, 1, 3))\r\n\r\n result = block([a, b, c])\r\n expected = np.array([[[1., 2., 2., 3., 3., 3.]]])\r\n\r\n assert_equal(result, expected)\r\n\r\n def test_different_ndims_depths(self, block):\r\n a = 1.\r\n b = 2 * np.ones((1, 2))\r\n c = 3 * np.ones((1, 2, 3))\r\n\r\n result = block([[a, b], [c]])\r\n expected = np.array([[[1., 2., 2.],\r\n [3., 3., 3.],\r\n [3., 3., 3.]]])\r\n\r\n assert_equal(result, expected)\r\n\r\n def test_block_memory_order(self, block):\r\n # 3D\r\n arr_c = np.zeros((3,)*3, order='C')\r\n arr_f = np.zeros((3,)*3, order='F')\r\n\r\n b_c = [[[arr_c, arr_c],\r\n [arr_c, arr_c]],\r\n [[arr_c, arr_c],\r\n [arr_c, arr_c]]]\r\n\r\n b_f = [[[arr_f, arr_f],\r\n [arr_f, arr_f]],\r\n [[arr_f, arr_f],\r\n [arr_f, arr_f]]]\r\n\r\n assert block(b_c).flags['C_CONTIGUOUS']\r\n assert block(b_f).flags['F_CONTIGUOUS']\r\n\r\n arr_c = np.zeros((3, 3), order='C')\r\n arr_f = np.zeros((3, 3), order='F')\r\n # 2D\r\n b_c = [[arr_c, arr_c],\r\n [arr_c, arr_c]]\r\n\r\n b_f = [[arr_f, arr_f],\r\n [arr_f, arr_f]]\r\n\r\n assert block(b_c).flags['C_CONTIGUOUS']\r\n assert block(b_f).flags['F_CONTIGUOUS']\r\n\r\n\r\ndef test_block_dispatcher():\r\n class ArrayLike:\r\n pass\r\n a = ArrayLike()\r\n b = ArrayLike()\r\n c = ArrayLike()\r\n assert_equal(list(_block_dispatcher(a)), [a])\r\n assert_equal(list(_block_dispatcher([a])), [a])\r\n assert_equal(list(_block_dispatcher([a, b])), [a, b])\r\n assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c])\r\n # don't recurse into non-lists\r\n assert_equal(list(_block_dispatcher((a, b))), [(a, b)])\r\n",
"\r\n# http://www.absoft.com/literature/osxuserguide.pdf\r\n# http://www.absoft.com/documentation.html\r\n\r\n# Notes:\r\n# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py\r\n# generated extension modules (works for f2py v2.45.241_1936 and up)\r\nimport os\r\n\r\nfrom numpy.distutils.cpuinfo import cpu\r\nfrom numpy.distutils.fcompiler import FCompiler, dummy_fortran_file\r\nfrom numpy.distutils.misc_util import cyg2win32\r\n\r\ncompilers = ['AbsoftFCompiler']\r\n\r\nclass AbsoftFCompiler(FCompiler):\r\n\r\n compiler_type = 'absoft'\r\n description = 'Absoft Corp Fortran Compiler'\r\n #version_pattern = r'FORTRAN 77 Compiler (?P<version>[^\\s*,]*).*?Absoft Corp'\r\n version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\\\r\n r' (?P<version>[^\\s*,]*)(.*?Absoft Corp|)'\r\n\r\n # on windows: f90 -V -c dummy.f\r\n # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 
1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16\r\n\r\n # samt5735(8)$ f90 -V -c dummy.f\r\n # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0\r\n # Note that fink installs g77 as f77, so need to use f90 for detection.\r\n\r\n executables = {\r\n 'version_cmd' : None, # set by update_executables\r\n 'compiler_f77' : [\"f77\"],\r\n 'compiler_fix' : [\"f90\"],\r\n 'compiler_f90' : [\"f90\"],\r\n 'linker_so' : [\"<F90>\"],\r\n 'archiver' : [\"ar\", \"-cr\"],\r\n 'ranlib' : [\"ranlib\"]\r\n }\r\n\r\n if os.name=='nt':\r\n library_switch = '/out:' #No space after /out:!\r\n\r\n module_dir_switch = None\r\n module_include_switch = '-p'\r\n\r\n def update_executables(self):\r\n f = cyg2win32(dummy_fortran_file())\r\n self.executables['version_cmd'] = ['<F90>', '-V', '-c',\r\n f+'.f', '-o', f+'.o']\r\n\r\n def get_flags_linker_so(self):\r\n if os.name=='nt':\r\n opt = ['/dll']\r\n # The \"-K shared\" switches are being left in for pre-9.0 versions\r\n # of Absoft though I don't think versions earlier than 9 can\r\n # actually be used to build shared libraries. 
In fact, version\r\n # 8 of Absoft doesn't recognize \"-K shared\" and will fail.\r\n elif self.get_version() >= '9.0':\r\n opt = ['-shared']\r\n else:\r\n opt = [\"-K\", \"shared\"]\r\n return opt\r\n\r\n def library_dir_option(self, dir):\r\n if os.name=='nt':\r\n return ['-link', '/PATH:%s' % (dir)]\r\n return \"-L\" + dir\r\n\r\n def library_option(self, lib):\r\n if os.name=='nt':\r\n return '%s.lib' % (lib)\r\n return \"-l\" + lib\r\n\r\n def get_library_dirs(self):\r\n opt = FCompiler.get_library_dirs(self)\r\n d = os.environ.get('ABSOFT')\r\n if d:\r\n if self.get_version() >= '10.0':\r\n # use shared libraries, the static libraries were not compiled -fPIC\r\n prefix = 'sh'\r\n else:\r\n prefix = ''\r\n if cpu.is_64bit():\r\n suffix = '64'\r\n else:\r\n suffix = ''\r\n opt.append(os.path.join(d, '%slib%s' % (prefix, suffix)))\r\n return opt\r\n\r\n def get_libraries(self):\r\n opt = FCompiler.get_libraries(self)\r\n if self.get_version() >= '11.0':\r\n opt.extend(['af90math', 'afio', 'af77math', 'amisc'])\r\n elif self.get_version() >= '10.0':\r\n opt.extend(['af90math', 'afio', 'af77math', 'U77'])\r\n elif self.get_version() >= '8.0':\r\n opt.extend(['f90math', 'fio', 'f77math', 'U77'])\r\n else:\r\n opt.extend(['fio', 'f90math', 'fmath', 'U77'])\r\n if os.name =='nt':\r\n opt.append('COMDLG32')\r\n return opt\r\n\r\n def get_flags(self):\r\n opt = FCompiler.get_flags(self)\r\n if os.name != 'nt':\r\n opt.extend(['-s'])\r\n if self.get_version():\r\n if self.get_version()>='8.2':\r\n opt.append('-fpic')\r\n return opt\r\n\r\n def get_flags_f77(self):\r\n opt = FCompiler.get_flags_f77(self)\r\n opt.extend(['-N22', '-N90', '-N110'])\r\n v = self.get_version()\r\n if os.name == 'nt':\r\n if v and v>='8.0':\r\n opt.extend(['-f', '-N15'])\r\n else:\r\n opt.append('-f')\r\n if v:\r\n if v<='4.6':\r\n opt.append('-B108')\r\n else:\r\n # Though -N15 is undocumented, it works with\r\n # Absoft 8.0 on Linux\r\n opt.append('-N15')\r\n return opt\r\n\r\n def 
get_flags_f90(self):\r\n opt = FCompiler.get_flags_f90(self)\r\n opt.extend([\"-YCFRL=1\", \"-YCOM_NAMES=LCS\", \"-YCOM_PFX\", \"-YEXT_PFX\",\r\n \"-YCOM_SFX=_\", \"-YEXT_SFX=_\", \"-YEXT_NAMES=LCS\"])\r\n if self.get_version():\r\n if self.get_version()>'4.6':\r\n opt.extend([\"-YDEALLOC=ALL\"])\r\n return opt\r\n\r\n def get_flags_fix(self):\r\n opt = FCompiler.get_flags_fix(self)\r\n opt.extend([\"-YCFRL=1\", \"-YCOM_NAMES=LCS\", \"-YCOM_PFX\", \"-YEXT_PFX\",\r\n \"-YCOM_SFX=_\", \"-YEXT_SFX=_\", \"-YEXT_NAMES=LCS\"])\r\n opt.extend([\"-f\", \"fixed\"])\r\n return opt\r\n\r\n def get_flags_opt(self):\r\n opt = ['-O']\r\n return opt\r\n\r\nif __name__ == '__main__':\r\n from distutils import log\r\n log.set_verbosity(2)\r\n from numpy.distutils import customized_fcompiler\r\n print(customized_fcompiler(compiler='absoft').get_version())\r\n",
"import sys\r\n\r\ndef configuration(parent_package='',top_path=None):\r\n from numpy.distutils.misc_util import Configuration\r\n config = Configuration('fft', parent_package, top_path)\r\n\r\n config.add_subpackage('tests')\r\n\r\n # AIX needs to be told to use large file support - at all times\r\n defs = [('_LARGE_FILES', None)] if sys.platform[:3] == \"aix\" else []\r\n # Configure pocketfft_internal\r\n config.add_extension('_pocketfft_internal',\r\n sources=['_pocketfft.c'],\r\n define_macros=defs,\r\n )\r\n\r\n return config\r\n\r\nif __name__ == '__main__':\r\n from numpy.distutils.core import setup\r\n setup(configuration=configuration)\r\n",
"from functools import reduce\r\n\r\nimport numpy as np\r\nimport numpy.core.umath as umath\r\nimport numpy.core.fromnumeric as fromnumeric\r\nfrom numpy.testing import (\r\n assert_, assert_raises, assert_equal,\r\n )\r\nfrom numpy.ma import (\r\n MaskType, MaskedArray, absolute, add, all, allclose, allequal, alltrue,\r\n arange, arccos, arcsin, arctan, arctan2, array, average, choose,\r\n concatenate, conjugate, cos, cosh, count, divide, equal, exp, filled,\r\n getmask, greater, greater_equal, inner, isMaskedArray, less,\r\n less_equal, log, log10, make_mask, masked, masked_array, masked_equal,\r\n masked_greater, masked_greater_equal, masked_inside, masked_less,\r\n masked_less_equal, masked_not_equal, masked_outside,\r\n masked_print_option, masked_values, masked_where, maximum, minimum,\r\n multiply, nomask, nonzero, not_equal, ones, outer, product, put, ravel,\r\n repeat, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, sum,\r\n take, tan, tanh, transpose, where, zeros,\r\n )\r\nfrom numpy.compat import pickle\r\n\r\npi = np.pi\r\n\r\n\r\ndef eq(v, w, msg=''):\r\n result = allclose(v, w)\r\n if not result:\r\n print(\"Not eq:%s\\n%s\\n----%s\" % (msg, str(v), str(w)))\r\n return result\r\n\r\n\r\nclass TestMa:\r\n\r\n def setup(self):\r\n x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])\r\n y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])\r\n a10 = 10.\r\n m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\r\n m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]\r\n xm = array(x, mask=m1)\r\n ym = array(y, mask=m2)\r\n z = np.array([-.5, 0., .5, .8])\r\n zm = array(z, mask=[0, 1, 0, 0])\r\n xf = np.where(m1, 1e+20, x)\r\n s = x.shape\r\n xm.set_fill_value(1e+20)\r\n self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s)\r\n\r\n def test_testBasic1d(self):\r\n # Test of basic array creation and properties in 1 dimension.\r\n (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d\r\n assert_(not isMaskedArray(x))\r\n 
assert_(isMaskedArray(xm))\r\n assert_equal(shape(xm), s)\r\n assert_equal(xm.shape, s)\r\n assert_equal(xm.dtype, x.dtype)\r\n assert_equal(xm.size, reduce(lambda x, y:x * y, s))\r\n assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))\r\n assert_(eq(xm, xf))\r\n assert_(eq(filled(xm, 1.e20), xf))\r\n assert_(eq(x, xm))\r\n\r\n def test_testBasic2d(self):\r\n # Test of basic array creation and properties in 2 dimensions.\r\n for s in [(4, 3), (6, 2)]:\r\n (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d\r\n x.shape = s\r\n y.shape = s\r\n xm.shape = s\r\n ym.shape = s\r\n xf.shape = s\r\n\r\n assert_(not isMaskedArray(x))\r\n assert_(isMaskedArray(xm))\r\n assert_equal(shape(xm), s)\r\n assert_equal(xm.shape, s)\r\n assert_equal(xm.size, reduce(lambda x, y:x * y, s))\r\n assert_equal(count(xm),\r\n len(m1) - reduce(lambda x, y:x + y, m1))\r\n assert_(eq(xm, xf))\r\n assert_(eq(filled(xm, 1.e20), xf))\r\n assert_(eq(x, xm))\r\n self.setup()\r\n\r\n def test_testArithmetic(self):\r\n # Test of basic arithmetic.\r\n (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d\r\n a2d = array([[1, 2], [0, 4]])\r\n a2dm = masked_array(a2d, [[0, 0], [1, 0]])\r\n assert_(eq(a2d * a2d, a2d * a2dm))\r\n assert_(eq(a2d + a2d, a2d + a2dm))\r\n assert_(eq(a2d - a2d, a2d - a2dm))\r\n for s in [(12,), (4, 3), (2, 6)]:\r\n x = x.reshape(s)\r\n y = y.reshape(s)\r\n xm = xm.reshape(s)\r\n ym = ym.reshape(s)\r\n xf = xf.reshape(s)\r\n assert_(eq(-x, -xm))\r\n assert_(eq(x + y, xm + ym))\r\n assert_(eq(x - y, xm - ym))\r\n assert_(eq(x * y, xm * ym))\r\n with np.errstate(divide='ignore', invalid='ignore'):\r\n assert_(eq(x / y, xm / ym))\r\n assert_(eq(a10 + y, a10 + ym))\r\n assert_(eq(a10 - y, a10 - ym))\r\n assert_(eq(a10 * y, a10 * ym))\r\n with np.errstate(divide='ignore', invalid='ignore'):\r\n assert_(eq(a10 / y, a10 / ym))\r\n assert_(eq(x + a10, xm + a10))\r\n assert_(eq(x - a10, xm - a10))\r\n assert_(eq(x * a10, xm * a10))\r\n assert_(eq(x / a10, xm / a10))\r\n 
assert_(eq(x ** 2, xm ** 2))\r\n assert_(eq(abs(x) ** 2.5, abs(xm) ** 2.5))\r\n assert_(eq(x ** y, xm ** ym))\r\n assert_(eq(np.add(x, y), add(xm, ym)))\r\n assert_(eq(np.subtract(x, y), subtract(xm, ym)))\r\n assert_(eq(np.multiply(x, y), multiply(xm, ym)))\r\n with np.errstate(divide='ignore', invalid='ignore'):\r\n assert_(eq(np.divide(x, y), divide(xm, ym)))\r\n\r\n def test_testMixedArithmetic(self):\r\n na = np.array([1])\r\n ma = array([1])\r\n assert_(isinstance(na + ma, MaskedArray))\r\n assert_(isinstance(ma + na, MaskedArray))\r\n\r\n def test_testUfuncs1(self):\r\n # Test various functions such as sin, cos.\r\n (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d\r\n assert_(eq(np.cos(x), cos(xm)))\r\n assert_(eq(np.cosh(x), cosh(xm)))\r\n assert_(eq(np.sin(x), sin(xm)))\r\n assert_(eq(np.sinh(x), sinh(xm)))\r\n assert_(eq(np.tan(x), tan(xm)))\r\n assert_(eq(np.tanh(x), tanh(xm)))\r\n with np.errstate(divide='ignore', invalid='ignore'):\r\n assert_(eq(np.sqrt(abs(x)), sqrt(xm)))\r\n assert_(eq(np.log(abs(x)), log(xm)))\r\n assert_(eq(np.log10(abs(x)), log10(xm)))\r\n assert_(eq(np.exp(x), exp(xm)))\r\n assert_(eq(np.arcsin(z), arcsin(zm)))\r\n assert_(eq(np.arccos(z), arccos(zm)))\r\n assert_(eq(np.arctan(z), arctan(zm)))\r\n assert_(eq(np.arctan2(x, y), arctan2(xm, ym)))\r\n assert_(eq(np.absolute(x), absolute(xm)))\r\n assert_(eq(np.equal(x, y), equal(xm, ym)))\r\n assert_(eq(np.not_equal(x, y), not_equal(xm, ym)))\r\n assert_(eq(np.less(x, y), less(xm, ym)))\r\n assert_(eq(np.greater(x, y), greater(xm, ym)))\r\n assert_(eq(np.less_equal(x, y), less_equal(xm, ym)))\r\n assert_(eq(np.greater_equal(x, y), greater_equal(xm, ym)))\r\n assert_(eq(np.conjugate(x), conjugate(xm)))\r\n assert_(eq(np.concatenate((x, y)), concatenate((xm, ym))))\r\n assert_(eq(np.concatenate((x, y)), concatenate((x, y))))\r\n assert_(eq(np.concatenate((x, y)), concatenate((xm, y))))\r\n assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))\r\n\r\n def 
test_xtestCount(self):\r\n # Test count\r\n ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])\r\n assert_(count(ott).dtype.type is np.intp)\r\n assert_equal(3, count(ott))\r\n assert_equal(1, count(1))\r\n assert_(eq(0, array(1, mask=[1])))\r\n ott = ott.reshape((2, 2))\r\n assert_(count(ott).dtype.type is np.intp)\r\n assert_(isinstance(count(ott, 0), np.ndarray))\r\n assert_(count(ott).dtype.type is np.intp)\r\n assert_(eq(3, count(ott)))\r\n assert_(getmask(count(ott, 0)) is nomask)\r\n assert_(eq([1, 2], count(ott, 0)))\r\n\r\n def test_testMinMax(self):\r\n # Test minimum and maximum.\r\n (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d\r\n xr = np.ravel(x) # max doesn't work if shaped\r\n xmr = ravel(xm)\r\n\r\n # true because of careful selection of data\r\n assert_(eq(max(xr), maximum.reduce(xmr)))\r\n assert_(eq(min(xr), minimum.reduce(xmr)))\r\n\r\n def test_testAddSumProd(self):\r\n # Test add, sum, product.\r\n (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d\r\n assert_(eq(np.add.reduce(x), add.reduce(x)))\r\n assert_(eq(np.add.accumulate(x), add.accumulate(x)))\r\n assert_(eq(4, sum(array(4), axis=0)))\r\n assert_(eq(4, sum(array(4), axis=0)))\r\n assert_(eq(np.sum(x, axis=0), sum(x, axis=0)))\r\n assert_(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))\r\n assert_(eq(np.sum(x, 0), sum(x, 0)))\r\n assert_(eq(np.product(x, axis=0), product(x, axis=0)))\r\n assert_(eq(np.product(x, 0), product(x, 0)))\r\n assert_(eq(np.product(filled(xm, 1), axis=0),\r\n product(xm, axis=0)))\r\n if len(s) > 1:\r\n assert_(eq(np.concatenate((x, y), 1),\r\n concatenate((xm, ym), 1)))\r\n assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1)))\r\n assert_(eq(np.sum(x, 1), sum(x, 1)))\r\n assert_(eq(np.product(x, 1), product(x, 1)))\r\n\r\n def test_testCI(self):\r\n # Test of conversions and indexing\r\n x1 = np.array([1, 2, 4, 3])\r\n x2 = array(x1, mask=[1, 0, 0, 0])\r\n x3 = array(x1, mask=[0, 1, 0, 1])\r\n x4 = array(x1)\r\n # test conversion to strings\r\n str(x2) # 
raises?\r\n repr(x2) # raises?\r\n assert_(eq(np.sort(x1), sort(x2, fill_value=0)))\r\n # tests of indexing\r\n assert_(type(x2[1]) is type(x1[1]))\r\n assert_(x1[1] == x2[1])\r\n assert_(x2[0] is masked)\r\n assert_(eq(x1[2], x2[2]))\r\n assert_(eq(x1[2:5], x2[2:5]))\r\n assert_(eq(x1[:], x2[:]))\r\n assert_(eq(x1[1:], x3[1:]))\r\n x1[2] = 9\r\n x2[2] = 9\r\n assert_(eq(x1, x2))\r\n x1[1:3] = 99\r\n x2[1:3] = 99\r\n assert_(eq(x1, x2))\r\n x2[1] = masked\r\n assert_(eq(x1, x2))\r\n x2[1:3] = masked\r\n assert_(eq(x1, x2))\r\n x2[:] = x1\r\n x2[1] = masked\r\n assert_(allequal(getmask(x2), array([0, 1, 0, 0])))\r\n x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])\r\n assert_(allequal(getmask(x3), array([0, 1, 1, 0])))\r\n x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])\r\n assert_(allequal(getmask(x4), array([0, 1, 1, 0])))\r\n assert_(allequal(x4, array([1, 2, 3, 4])))\r\n x1 = np.arange(5) * 1.0\r\n x2 = masked_values(x1, 3.0)\r\n assert_(eq(x1, x2))\r\n assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))\r\n assert_(eq(3.0, x2.fill_value))\r\n x1 = array([1, 'hello', 2, 3], object)\r\n x2 = np.array([1, 'hello', 2, 3], object)\r\n s1 = x1[1]\r\n s2 = x2[1]\r\n assert_equal(type(s2), str)\r\n assert_equal(type(s1), str)\r\n assert_equal(s1, s2)\r\n assert_(x1[1:1].shape == (0,))\r\n\r\n def test_testCopySize(self):\r\n # Tests of some subtle points of copying and sizing.\r\n n = [0, 0, 1, 0, 0]\r\n m = make_mask(n)\r\n m2 = make_mask(m)\r\n assert_(m is m2)\r\n m3 = make_mask(m, copy=True)\r\n assert_(m is not m3)\r\n\r\n x1 = np.arange(5)\r\n y1 = array(x1, mask=m)\r\n assert_(y1._data is not x1)\r\n assert_(allequal(x1, y1._data))\r\n assert_(y1._mask is m)\r\n\r\n y1a = array(y1, copy=0)\r\n # For copy=False, one might expect that the array would just\r\n # passed on, i.e., that it would be \"is\" instead of \"==\".\r\n # See gh-4043 for discussion.\r\n assert_(y1a._mask.__array_interface__ ==\r\n y1._mask.__array_interface__)\r\n\r\n y2 = array(x1, 
mask=m3, copy=0)\r\n assert_(y2._mask is m3)\r\n assert_(y2[2] is masked)\r\n y2[2] = 9\r\n assert_(y2[2] is not masked)\r\n assert_(y2._mask is m3)\r\n assert_(allequal(y2.mask, 0))\r\n\r\n y2a = array(x1, mask=m, copy=1)\r\n assert_(y2a._mask is not m)\r\n assert_(y2a[2] is masked)\r\n y2a[2] = 9\r\n assert_(y2a[2] is not masked)\r\n assert_(y2a._mask is not m)\r\n assert_(allequal(y2a.mask, 0))\r\n\r\n y3 = array(x1 * 1.0, mask=m)\r\n assert_(filled(y3).dtype is (x1 * 1.0).dtype)\r\n\r\n x4 = arange(4)\r\n x4[2] = masked\r\n y4 = resize(x4, (8,))\r\n assert_(eq(concatenate([x4, x4]), y4))\r\n assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))\r\n y5 = repeat(x4, (2, 2, 2, 2), axis=0)\r\n assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))\r\n y6 = repeat(x4, 2, axis=0)\r\n assert_(eq(y5, y6))\r\n\r\n def test_testPut(self):\r\n # Test of put\r\n d = arange(5)\r\n n = [0, 0, 0, 1, 1]\r\n m = make_mask(n)\r\n m2 = m.copy()\r\n x = array(d, mask=m)\r\n assert_(x[3] is masked)\r\n assert_(x[4] is masked)\r\n x[[1, 4]] = [10, 40]\r\n assert_(x._mask is m)\r\n assert_(x[3] is masked)\r\n assert_(x[4] is not masked)\r\n assert_(eq(x, [0, 10, 2, -1, 40]))\r\n\r\n x = array(d, mask=m2, copy=True)\r\n x.put([0, 1, 2], [-1, 100, 200])\r\n assert_(x._mask is not m2)\r\n assert_(x[3] is masked)\r\n assert_(x[4] is masked)\r\n assert_(eq(x, [-1, 100, 200, 0, 0]))\r\n\r\n def test_testPut2(self):\r\n # Test of put\r\n d = arange(5)\r\n x = array(d, mask=[0, 0, 0, 0, 0])\r\n z = array([10, 40], mask=[1, 0])\r\n assert_(x[2] is not masked)\r\n assert_(x[3] is not masked)\r\n x[2:4] = z\r\n assert_(x[2] is masked)\r\n assert_(x[3] is not masked)\r\n assert_(eq(x, [0, 1, 10, 40, 4]))\r\n\r\n d = arange(5)\r\n x = array(d, mask=[0, 0, 0, 0, 0])\r\n y = x[2:4]\r\n z = array([10, 40], mask=[1, 0])\r\n assert_(x[2] is not masked)\r\n assert_(x[3] is not masked)\r\n y[:] = z\r\n assert_(y[0] is masked)\r\n assert_(y[1] is not masked)\r\n assert_(eq(y, [10, 40]))\r\n assert_(x[2] is masked)\r\n 
assert_(x[3] is not masked)\r\n assert_(eq(x, [0, 1, 10, 40, 4]))\r\n\r\n def test_testMaPut(self):\r\n (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d\r\n m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1]\r\n i = np.nonzero(m)[0]\r\n put(ym, i, zm)\r\n assert_(all(take(ym, i, axis=0) == zm))\r\n\r\n def test_testOddFeatures(self):\r\n # Test of other odd features\r\n x = arange(20)\r\n x = x.reshape(4, 5)\r\n x.flat[5] = 12\r\n assert_(x[1, 0] == 12)\r\n z = x + 10j * x\r\n assert_(eq(z.real, x))\r\n assert_(eq(z.imag, 10 * x))\r\n assert_(eq((z * conjugate(z)).real, 101 * x * x))\r\n z.imag[...] = 0.0\r\n\r\n x = arange(10)\r\n x[3] = masked\r\n assert_(str(x[3]) == str(masked))\r\n c = x >= 8\r\n assert_(count(where(c, masked, masked)) == 0)\r\n assert_(shape(where(c, masked, masked)) == c.shape)\r\n z = where(c, x, masked)\r\n assert_(z.dtype is x.dtype)\r\n assert_(z[3] is masked)\r\n assert_(z[4] is masked)\r\n assert_(z[7] is masked)\r\n assert_(z[8] is not masked)\r\n assert_(z[9] is not masked)\r\n assert_(eq(x, z))\r\n z = where(c, masked, x)\r\n assert_(z.dtype is x.dtype)\r\n assert_(z[3] is masked)\r\n assert_(z[4] is not masked)\r\n assert_(z[7] is not masked)\r\n assert_(z[8] is masked)\r\n assert_(z[9] is masked)\r\n z = masked_where(c, x)\r\n assert_(z.dtype is x.dtype)\r\n assert_(z[3] is masked)\r\n assert_(z[4] is not masked)\r\n assert_(z[7] is not masked)\r\n assert_(z[8] is masked)\r\n assert_(z[9] is masked)\r\n assert_(eq(x, z))\r\n x = array([1., 2., 3., 4., 5.])\r\n c = array([1, 1, 1, 0, 0])\r\n x[2] = masked\r\n z = where(c, x, -x)\r\n assert_(eq(z, [1., 2., 0., -4., -5]))\r\n c[0] = masked\r\n z = where(c, x, -x)\r\n assert_(eq(z, [1., 2., 0., -4., -5]))\r\n assert_(z[0] is masked)\r\n assert_(z[1] is not masked)\r\n assert_(z[2] is masked)\r\n assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))\r\n assert_(eq(masked_where(greater_equal(x, 2), x),\r\n masked_greater_equal(x, 2)))\r\n assert_(eq(masked_where(less(x, 2), x), 
masked_less(x, 2)))\r\n assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))\r\n assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))\r\n assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))\r\n assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))\r\n assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))\r\n assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))\r\n assert_(eq(masked_inside(array(list(range(5)),\r\n mask=[1, 0, 0, 0, 0]), 1, 3).mask,\r\n [1, 1, 1, 1, 0]))\r\n assert_(eq(masked_outside(array(list(range(5)),\r\n mask=[0, 1, 0, 0, 0]), 1, 3).mask,\r\n [1, 1, 0, 0, 1]))\r\n assert_(eq(masked_equal(array(list(range(5)),\r\n mask=[1, 0, 0, 0, 0]), 2).mask,\r\n [1, 0, 1, 0, 0]))\r\n assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],\r\n mask=[1, 0, 0, 0, 0]), 2).mask,\r\n [1, 0, 1, 0, 1]))\r\n assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),\r\n [99, 99, 3, 4, 5]))\r\n atest = ones((10, 10, 10), dtype=np.float32)\r\n btest = zeros(atest.shape, MaskType)\r\n ctest = masked_where(btest, atest)\r\n assert_(eq(atest, ctest))\r\n z = choose(c, (-x, x))\r\n assert_(eq(z, [1., 2., 0., -4., -5]))\r\n assert_(z[0] is masked)\r\n assert_(z[1] is not masked)\r\n assert_(z[2] is masked)\r\n x = arange(6)\r\n x[5] = masked\r\n y = arange(6) * 10\r\n y[2] = masked\r\n c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])\r\n cm = c.filled(1)\r\n z = where(c, x, y)\r\n zm = where(cm, x, y)\r\n assert_(eq(z, zm))\r\n assert_(getmask(zm) is nomask)\r\n assert_(eq(zm, [0, 1, 2, 30, 40, 50]))\r\n z = where(c, masked, 1)\r\n assert_(eq(z, [99, 99, 99, 1, 1, 1]))\r\n z = where(c, 1, masked)\r\n assert_(eq(z, [99, 1, 1, 99, 99, 99]))\r\n\r\n def test_testMinMax2(self):\r\n # Test of minimum, maximum.\r\n assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]))\r\n assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]))\r\n x = arange(5)\r\n y = arange(5) - 2\r\n x[3] = 
masked\r\n y[0] = masked\r\n assert_(eq(minimum(x, y), where(less(x, y), x, y)))\r\n assert_(eq(maximum(x, y), where(greater(x, y), x, y)))\r\n assert_(minimum.reduce(x) == 0)\r\n assert_(maximum.reduce(x) == 4)\r\n\r\n def test_testTakeTransposeInnerOuter(self):\r\n # Test of take, transpose, inner, outer products\r\n x = arange(24)\r\n y = np.arange(24)\r\n x[5:6] = masked\r\n x = x.reshape(2, 3, 4)\r\n y = y.reshape(2, 3, 4)\r\n assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))\r\n assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))\r\n assert_(eq(np.inner(filled(x, 0), filled(y, 0)),\r\n inner(x, y)))\r\n assert_(eq(np.outer(filled(x, 0), filled(y, 0)),\r\n outer(x, y)))\r\n y = array(['abc', 1, 'def', 2, 3], object)\r\n y[2] = masked\r\n t = take(y, [0, 3, 4])\r\n assert_(t[0] == 'abc')\r\n assert_(t[1] == 2)\r\n assert_(t[2] == 3)\r\n\r\n def test_testInplace(self):\r\n # Test of inplace operations and rich comparisons\r\n y = arange(10)\r\n\r\n x = arange(10)\r\n xm = arange(10)\r\n xm[2] = masked\r\n x += 1\r\n assert_(eq(x, y + 1))\r\n xm += 1\r\n assert_(eq(x, y + 1))\r\n\r\n x = arange(10)\r\n xm = arange(10)\r\n xm[2] = masked\r\n x -= 1\r\n assert_(eq(x, y - 1))\r\n xm -= 1\r\n assert_(eq(xm, y - 1))\r\n\r\n x = arange(10) * 1.0\r\n xm = arange(10) * 1.0\r\n xm[2] = masked\r\n x *= 2.0\r\n assert_(eq(x, y * 2))\r\n xm *= 2.0\r\n assert_(eq(xm, y * 2))\r\n\r\n x = arange(10) * 2\r\n xm = arange(10)\r\n xm[2] = masked\r\n x //= 2\r\n assert_(eq(x, y))\r\n xm //= 2\r\n assert_(eq(x, y))\r\n\r\n x = arange(10) * 1.0\r\n xm = arange(10) * 1.0\r\n xm[2] = masked\r\n x /= 2.0\r\n assert_(eq(x, y / 2.0))\r\n xm /= arange(10)\r\n assert_(eq(xm, ones((10,))))\r\n\r\n x = arange(10).astype(np.float32)\r\n xm = arange(10)\r\n xm[2] = masked\r\n x += 1.\r\n assert_(eq(x, y + 1.))\r\n\r\n def test_testPickle(self):\r\n # Test of pickling\r\n x = arange(12)\r\n x[4:10:2] = masked\r\n x = x.reshape(4, 3)\r\n for proto in range(2, 
pickle.HIGHEST_PROTOCOL + 1):\r\n s = pickle.dumps(x, protocol=proto)\r\n y = pickle.loads(s)\r\n assert_(eq(x, y))\r\n\r\n def test_testMasked(self):\r\n # Test of masked element\r\n xx = arange(6)\r\n xx[1] = masked\r\n assert_(str(masked) == '--')\r\n assert_(xx[1] is masked)\r\n assert_equal(filled(xx[1], 0), 0)\r\n\r\n def test_testAverage1(self):\r\n # Test of average.\r\n ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])\r\n assert_(eq(2.0, average(ott, axis=0)))\r\n assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))\r\n result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True)\r\n assert_(eq(2.0, result))\r\n assert_(wts == 4.0)\r\n ott[:] = masked\r\n assert_(average(ott, axis=0) is masked)\r\n ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])\r\n ott = ott.reshape(2, 2)\r\n ott[:, 1] = masked\r\n assert_(eq(average(ott, axis=0), [2.0, 0.0]))\r\n assert_(average(ott, axis=1)[0] is masked)\r\n assert_(eq([2., 0.], average(ott, axis=0)))\r\n result, wts = average(ott, axis=0, returned=True)\r\n assert_(eq(wts, [1., 0.]))\r\n\r\n def test_testAverage2(self):\r\n # More tests of average.\r\n w1 = [0, 1, 1, 1, 1, 0]\r\n w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]\r\n x = arange(6)\r\n assert_(allclose(average(x, axis=0), 2.5))\r\n assert_(allclose(average(x, axis=0, weights=w1), 2.5))\r\n y = array([arange(6), 2.0 * arange(6)])\r\n assert_(allclose(average(y, None),\r\n np.add.reduce(np.arange(6)) * 3. / 12.))\r\n assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.))\r\n assert_(allclose(average(y, axis=1),\r\n [average(x, axis=0), average(x, axis=0)*2.0]))\r\n assert_(allclose(average(y, None, weights=w2), 20. 
/ 6.))\r\n assert_(allclose(average(y, axis=0, weights=w2),\r\n [0., 1., 2., 3., 4., 10.]))\r\n assert_(allclose(average(y, axis=1),\r\n [average(x, axis=0), average(x, axis=0)*2.0]))\r\n m1 = zeros(6)\r\n m2 = [0, 0, 1, 1, 0, 0]\r\n m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]\r\n m4 = ones(6)\r\n m5 = [0, 1, 1, 1, 1, 1]\r\n assert_(allclose(average(masked_array(x, m1), axis=0), 2.5))\r\n assert_(allclose(average(masked_array(x, m2), axis=0), 2.5))\r\n assert_(average(masked_array(x, m4), axis=0) is masked)\r\n assert_equal(average(masked_array(x, m5), axis=0), 0.0)\r\n assert_equal(count(average(masked_array(x, m4), axis=0)), 0)\r\n z = masked_array(y, m3)\r\n assert_(allclose(average(z, None), 20. / 6.))\r\n assert_(allclose(average(z, axis=0),\r\n [0., 1., 99., 99., 4.0, 7.5]))\r\n assert_(allclose(average(z, axis=1), [2.5, 5.0]))\r\n assert_(allclose(average(z, axis=0, weights=w2),\r\n [0., 1., 99., 99., 4.0, 10.0]))\r\n\r\n a = arange(6)\r\n b = arange(6) * 3\r\n r1, w1 = average([[a, b], [b, a]], axis=1, returned=True)\r\n assert_equal(shape(r1), shape(w1))\r\n assert_equal(r1.shape, w1.shape)\r\n r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True)\r\n assert_equal(shape(w2), shape(r2))\r\n r2, w2 = average(ones((2, 2, 3)), returned=True)\r\n assert_equal(shape(w2), shape(r2))\r\n r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True)\r\n assert_(shape(w2) == shape(r2))\r\n a2d = array([[1, 2], [0, 4]], float)\r\n a2dm = masked_array(a2d, [[0, 0], [1, 0]])\r\n a2da = average(a2d, axis=0)\r\n assert_(eq(a2da, [0.5, 3.0]))\r\n a2dma = average(a2dm, axis=0)\r\n assert_(eq(a2dma, [1.0, 3.0]))\r\n a2dma = average(a2dm, axis=None)\r\n assert_(eq(a2dma, 7. 
/ 3.))\r\n a2dma = average(a2dm, axis=1)\r\n assert_(eq(a2dma, [1.5, 4.0]))\r\n\r\n def test_testToPython(self):\r\n assert_equal(1, int(array(1)))\r\n assert_equal(1.0, float(array(1)))\r\n assert_equal(1, int(array([[[1]]])))\r\n assert_equal(1.0, float(array([[1]])))\r\n assert_raises(TypeError, float, array([1, 1]))\r\n assert_raises(ValueError, bool, array([0, 1]))\r\n assert_raises(ValueError, bool, array([0, 0], mask=[0, 1]))\r\n\r\n def test_testScalarArithmetic(self):\r\n xm = array(0, mask=1)\r\n #TODO FIXME: Find out what the following raises a warning in r8247\r\n with np.errstate(divide='ignore'):\r\n assert_((1 / array(0)).mask)\r\n assert_((1 + xm).mask)\r\n assert_((-xm).mask)\r\n assert_((-xm).mask)\r\n assert_(maximum(xm, xm).mask)\r\n assert_(minimum(xm, xm).mask)\r\n assert_(xm.filled().dtype is xm._data.dtype)\r\n x = array(0, mask=0)\r\n assert_(x.filled() == x._data)\r\n assert_equal(str(xm), str(masked_print_option))\r\n\r\n def test_testArrayMethods(self):\r\n a = array([1, 3, 2])\r\n assert_(eq(a.any(), a._data.any()))\r\n assert_(eq(a.all(), a._data.all()))\r\n assert_(eq(a.argmax(), a._data.argmax()))\r\n assert_(eq(a.argmin(), a._data.argmin()))\r\n assert_(eq(a.choose(0, 1, 2, 3, 4),\r\n a._data.choose(0, 1, 2, 3, 4)))\r\n assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])))\r\n assert_(eq(a.conj(), a._data.conj()))\r\n assert_(eq(a.conjugate(), a._data.conjugate()))\r\n m = array([[1, 2], [3, 4]])\r\n assert_(eq(m.diagonal(), m._data.diagonal()))\r\n assert_(eq(a.sum(), a._data.sum()))\r\n assert_(eq(a.take([1, 2]), a._data.take([1, 2])))\r\n assert_(eq(m.transpose(), m._data.transpose()))\r\n\r\n def test_testArrayAttributes(self):\r\n a = array([1, 3, 2])\r\n assert_equal(a.ndim, 1)\r\n\r\n def test_testAPI(self):\r\n assert_(not [m for m in dir(np.ndarray)\r\n if m not in dir(MaskedArray) and\r\n not m.startswith('_')])\r\n\r\n def test_testSingleElementSubscript(self):\r\n a = array([1, 3, 2])\r\n b = array([1, 3, 2], 
mask=[1, 0, 1])\r\n assert_equal(a[0].shape, ())\r\n assert_equal(b[0].shape, ())\r\n assert_equal(b[1].shape, ())\r\n\r\n\r\nclass TestUfuncs:\r\n def setup(self):\r\n self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),\r\n array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)\r\n\r\n def test_testUfuncRegression(self):\r\n f_invalid_ignore = [\r\n 'sqrt', 'arctanh', 'arcsin', 'arccos',\r\n 'arccosh', 'arctanh', 'log', 'log10', 'divide',\r\n 'true_divide', 'floor_divide', 'remainder', 'fmod']\r\n for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',\r\n 'sin', 'cos', 'tan',\r\n 'arcsin', 'arccos', 'arctan',\r\n 'sinh', 'cosh', 'tanh',\r\n 'arcsinh',\r\n 'arccosh',\r\n 'arctanh',\r\n 'absolute', 'fabs', 'negative',\r\n 'floor', 'ceil',\r\n 'logical_not',\r\n 'add', 'subtract', 'multiply',\r\n 'divide', 'true_divide', 'floor_divide',\r\n 'remainder', 'fmod', 'hypot', 'arctan2',\r\n 'equal', 'not_equal', 'less_equal', 'greater_equal',\r\n 'less', 'greater',\r\n 'logical_and', 'logical_or', 'logical_xor']:\r\n try:\r\n uf = getattr(umath, f)\r\n except AttributeError:\r\n uf = getattr(fromnumeric, f)\r\n mf = getattr(np.ma, f)\r\n args = self.d[:uf.nin]\r\n with np.errstate():\r\n if f in f_invalid_ignore:\r\n np.seterr(invalid='ignore')\r\n if f in ['arctanh', 'log', 'log10']:\r\n np.seterr(divide='ignore')\r\n ur = uf(*args)\r\n mr = mf(*args)\r\n assert_(eq(ur.filled(0), mr.filled(0), f))\r\n assert_(eqmask(ur.mask, mr.mask))\r\n\r\n def test_reduce(self):\r\n a = self.d[0]\r\n assert_(not alltrue(a, axis=0))\r\n assert_(sometrue(a, axis=0))\r\n assert_equal(sum(a[:3], axis=0), 0)\r\n assert_equal(product(a, axis=0), 0)\r\n\r\n def test_minmax(self):\r\n a = arange(1, 13).reshape(3, 4)\r\n amask = masked_where(a < 5, a)\r\n assert_equal(amask.max(), a.max())\r\n assert_equal(amask.min(), 5)\r\n assert_((amask.max(0) == a.max(0)).all())\r\n assert_((amask.min(0) == [5, 6, 7, 8]).all())\r\n assert_(amask.max(1)[0].mask)\r\n 
assert_(amask.min(1)[0].mask)\r\n\r\n def test_nonzero(self):\r\n for t in \"?bhilqpBHILQPfdgFDGO\":\r\n x = array([1, 0, 2, 0], mask=[0, 0, 1, 1])\r\n assert_(eq(nonzero(x), [0]))\r\n\r\n\r\nclass TestArrayMethods:\r\n\r\n def setup(self):\r\n x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,\r\n 8.43, 7.78, 9.865, 5.878, 8.979, 4.732,\r\n 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,\r\n 6.04, 9.63, 7.712, 3.382, 4.489, 6.479,\r\n 7.189, 9.645, 5.395, 4.961, 9.894, 2.893,\r\n 7.357, 9.828, 6.272, 3.758, 6.693, 0.993])\r\n X = x.reshape(6, 6)\r\n XX = x.reshape(3, 2, 2, 3)\r\n\r\n m = np.array([0, 1, 0, 1, 0, 0,\r\n 1, 0, 1, 1, 0, 1,\r\n 0, 0, 0, 1, 0, 1,\r\n 0, 0, 0, 1, 1, 1,\r\n 1, 0, 0, 1, 0, 0,\r\n 0, 0, 1, 0, 1, 0])\r\n mx = array(data=x, mask=m)\r\n mX = array(data=X, mask=m.reshape(X.shape))\r\n mXX = array(data=XX, mask=m.reshape(XX.shape))\r\n\r\n self.d = (x, X, XX, m, mx, mX, mXX)\r\n\r\n def test_trace(self):\r\n (x, X, XX, m, mx, mX, mXX,) = self.d\r\n mXdiag = mX.diagonal()\r\n assert_equal(mX.trace(), mX.diagonal().compressed().sum())\r\n assert_(eq(mX.trace(),\r\n X.trace() - sum(mXdiag.mask * X.diagonal(),\r\n axis=0)))\r\n\r\n def test_clip(self):\r\n (x, X, XX, m, mx, mX, mXX,) = self.d\r\n clipped = mx.clip(2, 8)\r\n assert_(eq(clipped.mask, mx.mask))\r\n assert_(eq(clipped._data, x.clip(2, 8)))\r\n assert_(eq(clipped._data, mx._data.clip(2, 8)))\r\n\r\n def test_ptp(self):\r\n (x, X, XX, m, mx, mX, mXX,) = self.d\r\n (n, m) = X.shape\r\n assert_equal(mx.ptp(), mx.compressed().ptp())\r\n rows = np.zeros(n, np.float_)\r\n cols = np.zeros(m, np.float_)\r\n for k in range(m):\r\n cols[k] = mX[:, k].compressed().ptp()\r\n for k in range(n):\r\n rows[k] = mX[k].compressed().ptp()\r\n assert_(eq(mX.ptp(0), cols))\r\n assert_(eq(mX.ptp(1), rows))\r\n\r\n def test_swapaxes(self):\r\n (x, X, XX, m, mx, mX, mXX,) = self.d\r\n mXswapped = mX.swapaxes(0, 1)\r\n assert_(eq(mXswapped[-1], mX[:, -1]))\r\n mXXswapped = mXX.swapaxes(0, 2)\r\n 
assert_equal(mXXswapped.shape, (2, 2, 3, 3))\r\n\r\n def test_cumprod(self):\r\n (x, X, XX, m, mx, mX, mXX,) = self.d\r\n mXcp = mX.cumprod(0)\r\n assert_(eq(mXcp._data, mX.filled(1).cumprod(0)))\r\n mXcp = mX.cumprod(1)\r\n assert_(eq(mXcp._data, mX.filled(1).cumprod(1)))\r\n\r\n def test_cumsum(self):\r\n (x, X, XX, m, mx, mX, mXX,) = self.d\r\n mXcp = mX.cumsum(0)\r\n assert_(eq(mXcp._data, mX.filled(0).cumsum(0)))\r\n mXcp = mX.cumsum(1)\r\n assert_(eq(mXcp._data, mX.filled(0).cumsum(1)))\r\n\r\n def test_varstd(self):\r\n (x, X, XX, m, mx, mX, mXX,) = self.d\r\n assert_(eq(mX.var(axis=None), mX.compressed().var()))\r\n assert_(eq(mX.std(axis=None), mX.compressed().std()))\r\n assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape))\r\n assert_(eq(mX.var().shape, X.var().shape))\r\n (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))\r\n for k in range(6):\r\n assert_(eq(mXvar1[k], mX[k].compressed().var()))\r\n assert_(eq(mXvar0[k], mX[:, k].compressed().var()))\r\n assert_(eq(np.sqrt(mXvar0[k]),\r\n mX[:, k].compressed().std()))\r\n\r\n\r\ndef eqmask(m1, m2):\r\n if m1 is nomask:\r\n return m2 is nomask\r\n if m2 is nomask:\r\n return m1 is nomask\r\n return (m1 == m2).all()\r\n"
] | [
[
"numpy.sqrt"
],
[
"numpy.core.numeric.ceil",
"numpy.core.numeric.signbit",
"numpy.core.numeric.isinf",
"numpy.core.numeric.logical_and",
"numpy.core.overrides.array_function_dispatch",
"numpy.core.numeric.greater_equal"
],
[
"numpy.arange",
"numpy.testing.assert_raises",
"numpy.testing.assert_equal"
],
[
"numpy.core.vstack",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.random.randn",
"numpy.moveaxis",
"numpy.core.stack",
"numpy.testing.assert_equal",
"numpy.arange",
"numpy.eye",
"numpy.stack",
"numpy.core.hstack",
"numpy.zeros",
"numpy.core.atleast_3d",
"numpy.testing.assert_raises_regex",
"numpy.core.array",
"numpy.core.shape_base._block_dispatcher",
"numpy.core.arange",
"numpy.core.shape_base._block_setup",
"numpy.testing.assert_raises",
"numpy.core.shape_base._block_concatenate",
"numpy.testing.assert_",
"numpy.core.atleast_2d",
"numpy.array",
"numpy.testing.assert_warns",
"numpy.core.atleast_1d",
"numpy.core.concatenate",
"numpy.int32",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.core.block",
"numpy.core.shape_base._block_slicing",
"numpy.empty"
],
[
"numpy.distutils.fcompiler.FCompiler.get_libraries",
"numpy.distutils.fcompiler.FCompiler.get_flags",
"numpy.distutils.customized_fcompiler",
"numpy.distutils.fcompiler.dummy_fortran_file",
"numpy.distutils.fcompiler.FCompiler.get_flags_fix",
"numpy.distutils.fcompiler.FCompiler.get_flags_f90",
"numpy.distutils.cpuinfo.cpu.is_64bit",
"numpy.distutils.fcompiler.FCompiler.get_library_dirs",
"numpy.distutils.fcompiler.FCompiler.get_flags_f77"
],
[
"numpy.distutils.misc_util.Configuration",
"numpy.distutils.core.setup"
],
[
"numpy.sqrt",
"numpy.arctan2",
"numpy.ma.subtract",
"numpy.ma.greater",
"numpy.ma.masked_where",
"numpy.where",
"numpy.exp",
"numpy.sin",
"numpy.less_equal",
"numpy.ma.arctan",
"numpy.zeros",
"numpy.ma.put",
"numpy.multiply",
"numpy.ma.concatenate",
"numpy.ma.absolute",
"numpy.equal",
"numpy.array",
"numpy.tanh",
"numpy.sum",
"numpy.ma.masked_greater_equal",
"numpy.absolute",
"numpy.ma.make_mask",
"numpy.ma.minimum",
"numpy.ma.masked_values",
"numpy.ma.masked_array",
"numpy.ma.not_equal",
"numpy.add",
"numpy.compat.pickle.loads",
"numpy.arctan",
"numpy.ma.cosh",
"numpy.concatenate",
"numpy.seterr",
"numpy.ma.where",
"numpy.ma.getmask",
"numpy.ma.tanh",
"numpy.divide",
"numpy.arcsin",
"numpy.compat.pickle.dumps",
"numpy.less",
"numpy.ma.ravel",
"numpy.ma.masked_greater",
"numpy.greater_equal",
"numpy.ma.arange",
"numpy.ma.product",
"numpy.ma.isMaskedArray",
"numpy.cosh",
"numpy.ma.less",
"numpy.ma.log",
"numpy.arccos",
"numpy.ma.add",
"numpy.tan",
"numpy.testing.assert_",
"numpy.errstate",
"numpy.not_equal",
"numpy.ma.masked_less_equal",
"numpy.add.accumulate",
"numpy.ma.minimum.reduce",
"numpy.ma.filled",
"numpy.ma.add.reduce",
"numpy.ma.take",
"numpy.sinh",
"numpy.ma.arccos",
"numpy.ma.sinh",
"numpy.product",
"numpy.ma.masked_less",
"numpy.take",
"numpy.ma.transpose",
"numpy.ma.outer",
"numpy.conjugate",
"numpy.testing.assert_equal",
"numpy.ma.sum",
"numpy.ma.nonzero",
"numpy.greater",
"numpy.ma.allclose",
"numpy.add.reduce",
"numpy.ma.cos",
"numpy.ma.ones",
"numpy.ma.sort",
"numpy.ma.add.accumulate",
"numpy.nonzero",
"numpy.transpose",
"numpy.ma.tan",
"numpy.ma.arcsin",
"numpy.ma.less_equal",
"numpy.ma.masked_not_equal",
"numpy.ma.maximum",
"numpy.cos",
"numpy.ma.allequal",
"numpy.ma.exp",
"numpy.ma.alltrue",
"numpy.ma.count",
"numpy.ma.multiply",
"numpy.ma.divide",
"numpy.ma.array",
"numpy.ma.arctan2",
"numpy.ma.sin",
"numpy.ma.choose",
"numpy.arange",
"numpy.ma.maximum.reduce",
"numpy.ma.sometrue",
"numpy.ma.shape",
"numpy.subtract",
"numpy.ma.greater_equal",
"numpy.ravel",
"numpy.ma.conjugate",
"numpy.ma.zeros",
"numpy.ma.sqrt",
"numpy.ma.inner",
"numpy.ma.masked_equal",
"numpy.ma.equal",
"numpy.ma.average",
"numpy.ma.log10",
"numpy.sort",
"numpy.ma.resize",
"numpy.ma.repeat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.19",
"1.24",
"1.16",
"1.23",
"1.20",
"1.7",
"1.12",
"1.21",
"1.22",
"1.14",
"1.6",
"1.13",
"1.9",
"1.17",
"1.10",
"1.18",
"1.15",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.19",
"1.24",
"1.16",
"1.23",
"1.20",
"1.7",
"1.12",
"1.21",
"1.22",
"1.14",
"1.6",
"1.13",
"1.9",
"1.17",
"1.10",
"1.18",
"1.15",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Purple-PI/rlstructures | [
"9b201b083715bbda2f3534b010c84e11dfc0a1c7",
"9b201b083715bbda2f3534b010c84e11dfc0a1c7",
"9b201b083715bbda2f3534b010c84e11dfc0a1c7"
] | [
"rlstructures/deprecated/batchers/buffers.py",
"tutorial/deprecated/tutorial_a2c_with_infinite_env/a2c.py",
"rlalgos/ppo/run_cartpole.py"
] | [
"#\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\n\nimport torch\nimport torch.multiprocessing as mp\nfrom rlstructures import TemporalDictTensor, DictTensor\n\n\nclass Buffer:\n def get_free_slots(self, k):\n raise NotImplementedError\n\n def set_free_slots(self, s):\n raise NotImplementedError\n\n def write(self, slots, variables):\n raise NotImplementedError\n\n def close(self):\n raise NotImplementedError\n\n def get_trajectories(self, trajectories, erase=True):\n raise NotImplementedError\n\n\nclass LocalBuffer(Buffer):\n \"\"\"\n Defines a shared buffer to store trajectories / transitions\n The buffer is structured as nslots of size s_slots for each possible variable\n \"\"\"\n\n def __init__(\n self,\n n_slots=None,\n s_slots=None,\n specs_agent_state=None,\n specs_agent_output=None,\n specs_environment=None,\n device=torch.device(\"cpu\"),\n ):\n \"\"\"\n Init a new buffer\n\n Args:\n n_slots (int): the number of slots\n s_slots (int): the size of each slot (temporal dimension)\n specs (dict): The description of the variable to store in the buffer\n \"\"\"\n self._device = device\n self.buffers = {}\n self.n_slots = n_slots\n self.s_slots = s_slots\n\n # Creation of the storage buffers\n nspecs_agent_state = {\"_\" + k: specs_agent_state[k] for k in specs_agent_state}\n nspecs_env = {\"_\" + k: specs_environment[k] for k in specs_environment}\n specs = {\n **specs_agent_state,\n **specs_agent_output,\n **specs_environment,\n **nspecs_agent_state,\n **nspecs_env,\n \"position_in_slot\": {\"size\": torch.Size([]), \"dtype\": torch.int64},\n }\n\n for n in specs:\n size = (n_slots, s_slots) + specs[n][\"size\"]\n print(\n \"Creating buffer for '\"\n + n\n + \"' of size \"\n + str(size)\n + \" and type \"\n + str(specs[n][\"dtype\"])\n )\n assert not n in self.buffers, \"Same key is used by the agent and the env\"\n 
self.buffers[n] = (\n torch.zeros(size, dtype=specs[n][\"dtype\"])\n .to(self._device)\n .share_memory_()\n )\n self.position_in_slot = (\n torch.zeros(n_slots).to(self._device).long().share_memory_()\n )\n self._free_slots_queue = mp.Queue()\n self._free_slots_queue.cancel_join_thread()\n for i in range(n_slots):\n self._free_slots_queue.put(i, block=True)\n self._full_slots_queue = mp.Queue()\n self._full_slots_queue.cancel_join_thread()\n\n def device(self):\n return self._device\n\n def get_free_slots(self, k):\n \"\"\"\n Returns k available slots. Wait until enough slots are free\n \"\"\"\n assert k > 0\n x = [self._free_slots_queue.get() for i in range(k)]\n for i in x:\n self.position_in_slot[i] = 0\n return x\n\n def set_free_slots(self, s):\n \"\"\"\n Tells the buffer that it can reuse the given slots\n :param s may be one slot (int) or multiple slots (list of int)\n \"\"\"\n assert not s is None\n if isinstance(s, int):\n self._free_slots_queue.put(s)\n else:\n for ss in s:\n self._free_slots_queue.put(ss)\n # logging.getLogger(\"buffer\").debug(\"SET FREE \" + str(s))\n\n def write(self, slots, variables):\n if not variables.device() == self._device:\n variables = variables.to(self._device)\n\n slots = torch.tensor(slots).to(self._device)\n assert variables.n_elems() == len(slots)\n positions = self.position_in_slot[slots]\n a = torch.arange(len(slots)).to(self._device)\n for n in variables.keys():\n # assert variables[n].size()[0] == 1\n # print(self.buffers[n][slots].size())\n self.buffers[n][slots, positions] = variables[n][a].detach()\n self.position_in_slot[slots] += 1\n\n def is_slot_full(self, slot):\n \"\"\"\n Returns True of a slot is full\n \"\"\"\n return self.position_in_slot[slot] == self.s_slots\n\n def get_single(self, slots, position):\n assert isinstance(slots, list)\n assert isinstance(slots[0], int)\n idx = torch.tensor(slots).to(self._device).long()\n d = {k: self.buffers[k][idx, position] for k in self.buffers}\n return 
DictTensor(d)\n\n def close(self):\n \"\"\"\n Close the buffer\n \"\"\"\n self._free_slots_queue.close()\n self._full_slots_queue.close()\n\n def get_single_slots(self, slots, erase=True):\n assert isinstance(slots, list)\n assert isinstance(slots[0], int)\n idx = torch.tensor(slots).to(self._device).long()\n lengths = self.position_in_slot[idx]\n ml = lengths.max().item()\n v = {k: self.buffers[k][idx, :ml].clone() for k in self.buffers}\n if erase:\n self.set_free_slots(slots)\n return TemporalDictTensor(v, lengths)\n\n def get_multiple_slots(self, trajectories, erase=True):\n \"\"\"\n Return the concatenation of multiple slots. This function is not well optimized and could be fasten\n \"\"\"\n assert isinstance(trajectories, list) or isinstance(trajectories, tuple)\n assert isinstance(trajectories[0], list)\n assert isinstance(trajectories[0][0], int)\n # 1: Unify the size of all trajectories....\n max_l = 0\n for traj in trajectories:\n max_l = max(max_l, len(traj))\n ntrajectories = []\n for traj in trajectories:\n while not len(traj) == max_l:\n traj.append(None)\n ntrajectories.append(traj)\n\n # 2: Copy the content\n length = torch.zeros(len(ntrajectories)).to(self._device).long()\n tensors = []\n for k in range(max_l):\n idxs = [traj[k] for traj in ntrajectories]\n nidxs = []\n for _id in idxs:\n if _id is None:\n nidxs.append(0)\n else:\n nidxs.append(_id)\n nidxs = torch.tensor(nidxs).to(self._device)\n v = {k: self.buffers[k][nidxs] for k in self.buffers}\n pis = self.position_in_slot[nidxs]\n # Check that slots are full\n if k < max_l - 1:\n for i in range(len(pis)):\n if not ntrajectories[i][k + 1] is None:\n assert pis[i] == self.s_slots\n\n for i in range(len(pis)):\n if not ntrajectories[i][k] is None:\n length[i] = length[i] + pis[i]\n\n tensors.append(v)\n ftrajectories = {\n k: torch.cat([t[k] for t in tensors], dim=1) for k in self.buffers\n }\n if erase:\n for k in trajectories:\n for kk in k:\n if not kk is None:\n self.set_free_slots(kk)\n\n 
return TemporalDictTensor(ftrajectories, length).shorten()\n",
"#\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\n\nfrom rlstructures.logger import Logger, TFLogger\nfrom rlstructures import DictTensor, TemporalDictTensor\nfrom rlstructures import logging\nfrom rlstructures.tools import weight_init\nfrom rlstructures.batchers import EpisodeBatcher, Batcher\nimport torch.nn as nn\nimport copy\nimport torch\nimport time\nimport numpy as np\nimport torch.nn.functional as F\nfrom tutorial.tutorial_reinforce.agent import *\n\n\nclass A2C:\n def __init__(self, config, create_env, create_train_env, create_agent):\n self.config = config\n\n # Creation of the Logger (that saves in tensorboard and CSV)\n self.logger = TFLogger(log_dir=self.config[\"logdir\"], hps=self.config)\n\n self._create_env = create_env\n self._create_train_env = create_train_env\n self._create_agent = create_agent\n\n # Creation of one env instance to get the dimensionnality of observations and number of actions\n env = self._create_env(\n self.config[\"n_envs\"], seed=0, env_name=self.config[\"env_name\"]\n )\n self.n_actions = env.action_space.n\n self.obs_dim = env.reset()[0][\"frame\"].size()[1]\n del env\n\n def run(self):\n # Instantiate the learning model abd the baseline model\n self.learning_model = AgentModel(self.obs_dim, self.n_actions, 32)\n self.critic_model = BaselineModel(self.obs_dim, 32)\n\n # We create a batcher dedicated to evaluation\n model = copy.deepcopy(self.learning_model)\n self.evaluation_batcher = EpisodeBatcher(\n n_timesteps=self.config[\"max_episode_steps\"],\n n_slots=self.config[\"n_evaluation_episodes\"],\n create_agent=self._create_agent,\n create_env=self._create_env,\n env_args={\n \"n_envs\": self.config[\"n_envs\"],\n \"max_episode_steps\": self.config[\"max_episode_steps\"],\n \"env_name\": self.config[\"env_name\"],\n },\n agent_args={\"n_actions\": self.n_actions, \"model\": model},\n 
n_threads=self.config[\"n_evaluation_threads\"],\n seeds=[\n self.config[\"env_seed\"] + k * 10\n for k in range(self.config[\"n_evaluation_threads\"])\n ],\n )\n\n # Creation of the batcher for sampling complete pieces of trajectories (i.e Batcher)\n # The batcher will sample n_threads*n_envs trajectories at each call\n # To have a fast batcher, we have to configure it with n_timesteps=self.config[\"max_episode_steps\"]\n model = copy.deepcopy(self.learning_model)\n self.train_batcher = Batcher(\n n_timesteps=self.config[\"a2c_timesteps\"],\n n_slots=self.config[\"n_envs\"] * self.config[\"n_threads\"],\n create_agent=self._create_agent,\n create_env=self._create_train_env,\n env_args={\n \"n_envs\": self.config[\"n_envs\"],\n \"max_episode_steps\": self.config[\"max_episode_steps\"],\n \"env_name\": self.config[\"env_name\"],\n },\n agent_args={\"n_actions\": self.n_actions, \"model\": model},\n n_threads=self.config[\"n_threads\"],\n seeds=[\n self.config[\"env_seed\"] + k * 10\n for k in range(self.config[\"n_threads\"])\n ],\n )\n\n # Creation of the optimizer\n optimizer = torch.optim.Adam(\n nn.Sequential(self.learning_model, self.critic_model).parameters(),\n lr=self.config[\"lr\"],\n )\n\n # Training Loop:\n _start_time = time.time()\n self.iteration = 0\n\n # #We launch the evaluation batcher (in deterministic mode)\n n_episodes = self.config[\"n_evaluation_episodes\"]\n agent_info = DictTensor(\n {\"stochastic\": torch.tensor([False]).repeat(n_episodes)}\n )\n self.evaluation_batcher.execute(n_episodes=n_episodes, agent_info=agent_info)\n self.evaluation_iteration = self.iteration\n\n # Initialize the training batcher such that agents will start to acqire pieces of episodes\n self.train_batcher.update(self.learning_model.state_dict())\n n_episodes = self.config[\"n_envs\"] * self.config[\"n_threads\"]\n agent_info = DictTensor({\"stochastic\": torch.tensor([True]).repeat(n_episodes)})\n self.train_batcher.reset(agent_info=agent_info)\n\n while 
time.time() - _start_time < self.config[\"time_limit\"]:\n # Call the batcher to get a sample of trajectories\n\n # 2) We get the pieces of episodes. Since the env is an infinite env, we will always receive a new piece of episode\n self.train_batcher.execute()\n trajectories = self.train_batcher.get(blocking=True)\n\n # 3) Now, we compute the loss\n dt = self.get_loss(trajectories)\n [self.logger.add_scalar(k, dt[k].item(), self.iteration) for k in dt.keys()]\n\n # Computation of final loss\n ld = self.config[\"critic_coef\"] * dt[\"critic_loss\"]\n lr = self.config[\"a2c_coef\"] * dt[\"a2c_loss\"]\n le = self.config[\"entropy_coef\"] * dt[\"entropy_loss\"]\n\n floss = ld - le - lr\n floss = floss / n_episodes * trajectories.n_elems()\n\n optimizer.zero_grad()\n floss.backward()\n optimizer.step()\n\n # Update the train batcher with the updated model\n self.train_batcher.update(self.learning_model.state_dict())\n self.iteration += 1\n\n # We check the evaluation batcher\n evaluation_trajectories = self.evaluation_batcher.get(blocking=False)\n if not evaluation_trajectories is None: # trajectories are available\n # Compute the cumulated reward\n cumulated_reward = (\n (\n evaluation_trajectories[\"_reward\"]\n * evaluation_trajectories.mask()\n )\n .sum(1)\n .mean()\n )\n self.logger.add_scalar(\n \"evaluation_reward\",\n cumulated_reward.item(),\n self.evaluation_iteration,\n )\n print(\n \"At iteration %d, reward is %f\"\n % (self.evaluation_iteration, cumulated_reward.item())\n )\n # We reexecute the evaluation batcher (with same value of agent_info and same number of episodes)\n self.evaluation_batcher.update(self.learning_model.state_dict())\n self.evaluation_iteration = self.iteration\n self.evaluation_batcher.reexecute()\n\n self.train_batcher.close()\n self.evaluation_batcher.get() # To wait for the last trajectories\n self.evaluation_batcher.close()\n self.logger.update_csv() # To save as a CSV file in logdir\n self.logger.close()\n\n def get_loss(self, 
trajectories):\n # First, we want to compute the cumulated reward per trajectory\n # The reward is a t+1 in each iteration (since it is btained after the aaction), so we use the '_reward' field in the trajectory\n # The 'reward' field corresopnds to the reward at time t\n reward = trajectories[\"_reward\"]\n\n # We get the mask that tells which transition is in a trajectory (1) or not (0)\n mask = trajectories.mask()\n\n # We remove the reward values that are not in the trajectories\n reward = reward * mask\n max_length = trajectories.lengths.max().item()\n # Now, we want to compute the action probabilities over the trajectories such that we will be able to do 'backward'\n action_probabilities = []\n for t in range(max_length):\n proba = self.learning_model(trajectories[\"frame\"][:, t])\n action_probabilities.append(\n proba.unsqueeze(1)\n ) # We append the probability, and introduces the temporal dimension (2nde dimension)\n action_probabilities = torch.cat(\n action_probabilities, dim=1\n ) # Now, we have a B x T x n_actions tensor\n\n # We compute the critic value for t=0 to T (i.e including the very last observation)\n critic = []\n for t in range(max_length):\n b = self.critic_model(trajectories[\"frame\"][:, t])\n critic.append(b.unsqueeze(1))\n critic = torch.cat(critic + [b.unsqueeze(1)], dim=1).squeeze(\n -1\n ) # Now, we have a B x (T+1) tensor\n # We also need to compute the critic value at for the last observation of the trajectories (to compute the TD)\n # It may be the last element of the trajectories (if episode is not finished), or on the last frame of the episode\n idx = torch.arange(trajectories.n_elems())\n last_critic = self.critic_model(\n trajectories[\"_frame\"][idx, trajectories.lengths - 1]\n ).squeeze(-1)\n critic[idx, trajectories.lengths] = last_critic\n\n # We compute the temporal difference\n target = (\n reward\n + self.config[\"discount_factor\"]\n * (1 - trajectories[\"_done\"].float())\n * critic[:, 1:].detach()\n )\n td = 
critic[:, :-1] - target\n\n critic_loss = td ** 2\n # We sum the loss for each episode (considering the mask)\n critic_loss = (critic_loss * mask).sum(1) / mask.sum(1)\n # We average the loss over all the trajectories\n avg_critic_loss = critic_loss.mean()\n\n # We do the same on the reinforce loss\n action_distribution = torch.distributions.Categorical(action_probabilities)\n log_proba = action_distribution.log_prob(trajectories[\"action\"])\n a2c_loss = -log_proba * td.detach()\n a2c_loss = (a2c_loss * mask).sum(1) / mask.sum(1)\n avg_a2c_loss = a2c_loss.mean()\n\n # We compute the entropy loss\n entropy = action_distribution.entropy()\n entropy = (entropy * mask).sum(1) / mask.sum(1)\n avg_entropy = entropy.mean()\n\n return DictTensor(\n {\n \"critic_loss\": avg_critic_loss,\n \"a2c_loss\": avg_a2c_loss,\n \"entropy_loss\": avg_entropy,\n }\n )\n",
"#\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\n\nfrom rlstructures import logging\nfrom rlstructures.env_wrappers import GymEnv, GymEnvInf\nfrom rlstructures.tools import weight_init\nimport torch.nn as nn\nimport copy\nimport torch\nimport time\nimport numpy as np\nimport torch.nn.functional as F\nfrom rlalgos.a2c_gae.agent import RecurrentAgent, ActionModel\nimport gym\nfrom gym.wrappers import TimeLimit\nfrom rlalgos.ppo.discrete_ppo import PPO\nfrom rlalgos.a2c_gae.agent import ActionModel, CriticModel, Model\n\n\ndef create_gym_env(env_name):\n return gym.make(env_name)\n\n\ndef create_env(n_envs, env_name=None, max_episode_steps=None, seed=None):\n envs = []\n for k in range(n_envs):\n e = create_gym_env(env_name)\n e = TimeLimit(e, max_episode_steps=max_episode_steps)\n envs.append(e)\n return GymEnv(envs, seed)\n\n\ndef create_train_env(n_envs, env_name=None, max_episode_steps=None, seed=None):\n envs = []\n for k in range(n_envs):\n e = create_gym_env(env_name)\n e = TimeLimit(e, max_episode_steps=max_episode_steps)\n envs.append(e)\n return GymEnvInf(envs, seed)\n\n\ndef create_agent(model, n_actions=1):\n return RecurrentAgent(model=model, n_actions=n_actions)\n\n\nclass Experiment(PPO):\n def __init__(self, config, create_train_env, create_env, create_agent):\n super().__init__(config, create_train_env, create_env, create_agent)\n\n def _create_model(self):\n action_model = ActionModel(\n self.obs_dim, self.n_actions, self.config[\"model/hidden_size\"]\n )\n critic_model = CriticModel(self.obs_dim, self.config[\"model/hidden_size\"])\n module = Model(action_model, critic_model)\n module.apply(weight_init)\n return module\n\n\nif __name__ == \"__main__\":\n import torch.multiprocessing as mp\n\n mp.set_start_method(\"spawn\")\n\n config = {\n \"env_name\": \"CartPole-v0\",\n \"n_envs\": 4,\n \"max_episode_steps\": 
100,\n \"discount_factor\": 0.9,\n \"logdir\": \"./results\",\n \"lr\": 0.001,\n \"n_processes\": 4,\n \"n_evaluation_processes\": 4,\n \"n_evaluation_envs\": 64,\n \"time_limit\": 360,\n \"coef_critic\": 1.0,\n \"coef_entropy\": 0.01,\n \"coef_ppo\": 1.0,\n \"env_seed\": 42,\n \"ppo_timesteps\": 20,\n \"k_epochs\": 4,\n \"eps_clip\": 0.2,\n \"gae_coef\": 0.3,\n \"clip_grad\": 2,\n \"learner_device\": \"cpu\",\n \"evaluation_mode\": \"stochastic\",\n \"verbose\": True,\n \"model/hidden_size\": 16,\n }\n exp = Experiment(config, create_train_env, create_env, create_agent)\n exp.run()\n"
] | [
[
"torch.Size",
"torch.multiprocessing.Queue",
"torch.cat",
"torch.zeros",
"torch.tensor",
"torch.device"
],
[
"torch.tensor",
"torch.nn.Sequential",
"torch.distributions.Categorical",
"torch.cat"
],
[
"torch.multiprocessing.set_start_method"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pvaneck/tfjs-converter | [
"074ed3252fab3d913679d5415a5e530efd4966b3"
] | [
"python/tensorflowjs/converters/keras_h5_conversion_test.py"
] | [
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Unit tests for artifact conversion to and from Python Keras.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\nimport h5py\nimport keras\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflowjs.converters import keras_h5_conversion as conversion\n\n\nclass ConvertH5WeightsTest(unittest.TestCase):\n\n def setUp(self):\n self._tmp_dir = tempfile.mkdtemp()\n super(ConvertH5WeightsTest, self).setUp()\n\n def tearDown(self):\n if os.path.isdir(self._tmp_dir):\n shutil.rmtree(self._tmp_dir)\n super(ConvertH5WeightsTest, self).tearDown()\n\n def testConvertWeightsFromSimpleModelNoSplitByLayer(self):\n input_tensor = keras.layers.Input((3,))\n dense1 = keras.layers.Dense(\n 4, use_bias=True, kernel_initializer='ones', bias_initializer='zeros',\n name='MyDense10')(input_tensor)\n output = keras.layers.Dense(\n 2, use_bias=False, kernel_initializer='ones', name='MyDense20')(dense1)\n model = keras.models.Model(inputs=[input_tensor], outputs=[output])\n h5_path = os.path.join(self._tmp_dir, 'MyModel.h5')\n model.save_weights(h5_path)\n\n # Load the saved weights as a JSON string.\n groups = conversion.h5_weights_to_tfjs_format(h5py.File(h5_path))\n\n # Check 
the loaded weights.\n # Due to the default `split_by_layer=True`, there should be only one weight\n # group.\n self.assertEqual(1, len(groups))\n self.assertEqual(3, len(groups[0]))\n kernel1 = groups[0][0]\n self.assertEqual('MyDense10/kernel', kernel1['name'])\n self.assertEqual('float32', kernel1['data'].dtype)\n self.assertEqual((3, 4), kernel1['data'].shape)\n self.assertTrue(np.allclose(np.ones([3, 4]), kernel1['data']))\n bias1 = groups[0][1]\n self.assertEqual('MyDense10/bias', bias1['name'])\n self.assertEqual('float32', bias1['data'].dtype)\n self.assertEqual((4,), bias1['data'].shape)\n self.assertTrue(np.allclose(np.zeros([4]), bias1['data']))\n kernel2 = groups[0][2]\n self.assertEqual('MyDense20/kernel', kernel2['name'])\n self.assertEqual('float32', kernel2['data'].dtype)\n self.assertEqual((4, 2), kernel2['data'].shape)\n self.assertTrue(np.allclose(np.ones([4, 2]), kernel2['data']))\n\n def testConvertWeightsFromSimpleModelSplitByLayer(self):\n input_tensor = keras.layers.Input((3,))\n dense1 = keras.layers.Dense(\n 4, use_bias=True, kernel_initializer='ones', bias_initializer='zeros',\n name='MyDense30')(input_tensor)\n output = keras.layers.Dense(\n 2, use_bias=False, kernel_initializer='ones', name='MyDense40')(dense1)\n model = keras.models.Model(inputs=[input_tensor], outputs=[output])\n h5_path = os.path.join(self._tmp_dir, 'MyModel.h5')\n model.save_weights(h5_path)\n\n # Load the saved weights as a JSON string.\n groups = conversion.h5_weights_to_tfjs_format(h5py.File(h5_path),\n split_by_layer=True)\n\n # Check the loaded weights.\n # Due to `split_by_layer=True` and the fact that the model has two layers,\n # there should be two weight groups.\n self.assertEqual(2, len(groups))\n self.assertEqual(2, len(groups[0]))\n kernel1 = groups[0][0]\n self.assertEqual('MyDense30/kernel', kernel1['name'])\n self.assertEqual('float32', kernel1['data'].dtype)\n self.assertEqual((3, 4), kernel1['data'].shape)\n self.assertTrue(np.allclose(np.ones([3, 
4]), kernel1['data']))\n bias1 = groups[0][1]\n self.assertEqual('MyDense30/bias', bias1['name'])\n self.assertEqual('float32', bias1['data'].dtype)\n self.assertEqual((4,), bias1['data'].shape)\n self.assertTrue(np.allclose(np.zeros([4]), bias1['data']))\n\n self.assertEqual(1, len(groups[1]))\n kernel2 = groups[1][0]\n self.assertEqual('MyDense40/kernel', kernel2['name'])\n self.assertEqual('float32', kernel2['data'].dtype)\n self.assertEqual((4, 2), kernel2['data'].shape)\n self.assertTrue(np.allclose(np.ones([4, 2]), kernel2['data']))\n\n def testConvertModelWithNestedLayerNames(self):\n model = keras.Sequential()\n\n # Add a layer with a nested layer name, i.e., a layer name with slash(es)\n # in it.\n model.add(keras.layers.Dense(2, input_shape=[12], name='dense'))\n model.add(keras.layers.Dense(8, name='foo/dense'))\n model.add(keras.layers.Dense(4, name='foo/bar/dense'))\n tfjs_path = os.path.join(self._tmp_dir, 'nested_layer_names_model')\n conversion.save_keras_model(model, tfjs_path)\n\n # Check model.json and weights manifest.\n with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:\n model_json = json.load(f)\n self.assertTrue(model_json['modelTopology'])\n weights_manifest = model_json['weightsManifest']\n weight_shapes = dict()\n for group in weights_manifest:\n for weight in group['weights']:\n weight_shapes[weight['name']] = weight['shape']\n self.assertEqual(\n sorted(['dense/kernel', 'dense/bias', 'foo/dense/kernel',\n 'foo/dense/bias', 'foo/bar/dense/kernel',\n 'foo/bar/dense/bias']),\n sorted(list(weight_shapes.keys())))\n self.assertEqual([12, 2], weight_shapes['dense/kernel'])\n self.assertEqual([2], weight_shapes['dense/bias'])\n self.assertEqual([2, 8], weight_shapes['foo/dense/kernel'])\n self.assertEqual([8], weight_shapes['foo/dense/bias'])\n self.assertEqual([8, 4], weight_shapes['foo/bar/dense/kernel'])\n self.assertEqual([4], weight_shapes['foo/bar/dense/bias'])\n\n def 
testConvertMergedModelFromSimpleModelNoSplitByLayer(self):\n input_tensor = keras.layers.Input((3,))\n dense1 = keras.layers.Dense(\n 4, use_bias=True, kernel_initializer='ones', bias_initializer='zeros',\n name='MergedDense10')(input_tensor)\n output = keras.layers.Dense(\n 2, use_bias=False,\n kernel_initializer='ones', name='MergedDense20')(dense1)\n model = keras.models.Model(inputs=[input_tensor], outputs=[output])\n h5_path = os.path.join(self._tmp_dir, 'MyModelMerged.h5')\n model.save(h5_path)\n config_json = json.loads(model.to_json(), encoding='utf8')\n\n # Load the saved weights as a JSON string.\n out, groups = conversion.h5_merged_saved_model_to_tfjs_format(\n h5py.File(h5_path))\n saved_topology = out['model_config']\n\n # check the model topology was stored\n self.assertEqual(config_json['class_name'], saved_topology['class_name'])\n self.assertEqual(config_json['config'], saved_topology['config'])\n\n # Check the loaded weights.\n # By default, all weights of the model ought to be put in the same group.\n self.assertEqual(1, len(groups))\n\n self.assertEqual(keras.__version__, out['keras_version'])\n self.assertEqual('tensorflow', out['backend'])\n weight_group = groups[0]\n self.assertEqual(3, len(weight_group))\n kernel1 = weight_group[0]\n self.assertEqual('MergedDense10/kernel', kernel1['name'])\n self.assertEqual('float32', kernel1['data'].dtype)\n self.assertEqual((3, 4), kernel1['data'].shape)\n self.assertTrue(np.allclose(np.ones([3, 4]), kernel1['data']))\n bias1 = weight_group[1]\n self.assertEqual('MergedDense10/bias', bias1['name'])\n self.assertEqual('float32', bias1['data'].dtype)\n self.assertEqual((4,), bias1['data'].shape)\n self.assertTrue(np.allclose(np.zeros([4]), bias1['data']))\n kernel2 = weight_group[2]\n self.assertEqual('MergedDense20/kernel', kernel2['name'])\n self.assertEqual('float32', kernel2['data'].dtype)\n self.assertEqual((4, 2), kernel2['data'].shape)\n self.assertTrue(np.allclose(np.ones([4, 2]), 
kernel2['data']))\n\n def testConvertMergedModelFromSimpleModelSplitByLayer(self):\n input_tensor = keras.layers.Input((3,))\n dense1 = keras.layers.Dense(\n 4, use_bias=True, kernel_initializer='ones', bias_initializer='zeros',\n name='MergedDense30')(input_tensor)\n output = keras.layers.Dense(\n 2, use_bias=False,\n kernel_initializer='ones', name='MergedDense40')(dense1)\n model = keras.models.Model(inputs=[input_tensor], outputs=[output])\n h5_path = os.path.join(self._tmp_dir, 'MyModelMerged.h5')\n model.save(h5_path)\n config_json = json.loads(model.to_json(), encoding='utf8')\n\n # Load the saved weights as a JSON string.\n out, groups = conversion.h5_merged_saved_model_to_tfjs_format(\n h5py.File(h5_path), split_by_layer=True)\n saved_topology = out['model_config']\n\n # check the model topology was stored\n self.assertEqual(config_json['class_name'], saved_topology['class_name'])\n self.assertEqual(config_json['config'], saved_topology['config'])\n\n # Check the loaded weights.\n # Due to `split_by_layer=True`, there ought to be two weigth groups,\n # because the model has two layers.\n self.assertEqual(2, len(groups))\n\n self.assertEqual(keras.__version__, out['keras_version'])\n self.assertEqual('tensorflow', out['backend'])\n self.assertEqual(2, len(groups[0]))\n kernel1 = groups[0][0]\n self.assertEqual('MergedDense30/kernel', kernel1['name'])\n self.assertEqual('float32', kernel1['data'].dtype)\n self.assertEqual((3, 4), kernel1['data'].shape)\n self.assertTrue(np.allclose(np.ones([3, 4]), kernel1['data']))\n bias1 = groups[0][1]\n self.assertEqual('MergedDense30/bias', bias1['name'])\n self.assertEqual('float32', bias1['data'].dtype)\n self.assertEqual((4,), bias1['data'].shape)\n self.assertTrue(np.allclose(np.zeros([4]), bias1['data']))\n self.assertEqual(1, len(groups[1]))\n kernel2 = groups[1][0]\n self.assertEqual('MergedDense40/kernel', kernel2['name'])\n self.assertEqual('float32', kernel2['data'].dtype)\n self.assertEqual((4, 2), 
kernel2['data'].shape)\n self.assertTrue(np.allclose(np.ones([4, 2]), kernel2['data']))\n\n def testConvertWeightsFromSequentialModelNoSplitByLayer(self):\n sequential_model = keras.models.Sequential([\n keras.layers.Dense(\n 3, input_shape=(2,), use_bias=True, kernel_initializer='ones',\n name='Dense10'),\n keras.layers.Dense(\n 1, use_bias=False, kernel_initializer='ones', name='Dense20')])\n h5_path = os.path.join(self._tmp_dir, 'SequentialModel.h5')\n sequential_model.save_weights(h5_path)\n\n # Load the saved weights as a JSON string.\n groups = conversion.h5_weights_to_tfjs_format(h5py.File(h5_path))\n\n # Check the loaded weights.\n # Due to the default `split_by_layer=False`, there should be only one weight\n # group.\n self.assertEqual(1, len(groups))\n self.assertEqual(3, len(groups[0]))\n kernel1 = groups[0][0]\n self.assertEqual('Dense10/kernel', kernel1['name'])\n self.assertEqual('float32', kernel1['data'].dtype)\n self.assertEqual((2, 3), kernel1['data'].shape)\n self.assertTrue(np.allclose(np.ones([2, 3]).tolist(), kernel1['data']))\n bias1 = groups[0][1]\n self.assertEqual('Dense10/bias', bias1['name'])\n self.assertEqual('float32', bias1['data'].dtype)\n self.assertEqual((3,), bias1['data'].shape)\n self.assertTrue(np.allclose(np.zeros([3]).tolist(), bias1['data']))\n kernel2 = groups[0][2]\n self.assertEqual('Dense20/kernel', kernel2['name'])\n self.assertEqual('float32', kernel2['data'].dtype)\n self.assertEqual((3, 1), kernel2['data'].shape)\n self.assertTrue(np.allclose(np.ones([3, 1]).tolist(), kernel2['data']))\n\n def testConvertWeightsFromSequentialModelSplitByLayer(self):\n sequential_model = keras.models.Sequential([\n keras.layers.Dense(\n 3, input_shape=(2,), use_bias=True, kernel_initializer='ones',\n name='Dense30'),\n keras.layers.Dense(\n 1, use_bias=False, kernel_initializer='ones', name='Dense40')])\n h5_path = os.path.join(self._tmp_dir, 'SequentialModel.h5')\n sequential_model.save_weights(h5_path)\n\n # Load the saved weights 
as a JSON string.\n groups = conversion.h5_weights_to_tfjs_format(h5py.File(h5_path),\n split_by_layer=True)\n\n # Check the loaded weights.\n # Due to the default `split_by_layer=True`, there should be two weight\n # gropus, because the model has two layers.\n self.assertEqual(2, len(groups))\n self.assertEqual(2, len(groups[0]))\n kernel1 = groups[0][0]\n self.assertEqual('Dense30/kernel', kernel1['name'])\n self.assertEqual('float32', kernel1['data'].dtype)\n self.assertEqual((2, 3), kernel1['data'].shape)\n self.assertTrue(np.allclose(np.ones([2, 3]).tolist(), kernel1['data']))\n bias1 = groups[0][1]\n self.assertEqual('Dense30/bias', bias1['name'])\n self.assertEqual('float32', bias1['data'].dtype)\n self.assertEqual((3,), bias1['data'].shape)\n self.assertTrue(np.allclose(np.zeros([3]).tolist(), bias1['data']))\n\n self.assertEqual(1, len(groups[1]))\n kernel2 = groups[1][0]\n self.assertEqual('Dense40/kernel', kernel2['name'])\n self.assertEqual('float32', kernel2['data'].dtype)\n self.assertEqual((3, 1), kernel2['data'].shape)\n self.assertTrue(np.allclose(np.ones([3, 1]).tolist(), kernel2['data']))\n\n def testSaveModelSucceedsForNonSequentialModel(self):\n t_input = keras.Input([2])\n dense_layer = keras.layers.Dense(3)\n t_output = dense_layer(t_input)\n model = keras.Model(t_input, t_output)\n conversion.save_keras_model(model, self._tmp_dir)\n\n # Verify the content of the artifacts output directory.\n self.assertTrue(\n os.path.isfile(os.path.join(self._tmp_dir, 'group1-shard1of1.bin')))\n model_json = json.load(\n open(os.path.join(self._tmp_dir, 'model.json'), 'rt'))\n\n topology_json = model_json['modelTopology']\n self.assertIn('keras_version', topology_json)\n self.assertIn('backend', topology_json)\n self.assertIn('model_config', topology_json)\n\n weights_manifest = model_json['weightsManifest']\n self.assertTrue(isinstance(weights_manifest, list))\n self.assertEqual(1, len(weights_manifest))\n self.assertIn('paths', weights_manifest[0])\n\n 
def testSaveModelSucceedsForTfKerasNonSequentialModel(self):\n t_input = tf.keras.Input([2])\n dense_layer = tf.keras.layers.Dense(3)\n t_output = dense_layer(t_input)\n model = tf.keras.Model(t_input, t_output)\n\n # `tf.keras.Model`s must be compiled before they can be saved.\n model.compile(loss='mean_squared_error', optimizer='sgd')\n\n conversion.save_keras_model(model, self._tmp_dir)\n\n # Verify the content of the artifacts output directory.\n self.assertTrue(\n os.path.isfile(os.path.join(self._tmp_dir, 'group1-shard1of1.bin')))\n model_json = json.load(\n open(os.path.join(self._tmp_dir, 'model.json'), 'rt'))\n\n topology_json = model_json['modelTopology']\n self.assertIn('keras_version', topology_json)\n self.assertIn('backend', topology_json)\n self.assertIn('model_config', topology_json)\n\n weights_manifest = model_json['weightsManifest']\n self.assertTrue(isinstance(weights_manifest, list))\n self.assertEqual(1, len(weights_manifest))\n self.assertIn('paths', weights_manifest[0])\n\n def testSaveModelSucceedsForNestedKerasModel(self):\n inner_model = keras.Sequential([\n keras.layers.Dense(4, input_shape=[3], activation='relu'),\n keras.layers.Dense(3, activation='tanh')])\n outer_model = keras.Sequential()\n outer_model.add(inner_model)\n outer_model.add(keras.layers.Dense(1, activation='sigmoid'))\n\n conversion.save_keras_model(outer_model, self._tmp_dir)\n\n # Verify the content of the artifacts output directory.\n self.assertTrue(\n os.path.isfile(os.path.join(self._tmp_dir, 'group1-shard1of1.bin')))\n model_json = json.load(\n open(os.path.join(self._tmp_dir, 'model.json'), 'rt'))\n\n topology_json = model_json['modelTopology']\n self.assertIn('keras_version', topology_json)\n self.assertIn('backend', topology_json)\n self.assertIn('model_config', topology_json)\n\n # Verify that all the layers' weights are present.\n weights_manifest = model_json['weightsManifest']\n self.assertTrue(isinstance(weights_manifest, list))\n weight_entries = []\n 
for group in weights_manifest:\n weight_entries.extend(group['weights'])\n self.assertEqual(6, len(weight_entries))\n\n def testSaveModelSucceedsForTfKerasSequentialModel(self):\n model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=[2])])\n\n # `tf.keras.Model`s must be compiled before they can be saved.\n model.compile(loss='mean_squared_error', optimizer='sgd')\n\n conversion.save_keras_model(model, self._tmp_dir)\n\n # Verify the content of the artifacts output directory.\n self.assertTrue(\n os.path.isfile(os.path.join(self._tmp_dir, 'group1-shard1of1.bin')))\n model_json = json.load(\n open(os.path.join(self._tmp_dir, 'model.json'), 'rt'))\n\n topology_json = model_json['modelTopology']\n self.assertIn('keras_version', topology_json)\n self.assertIn('backend', topology_json)\n self.assertIn('model_config', topology_json)\n\n weights_manifest = model_json['weightsManifest']\n self.assertTrue(isinstance(weights_manifest, list))\n self.assertEqual(1, len(weights_manifest))\n self.assertIn('paths', weights_manifest[0])\n\n def testSavedModelSucceedsForExistingDirAndSequential(self):\n artifacts_dir = os.path.join(self._tmp_dir, 'artifacts')\n os.makedirs(artifacts_dir)\n model = keras.Sequential()\n model.add(keras.layers.Dense(3, input_shape=[2]))\n conversion.save_keras_model(model, artifacts_dir)\n\n # Verify the content of the artifacts output directory.\n self.assertTrue(\n os.path.isfile(os.path.join(artifacts_dir, 'group1-shard1of1.bin')))\n model_json = json.load(\n open(os.path.join(artifacts_dir, 'model.json'), 'rt'))\n\n topology_json = model_json['modelTopology']\n self.assertIn('keras_version', topology_json)\n self.assertIn('backend', topology_json)\n self.assertIn('model_config', topology_json)\n\n weights_manifest = model_json['weightsManifest']\n self.assertTrue(isinstance(weights_manifest, list))\n self.assertEqual(1, len(weights_manifest))\n self.assertIn('paths', weights_manifest[0])\n\n def 
testSavedModelRaisesErrorIfArtifactsDirExistsAsAFile(self):\n artifacts_dir = os.path.join(self._tmp_dir, 'artifacts')\n with open(artifacts_dir, 'wt') as f:\n f.write('foo\\n')\n t_input = keras.Input([2])\n dense_layer = keras.layers.Dense(3)\n t_output = dense_layer(t_input)\n model = keras.Model(t_input, t_output)\n with self.assertRaisesRegexp( # pylint: disable=deprecated-method\n ValueError, r'already exists as a file'):\n conversion.save_keras_model(model, artifacts_dir)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"tensorflow.keras.Input",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Model",
"numpy.ones",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
wangxr0526/RetroPrime | [
"a765b670b72fbfd512d0d437da8f27a95f9f0554",
"a765b670b72fbfd512d0d437da8f27a95f9f0554"
] | [
"retroprime/data_process/clean_uspto.py",
"retroprime/transformer_model/onmt/modules/position_ffn_h.py"
] | [
"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport argparse\n\nimport numpy as np\nimport random\nimport csv\nimport os\nimport sys\nimport re\nfrom tqdm import tqdm\nfrom rdkit import Chem\nimport pickle as cp\n\n\ndef get_rxn_smiles(prod, reactants):\n prod_smi = Chem.MolToSmiles(prod, True)\n\n # Get rid of reactants when they don't contribute to this prod\n prod_maps = set(re.findall('\\:([[0-9]+)\\]', prod_smi))\n reactants_smi_list = []\n for mol in reactants:\n if mol is None:\n continue\n used = False\n for a in mol.GetAtoms():\n if a.HasProp('molAtomMapNumber'):\n if a.GetProp('molAtomMapNumber') in prod_maps:\n used = True \n else:\n a.ClearProp('molAtomMapNumber')\n if used:\n reactants_smi_list.append(Chem.MolToSmiles(mol, True))\n\n reactants_smi = '.'.join(reactants_smi_list)\n return '{}>>{}'.format(reactants_smi, prod_smi)\n\n\nif __name__ == '__main__':\n seed = 19260817\n np.random.seed(seed)\n random.seed(seed)\n opt = argparse.ArgumentParser()\n opt.add_argument('-fname', default='../../databox/uspto_full/1976_Sep2016_USPTOgrants_smiles.rsmi')\n args, _ = opt.parse_known_args()\n fname = args.fname\n split_mode = 'single' # single or multi\n\n pt = re.compile(r':(\\d+)]')\n cnt = 0\n clean_list = []\n set_rxn = set()\n num_single = 0\n num_multi = 0\n bad_mapping = 0\n bad_prod = 0\n missing_map = 0\n raw_num = 0\n with open(fname, 'r') as f:\n reader = csv.reader(f, delimiter='\\t')\n header = next(reader)\n print(header)\n pbar = tqdm(reader)\n bad_rxn = 0\n for row in pbar:\n rxn_smiles = row[header.index('ReactionSmiles')]\n all_reactants, reagents, prods = rxn_smiles.split('>')\n all_reactants = all_reactants.split()[0] # remove ' |f:1...'\n prods = prods.split()[0] # remove ' |f:1...'\n if '.' in prods:\n num_multi += 1\n else:\n num_single += 1\n if split_mode == 'single' and '.' 
in prods: # multiple prods\n continue\n rids = ','.join(sorted(re.findall(pt, all_reactants)))\n pids = ','.join(sorted(re.findall(pt, prods)))\n if rids != pids: # mapping is not 1:1\n bad_mapping += 1\n continue\n reactants = [Chem.MolFromSmiles(smi) for smi in all_reactants.split('.')]\n \n for sub_prod in prods.split('.'):\n mol_prod = Chem.MolFromSmiles(sub_prod)\n if mol_prod is None: # rdkit is not able to parse the product\n bad_prod += 1\n continue\n # Make sure all have atom mapping\n if not all([a.HasProp('molAtomMapNumber') for a in mol_prod.GetAtoms()]):\n missing_map += 1\n continue\n \n raw_num += 1\n rxn_smiles = get_rxn_smiles(mol_prod, reactants)\n if not rxn_smiles in set_rxn:\n clean_list.append((row[header.index('PatentNumber')], rxn_smiles))\n set_rxn.add(rxn_smiles)\n pbar.set_description('select: %d, dup: %d' % (len(clean_list), raw_num))\n print('# clean', len(clean_list))\n print('single', num_single, 'multi', num_multi)\n print('bad mapping', bad_mapping)\n print('bad prod', bad_prod)\n print('missing map', missing_map)\n print('raw extracted', raw_num)\n \n random.shuffle(clean_list)\n\n num_val = num_test = int(len(clean_list) * 0.1)\n\n out_folder = '../../databox/uspto_full/'\n for phase in ['val', 'test', 'train']:\n fout = os.path.join(out_folder, 'raw_%s.csv' % phase)\n with open(fout, 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['id', 'reactants>reagents>production'])\n\n if phase == 'val':\n r = range(num_val)\n elif phase == 'test':\n r = range(num_val, num_val + num_test)\n else:\n r = range(num_val + num_test, len(clean_list))\n for i in r:\n rxn_smiles = clean_list[i][1].split('>')\n result = []\n for r in rxn_smiles:\n if len(r.strip()):\n r = r.split()[0]\n result.append(r)\n rxn_smiles = '>'.join(result)\n writer.writerow([clean_list[i][0], rxn_smiles])\n",
"\"\"\"\nPosition feed-forward network from \"Attention is All You Need\"\n\"\"\"\n\nimport torch.nn as nn\n\nimport onmt\n\nfrom onmt.modules.hyperbolic import cLinear\n\n\nclass PositionwiseFeedForward_h(nn.Module):\n \"\"\" A two-layer Feed-Forward-Network with residual layer norm.\n\n Args:\n d_model (int): the size of input for the first-layer of the FFN.\n d_ff (int): the hidden layer size of the second-layer\n of the FNN.\n dropout (float): dropout probability(0-1.0).\n \"\"\"\n\n def __init__(self, d_model, d_ff, c, dropout=0.1):\n super(PositionwiseFeedForward_h, self).__init__()\n # self.w_1 = nn.Linear(d_model, d_ff)\n # self.w_2 = nn.Linear(d_ff, d_model)\n self.w_1 = cLinear(d_model, d_ff, c)\n self.w_2 = cLinear(d_ff, d_model, c)\n self.layer_norm = onmt.modules.LayerNorm(d_model)\n self.dropout_1 = nn.Dropout(dropout)\n self.relu = nn.ReLU()\n self.dropout_2 = nn.Dropout(dropout)\n\n def forward(self, x):\n \"\"\"\n Layer definition.\n\n Args:\n input: [ batch_size, input_len, model_dim ]\n\n\n Returns:\n output: [ batch_size, input_len, model_dim ]\n \"\"\"\n inter = self.dropout_1(self.relu(self.w_1(self.layer_norm(x))))\n output = self.dropout_2(self.w_2(inter))\n return output + x\n"
] | [
[
"numpy.random.seed"
],
[
"torch.nn.Dropout",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
liuchangtai1996/MLIP | [
"ec217d592b8c51e98729d8c2b2abe1b2d918e14f",
"ec217d592b8c51e98729d8c2b2abe1b2d918e14f"
] | [
"code/resnet_approach/prepare.py",
"code/resnet_approach/ResNet18.py"
] | [
"import numpy as np\r\nimport cv2\r\n\r\ndef load_batch(ls,od,data_path,batch_size,which_batch):\r\n image_list=[]\r\n for i in range(which_batch*batch_size,(which_batch+1)*batch_size):\r\n image=[]\r\n image.append(cv2.imread(data_path+ls[od[i]]+'_red.png',0))\r\n image.append(cv2.imread(data_path+ls[od[i]]+'_green.png',0))\r\n image.append(cv2.imread(data_path+ls[od[i]]+'_blue.png',0))\r\n image.append(cv2.imread(data_path+ls[od[i]]+'_yellow.png',0))\r\n image=np.asarray(image).T\r\n image_list.append(image)\r\n image_list=np.asarray(image_list)\r\n return image_list\r\n\r\ndef normalize(image_list):\r\n ma=max(image_list.flatten())\r\n mi=min(image_list.flatten())\r\n mean = float((ma+mi)/2.0)\r\n output = (image_list-mean)/(ma-mean)\r\n return output\r\n",
"import numpy as np\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\n# This is our neural networks class that inherits from nn.Module\r\n\r\nclass ResidualBlock(nn.Module):\r\n # Here we define our block structure\r\n def __init__(self, num_in, num_out, stride = 1):\r\n super(ResidualBlock,self).__init__()\r\n self.block = nn.Sequential(\r\n nn.Conv2d (num_in, num_out, 3, stride = stride, padding = 1).double(),\r\n nn.BatchNorm2d (num_out).double(),\r\n nn.ReLU(inplace=True).double(),\r\n nn.Conv2d(num_out, num_out, 3, stride = 1, padding = 1).double(),\r\n )\r\n self.bn = nn.BatchNorm2d (num_out).double()\r\n # add residuals\r\n if num_in != num_out or stride != 1:\r\n self.res = nn.Sequential(\r\n nn.Conv2d(num_in,num_out,1,stride = stride).double()\r\n )\r\n else:\r\n self.res = nn.Sequential()\r\n def forward(self,x):\r\n out = self.block(x)\r\n out = out + self.res(x)\r\n out = self.bn(out)\r\n out = F.relu(out)\r\n return out\r\n\r\n\r\n\r\nclass ResNet(nn.Module):\r\n \r\n # Here we define our network structure\r\n def __init__( self ):\r\n super(ResNet , self ). __init__ ()\r\n self.num_in = 64\r\n self.conv1 = nn.Sequential(\r\n nn.Conv2d (4, 64, 7, stride = 2, padding = 3). double(),\r\n nn.BatchNorm2d(64).double()\r\n )\r\n self.layer2 = self.makelayer(ResidualBlock,64,64,1,2)\r\n self.layer3 = self.makelayer(ResidualBlock,64,128,2,2)\r\n self.layer4 = self.makelayer(ResidualBlock,128,256,2,2)\r\n self.layer5 = self.makelayer(ResidualBlock,256,512,2,2)\r\n\r\n self.fc0_1 = nn.Linear (2048, 28).double()\r\n self.fc0_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc1_1 = nn.Linear (2048, 28).double()\r\n self.fc1_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc2_1 = nn.Linear (2048, 28).double()\r\n self.fc2_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc3_1 = nn.Linear (2048, 28).double()\r\n self.fc3_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc4_1 = nn.Linear (2048, 28).double()\r\n self.fc4_2 = nn.Linear (28, 2). 
double ()\r\n \r\n self.fc5_1 = nn.Linear (2048, 28).double()\r\n self.fc5_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc6_1 = nn.Linear (2048, 28).double()\r\n self.fc6_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc7_1 = nn.Linear (2048, 28).double()\r\n self.fc7_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc8_1 = nn.Linear (2048, 28).double()\r\n self.fc8_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc9_1 = nn.Linear (2048, 28).double()\r\n self.fc9_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc10_1 = nn.Linear (2048, 28).double()\r\n self.fc10_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc11_1 = nn.Linear (2048, 28).double()\r\n self.fc11_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc12_1 = nn.Linear (2048, 28).double()\r\n self.fc12_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc13_1 = nn.Linear (2048, 28).double()\r\n self.fc13_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc14_1 = nn.Linear (2048, 28).double()\r\n self.fc14_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc15_1 = nn.Linear (2048, 28).double()\r\n self.fc15_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc16_1 = nn.Linear (2048, 28).double()\r\n self.fc16_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc17_1 = nn.Linear (2048, 28).double()\r\n self.fc17_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc18_1 = nn.Linear (2048, 28).double()\r\n self.fc18_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc19_1 = nn.Linear (2048, 28).double()\r\n self.fc19_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc20_1 = nn.Linear (2048, 28).double()\r\n self.fc20_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc21_1 = nn.Linear (2048, 28).double()\r\n self.fc21_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc22_1 = nn.Linear (2048, 28).double()\r\n self.fc22_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc23_1 = nn.Linear (2048, 28).double()\r\n self.fc23_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc24_1 = nn.Linear (2048, 28).double()\r\n self.fc24_2 = nn.Linear (28, 2). 
double ()\r\n \r\n self.fc25_1 = nn.Linear (2048, 28).double()\r\n self.fc25_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc26_1 = nn.Linear (2048, 28).double()\r\n self.fc26_2 = nn.Linear (28, 2). double ()\r\n \r\n self.fc27_1 = nn.Linear (2048, 28).double()\r\n self.fc27_2 = nn.Linear (28, 2). double ()\r\n \r\n def makelayer(self,block,num_in,num_out,stride,k):\r\n layer = []\r\n for i in range(k):\r\n if i == 0:\r\n layer.append(block(num_in, num_out, stride))\r\n else:\r\n layer.append(block(num_out, num_out))\r\n return nn.Sequential(*layer)\r\n \r\n def forward(self,x):\r\n x = F.max_pool2d(F.relu(self.conv1(x)), kernel_size = 3, stride = 2)\r\n x = self.layer2(x)\r\n x = self.layer3(x)\r\n x = self.layer4(x)\r\n x = self.layer5(x)\r\n x = F.avg_pool2d(x,8)\r\n x = x.view(-1,self.num_flat_features(x))\r\n \r\n x0 = F.relu(self.fc0_1(x))\r\n x0 = self.fc0_2(x0)\r\n \r\n x1 = F.relu(self.fc1_1(x))\r\n x1 = self.fc1_2(x1)\r\n \r\n x2 = F.relu(self.fc2_1(x))\r\n x2 = self.fc2_2(x2)\r\n \r\n x3 = F.relu(self.fc3_1(x))\r\n x3 = self.fc3_2(x3)\r\n \r\n x4 = F.relu(self.fc4_1(x))\r\n x4 = self.fc4_2(x4)\r\n \r\n x5 = F.relu(self.fc5_1(x))\r\n x5 = self.fc5_2(x5)\r\n \r\n x6 = F.relu(self.fc6_1(x))\r\n x6 = self.fc6_2(x6)\r\n \r\n x7 = F.relu(self.fc7_1(x))\r\n x7 = self.fc7_2(x7)\r\n \r\n x8 = F.relu(self.fc8_1(x))\r\n x8 = self.fc8_2(x8)\r\n \r\n x9 = F.relu(self.fc9_1(x))\r\n x9 = self.fc9_2(x9)\r\n \r\n x10 = F.relu(self.fc10_1(x))\r\n x10 = self.fc10_2(x10)\r\n \r\n x11 = F.relu(self.fc11_1(x))\r\n x11 = self.fc11_2(x11)\r\n \r\n x12 = F.relu(self.fc12_1(x))\r\n x12 = self.fc12_2(x12)\r\n \r\n x13 = F.relu(self.fc13_1(x))\r\n x13 = self.fc13_2(x13)\r\n \r\n x14 = F.relu(self.fc14_1(x))\r\n x14 = self.fc14_2(x14)\r\n \r\n x15 = F.relu(self.fc15_1(x))\r\n x15 = self.fc15_2(x15)\r\n \r\n x16 = F.relu(self.fc16_1(x))\r\n x16 = self.fc16_2(x16)\r\n \r\n x17 = F.relu(self.fc17_1(x))\r\n x17 = self.fc17_2(x17)\r\n \r\n x18 = F.relu(self.fc18_1(x))\r\n x18 = 
self.fc18_2(x18)\r\n \r\n x19 = F.relu(self.fc19_1(x))\r\n x19 = self.fc19_2(x19)\r\n \r\n x20 = F.relu(self.fc20_1(x))\r\n x20 = self.fc20_2(x20)\r\n \r\n x21 = F.relu(self.fc21_1(x))\r\n x21 = self.fc21_2(x21)\r\n \r\n x22 = F.relu(self.fc22_1(x))\r\n x22 = self.fc22_2(x22)\r\n \r\n x23 = F.relu(self.fc23_1(x))\r\n x23 = self.fc23_2(x23)\r\n \r\n x24 = F.relu(self.fc24_1(x))\r\n x24 = self.fc24_2(x24)\r\n \r\n x25 = F.relu(self.fc25_1(x))\r\n x25 = self.fc25_2(x25)\r\n \r\n x26 = F.relu(self.fc26_1(x))\r\n x26 = self.fc26_2(x26)\r\n \r\n x27 = F.relu(self.fc27_1(x))\r\n x27 = self.fc27_2(x27)\r\n \r\n return x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27\r\n \r\n # Determine the number of features in a batch of tensors\r\n def num_flat_features (self , x):\r\n size = x. size ()[1:]\r\n return np. prod ( size )\r\n"
] | [
[
"numpy.asarray"
],
[
"torch.nn.Sequential",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.functional.relu",
"numpy.prod",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
19tony97/Autonomous_vheicle_drivingProject | [
"3938b132e11f7705bff2c2004ee80e1f87438565"
] | [
"behavioural_planner.py"
] | [
"#!/usr/bin/env python3\n\nimport numpy as np\nimport math\n\nfrom obstacle_detection import check_for_obs\n\n# State machine states\nFOLLOW_LANE = 0\nDECELERATE_TO_STOP = 1\nSTAY_STOPPED = 2\n# Stop speed threshold\nSTOP_THRESHOLD = 0.02\n# Number of cycles before moving from stop sign.\n\nSEMAPHORE = 3\n\nSTOP_COUNTS = 10\n\nclass BehaviouralPlanner:\n def __init__(self, lookahead, lead_vehicle_lookahead, traffic_light_state):\n self._lookahead = lookahead\n self._follow_lead_vehicle_lookahead = lead_vehicle_lookahead\n self._state = FOLLOW_LANE\n self._follow_lead_vehicle = False\n self._obstacle_on_lane = False\n self._goal_state = [0.0, 0.0, 0.0]\n self._goal_index = 0\n self._stop_count = 0\n self._lookahead_collision_index = 0\n self.traffic_light_state = traffic_light_state\n def set_lookahead(self, lookahead):\n self._lookahead = lookahead\n\n # Handles state transitions and computes the goal state.\n def transition_state(self, waypoints, ego_state, closed_loop_speed, camera_data, potential_obs):\n \"\"\"Handles state transitions and computes the goal state.\n\n args:\n waypoints: current waypoints to track (global frame).\n length and speed in m and m/s.\n (includes speed to track at each x,y location.)\n format: [[x0, y0, v0],\n [x1, y1, v1],\n ...\n [xn, yn, vn]]\n example:\n waypoints[2][1]:\n returns the 3rd waypoint's y position\n\n waypoints[5]:\n returns [x5, y5, v5] (6th waypoint)\n ego_state: ego state vector for the vehicle. (global frame)\n format: [ego_x, ego_y, ego_yaw, ego_open_loop_speed]\n ego_x and ego_y : position (m)\n ego_yaw : top-down orientation [-pi to pi]\n ego_open_loop_speed : open loop speed (m/s)\n closed_loop_speed: current (closed-loop) speed for vehicle (m/s)\n variables to set:\n self._goal_index: Goal index for the vehicle to reach\n i.e. 
waypoints[self._goal_index] gives the goal waypoint\n self._goal_state: Goal state for the vehicle to reach (global frame)\n format: [x_goal, y_goal, v_goal]\n self._state: The current state of the vehicle.\n available states:\n FOLLOW_LANE : Follow the global waypoints (lane).\n DECELERATE_TO_STOP : Decelerate to stop.\n STAY_STOPPED : Stay stopped.\n self._stop_count: Counter used to count the number of cycles which\n the vehicle was in the STAY_STOPPED state so far.\n useful_constants:\n STOP_THRESHOLD : Stop speed threshold (m). The vehicle should fully\n stop when its speed falls within this threshold.\n STOP_COUNTS : Number of cycles (simulation iterations)\n before moving from stop sign.\n \"\"\"\n # In this state, continue tracking the lane by finding the\n # goal index in the waypoint list that is within the lookahead\n # distance. Then, check to see if the waypoint path intersects\n # with any stop lines. If it does, then ensure that the goal\n # state enforces the car to be stopped before the stop line.\n # You should use the get_closest_index(), get_goal_index(), and\n # check_for_stop_signs() helper functions.\n # Make sure that get_closest_index() and get_goal_index() functions are\n # complete, and examine the check_for_stop_signs() function to\n # understand it.\n if self._state == FOLLOW_LANE:\n print(\"FOLLOW_LANE\")\n # First, find the closest index to the ego vehicle.\n update_waypoints(self,waypoints,ego_state)\n\n tl_state = self.traffic_light_state.detect_traffic_light(camera_data)\n #TODO add trafficlight detection and collision_prediction\n # to change from state \"FOLLOW_LANE\" to state \"DECELERATE_TO_STOP\"\n \n\n if check_for_obs(potential_obs, ego_state,is_collision = False) or tl_state == 1:\n self._goal_state[2] = 0\n self._state = DECELERATE_TO_STOP\n \n elif self._state == DECELERATE_TO_STOP:\n print(\"DECELERATE TO STOP...\")\n update_waypoints(self,waypoints,ego_state)\n tl_state = 
self.traffic_light_state.detect_traffic_light(camera_data)\n if abs(closed_loop_speed) <= STOP_THRESHOLD:\n self._goal_state[2]=0\n self._state = STAY_STOPPED\n self._stop_count = 0\n elif tl_state != 1 and not check_for_obs(potential_obs, ego_state,is_collision = False):\n self._state = FOLLOW_LANE\n\n\n\n\n # In this state, check to see if we have stayed stopped for at\n # least STOP_COUNTS number of cycles. If so, we can now leave\n # the stop sign and transition to the next state.\n elif self._state == STAY_STOPPED:\n print(\"STAY_STOPPED\")\n #TODO here make sure if no restriction change to state \"FOLLOW_LANE\"\n # We have stayed stopped for the required number of cycles.\n # Allow the ego vehicle to leave the stop sign. Once it has\n # passed the stop sign, return to lane following.\n # You should use the get_closest_index(), get_goal_index(), and\n # check_for_stop_signs() helper functions.\n print(\"Waiting: \" + str(self._stop_count))\n tl_state = self.traffic_light_state.detect_traffic_light(camera_data)\n if self._stop_count == STOP_COUNTS:\n \n update_waypoints(self, waypoints, ego_state)\n\n if tl_state != 1 and not check_for_obs(potential_obs, ego_state,is_collision = False):\n self._state = FOLLOW_LANE\n self._stop_count = 0\n\n elif tl_state != 1 and not check_for_obs(potential_obs, ego_state,is_collision = False):\n self._stop_count += 1\n\n else:\n raise ValueError('Invalid state value.')\n\n # Gets the goal index in the list of waypoints, based on the lookahead and\n # the current ego state. 
In particular, find the earliest waypoint that has accumulated\n # arc length (including closest_len) that is greater than or equal to self._lookahead.\n def get_goal_index(self, waypoints, ego_state, closest_len, closest_index):\n \"\"\"Gets the goal index for the vehicle.\n\n Set to be the earliest waypoint that has accumulated arc length\n accumulated arc length (including closest_len) that is greater than or\n equal to self._lookahead.\n\n args:\n waypoints: current waypoints to track. (global frame)\n length and speed in m and m/s.\n (includes speed to track at each x,y location.)\n format: [[x0, y0, v0],\n [x1, y1, v1],\n ...\n [xn, yn, vn]]\n example:\n waypoints[2][1]:\n returns the 3rd waypoint's y position\n\n waypoints[5]:\n returns [x5, y5, v5] (6th waypoint)\n ego_state: ego state vector for the vehicle. (global frame)\n format: [ego_x, ego_y, ego_yaw, ego_open_loop_speed]\n ego_x and ego_y : position (m)\n ego_yaw : top-down orientation [-pi to pi]\n ego_open_loop_speed : open loop speed (m/s)\n closest_len: length (m) to the closest waypoint from the vehicle.\n closest_index: index of the waypoint which is closest to the vehicle.\n i.e. waypoints[closest_index] gives the waypoint closest to the vehicle.\n returns:\n wp_index: Goal index for the vehicle to reach\n i.e. waypoints[wp_index] gives the goal waypoint\n \"\"\"\n # Find the farthest point along the path that is within the\n # lookahead distance of the ego vehicle.\n # Take the distance from the ego vehicle to the closest waypoint into\n # consideration.\n arc_length = closest_len\n wp_index = closest_index\n \n\n # In this case, reaching the closest waypoint is already far enough for\n # the planner. 
No need to check additional waypoints.\n if arc_length > self._lookahead:\n return wp_index\n\n # We are already at the end of the path.\n if wp_index == len(waypoints) - 1:\n return wp_index\n\n # Otherwise, find our next waypoint.\n while wp_index < len(waypoints) - 1:\n arc_length += np.sqrt((waypoints[wp_index][0] - waypoints[wp_index+1][0])**2 + (waypoints[wp_index][1] - waypoints[wp_index+1][1])**2)\n if arc_length > self._lookahead: break\n wp_index += 1\n\n return wp_index % len(waypoints)\n\n # Checks to see if we need to modify our velocity profile to accomodate the\n # lead vehicle.\n\n\n def check_for_lead_vehicle(self, ego_state, lead_car_position):\n \"\"\"Checks for lead vehicle within the proximity of the ego car, such\n that the ego car should begin to follow the lead vehicle.\n\n args:\n ego_state: ego state vector for the vehicle. (global frame)\n format: [ego_x, ego_y, ego_yaw, ego_open_loop_speed]\n ego_x and ego_y : position (m)\n ego_yaw : top-down orientation [-pi to pi]\n ego_open_loop_speed : open loop speed (m/s)\n lead_car_position: The [x, y] position of the lead vehicle.\n Lengths are in meters, and it is in the global frame.\n sets:\n self._follow_lead_vehicle: Boolean flag on whether the ego vehicle\n should follow (true) the lead car or not (false).\n \"\"\"\n # Check lead car position delta vector relative to heading, as well as\n # distance, to determine if car should be followed.\n # Check to see if lead vehicle is within range, and is ahead of us.\n if not self._follow_lead_vehicle:\n # Compute the angle between the normalized vector between the lead vehicle\n # and ego vehicle position with the ego vehicle's heading vector.\n lead_car_delta_vector = [lead_car_position[0] - ego_state[0],\n lead_car_position[1] - ego_state[1]]\n lead_car_distance = np.linalg.norm(lead_car_delta_vector)\n # In this case, the car is too far away.\n if lead_car_distance > self._follow_lead_vehicle_lookahead:\n return\n\n lead_car_delta_vector = 
np.divide(lead_car_delta_vector,\n lead_car_distance)\n ego_heading_vector = [math.cos(ego_state[2]),\n math.sin(ego_state[2])]\n # Check to see if the relative angle between the lead vehicle and the ego\n # vehicle lies within +/- 45 degrees of the ego vehicle's heading.\n if np.dot(lead_car_delta_vector,\n ego_heading_vector) < (1 / math.sqrt(2)):\n return\n\n self._follow_lead_vehicle = True\n\n else:\n lead_car_delta_vector = [lead_car_position[0] - ego_state[0],\n lead_car_position[1] - ego_state[1]]\n lead_car_distance = np.linalg.norm(lead_car_delta_vector)\n\n # Add a 15m buffer to prevent oscillations for the distance check.\n if lead_car_distance < self._follow_lead_vehicle_lookahead + 15:\n return\n # Check to see if the lead vehicle is still within the ego vehicle's\n # frame of view.\n lead_car_delta_vector = np.divide(lead_car_delta_vector, lead_car_distance)\n ego_heading_vector = [math.cos(ego_state[2]), math.sin(ego_state[2])]\n if np.dot(lead_car_delta_vector, ego_heading_vector) > (1 / math.sqrt(2)):\n return\n\n self._follow_lead_vehicle = False\n\n# Compute the waypoint index that is closest to the ego vehicle, and return\n# it as well as the distance from the ego vehicle to that waypoint.\ndef get_closest_index(waypoints, ego_state):\n \"\"\"Gets closest index a given list of waypoints to the vehicle position.\n\n args:\n waypoints: current waypoints to track. (global frame)\n length and speed in m and m/s.\n (includes speed to track at each x,y location.)\n format: [[x0, y0, v0],\n [x1, y1, v1],\n ...\n [xn, yn, vn]]\n example:\n waypoints[2][1]:\n returns the 3rd waypoint's y position\n\n waypoints[5]:\n returns [x5, y5, v5] (6th waypoint)\n ego_state: ego state vector for the vehicle. 
(global frame)\n format: [ego_x, ego_y, ego_yaw, ego_open_loop_speed]\n ego_x and ego_y : position (m)\n ego_yaw : top-down orientation [-pi to pi]\n ego_open_loop_speed : open loop speed (m/s)\n\n returns:\n [closest_len, closest_index]:\n closest_len: length (m) to the closest waypoint from the vehicle.\n closest_index: index of the waypoint which is closest to the vehicle.\n i.e. waypoints[closest_index] gives the waypoint closest to the vehicle.\n \"\"\"\n closest_len = float('Inf')\n closest_index = 0\n\n for i in range(len(waypoints)):\n temp = (waypoints[i][0] - ego_state[0])**2 + (waypoints[i][1] - ego_state[1])**2\n if temp < closest_len:\n closest_len = temp\n closest_index = i\n closest_len = np.sqrt(closest_len)\n\n return closest_len, closest_index\n\n# Checks if p2 lies on segment p1-p3, if p1, p2, p3 are collinear.\ndef pointOnSegment(p1, p2, p3):\n if (p2[0] <= max(p1[0], p3[0]) and (p2[0] >= min(p1[0], p3[0])) and \\\n (p2[1] <= max(p1[1], p3[1])) and (p2[1] >= min(p1[1], p3[1]))):\n return True\n else:\n return False\n\n\ndef update_waypoints(self, waypoints, ego_state):\n\n #First, find the closest index to the ego vehicle\n closest_len, closest_index = get_closest_index(waypoints, ego_state)\n\n # Next, find the goal index that lies within the lookahed distance\n # along the waypoints\n goal_index = self.get_goal_index(waypoints, ego_state, closest_len, closest_index)\n while waypoints[goal_index][2] <= 0.1: goal_index += 1\n\n self._goal_index = goal_index\n self._goal_state = waypoints[goal_index]"
] | [
[
"numpy.dot",
"numpy.linalg.norm",
"numpy.sqrt",
"numpy.divide"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
QuintonWeenink/pso | [
"f6fbb4c5889745d5747b367ecb3c9833805d9963"
] | [
"mlpy/particleSwarmOptimization/structure/particle.py"
] | [
"import numpy as np\n\nclass Particle(object):\n def __init__(self, bounds, weight, cognitiveConstant, socialConstant):\n self.position = None # particle position\n self.velocity = None # particle velocity\n\n self.best_position = None # best position individual\n self.best_error = float('inf') # best error individual\n\n self.error = None # error individual\n\n self.num_dimensions = None\n self.weight = weight\n self.cognitiveConstant = cognitiveConstant\n self.socialConstant = socialConstant\n self.bounds = bounds\n\n self.neighbourhood = []\n\n def initPos(self, position, velocity):\n self.num_dimensions = len(position)\n\n self.position = np.array(position)\n self.velocity = np.array(velocity)\n\n def getPersonalBest(self):\n if self.error < self.best_error:\n self.best_position = np.array(self.position)\n self.best_error = self.error\n\n return self.best_error\n\n def update_velocity(self, group_best_position):\n r1 = np.random.random(self.num_dimensions)\n r2 = np.random.random(self.num_dimensions)\n\n vel_cognitive = self.cognitiveConstant * r1 * (self.best_position - self.position)\n vel_social = self.socialConstant * r2 * (group_best_position - self.position)\n vel_inertia = self.weight * self.velocity\n self.velocity = vel_inertia + vel_cognitive + vel_social\n\n return self.velocity\n\n def update_position(self):\n self.position = self.position + self.velocity\n\n return self.velocity\n\n\n def toString(self):\n return ('\\tPosition: {position}\\n'+\n '\\tBest Position: {pos_best}\\n' +\n '\\tError: {err}\\n').format(position=self.position,\n pos_best=self.best_position,\n err=self.error)\n"
] | [
[
"numpy.array",
"numpy.random.random"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JeremieMelo/pytorch-onn | [
"670996112277a6c19c7da400afbe0a4ce45ad5de"
] | [
"examples/core/models/mzi_cnn.py"
] | [
"\"\"\"\nDescription:\nAuthor: Jiaqi Gu ([email protected])\nDate: 2021-06-07 03:43:40\nLastEditors: Jiaqi Gu ([email protected])\nLastEditTime: 2021-06-07 03:43:40\n\"\"\"\n\nfrom torchonn.op.mzi_op import project_matrix_to_unitary\nfrom typing import List, Union\n\nimport torch\nfrom torch import Tensor, nn\nfrom torch.types import Device, _size\nfrom torchonn.layers import MZIBlockConv2d, MZIBlockLinear\nfrom torchonn.models import ONNBaseModel\nfrom collections import OrderedDict\n\n__all__ = [\"MZI_CLASS_CNN\"]\n\n\nclass ConvBlock(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: int = 3,\n stride: Union[int, _size] = 1,\n padding: Union[int, _size] = 0,\n dilation: _size = 1,\n groups: int = 1,\n bias: bool = False,\n miniblock: int = 8,\n mode: str = \"weight\",\n decompose_alg: str = \"clements\",\n photodetect: bool = False,\n device: Device = torch.device(\"cuda\"),\n ) -> None:\n super().__init__()\n self.conv = MZIBlockConv2d(\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias=bias,\n miniblock=miniblock,\n mode=mode,\n decompose_alg=decompose_alg,\n photodetect=photodetect,\n device=device,\n )\n\n self.bn = nn.BatchNorm2d(out_channels)\n\n self.activation = nn.ReLU(inplace=True)\n\n def forward(self, x: Tensor) -> Tensor:\n return self.activation(self.bn(self.conv(x)))\n\n\nclass LinearBlock(nn.Module):\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = False,\n miniblock: int = 8,\n mode: str = \"weight\",\n decompose_alg: str = \"clements\",\n photodetect: bool = False,\n activation: bool = True,\n device: Device = torch.device(\"cuda\"),\n ) -> None:\n super().__init__()\n self.linear = MZIBlockLinear(\n in_features,\n out_features,\n bias=bias,\n miniblock=miniblock,\n mode=mode,\n decompose_alg=decompose_alg,\n photodetect=photodetect,\n device=device,\n )\n\n self.activation = nn.ReLU(inplace=True) 
if activation else None\n\n def forward(self, x: Tensor) -> Tensor:\n x = self.linear(x)\n if self.activation is not None:\n x = self.activation(x)\n return x\n\n\nclass MZI_CLASS_CNN(ONNBaseModel):\n \"\"\"\n MZI CNN for classification.\n Blocking matrix multiplication, which is much faster and more scalable than implementing the entire weight matrix on an MZI array.\n Each block is implemented by a square MZI array\n\n \"\"\"\n\n _conv_linear = (MZIBlockConv2d, MZIBlockLinear)\n _conv = (MZIBlockConv2d,)\n _linear = (MZIBlockLinear,)\n\n def __init__(\n self,\n img_height: int,\n img_width: int,\n in_channels: int,\n num_classes: int,\n kernel_list: List[int] = [32],\n kernel_size_list: List[int] = [3],\n stride_list: List[int] = [1],\n padding_list: List[int] = [1],\n dilation_list: List[int] = [1],\n pool_out_size: int = 5,\n hidden_list: List[int] = [32],\n block_list: List[int] = [8],\n mode: str = \"usv\",\n decompose_alg: str = \"clements\",\n photodetect: bool = True,\n bias: bool = False,\n device: Device = torch.device(\"cuda\"),\n ) -> None:\n super().__init__()\n self.img_height = img_height\n self.img_width = img_width\n self.in_channels = in_channels\n self.num_classes = num_classes\n self.kernel_list = kernel_list\n self.kernel_size_list = kernel_size_list\n self.stride_list = stride_list\n self.padding_list = padding_list\n self.dilation_list = dilation_list\n\n self.pool_out_size = pool_out_size\n\n self.hidden_list = hidden_list\n self.block_list = block_list\n self.mode = mode\n self.decompose_alg = decompose_alg\n\n self.photodetect = photodetect\n self.bias = bias\n\n self.device = device\n\n self.build_layers()\n\n self.reset_parameters()\n\n def build_layers(self):\n self.features = OrderedDict()\n for idx, out_channels in enumerate(self.kernel_list, 0):\n layer_name = \"conv\" + str(idx + 1)\n in_channels = self.in_channels if (idx == 0) else self.kernel_list[idx - 1]\n\n self.features[layer_name] = ConvBlock(\n in_channels,\n 
out_channels,\n kernel_size=self.kernel_size_list[idx],\n stride=self.stride_list[idx],\n padding=self.padding_list[idx],\n dilation=self.dilation_list[idx],\n groups=1,\n bias=self.bias,\n miniblock=self.block_list[idx],\n mode=self.mode,\n decompose_alg=self.decompose_alg,\n photodetect=self.photodetect,\n device=self.device,\n )\n self.features = nn.Sequential(self.features)\n\n if self.pool_out_size > 0:\n self.pool2d = nn.AdaptiveAvgPool2d(self.pool_out_size)\n feature_size = self.kernel_list[-1] * self.pool_out_size * self.pool_out_size\n else:\n self.pool2d = None\n img_height, img_width = self.img_height, self.img_width\n for layer in self.modules():\n if isinstance(layer, self._conv):\n img_height, img_width = layer.get_output_dim(img_height, img_width)\n feature_size = img_height * img_width * self.kernel_list[-1]\n\n self.classifier = OrderedDict()\n for idx, hidden_dim in enumerate(self.hidden_list, 0):\n layer_name = \"fc\" + str(idx + 1)\n in_channel = feature_size if idx == 0 else self.hidden_list[idx - 1]\n out_channel = hidden_dim\n self.classifier[layer_name] = LinearBlock(\n in_channel,\n out_channel,\n bias=self.bias,\n miniblock=self.block_list[idx + len(self.kernel_list)],\n mode=self.mode,\n decompose_alg=self.decompose_alg,\n photodetect=self.photodetect,\n activation=True,\n device=self.device,\n )\n\n layer_name = \"fc\" + str(len(self.hidden_list) + 1)\n self.classifier[layer_name] = LinearBlock(\n self.hidden_list[-1] if len(self.hidden_list) > 0 else feature_size,\n self.num_classes,\n bias=self.bias,\n miniblock=self.block_list[-1],\n mode=self.mode,\n decompose_alg=self.decompose_alg,\n photodetect=self.photodetect,\n activation=False,\n device=self.device,\n )\n self.classifier = nn.Sequential(self.classifier)\n\n def unitary_projection(self) -> None:\n assert self.mode == \"usv\", \"Unitary projection can only be applied in usv mode\"\n for m in self.modules():\n if isinstance(m, self._conv_linear):\n 
m.U.data.copy_(project_matrix_to_unitary(m.U.data))\n m.V.data.copy_(project_matrix_to_unitary(m.V.data))\n\n def forward(self, x: Tensor) -> Tensor:\n x = self.features(x)\n if self.pool2d is not None:\n x = self.pool2d(x)\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n\n return x\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.flatten",
"torch.device",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
888yzbt888/Pensieve-multiagent | [
"b5409c949a4855afedc910de5dd6eabe076567cc"
] | [
"demo/demo.py"
] | [
"import time\nimport bisect\nimport cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib import gridspec\n\n\nVIDEO_FILE = 'la_luna.mp4'\nRL_BITRATE_FILE = './rl_bitrate'\nRL_BUFFER_FILE = './rl_buffer'\nMPC_BITRATE_FILE = './mpc_bitrate'\nMPC_BUFFER_FILE = './mpc_buffer'\nTRACE_FILE = './network_trace'\nSKIP_FRAMES = 8820\nTOTAL_FRAMES = 2000\nCHUNK_LEN = 4.0\nALL_BITRATES = {0.3, 0.75, 1.2}\n\ncap = cv.VideoCapture(VIDEO_FILE)\nkern_map = {0.3: np.ones((12, 12), np.float32) / 144, \n\t\t\t0.75: np.ones((6, 6), np.float32) / 36, \n\t\t\t1.2: np.ones((1, 1), np.float32) / 1}\ntext_map = {0.3: '240P',\n\t\t\t0.75: '360P',\n\t\t\t1.2: '720P'}\n\ndef read_file(FILE_NAME):\n\tts = []\n\tvs = []\n\twith open(FILE_NAME, 'rb') as f:\n\t\tfor line in f:\n\t\t\tparse = line.split()\n\t\t\tif len(parse) != 2:\n\t\t\t\tbreak\n\t\t\tts.append(float(parse[0]))\n\t\t\tvs.append(float(parse[1]))\n\treturn ts, vs\n\nrl_bitrates_ts, rl_bitrates = read_file(RL_BITRATE_FILE)\nrl_buffer_ts, rl_buffers = read_file(RL_BUFFER_FILE)\nmpc_bitrates_ts, mpc_bitrates = read_file(MPC_BITRATE_FILE)\nmpc_buffer_ts, mpc_buffers = read_file(MPC_BUFFER_FILE)\ntrace_ts, trace_bw = read_file(TRACE_FILE)\n\nprint (\" -- Processing videos -- \")\nall_processed_frames = {}\nfor br in ALL_BITRATES:\n\tall_processed_frames[br] = []\n\nfor _ in xrange(SKIP_FRAMES):\n\t_, frame = cap.read()\t\n\n# while(cap.isOpened()):\nfor f in xrange(TOTAL_FRAMES):\n\tprint ('frame', f)\n\t_, frame = cap.read()\n\tframe = cv.cvtColor(frame, cv.COLOR_BGR2RGB)\n\tfor br in ALL_BITRATES:\n\t\tprocessed_frame = cv.filter2D(frame, -1, kern_map[br])\n\t\tall_processed_frames[br].append(processed_frame)\n\nframe = all_processed_frames[1.2][0]\n\nfig = plt.figure(figsize=(14, 6))\n\ngs = gridspec.GridSpec(6, 6)\nax1 = plt.subplot(gs[:3, :3])\nax2 = plt.subplot(gs[3:, :3])\nax3 = plt.subplot(gs[:2, 3:])\nax4 = plt.subplot(gs[2:4, 3:])\nax5 = plt.subplot(gs[4:, 
3:])\n\nbbox_props = dict(boxstyle=\"round\", fc=\"gray\", ec=\"0.5\", alpha=0.5)\n\nimg1 = ax1.imshow(frame)\nax1.set_ylabel('RL', size=21, color='#f23030')\nax1.xaxis.set_tick_params(bottom='off', labelbottom='off') \nax1.yaxis.set_tick_params(left='off', labelleft='off')\ntext1 = ax1.text(1150, 650, \"240P\", color=\"white\", ha=\"center\", va=\"center\", size=16, bbox=bbox_props)\n\nimg2 = ax2.imshow(frame)\nax2.set_ylabel('MPC', size=21, color='#2127dd')\nax2.xaxis.set_tick_params(bottom='off', labelbottom='off') \nax2.yaxis.set_tick_params(left='off', labelleft='off')\ntext2 = ax2.text(1150, 650, \"240P\", color=\"white\", ha=\"center\", va=\"center\", size=16, bbox=bbox_props)\n\nax3.plot(rl_buffer_ts, rl_buffers, color='#f23030')\nbar1, = ax3.plot([0, 0], [0, 25], color=\"orange\", alpha=0.5)\nax3.set_ylabel('RL buffer (sec)')\nax3.set_xlim([-5, 105])\nax3.set_ylim([-2, 26])\nax3.xaxis.set_tick_params(labelbottom='off') \n# ax3.yaxis.set_tick_params(labelleft='off') \n\nax4.plot(trace_ts, trace_bw, color='#1c1c1c', alpha=0.9)\nbar2, = ax4.plot([0, 0], [0.4, 2.3], color=\"orange\", alpha=0.5)\nax4.set_ylabel('Throughput (mbps)')\nax4.set_xlim([-5, 105])\nax4.xaxis.set_tick_params(labelbottom='off') \n# ax4.yaxis.set_tick_params(labelleft='off') \n\nax5.plot(mpc_buffer_ts, mpc_buffers, color='#2127dd')\nbar3, = ax5.plot([0, 0], [0, 25], color=\"orange\", alpha=0.5)\nax5.set_ylabel('MPC buffer (sec)')\nax5.set_xlim([-5, 105])\nax3.set_ylim([-2, 26])\nax5.xaxis.set_tick_params(labelbottom='off') \n# ax5.yaxis.set_tick_params(labelleft='off') \nax5.set_xlabel('Time')\n\nrolling_ts = np.linspace(0, 4 * len(rl_bitrates) - 4, len(rl_bitrates) * 20)\ndef get_frame_quality(rolling_ts, bitrates_ts, bitrates, buffer_ts, buffers):\n\tframe_quality = {}\n\ttext_quality = {}\n\n\tlast_frame = 0\n\tfor t in rolling_ts:\n\t\tbr_pt = bisect.bisect(bitrates_ts, t) - 1\n\t\tbuf_pt = bisect.bisect(buffer_ts, t) - 1\n\n\t\tif buffers[buf_pt] > 0.05:\n\t\t\tlast_frame = 
(last_frame + 2) % TOTAL_FRAMES\n\n\t\tframe_quality[t] = all_processed_frames[bitrates[br_pt]][last_frame]\n\t\ttext_quality[t] = text_map[bitrates[br_pt]]\n\n\treturn frame_quality, text_quality\n\nrl_frame_quality, rl_text_quality = get_frame_quality(\n\trolling_ts, rl_bitrates_ts, rl_bitrates, rl_buffer_ts, rl_buffers)\nmpc_frame_quality, mpc_text_quality = get_frame_quality(\n\trolling_ts, mpc_bitrates_ts, mpc_bitrates, mpc_buffer_ts, mpc_buffers)\n\ndef animate(i):\n\tbar1.set_data([i, i], [0, 25])\n\tbar2.set_data([i, i], [0.4, 2.3])\n\tbar3.set_data([i, i], [0, 25])\n\n\timg1.set_data(rl_frame_quality[i])\n\timg2.set_data(mpc_frame_quality[i])\n\n\ttext1.set_text(rl_text_quality[i])\n\ttext2.set_text(mpc_text_quality[i])\n\t\n\treturn bar1, bar2, bar3, img1, img2, text1, text2\n\nani = animation.FuncAnimation(fig, animate, rolling_ts, \n\t\t\t\t\t\t\t interval=50, blit=True)\n\n# plt.show()\n\n# Set up formatting for the movie files\nWriter = animation.writers['ffmpeg']\nwriter = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)\n\nani.save('demo.mp4', writer=writer)\n"
] | [
[
"numpy.ones",
"matplotlib.pyplot.subplot",
"matplotlib.animation.FuncAnimation",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
caoyi0905/Surprise | [
"1a707b32a3a68b0eb3f23b829b57604a131e00f7",
"1a707b32a3a68b0eb3f23b829b57604a131e00f7"
] | [
"tests/test_dataset.py",
"surprise/prediction_algorithms/knns.py"
] | [
"\"\"\"\nModule for testing the Dataset class.\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport os\nimport random\n\nimport pytest\nimport pandas as pd\n\nfrom surprise import BaselineOnly\nfrom surprise import Dataset\nfrom surprise import Reader\nfrom surprise.builtin_datasets import get_dataset_dir\n\n\nrandom.seed(1)\n\n\ndef test_wrong_file_name():\n \"\"\"Ensure file names are checked when creating a (custom) Dataset.\"\"\"\n wrong_files = [('does_not_exist', 'does_not_either')]\n\n with pytest.raises(ValueError):\n Dataset.load_from_folds(folds_files=wrong_files, reader=Reader(),\n rating_scale=(1, 5))\n\n\ndef test_build_full_trainset(toy_data):\n \"\"\"Test the build_full_trainset method.\"\"\"\n\n trainset = toy_data.build_full_trainset()\n\n assert len(trainset.ur) == 5\n assert len(trainset.ir) == 2\n assert trainset.n_users == 5\n assert trainset.n_items == 2\n\n\ndef test_no_call_to_split(toy_data):\n \"\"\"Ensure, as mentioned in the split() docstring, that even if split is not\n called then the data is split with 5 folds after being shuffled.\"\"\"\n\n with pytest.warns(UserWarning):\n assert len(list(toy_data.folds())) == 5\n\n # make sure data has been shuffled. If not shuffled, the users in the\n # testsets would be 0, 1, 2... 4 (in that order).\n with pytest.warns(UserWarning):\n users = [int(testset[0][0][-1])\n for (_, testset) in toy_data.folds()]\n assert users != list(range(5))\n\n\ndef test_split(toy_data):\n \"\"\"Test the split method.\"\"\"\n\n # Test the shuffle parameter\n # Make sure data has not been shuffled. If not shuffled, the users in the\n # testsets are 0, 1, 2... 
4 (in that order).\n with pytest.warns(UserWarning):\n toy_data.split(n_folds=5, shuffle=False)\n users = [int(testset[0][0][-1])\n for (_, testset) in toy_data.folds()]\n assert users == list(range(5))\n\n # Test the shuffle parameter\n # Make sure that when called two times without shuffling, folds are the\n # same.\n with pytest.warns(UserWarning):\n toy_data.split(n_folds=3, shuffle=False)\n testsets_a = [testset for (_, testset) in toy_data.folds()]\n toy_data.split(n_folds=3, shuffle=False)\n testsets_b = [testset for (_, testset) in toy_data.folds()]\n assert testsets_a == testsets_b\n\n # We'll now shuffle b and check that folds are different.\n with pytest.warns(UserWarning):\n toy_data.split(n_folds=3, shuffle=True)\n testsets_b = [testset for (_, testset) in toy_data.folds()]\n assert testsets_a != testsets_b\n\n # Ensure that folds are the same if split is not called again\n with pytest.warns(UserWarning):\n testsets_a = [testset for (_, testset) in toy_data.folds()]\n testsets_b = [testset for (_, testset) in toy_data.folds()]\n assert testsets_a == testsets_b\n\n # Test n_folds parameter\n with pytest.warns(UserWarning):\n toy_data.split(5)\n assert len(list(toy_data.folds())) == 5\n\n with pytest.raises(ValueError):\n toy_data.split(10) # Too big (greater than number of ratings)\n\n with pytest.raises(ValueError):\n toy_data.split(1) # Too low (must be >= 2)\n\n\ndef test_trainset_testset(toy_data_reader):\n \"\"\"Test the construct_trainset and construct_testset methods.\"\"\"\n\n current_dir = os.path.dirname(os.path.realpath(__file__))\n folds_files = [(current_dir + '/custom_train',\n current_dir + '/custom_test')]\n\n data = Dataset.load_from_folds(folds_files=folds_files,\n reader=toy_data_reader, rating_scale=(1, 5))\n\n with pytest.warns(UserWarning):\n trainset, testset = next(data.folds())\n\n # test ur\n ur = trainset.ur\n assert ur[0] == [(0, 4)]\n assert ur[1] == [(0, 4), (1, 2)]\n assert ur[40] == [] # not in the trainset\n\n # test 
ir\n ir = trainset.ir\n assert ir[0] == [(0, 4), (1, 4), (2, 1)]\n assert ir[1] == [(1, 2), (2, 1), (3, 5)]\n assert ir[20000] == [] # not in the trainset\n\n # test n_users, n_items, n_ratings, rating_scale\n assert trainset.n_users == 4\n assert trainset.n_items == 2\n assert trainset.n_ratings == 6\n assert trainset.rating_scale == (1, 5)\n\n # test raw2inner\n for i in range(4):\n assert trainset.to_inner_uid('user' + str(i)) == i\n with pytest.raises(ValueError):\n trainset.to_inner_uid('unkown_user')\n\n for i in range(2):\n assert trainset.to_inner_iid('item' + str(i)) == i\n with pytest.raises(ValueError):\n trainset.to_inner_iid('unkown_item')\n\n # test inner2raw\n assert trainset._inner2raw_id_users is None\n assert trainset._inner2raw_id_items is None\n for i in range(4):\n assert trainset.to_raw_uid(i) == 'user' + str(i)\n for i in range(2):\n assert trainset.to_raw_iid(i) == 'item' + str(i)\n assert trainset._inner2raw_id_users is not None\n assert trainset._inner2raw_id_items is not None\n\n # Test the build_testset() method\n algo = BaselineOnly()\n algo.fit(trainset)\n testset = trainset.build_testset()\n algo.test(testset) # ensure an algorithm can manage the data\n assert ('user0', 'item0', 4) in testset\n assert ('user3', 'item1', 5) in testset\n assert ('user3', 'item1', 0) not in testset\n\n # Test the build_anti_testset() method\n algo = BaselineOnly()\n algo.fit(trainset)\n testset = trainset.build_anti_testset()\n algo.test(testset) # ensure an algorithm can manage the data\n assert ('user0', 'item0', trainset.global_mean) not in testset\n assert ('user3', 'item1', trainset.global_mean) not in testset\n assert ('user0', 'item1', trainset.global_mean) in testset\n assert ('user3', 'item0', trainset.global_mean) in testset\n\n\ndef test_load_form_df():\n \"\"\"Ensure reading dataset from pandas dataframe is OK.\"\"\"\n\n # DF creation.\n ratings_dict = {'itemID': [1, 1, 1, 2, 2],\n 'userID': [9, 32, 2, 45, '10000'],\n 'rating': [3, 2, 4, 3, 
1]}\n df = pd.DataFrame(ratings_dict)\n\n data = Dataset.load_from_df(df[['userID', 'itemID', 'rating']],\n rating_scale=(1, 5))\n\n # Assert split and folds can be used without problems\n with pytest.warns(UserWarning):\n data.split(2)\n assert sum(1 for _ in data.folds()) == 2\n\n # assert users and items are correctly mapped\n trainset = data.build_full_trainset()\n assert trainset.knows_user(trainset.to_inner_uid(9))\n assert trainset.knows_user(trainset.to_inner_uid('10000'))\n assert trainset.knows_item(trainset.to_inner_iid(2))\n\n # assert r(9, 1) = 3 and r(2, 1) = 4\n uid9 = trainset.to_inner_uid(9)\n uid2 = trainset.to_inner_uid(2)\n iid1 = trainset.to_inner_iid(1)\n assert trainset.ur[uid9] == [(iid1, 3)]\n assert trainset.ur[uid2] == [(iid1, 4)]\n\n # mess up the column ordering and assert that users are not correctly\n # mapped\n data = Dataset.load_from_df(df[['rating', 'itemID', 'userID']],\n rating_scale=(1, 5))\n trainset = data.build_full_trainset()\n with pytest.raises(ValueError):\n trainset.to_inner_uid('10000')\n\n\ndef test_build_anti_testset():\n ratings_dict = {'itemID': [1, 2, 3, 4, 5, 6, 7, 8, 9],\n 'userID': [1, 2, 3, 4, 5, 6, 7, 8, 9],\n 'rating': [1, 2, 3, 4, 5, 6, 7, 8, 9]}\n df = pd.DataFrame(ratings_dict)\n\n data = Dataset.load_from_df(df[['userID', 'itemID', 'rating']],\n rating_scale=(1, 5))\n with pytest.warns(UserWarning):\n data.split(2)\n trainset, __testset = next(data.folds())\n # fill with some specific value\n for fillvalue in (0, 42., -1):\n anti = trainset.build_anti_testset(fill=fillvalue)\n for (u, i, r) in anti:\n assert r == fillvalue\n # fill with global_mean\n anti = trainset.build_anti_testset(fill=None)\n for (u, i, r) in anti:\n assert r == trainset.global_mean\n expect = trainset.n_users * trainset.n_items\n assert trainset.n_ratings + len(anti) == expect\n\n\ndef test_get_dataset_dir():\n '''Test the get_dataset_dir() function.'''\n\n os.environ['SURPRISE_DATA_FOLDER'] = '/tmp/surprise_data'\n assert 
get_dataset_dir() == '/tmp/surprise_data'\n\n # Fall back to default\n del os.environ['SURPRISE_DATA_FOLDER']\n assert get_dataset_dir() == os.path.expanduser('~' + '/.surprise_data/')\n",
"\"\"\"\nthe :mod:`knns` module includes some k-NN inspired algorithms.\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport numpy as np\nfrom six import iteritems\nimport heapq\n\nfrom .predictions import PredictionImpossible\nfrom .algo_base import AlgoBase\n\n\n# Important note: as soon as an algorithm uses a similarity measure, it should\n# also allow the bsl_options parameter because of the pearson_baseline\n# similarity. It can be done explicitely (e.g. KNNBaseline), or implicetely\n# using kwargs (e.g. KNNBasic).\n\nclass SymmetricAlgo(AlgoBase):\n \"\"\"This is an abstract class aimed to ease the use of symmetric algorithms.\n\n A symmetric algorithm is an algorithm that can can be based on users or on\n items indifferently, e.g. all the algorithms in this module.\n\n When the algo is user-based x denotes a user and y an item. Else, it's\n reversed.\n \"\"\"\n\n def __init__(self, sim_options={}, verbose=True, **kwargs):\n\n AlgoBase.__init__(self, sim_options=sim_options, **kwargs)\n self.verbose = verbose\n\n def fit(self, trainset):\n\n AlgoBase.fit(self, trainset)\n\n ub = self.sim_options['user_based']\n self.n_x = self.trainset.n_users if ub else self.trainset.n_items\n self.n_y = self.trainset.n_items if ub else self.trainset.n_users\n self.xr = self.trainset.ur if ub else self.trainset.ir\n self.yr = self.trainset.ir if ub else self.trainset.ur\n\n return self\n\n def switch(self, u_stuff, i_stuff):\n \"\"\"Return x_stuff and y_stuff depending on the user_based field.\"\"\"\n\n if self.sim_options['user_based']:\n return u_stuff, i_stuff\n else:\n return i_stuff, u_stuff\n\n\nclass KNNBasic(SymmetricAlgo):\n \"\"\"A basic collaborative filtering algorithm.\n\n The prediction :math:`\\\\hat{r}_{ui}` is set as:\n\n .. math::\n \\hat{r}_{ui} = \\\\frac{\n \\\\sum\\\\limits_{v \\in N^k_i(u)} \\\\text{sim}(u, v) \\cdot r_{vi}}\n {\\\\sum\\\\limits_{v \\in N^k_i(u)} \\\\text{sim}(u, v)}\n\n or\n\n .. 
math::\n \\hat{r}_{ui} = \\\\frac{\n \\\\sum\\\\limits_{j \\in N^k_u(i)} \\\\text{sim}(i, j) \\cdot r_{uj}}\n {\\\\sum\\\\limits_{j \\in N^k_u(i)} \\\\text{sim}(i, j)}\n\n depending on the ``user_based`` field of the ``sim_options`` parameter.\n\n Args:\n k(int): The (max) number of neighbors to take into account for\n aggregation (see :ref:`this note <actual_k_note>`). Default is\n ``40``.\n min_k(int): The minimum number of neighbors to take into account for\n aggregation. If there are not enough neighbors, the prediction is\n set the the global mean of all ratings. Default is ``1``.\n sim_options(dict): A dictionary of options for the similarity\n measure. See :ref:`similarity_measures_configuration` for accepted\n options.\n verbose(bool): Whether to print trace messages of bias estimation,\n similarity, etc. Default is True.\n \"\"\"\n\n def __init__(self, k=40, min_k=1, sim_options={}, verbose=True, **kwargs):\n\n SymmetricAlgo.__init__(self, sim_options=sim_options, verbose=verbose,\n **kwargs)\n self.k = k\n self.min_k = min_k\n\n def fit(self, trainset):\n\n SymmetricAlgo.fit(self, trainset)\n self.sim = self.compute_similarities(verbose=self.verbose)\n\n return self\n\n def estimate(self, u, i):\n\n if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):\n raise PredictionImpossible('User and/or item is unkown.')\n\n x, y = self.switch(u, i)\n\n neighbors = [(self.sim[x, x2], r) for (x2, r) in self.yr[y]]\n k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[0])\n\n # compute weighted average\n sum_sim = sum_ratings = actual_k = 0\n for (sim, r) in k_neighbors:\n if sim > 0:\n sum_sim += sim\n sum_ratings += sim * r\n actual_k += 1\n\n if actual_k < self.min_k:\n raise PredictionImpossible('Not enough neighbors.')\n\n est = sum_ratings / sum_sim\n\n details = {'actual_k': actual_k}\n return est, details\n\n\nclass KNNWithMeans(SymmetricAlgo):\n \"\"\"A basic collaborative filtering algorithm, taking into account the mean\n 
ratings of each user.\n\n The prediction :math:`\\\\hat{r}_{ui}` is set as:\n\n .. math::\n \\hat{r}_{ui} = \\mu_u + \\\\frac{ \\\\sum\\\\limits_{v \\in N^k_i(u)}\n \\\\text{sim}(u, v) \\cdot (r_{vi} - \\mu_v)} {\\\\sum\\\\limits_{v \\in\n N^k_i(u)} \\\\text{sim}(u, v)}\n\n or\n\n .. math::\n \\hat{r}_{ui} = \\mu_i + \\\\frac{ \\\\sum\\\\limits_{j \\in N^k_u(i)}\n \\\\text{sim}(i, j) \\cdot (r_{uj} - \\mu_j)} {\\\\sum\\\\limits_{j \\in\n N^k_u(i)} \\\\text{sim}(i, j)}\n\n depending on the ``user_based`` field of the ``sim_options`` parameter.\n\n\n Args:\n k(int): The (max) number of neighbors to take into account for\n aggregation (see :ref:`this note <actual_k_note>`). Default is\n ``40``.\n min_k(int): The minimum number of neighbors to take into account for\n aggregation. If there are not enough neighbors, the neighbor\n aggregation is set to zero (so the prediction ends up being\n equivalent to the mean :math:`\\mu_u` or :math:`\\mu_i`). Default is\n ``1``.\n sim_options(dict): A dictionary of options for the similarity\n measure. See :ref:`similarity_measures_configuration` for accepted\n options.\n verbose(bool): Whether to print trace messages of bias estimation,\n similarity, etc. 
Default is True.\n \"\"\"\n\n def __init__(self, k=40, min_k=1, sim_options={}, verbose=True, **kwargs):\n\n SymmetricAlgo.__init__(self, sim_options=sim_options,\n verbose=verbose, **kwargs)\n\n self.k = k\n self.min_k = min_k\n\n def fit(self, trainset):\n\n SymmetricAlgo.fit(self, trainset)\n self.sim = self.compute_similarities(verbose=self.verbose)\n\n self.means = np.zeros(self.n_x)\n for x, ratings in iteritems(self.xr):\n self.means[x] = np.mean([r for (_, r) in ratings])\n\n return self\n\n def estimate(self, u, i):\n\n if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):\n raise PredictionImpossible('User and/or item is unkown.')\n\n x, y = self.switch(u, i)\n\n neighbors = [(x2, self.sim[x, x2], r) for (x2, r) in self.yr[y]]\n k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[1])\n\n est = self.means[x]\n\n # compute weighted average\n sum_sim = sum_ratings = actual_k = 0\n for (nb, sim, r) in k_neighbors:\n if sim > 0:\n sum_sim += sim\n sum_ratings += sim * (r - self.means[nb])\n actual_k += 1\n\n if actual_k < self.min_k:\n sum_ratings = 0\n\n try:\n est += sum_ratings / sum_sim\n except ZeroDivisionError:\n pass # return mean\n\n details = {'actual_k': actual_k}\n return est, details\n\n\nclass KNNBaseline(SymmetricAlgo):\n \"\"\"A basic collaborative filtering algorithm taking into account a\n *baseline* rating.\n\n\n The prediction :math:`\\\\hat{r}_{ui}` is set as:\n\n .. math::\n \\hat{r}_{ui} = b_{ui} + \\\\frac{ \\\\sum\\\\limits_{v \\in N^k_i(u)}\n \\\\text{sim}(u, v) \\cdot (r_{vi} - b_{vi})} {\\\\sum\\\\limits_{v \\in\n N^k_i(u)} \\\\text{sim}(u, v)}\n\n or\n\n\n .. math::\n \\hat{r}_{ui} = b_{ui} + \\\\frac{ \\\\sum\\\\limits_{j \\in N^k_u(i)}\n \\\\text{sim}(i, j) \\cdot (r_{uj} - b_{uj})} {\\\\sum\\\\limits_{j \\in\n N^k_u(i)} \\\\text{sim}(i, j)}\n\n depending on the ``user_based`` field of the ``sim_options`` parameter. 
For\n the best predictions, use the :func:`pearson_baseline\n <surprise.similarities.pearson_baseline>` similarity measure.\n\n This algorithm corresponds to formula (3), section 2.2 of\n :cite:`Koren:2010`.\n\n Args:\n k(int): The (max) number of neighbors to take into account for\n aggregation (see :ref:`this note <actual_k_note>`). Default is\n ``40``.\n min_k(int): The minimum number of neighbors to take into account for\n aggregation. If there are not enough neighbors, the neighbor\n aggregation is set to zero (so the prediction ends up being\n equivalent to the baseline). Default is ``1``.\n sim_options(dict): A dictionary of options for the similarity\n measure. See :ref:`similarity_measures_configuration` for accepted\n options. It is recommended to use the :func:`pearson_baseline\n <surprise.similarities.pearson_baseline>` similarity measure.\n\n bsl_options(dict): A dictionary of options for the baseline estimates\n computation. See :ref:`baseline_estimates_configuration` for\n accepted options.\n verbose(bool): Whether to print trace messages of bias estimation,\n similarity, etc. 
Default is True.\n\n \"\"\"\n\n def __init__(self, k=40, min_k=1, sim_options={}, bsl_options={},\n verbose=True, **kwargs):\n\n SymmetricAlgo.__init__(self, sim_options=sim_options,\n bsl_options=bsl_options, verbose=verbose,\n **kwargs)\n\n self.k = k\n self.min_k = min_k\n\n def fit(self, trainset):\n\n SymmetricAlgo.fit(self, trainset)\n self.bu, self.bi = self.compute_baselines(verbose=self.verbose)\n self.bx, self.by = self.switch(self.bu, self.bi)\n self.sim = self.compute_similarities(verbose=self.verbose)\n\n return self\n\n def estimate(self, u, i):\n\n est = self.trainset.global_mean\n if self.trainset.knows_user(u):\n est += self.bu[u]\n if self.trainset.knows_item(i):\n est += self.bi[i]\n\n x, y = self.switch(u, i)\n\n if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):\n return est\n\n neighbors = [(x2, self.sim[x, x2], r) for (x2, r) in self.yr[y]]\n k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[1])\n\n # compute weighted average\n sum_sim = sum_ratings = actual_k = 0\n for (nb, sim, r) in k_neighbors:\n if sim > 0:\n sum_sim += sim\n nb_bsl = self.trainset.global_mean + self.bx[nb] + self.by[y]\n sum_ratings += sim * (r - nb_bsl)\n actual_k += 1\n\n if actual_k < self.min_k:\n sum_ratings = 0\n\n try:\n est += sum_ratings / sum_sim\n except ZeroDivisionError:\n pass # just baseline again\n\n details = {'actual_k': actual_k}\n return est, details\n\n\nclass KNNWithZScore(SymmetricAlgo):\n \"\"\"A basic collaborative filtering algorithm, taking into account\n the z-score normalization of each user.\n\n The prediction :math:`\\\\hat{r}_{ui}` is set as:\n\n .. math::\n \\hat{r}_{ui} = \\mu_u + \\sigma_u \\\\frac{ \\\\sum\\\\limits_{v \\in N^k_i(u)}\n \\\\text{sim}(u, v) \\cdot (r_{vi} - \\mu_v) / \\sigma_v} {\\\\sum\\\\limits_{v\n \\in N^k_i(u)} \\\\text{sim}(u, v)}\n\n or\n\n .. 
math::\n \\hat{r}_{ui} = \\mu_i + \\sigma_i \\\\frac{ \\\\sum\\\\limits_{j \\in N^k_u(i)}\n \\\\text{sim}(i, j) \\cdot (r_{uj} - \\mu_j) / \\sigma_j} {\\\\sum\\\\limits_{j\n \\in N^k_u(i)} \\\\text{sim}(i, j)}\n\n depending on the ``user_based`` field of the ``sim_options`` parameter.\n\n If :math:`\\sigma` is 0, than the overall sigma is used in that case.\n\n Args:\n k(int): The (max) number of neighbors to take into account for\n aggregation (see :ref:`this note <actual_k_note>`). Default is\n ``40``.\n min_k(int): The minimum number of neighbors to take into account for\n aggregation. If there are not enough neighbors, the neighbor\n aggregation is set to zero (so the prediction ends up being\n equivalent to the mean :math:`\\mu_u` or :math:`\\mu_i`). Default is\n ``1``.\n sim_options(dict): A dictionary of options for the similarity\n measure. See :ref:`similarity_measures_configuration` for accepted\n options.\n verbose(bool): Whether to print trace messages of bias estimation,\n similarity, etc. 
Default is True.\n \"\"\"\n\n def __init__(self, k=40, min_k=1, sim_options={}, verbose=True, **kwargs):\n\n SymmetricAlgo.__init__(self, sim_options=sim_options, verbose=verbose,\n **kwargs)\n\n self.k = k\n self.min_k = min_k\n\n def fit(self, trainset):\n\n SymmetricAlgo.fit(self, trainset)\n\n self.means = np.zeros(self.n_x)\n self.sigmas = np.zeros(self.n_x)\n # when certain sigma is 0, use overall sigma\n self.overall_sigma = np.std([r for (_, _, r)\n in self.trainset.all_ratings()])\n\n for x, ratings in iteritems(self.xr):\n self.means[x] = np.mean([r for (_, r) in ratings])\n sigma = np.std([r for (_, r) in ratings])\n self.sigmas[x] = self.overall_sigma if sigma == 0.0 else sigma\n\n self.sim = self.compute_similarities(verbose=self.verbose)\n\n return self\n\n def estimate(self, u, i):\n\n if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):\n raise PredictionImpossible('User and/or item is unkown.')\n\n x, y = self.switch(u, i)\n\n neighbors = [(x2, self.sim[x, x2], r) for (x2, r) in self.yr[y]]\n k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[1])\n\n est = self.means[x]\n\n # compute weighted average\n sum_sim = sum_ratings = actual_k = 0\n for (nb, sim, r) in k_neighbors:\n if sim > 0:\n sum_sim += sim\n sum_ratings += sim * (r - self.means[nb]) / self.sigmas[nb]\n actual_k += 1\n\n if actual_k < self.min_k:\n sum_ratings = 0\n\n try:\n est += sum_ratings / sum_sim * self.sigmas[x]\n except ZeroDivisionError:\n pass # return mean\n\n details = {'actual_k': actual_k}\n return est, details\n"
] | [
[
"pandas.DataFrame"
],
[
"numpy.std",
"numpy.zeros",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jfw225/ptlib | [
"a78ef707ed09aa9c1f1b1157a20e3d9d3fb04c4d"
] | [
"tests/queue_test.py"
] | [
"import cv2\nimport pickle\nimport numpy as np\n\nimport ptlib as pt\nfrom ptlib.core.metadata import MetadataManager\nfrom ptlib.core.queue import Queue\n\n\nclass VideoIngest(pt.Task):\n # variables here are static\n NUM_WORKERS = 1\n\n VIDEO_PATH = \"C:\\\\Users\\\\Owner\\\\Videos\\\\Battlefield 2042 Open Beta\\\\testvid.mp4\"\n BATCH_SIZE = 30 * 5 # fps * duartion in seconds\n\n def __init__(self):\n # maybe no init\n super().__init__(num_workers=VideoIngest.NUM_WORKERS)\n\n def create_map(self, worker):\n # create capture object\n capture = cv2.VideoCapture(self.VIDEO_PATH)\n\n # compute task specific start position, stop position, and batch_id\n num_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n start_pos = worker.id * num_frames // self.num_workers\n stop_pos = (worker.id + 1) * num_frames // self.num_workers\n worker.batch_id = start_pos // self.BATCH_SIZE\n print(\n f\"ingest: {worker.id} | start: {start_pos} | stop: {stop_pos} | batch_id: {worker.batch_id}\")\n\n # get shape and dtype\n _, frame = capture.read()\n shape, dtype = frame.shape, frame.dtype\n\n # set the capture object to the start position\n capture.set(cv2.CAP_PROP_POS_FRAMES, start_pos)\n\n # create range to iterate over\n batch_iter = [i for i in range(self.BATCH_SIZE)]\n\n # set the current frame position\n worker.current_pos = start_pos\n\n # set zero array and output array\n worker.output_arr = np.zeros((self.BATCH_SIZE, *shape), dtype=dtype)\n\n def job_map(input_job):\n worker.output_arr.fill(0)\n for i in batch_iter:\n ret, frame = capture.read()\n\n if not ret or worker.current_pos == stop_pos:\n capture.release()\n worker.EXIT_FLAG = True\n break\n\n worker.output_arr[i] = frame\n worker.current_pos += 1\n\n worker.batch_id += 1\n\n # return output_batch\n return [worker.output_arr]\n\n return job_map\n\n\n##### pseudo-controller implementation for testing #####\ndef run(pipeline, meta_manager, output_q):\n # link output queue\n output_q._link_mem(create_local=True)\n\n # 
set start time\n meta_manager.set_time()\n\n # start all worker processes\n for task in pipeline.iter_tasks():\n task._start_workers()\n\n task = pipeline\n while task is not pt.EmptyTask:\n # force pull from output queue\n output_q.get()\n\n # update metadata\n meta_manager.update()\n\n # if current task finishes, send kill signal to workers\n if not task._workers_running():\n print(f\"Task Finished: {task.name}\")\n task = task.next\n task._kill_workers()\n\n # finish retreiving metadata (in case loop exits before getting all metadata)\n meta_manager.update()\n\n # set finish time\n meta_manager.set_time()\n\n\nif __name__ == '__main__':\n # create pipeline\n pipeline = VideoIngest()\n\n # infer output\n output_job, job_specs = pipeline.infer_structure(None)\n\n # create I/O queues\n input_q, output_q = Queue(), Queue(job_specs, capacity=5)\n\n # create metadata manager\n meta_manager = MetadataManager(pipeline)\n\n # create workers and assign them to task\n pipeline.create_workers(input_q, output_q, meta_manager.meta_q)\n\n # start pseudo-controller\n run(pipeline, meta_manager, output_q)\n\n # create and run controller\n # controller = pt.Controller(pipeline, 5)\n # controller.run()\n # controller.graph()\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vipksmosar/habr_rss_parse | [
"91fa3df3db3a1416348237919824696239d4cb29"
] | [
"airflow/airflow_own/plugins/WRITER.py"
] | [
"import os\ntry:\n import pandas as pd\nexcept:\n os.system('pip3 install pandas')\n import pandas as pd\ntry:\n import psycopg2\nexcept:\n os.system('pip3 install psycopg2-binary')\n import psycopg2\nimport numpy as np\nfrom psycopg2.extensions import register_adapter, AsIs\npsycopg2.extensions.register_adapter(np.int64, psycopg2._psycopg.AsIs)\n\nclass POSTGREE_WRITER:\n \n def __init__(self, string_to_connect='postgresql://postgres:postgres@pdbserv:5432/postgres'):\n \n self.string_to_connect = string_to_connect\n self.conn = psycopg2.connect(self.string_to_connect)\n self.cursor = self.conn.cursor()\n \n def __create_DF(self, path='./habr_news/'):\n DF_ALL = pd.DataFrame()\n for file in os.listdir(path):\n full_filename = '{}{}'.format(path, file)\n DF = pd.read_parquet(full_filename)\n DF_ALL = pd.concat([DF_ALL, DF])\n DF_ALL = DF_ALL.drop_duplicates()\n return DF_ALL\n \n def __clear_directory(self, path='./habr_news/'):\n for file in os.listdir(path):\n full_filename = '{}{}'.format(path, file)\n os.remove(full_filename)\n \n \n def sql_test(self, table_name):\n select_req = '''SELECT * FROM {};'''.format(table_name)\n #select_req = '''SELECT * FROM INFORMATION_SCHEMA.TABLES'''\n self.cursor.execute(select_req)\n data_final = self.cursor.fetchall()\n columns_data = list(map(lambda x: x[0], self.cursor.description))\n self.conn.commit()\n return data_final, columns_data\n \n def sql_insert(self, table_name, data):\n self.__init__('postgresql://postgres:postgres@pdbserv:5432/postgres')\n select_insert = '''INSERT INTO {}\n (\n title,\n link,\n author_name,\n real_author_name,\n body,\n index_hash,\n create_at,\n text_len,\n word_count,\n word_mean,\n eng_symbol ,\n rus_symbol,\n num_symbol \n )\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\n '''.format(table_name)\n self.cursor.execute(select_insert, data)\n self.conn.commit()\n \n def sql_start(self, path):\n self.result = {'error_count':0, 'success_count':0}\n 
self.create_table_if_not_exist('habr_news')\n data = self.__create_DF(path)\n for i in range(len(data)):\n try:\n string = data.iloc[i]\n self.sql_insert('habr_news',(string['title'], string['link'], string['author_name'],\n string['real_author_name'],string['body'], string['index_hash'],\n string['create_at'], string['text_len'], string['word_count'],\n string['word_mean'], string['eng_symbol'], string['rus_symbol'],\n string['num_symbol']))\n self.result['success_count']+=1\n except Exception as E:\n #print(E)\n if 'duplicate key value violates unique constraint' in '{}'.format(E):\n self.result['error_count']+=1\n #data, columns = self.sql_test('habr_news')\n self.cursor.close()\n self.conn.close()\n if self.result['error_count']==len(data):\n self.__clear_directory('/opt/airflow/tmp_dir/habr_news/')\n return self.result\n \n def create_table_if_not_exist(self, table_name):\n select_insert = '''create table if not exists {} (\n title TEXT,\n link TEXT,\n author_name TEXT,\n real_author_name TEXT,\n body TEXT,\n index_hash VARCHAR NOT NULL,\n create_at TIMESTAMP,\n text_len INT,\n word_count REAL,\n word_mean REAL,\n eng_symbol INT,\n rus_symbol INT,\n num_symbol INT,\n CONSTRAINT habr_news_pk PRIMARY KEY (index_hash)\n )'''.format(table_name)\n self.cursor.execute(select_insert)\n self.conn.commit()"
] | [
[
"pandas.read_parquet",
"pandas.concat",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
where-is-brett/tensorflow | [
"5da8599b2cf9edfb9fac4431c705501bf7ceccd8",
"5da8599b2cf9edfb9fac4431c705501bf7ceccd8",
"5da8599b2cf9edfb9fac4431c705501bf7ceccd8",
"5da8599b2cf9edfb9fac4431c705501bf7ceccd8"
] | [
"tensorflow/lite/testing/op_tests/unroll_batch_matmul.py",
"tensorflow/python/keras/layers/preprocessing/image_preprocessing.py",
"tensorflow/python/profiler/traceme.py",
"tensorflow/python/autograph/pyct/static_analysis/activity_test.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test configs for unroll_batch_matmul.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.lite.testing.zip_test_utils import create_tensor_data\nfrom tensorflow.lite.testing.zip_test_utils import make_zip_of_tests\nfrom tensorflow.lite.testing.zip_test_utils import register_make_test_function\n\n\n@register_make_test_function()\ndef make_unroll_batch_matmul_tests(options):\n \"\"\"Make a set of tests to test unroll_batch_matmul.\"\"\"\n\n # The test cases below requires broadcasting support (BatchMatMulV2 semantic),\n # whis isn't supported as of this change.\n broadcast_shape_params = [\n # Simple broadcast.\n [(1, 2, 3), (3, 5), False, False],\n # Empty batch broadcast.\n [(2, 5, 3), (3, 7), False, False],\n # Single batch with non-empty batch broadcast.\n [(1, 5, 3), (4, 3, 7), False, False],\n # Broadcast both operands\n [(3, 1, 5, 3), (1, 4, 3, 7), False, False],\n ]\n\n test_parameters = [{\n \"dtype\": [tf.float32],\n \"shape\": [[(2, 2, 3),\n (2, 3, 2), False, False], [(2, 2, 3), (2, 3, 2), True, True],\n [(2, 2, 3),\n (2, 2, 3), False, True], [(2, 2, 3), (2, 2, 3), True, False],\n [(4, 2, 2, 3), (4, 2, 3, 2), False, False],\n [(4, 2, 2, 3), 
(4, 2, 3, 2), True, True],\n [(4, 2, 2, 3), (4, 2, 2, 3), False, True],\n [(4, 2, 2, 3),\n (4, 2, 2, 3), True, False]] + broadcast_shape_params,\n # TODO(b/130887442): Improve the forward compatibility tests for every\n # ops.\n \"forward_compatibility_test\": [False, True],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the batch_matmul op testing graph.\"\"\"\n\n def _build_graph():\n \"\"\"Build the graph.\"\"\"\n input_tensor1 = tf.compat.v1.placeholder(\n dtype=parameters[\"dtype\"], shape=parameters[\"shape\"][0])\n input_tensor2 = tf.compat.v1.placeholder(\n dtype=parameters[\"dtype\"], shape=parameters[\"shape\"][1])\n # Should be unrolled and replaced with fully_connected ops in the end.\n out = tf.matmul(\n input_tensor1,\n input_tensor2,\n transpose_a=parameters[\"shape\"][2],\n transpose_b=parameters[\"shape\"][3])\n return [input_tensor1, input_tensor2], [out]\n\n if parameters[\"forward_compatibility_test\"]:\n # This is hardcoded to the date after MatMulV2 is activated.\n # TODO(b/130887442): Improve the forward compatibility tests for every\n # ops, and remove the hardcoded date.\n with tf.compat.forward_compatibility_horizon(2019, 4, 26):\n return _build_graph()\n else:\n return _build_graph()\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_value1 = create_tensor_data(\n parameters[\"dtype\"], shape=parameters[\"shape\"][0])\n input_value2 = create_tensor_data(\n parameters[\"dtype\"], shape=parameters[\"shape\"][1])\n return [input_value1, input_value2], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))\n\n make_zip_of_tests(options, test_parameters, build_graph, build_inputs)\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras image preprocessing layers.\"\"\"\n# pylint: disable=g-classes-have-attributes\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.engine.base_layer import Layer\nfrom tensorflow.python.keras.engine.input_spec import InputSpec\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import image_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import stateful_random_ops\nfrom tensorflow.python.ops import stateless_random_ops\n\nResizeMethod = image_ops.ResizeMethod\n\n_RESIZE_METHODS = {\n 'bilinear': ResizeMethod.BILINEAR,\n 'nearest': ResizeMethod.NEAREST_NEIGHBOR,\n 'bicubic': ResizeMethod.BICUBIC,\n 'area': ResizeMethod.AREA,\n 'lanczos3': ResizeMethod.LANCZOS3,\n 
'lanczos5': ResizeMethod.LANCZOS5,\n 'gaussian': ResizeMethod.GAUSSIAN,\n 'mitchellcubic': ResizeMethod.MITCHELLCUBIC\n}\n\n\nclass Resizing(Layer):\n \"\"\"Image resizing layer.\n\n Resize the batched image input to target height and width. The input should\n be a 4-D tensor in the format of NHWC.\n\n Arguments:\n height: Integer, the height of the output shape.\n width: Integer, the width of the output shape.\n interpolation: String, the interpolation method. Defaults to `bilinear`.\n Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`,\n `gaussian`, `mitchellcubic`\n \"\"\"\n\n def __init__(self, height, width, interpolation='bilinear', **kwargs):\n self.target_height = height\n self.target_width = width\n self.interpolation = interpolation\n self._interpolation_method = get_interpolation(interpolation)\n self.input_spec = InputSpec(ndim=4)\n super(Resizing, self).__init__(**kwargs)\n\n def build(self, input_shape):\n channel_axis = 3\n channel_dim = int(input_shape[channel_axis])\n self.input_spec = InputSpec(ndim=4, axes={channel_axis: channel_dim})\n self.built = True\n\n def call(self, inputs):\n outputs = image_ops.resize_images_v2(\n images=inputs,\n size=[self.target_height, self.target_width],\n method=self._interpolation_method)\n return outputs\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n return tensor_shape.TensorShape(\n [input_shape[0], self.target_height, self.target_width, input_shape[3]])\n\n def get_config(self):\n config = {\n 'height': self.target_height,\n 'width': self.target_width,\n 'interpolation': self.interpolation,\n }\n base_config = super(Resizing, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass CenterCrop(Layer):\n \"\"\"Crop the central portion of the images to target height and width.\n\n Input shape:\n 4D tensor with shape:\n `(samples, height, width, channels)`, 
data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(samples, target_height, target_width, channels)`.\n\n If the input height/width is even and the target height/width is odd (or\n inversely), the input image is left-padded by 1 pixel.\n\n Arguments:\n height: Integer, the height of the output shape.\n width: Integer, the width of the output shape.\n \"\"\"\n\n def __init__(self, height, width, **kwargs):\n self.target_height = height\n self.target_width = width\n self.input_spec = InputSpec(ndim=4)\n super(CenterCrop, self).__init__(**kwargs)\n\n def build(self, input_shape):\n channel_axis = 3\n channel_dim = int(input_shape[channel_axis])\n self.input_spec = InputSpec(ndim=4, axes={channel_axis: channel_dim})\n self.built = True\n\n def call(self, inputs):\n inputs_shape = array_ops.shape(inputs)\n h_axis, w_axis = 1, 2\n img_hd = inputs_shape[h_axis]\n img_wd = inputs_shape[w_axis]\n img_hd_diff = img_hd - self.target_height\n img_wd_diff = img_wd - self.target_width\n checks = []\n checks.append(\n check_ops.assert_non_negative(\n img_hd_diff,\n message='The crop height {} should not be greater than input '\n 'height.'.format(self.target_height)))\n checks.append(\n check_ops.assert_non_negative(\n img_wd_diff,\n message='The crop width {} should not be greater than input '\n 'width.'.format(self.target_width)))\n with ops.control_dependencies(checks):\n bbox_h_start = math_ops.cast(img_hd_diff / 2, dtypes.int32)\n bbox_w_start = math_ops.cast(img_wd_diff / 2, dtypes.int32)\n bbox_begin = array_ops.stack([0, bbox_h_start, bbox_w_start, 0])\n bbox_size = array_ops.stack(\n [-1, self.target_height, self.target_width, -1])\n outputs = array_ops.slice(inputs, bbox_begin, bbox_size)\n return outputs\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n return tensor_shape.TensorShape(\n [input_shape[0], self.target_height, self.target_width, input_shape[3]])\n\n def 
get_config(self):\n config = {\n 'height': self.target_height,\n 'width': self.target_width,\n }\n base_config = super(CenterCrop, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass RandomCrop(Layer):\n \"\"\"Randomly crop the images to target height and width.\n\n This layer will crop all the images in the same batch to the same cropping\n location.\n By default, random cropping is only applied during training. At inference\n time, the images will be first rescaled to preserve the shorter side, and\n center cropped. If you need to apply random cropping at inference time,\n set `training` to True when calling the layer.\n\n Input shape:\n 4D tensor with shape:\n `(samples, height, width, channels)`, data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(samples, target_height, target_width, channels)`.\n\n Arguments:\n height: Integer, the height of the output shape.\n width: Integer, the width of the output shape.\n seed: Integer. 
Used to create a random seed.\n \"\"\"\n\n def __init__(self, height, width, seed=None, **kwargs):\n self.height = height\n self.width = width\n self.seed = seed\n self._rng = make_generator(self.seed)\n self.input_spec = InputSpec(ndim=4)\n super(RandomCrop, self).__init__(**kwargs)\n\n def call(self, inputs, training=None):\n if training is None:\n training = K.learning_phase()\n\n def random_cropped_inputs():\n \"\"\"Cropped inputs with stateless random ops.\"\"\"\n input_shape = array_ops.shape(inputs)\n crop_size = array_ops.stack(\n [input_shape[0], self.height, self.width, input_shape[3]])\n check = control_flow_ops.Assert(\n math_ops.reduce_all(input_shape >= crop_size),\n [self.height, self.width])\n input_shape = control_flow_ops.with_dependencies([check], input_shape)\n limit = input_shape - crop_size + 1\n offset = stateless_random_ops.stateless_random_uniform(\n array_ops.shape(input_shape),\n dtype=crop_size.dtype,\n maxval=crop_size.dtype.max,\n seed=self._rng.make_seeds()[:, 0]) % limit\n return array_ops.slice(inputs, offset, crop_size)\n\n # TODO(b/143885775): Share logic with Resize and CenterCrop.\n def resize_and_center_cropped_inputs():\n \"\"\"Deterministically resize to shorter side and center crop.\"\"\"\n input_shape = array_ops.shape(inputs)\n input_height_t = input_shape[1]\n input_width_t = input_shape[2]\n ratio_cond = (input_height_t / input_width_t > 1.)\n # pylint: disable=g-long-lambda\n resized_height = tf_utils.smart_cond(\n ratio_cond,\n lambda: math_ops.cast(self.width * input_height_t / input_width_t,\n input_height_t.dtype), lambda: self.height)\n resized_width = tf_utils.smart_cond(\n ratio_cond, lambda: self.width,\n lambda: math_ops.cast(self.height * input_width_t / input_height_t,\n input_width_t.dtype))\n # pylint: enable=g-long-lambda\n resized_inputs = image_ops.resize_images_v2(\n images=inputs, size=array_ops.stack([resized_height, resized_width]))\n\n img_hd_diff = resized_height - self.height\n img_wd_diff = 
resized_width - self.width\n bbox_h_start = math_ops.cast(img_hd_diff / 2, dtypes.int32)\n bbox_w_start = math_ops.cast(img_wd_diff / 2, dtypes.int32)\n bbox_begin = array_ops.stack([0, bbox_h_start, bbox_w_start, 0])\n bbox_size = array_ops.stack([-1, self.height, self.width, -1])\n outputs = array_ops.slice(resized_inputs, bbox_begin, bbox_size)\n return outputs\n\n output = tf_utils.smart_cond(training, random_cropped_inputs,\n resize_and_center_cropped_inputs)\n original_shape = inputs.shape.as_list()\n batch_size, num_channels = original_shape[0], original_shape[3]\n output_shape = [batch_size] + [self.height, self.width] + [num_channels]\n output.set_shape(output_shape)\n return output\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n return tensor_shape.TensorShape(\n [input_shape[0], self.height, self.width, input_shape[3]])\n\n def get_config(self):\n config = {\n 'height': self.height,\n 'width': self.width,\n 'seed': self.seed,\n }\n base_config = super(RandomCrop, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Rescaling(Layer):\n \"\"\"Multiply inputs by `scale`.\n\n For instance, to rescale an input in the `[0, 255]` range\n to be in the `[0, 1]` range, you would pass `scale=1./255`.\n\n The rescaling is applied both during training and inference.\n\n Input shape:\n Arbitrary.\n\n Output shape:\n Same as input.\n\n Arguments:\n scale: Float, the scale to apply to the inputs.\n \"\"\"\n\n def __init__(self, scale, **kwargs):\n self.scale = scale\n super(Rescaling, self).__init__(**kwargs)\n\n def call(self, inputs):\n dtype = self._compute_dtype\n return math_ops.cast(inputs, dtype) * math_ops.cast(self.scale, dtype)\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {\n 'scale': self.scale,\n }\n base_config = super(Rescaling, self).get_config()\n return dict(list(base_config.items()) + 
list(config.items()))\n\n\nclass RandomFlip(Layer):\n \"\"\"Randomly flip each image horizontally and vertically.\n\n This layer will by default flip the images horizontally and then vertically\n during training time.\n `RandomFlip(horizontal=True)` will only flip the input horizontally.\n `RandomFlip(vertical=True)` will only flip the input vertically.\n During inference time, the output will be identical to input. Call the layer\n with `training=True` to flip the input.\n\n Input shape:\n 4D tensor with shape:\n `(samples, height, width, channels)`, data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(samples, height, width, channels)`, data_format='channels_last'.\n\n Attributes:\n horizontal: Bool, whether to randomly flip horizontally.\n width: Bool, whether to randomly flip vertically.\n seed: Integer. Used to create a random seed.\n \"\"\"\n\n def __init__(self, horizontal=None, vertical=None, seed=None, **kwargs):\n # If both arguments are None, set both to True.\n if horizontal is None and vertical is None:\n self.horizontal = True\n self.vertical = True\n else:\n self.horizontal = horizontal or False\n self.vertical = vertical or False\n self.seed = seed\n self._rng = make_generator(self.seed)\n self.input_spec = InputSpec(ndim=4)\n super(RandomFlip, self).__init__(**kwargs)\n\n def call(self, inputs, training=None):\n if training is None:\n training = K.learning_phase()\n\n def random_flipped_inputs():\n flipped_outputs = inputs\n if self.horizontal:\n flipped_outputs = image_ops.random_flip_up_down(flipped_outputs,\n self.seed)\n if self.vertical:\n flipped_outputs = image_ops.random_flip_left_right(\n flipped_outputs, self.seed)\n return flipped_outputs\n\n output = tf_utils.smart_cond(training, random_flipped_inputs,\n lambda: inputs)\n output.set_shape(inputs.shape)\n return output\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {\n 'horizontal': self.horizontal,\n 
'vertical': self.vertical,\n 'seed': self.seed,\n }\n base_config = super(RandomFlip, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass RandomTranslation(Layer):\n \"\"\"Randomly translate each image during training.\n\n Arguments:\n height_factor: a positive float represented as fraction of value, or a tuple\n of size 2 representing lower and upper bound for shifting vertically. When\n represented as a single float, this value is used for both the upper and\n lower bound. For instance, `height_factor=(0.2, 0.3)` results in an output\n height varying in the range `[original - 20%, original + 30%]`.\n `height_factor=0.2` results in an output height varying in the range\n `[original - 20%, original + 20%]`.\n width_factor: a positive float represented as fraction of value, or a tuple\n of size 2 representing lower and upper bound for shifting horizontally.\n When represented as a single float, this value is used for both the upper\n and lower bound.\n fill_mode: Points outside the boundaries of the input are filled according\n to the given mode (one of `{'nearest', 'bilinear'}`).\n fill_value: Value used for points outside the boundaries of the input if\n `mode='constant'`.\n seed: Integer. 
Used to create a random seed.\n Input shape:\n 4D tensor with shape: `(samples, height, width, channels)`,\n data_format='channels_last'.\n Output shape:\n 4D tensor with shape: `(samples, height, width, channels)`,\n data_format='channels_last'.\n Raise:\n ValueError: if lower bound is not between [0, 1], or upper bound is\n negative.\n \"\"\"\n\n def __init__(self,\n height_factor,\n width_factor,\n fill_mode='nearest',\n fill_value=0.,\n seed=None,\n **kwargs):\n self.height_factor = height_factor\n if isinstance(height_factor, (tuple, list)):\n self.height_lower = abs(height_factor[0])\n self.height_upper = height_factor[1]\n else:\n self.height_lower = self.height_upper = height_factor\n if self.height_upper < 0.:\n raise ValueError('`height_factor` cannot have negative values as upper '\n 'bound, got {}'.format(height_factor))\n if abs(self.height_lower) > 1. or abs(self.height_upper) > 1.:\n raise ValueError('`height_factor` must have values between [-1, 1], '\n 'got {}'.format(height_factor))\n\n self.width_factor = width_factor\n if isinstance(width_factor, (tuple, list)):\n self.width_lower = abs(width_factor[0])\n self.width_upper = width_factor[1]\n else:\n self.width_lower = self.width_upper = width_factor\n if self.width_upper < 0.:\n raise ValueError('`width_factor` cannot have negative values as upper '\n 'bound, got {}'.format(width_factor))\n if abs(self.width_lower) > 1. 
or abs(self.width_upper) > 1.:\n raise ValueError('`width_factor` must have values between [-1, 1], '\n 'got {}'.format(width_factor))\n\n if fill_mode not in {'nearest', 'bilinear'}:\n raise NotImplementedError(\n '`fill_mode` {} is not supported yet.'.format(fill_mode))\n self.fill_mode = fill_mode\n self.fill_value = fill_value\n self.seed = seed\n self._rng = make_generator(self.seed)\n self.input_spec = InputSpec(ndim=4)\n super(RandomTranslation, self).__init__(**kwargs)\n\n def call(self, inputs, training=None):\n if training is None:\n training = K.learning_phase()\n\n def random_translated_inputs():\n \"\"\"Translated inputs with random ops.\"\"\"\n inputs_shape = array_ops.shape(inputs)\n batch_size = inputs_shape[0]\n h_axis, w_axis = 1, 2\n img_hd = math_ops.cast(inputs_shape[h_axis], dtypes.float32)\n img_wd = math_ops.cast(inputs_shape[w_axis], dtypes.float32)\n height_translate = self._rng.uniform(\n shape=[batch_size, 1],\n minval=-self.height_lower,\n maxval=self.height_upper)\n height_translate = height_translate * img_hd\n width_translate = self._rng.uniform(\n shape=[batch_size, 1],\n minval=-self.width_lower,\n maxval=self.width_upper)\n width_translate = width_translate * img_wd\n translations = math_ops.cast(\n array_ops.concat([height_translate, width_translate], axis=1),\n dtype=inputs.dtype)\n return transform(\n inputs,\n get_translation_matrix(translations),\n interpolation=self.fill_mode)\n\n output = tf_utils.smart_cond(training, random_translated_inputs,\n lambda: inputs)\n output.set_shape(inputs.shape)\n return output\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {\n 'height_factor': self.height_factor,\n 'width_factor': self.width_factor,\n 'fill_mode': self.fill_mode,\n 'fill_value': self.fill_value,\n 'seed': self.seed,\n }\n base_config = super(RandomTranslation, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\ndef 
get_translation_matrix(translations, name=None):\n \"\"\"Returns projective transform(s) for the given translation(s).\n\n Args:\n translations: A matrix of 2-element lists representing [dx, dy] to translate\n for each image (for a batch of images).\n name: The name of the op.\n\n Returns:\n A tensor of shape (num_images, 8) projective transforms which can be given\n to `transform`.\n \"\"\"\n with ops.name_scope(name, 'translation_matrix'):\n num_translations = array_ops.shape(translations)[0]\n # The translation matrix looks like:\n # [[1 0 -dx]\n # [0 1 -dy]\n # [0 0 1]]\n # where the last entry is implicit.\n # Translation matrices are always float32.\n return array_ops.concat(\n values=[\n array_ops.ones((num_translations, 1), dtypes.float32),\n array_ops.zeros((num_translations, 1), dtypes.float32),\n -translations[:, 0, None],\n array_ops.zeros((num_translations, 1), dtypes.float32),\n array_ops.ones((num_translations, 1), dtypes.float32),\n -translations[:, 1, None],\n array_ops.zeros((num_translations, 2), dtypes.float32),\n ],\n axis=1)\n\n\ndef transform(images,\n transforms,\n interpolation='nearest',\n output_shape=None,\n name=None):\n \"\"\"Applies the given transform(s) to the image(s).\n\n Args:\n images: A tensor of shape (num_images, num_rows, num_columns, num_channels)\n (NHWC), (num_rows, num_columns, num_channels) (HWC), or (num_rows,\n num_columns) (HW). The rank must be statically known (the shape is not\n `TensorShape(None)`.\n transforms: Projective transform matrix/matrices. A vector of length 8 or\n tensor of size N x 8. If one row of transforms is [a0, a1, a2, b0, b1, b2,\n c0, c1], then it maps the *output* point `(x, y)` to a transformed *input*\n point `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where\n `k = c0 x + c1 y + 1`. The transforms are *inverted* compared to the\n transform mapping input points to output points. 
Note that gradients are\n not backpropagated into transformation parameters.\n interpolation: Interpolation mode. Supported values: \"NEAREST\", \"BILINEAR\".\n output_shape: Output dimesion after the transform, [height, width]. If None,\n output is the same size as input image.\n name: The name of the op.\n\n Returns:\n Image(s) with the same type and shape as `images`, with the given\n transform(s) applied. Transformed coordinates outside of the input image\n will be filled with zeros.\n\n Raises:\n TypeError: If `image` is an invalid type.\n ValueError: If output shape is not 1-D int32 Tensor.\n \"\"\"\n with ops.name_scope(name, 'transform'):\n if output_shape is None:\n output_shape = array_ops.shape(images)[1:3]\n if not context.executing_eagerly():\n output_shape_value = tensor_util.constant_value(output_shape)\n if output_shape_value is not None:\n output_shape = output_shape_value\n\n output_shape = ops.convert_to_tensor_v2(\n output_shape, dtypes.int32, name='output_shape')\n\n if not output_shape.get_shape().is_compatible_with([2]):\n raise ValueError('output_shape must be a 1-D Tensor of 2 elements: '\n 'new_height, new_width, instead got '\n '{}'.format(output_shape))\n\n return image_ops.image_projective_transform_v2(\n images,\n output_shape=output_shape,\n transforms=transforms,\n interpolation=interpolation.upper())\n\n\ndef get_rotation_matrix(angles, image_height, image_width, name=None):\n \"\"\"Returns projective transform(s) for the given angle(s).\n\n Args:\n angles: A scalar angle to rotate all images by, or (for batches of images) a\n vector with an angle to rotate each image in the batch. The rank must be\n statically known (the shape is not `TensorShape(None)`).\n image_height: Height of the image(s) to be transformed.\n image_width: Width of the image(s) to be transformed.\n name: The name of the op.\n\n Returns:\n A tensor of shape (num_images, 8). Projective transforms which can be given\n to operation `image_projective_transform_v2`. 
If one row of transforms is\n [a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point\n `(x, y)` to a transformed *input* point\n `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,\n where `k = c0 x + c1 y + 1`.\n \"\"\"\n with ops.name_scope(name, 'rotation_matrix'):\n x_offset = ((image_width - 1) - (math_ops.cos(angles) *\n (image_width - 1) - math_ops.sin(angles) *\n (image_height - 1))) / 2.0\n y_offset = ((image_height - 1) - (math_ops.sin(angles) *\n (image_width - 1) + math_ops.cos(angles) *\n (image_height - 1))) / 2.0\n num_angles = array_ops.shape(angles)[0]\n return array_ops.concat(\n values=[\n math_ops.cos(angles)[:, None],\n -math_ops.sin(angles)[:, None],\n x_offset[:, None],\n math_ops.sin(angles)[:, None],\n math_ops.cos(angles)[:, None],\n y_offset[:, None],\n array_ops.zeros((num_angles, 2), dtypes.float32),\n ],\n axis=1)\n\n\nclass RandomRotation(Layer):\n \"\"\"Randomly rotate each image.\n\n By default, random rotations are only applied during training.\n At inference time, the layer does nothing. If you need to apply random\n rotations at inference time, set `training` to True when calling the layer.\n\n Input shape:\n 4D tensor with shape:\n `(samples, height, width, channels)`, data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(samples, height, width, channels)`, data_format='channels_last'.\n\n Attributes:\n factor: a positive float represented as fraction of 2pi, or a tuple of size\n 2 representing lower and upper bound for rotating clockwise and\n counter-clockwise. When represented as a single float, lower = upper.\n fill_mode: Points outside the boundaries of the input are filled according\n to the given mode (one of `{'constant', 'nearest', 'bilinear', 'reflect',\n 'wrap'}`).\n seed: Integer. 
Used to create a random seed.\n Raise:\n ValueError: if lower bound is not between [0, 1], or upper bound is\n negative.\n \"\"\"\n\n def __init__(self,\n factor,\n fill_mode='nearest',\n seed=None,\n **kwargs):\n self.factor = factor\n if isinstance(factor, (tuple, list)):\n self.lower = factor[0]\n self.upper = factor[1]\n else:\n self.lower = self.upper = factor\n if self.lower < 0. or self.upper < 0.:\n raise ValueError('Factor cannot have negative values, '\n 'got {}'.format(factor))\n if fill_mode not in {'nearest', 'bilinear'}:\n raise NotImplementedError(\n '`fill_mode` {} is not supported yet.'.format(fill_mode))\n self.fill_mode = fill_mode\n self.seed = seed\n self._rng = make_generator(self.seed)\n self.input_spec = InputSpec(ndim=4)\n super(RandomRotation, self).__init__(**kwargs)\n\n def call(self, inputs, training=None):\n if training is None:\n training = K.learning_phase()\n\n def random_rotated_inputs():\n \"\"\"Rotated inputs with random ops.\"\"\"\n inputs_shape = array_ops.shape(inputs)\n batch_size = inputs_shape[0]\n h_axis, w_axis = 1, 2\n img_hd = math_ops.cast(inputs_shape[h_axis], dtypes.float32)\n img_wd = math_ops.cast(inputs_shape[w_axis], dtypes.float32)\n min_angle = self.lower * 2. * np.pi\n max_angle = self.upper * 2. 
* np.pi\n angles = self._rng.uniform(\n shape=[batch_size], minval=-min_angle, maxval=max_angle)\n return transform(\n inputs,\n get_rotation_matrix(angles, img_hd, img_wd),\n interpolation=self.fill_mode)\n\n output = tf_utils.smart_cond(training, random_rotated_inputs,\n lambda: inputs)\n output.set_shape(inputs.shape)\n return output\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {\n 'factor': self.factor,\n 'fill_mode': self.fill_mode,\n 'seed': self.seed,\n }\n base_config = super(RandomRotation, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass RandomZoom(Layer):\n \"\"\"Randomly zoom each image during training.\n\n Arguments:\n height_factor: a positive float represented as fraction of value, or a tuple\n of size 2 representing lower and upper bound for zooming horizontally.\n When represented as a single float, this value is used for both the\n upper and lower bound. For instance, `height_factor=(0.2, 0.3)` result in\n an output zoom varying in the range `[original * 20%, original * 30%]`.\n width_factor: a positive float represented as fraction of value, or a tuple\n of size 2 representing lower and upper bound for zooming vertically.\n When represented as a single float, this value is used for both the\n upper and lower bound. For instance, `width_factor=(0.2, 0.3)` result in\n an output zoom varying in the range `[original * 20%, original * 30%]`.\n fill_mode: Points outside the boundaries of the input are filled according\n to the given mode (one of `{'nearest', 'bilinear'}`).\n fill_value: Value used for points outside the boundaries of the input if\n `mode='constant'`.\n seed: Integer. 
Used to create a random seed.\n\n Input shape:\n 4D tensor with shape:\n `(samples, height, width, channels)`, data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(samples, height, width, channels)`, data_format='channels_last'.\n\n Raise:\n ValueError: if lower bound is not between [0, 1], or upper bound is\n negative.\n \"\"\"\n\n def __init__(self,\n height_factor,\n width_factor,\n fill_mode='nearest',\n fill_value=0.,\n seed=None,\n **kwargs):\n self.height_factor = height_factor\n if isinstance(height_factor, (tuple, list)):\n self.height_lower = height_factor[0]\n self.height_upper = height_factor[1]\n else:\n self.height_lower = self.height_upper = height_factor\n if self.height_lower < 0. or self.height_upper < 0.:\n raise ValueError('`height_factor` cannot have negative values, '\n 'got {}'.format(height_factor))\n if self.height_lower > self.height_upper:\n raise ValueError('`height_factor` cannot have lower bound larger than '\n 'upper bound, got {}.'.format(height_factor))\n\n self.width_factor = width_factor\n if isinstance(width_factor, (tuple, list)):\n self.width_lower = width_factor[0]\n self.width_upper = width_factor[1]\n else:\n self.width_lower = self.width_upper = width_factor\n if self.width_lower < 0. 
or self.width_upper < 0.:\n raise ValueError('`width_factor` cannot have negative values, '\n 'got {}'.format(width_factor))\n if self.width_lower > self.width_upper:\n raise ValueError('`width_factor` cannot have lower bound larger than '\n 'upper bound, got {}.'.format(width_factor))\n\n if fill_mode not in {'nearest', 'bilinear'}:\n raise NotImplementedError(\n '`fill_mode` {} is not supported yet.'.format(fill_mode))\n self.fill_mode = fill_mode\n self.fill_value = fill_value\n self.seed = seed\n self._rng = make_generator(self.seed)\n self.input_spec = InputSpec(ndim=4)\n super(RandomZoom, self).__init__(**kwargs)\n\n def call(self, inputs, training=None):\n if training is None:\n training = K.learning_phase()\n\n def random_zoomed_inputs():\n \"\"\"Zoomed inputs with random ops.\"\"\"\n inputs_shape = array_ops.shape(inputs)\n batch_size = inputs_shape[0]\n h_axis, w_axis = 1, 2\n img_hd = math_ops.cast(inputs_shape[h_axis], dtypes.float32)\n img_wd = math_ops.cast(inputs_shape[w_axis], dtypes.float32)\n height_zoom = self._rng.uniform(\n shape=[batch_size, 1],\n minval=-self.height_lower,\n maxval=self.height_upper)\n height_zoom = height_zoom * img_hd\n width_zoom = self._rng.uniform(\n shape=[batch_size, 1],\n minval=-self.width_lower,\n maxval=self.width_upper)\n width_zoom = width_zoom * img_wd\n zooms = math_ops.cast(\n array_ops.concat([height_zoom, width_zoom], axis=1),\n dtype=inputs.dtype)\n return transform(\n inputs, get_zoom_matrix(zooms, img_hd, img_wd),\n interpolation=self.fill_mode)\n\n output = tf_utils.smart_cond(training, random_zoomed_inputs,\n lambda: inputs)\n output.set_shape(inputs.shape)\n return output\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {\n 'height_factor': self.height_factor,\n 'width_factor': self.width_factor,\n 'fill_mode': self.fill_mode,\n 'fill_value': self.fill_value,\n 'seed': self.seed,\n }\n base_config = super(RandomZoom, self).get_config()\n return 
dict(list(base_config.items()) + list(config.items()))\n\n\ndef get_zoom_matrix(zooms, image_height, image_width, name=None):\n \"\"\"Returns projective transform(s) for the given zoom(s).\n\n Args:\n zooms: A matrix of 2-element lists representing [zx, zy] to zoom\n for each image (for a batch of images).\n image_height: Height of the image(s) to be transformed.\n image_width: Width of the image(s) to be transformed.\n name: The name of the op.\n\n Returns:\n A tensor of shape (num_images, 8). Projective transforms which can be given\n to operation `image_projective_transform_v2`. If one row of transforms is\n [a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point\n `(x, y)` to a transformed *input* point\n `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,\n where `k = c0 x + c1 y + 1`.\n \"\"\"\n with ops.name_scope(name, 'zoom_matrix'):\n num_zooms = array_ops.shape(zooms)[0]\n # The zoom matrix looks like:\n # [[zx 0 0]\n # [0 zy 0]\n # [0 0 1]]\n # where the last entry is implicit.\n # Zoom matrices are always float32.\n x_offset = ((image_height + 1.) / 2.0) * (zooms[:, 0, None] - 1.)\n y_offset = ((image_width + 1.) 
/ 2.0) * (zooms[:, 1, None] - 1.)\n return array_ops.concat(\n values=[\n zooms[:, 0, None],\n array_ops.zeros((num_zooms, 1), dtypes.float32),\n x_offset,\n array_ops.zeros((num_zooms, 1), dtypes.float32),\n zooms[:, 1, None],\n y_offset,\n array_ops.zeros((num_zooms, 2), dtypes.float32),\n ],\n axis=1)\n\n\nclass RandomContrast(Layer):\n \"\"\"Adjust the contrast of an image or images by a random factor.\n\n Contrast is adjusted independently for each channel of each image during\n training.\n\n For each channel, this layer computes the mean of the image pixels in the\n channel and then adjusts each component `x` of each pixel to\n `(x - mean) * contrast_factor + mean`.\n\n Input shape:\n 4D tensor with shape:\n `(samples, height, width, channels)`, data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(samples, height, width, channels)`, data_format='channels_last'.\n\n Attributes:\n factor: a positive float represented as fraction of value, or a tuple\n of size 2 representing lower and upper bound. When represented as a\n single float, lower = upper. The contrast factor will be randomly picked\n between [1.0 - lower, 1.0 + upper].\n seed: Integer. Used to create a random seed.\n Raise:\n ValueError: if lower bound is not between [0, 1], or upper bound is\n negative.\n \"\"\"\n\n def __init__(self, factor, seed=None, **kwargs):\n self.factor = factor\n if isinstance(factor, (tuple, list)):\n self.lower = factor[0]\n self.upper = factor[1]\n else:\n self.lower = self.upper = factor\n if self.lower < 0. or self.upper < 0. or self.lower > 1.:\n raise ValueError('Factor cannot have negative values, '\n 'got {}'.format(factor))\n self.seed = seed\n self.input_spec = InputSpec(ndim=4)\n super(RandomContrast, self).__init__(**kwargs)\n\n def call(self, inputs, training=None):\n if training is None:\n training = K.learning_phase()\n\n def random_contrasted_inputs():\n return image_ops.random_contrast(inputs, 1. - self.lower, 1. 
+ self.upper,\n self.seed)\n\n output = tf_utils.smart_cond(training, random_contrasted_inputs,\n lambda: inputs)\n output.set_shape(inputs.shape)\n return output\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {\n 'factor': self.factor,\n 'seed': self.seed,\n }\n base_config = super(RandomContrast, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass RandomHeight(Layer):\n \"\"\"Randomly vary the height of a batch of images during training.\n\n Adjusts the height of a batch of images by a random factor. The input\n should be a 4-D tensor in the \"channels_last\" image data format.\n\n By default, this layer is inactive during inference.\n\n Arguments:\n factor: A positive float (fraction of original height), or a tuple of\n size 2 representing lower and upper bound for resizing vertically. When\n represented as a single float, this value is used for both the upper and\n lower bound. For instance, `factor=(0.2, 0.3)` results in an output height\n varying in the range `[original + 20%, original + 30%]`. `factor=(-0.2,\n 0.3)` results in an output height varying in the range `[original - 20%,\n original + 30%]`. `factor=0.2` results in an output height varying in the\n range `[original - 20%, original + 20%]`.\n interpolation: String, the interpolation method. Defaults to `bilinear`.\n Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`,\n `gaussian`, `mitchellcubic`\n seed: Integer. 
Used to create a random seed.\n\n Input shape:\n 4D tensor with shape:\n `(samples, height, width, channels)` (data_format='channels_last').\n\n Output shape:\n 4D tensor with shape:\n `(samples, random_height, width, channels)`.\n \"\"\"\n\n def __init__(self, factor, interpolation='bilinear', seed=None, **kwargs):\n self.factor = factor\n if isinstance(factor, (tuple, list)):\n self.height_lower = -factor[0]\n self.height_upper = factor[1]\n else:\n self.height_lower = self.height_upper = factor\n if self.height_lower > 1.:\n raise ValueError('`factor` cannot have abs lower bound larger than 1.0, '\n 'got {}'.format(factor))\n self.interpolation = interpolation\n self._interpolation_method = get_interpolation(interpolation)\n self.input_spec = InputSpec(ndim=4)\n self.seed = seed\n self._rng = make_generator(self.seed)\n super(RandomHeight, self).__init__(**kwargs)\n\n def call(self, inputs, training=None):\n if training is None:\n training = K.learning_phase()\n\n def random_height_inputs():\n \"\"\"Inputs height-adjusted with random ops.\"\"\"\n inputs_shape = array_ops.shape(inputs)\n h_axis, w_axis = 1, 2\n img_hd = math_ops.cast(inputs_shape[h_axis], dtypes.float32)\n img_wd = inputs_shape[w_axis]\n height_factor = self._rng.uniform(\n shape=[],\n minval=(1.0 - self.height_lower),\n maxval=(1.0 + self.height_upper))\n adjusted_height = math_ops.cast(height_factor * img_hd, dtypes.int32)\n adjusted_size = array_ops.stack([adjusted_height, img_wd])\n output = image_ops.resize_images_v2(\n images=inputs, size=adjusted_size, method=self._interpolation_method)\n original_shape = inputs.shape.as_list()\n output_shape = [original_shape[0]] + [None] + original_shape[2:4]\n output.set_shape(output_shape)\n return output\n\n return tf_utils.smart_cond(training, random_height_inputs, lambda: inputs)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n return tensor_shape.TensorShape(\n [input_shape[0], None, 
input_shape[2], input_shape[3]])\n\n def get_config(self):\n config = {\n 'factor': self.factor,\n 'interpolation': self.interpolation,\n 'seed': self.seed,\n }\n base_config = super(RandomHeight, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass RandomWidth(Layer):\n \"\"\"Randomly vary the width of a batch of images during training.\n\n Adjusts the width of a batch of images by a random factor. The input\n should be a 4-D tensor in the \"channels_last\" image data format.\n\n By default, this layer is inactive during inference.\n\n Arguments:\n factor: A positive float (fraction of original width), or a tuple of\n size 2 representing lower and upper bound for resizing horizontally. When\n represented as a single float, this value is used for both the upper and\n lower bound. For instance, `factor=(0.2, 0.3)` results in an output width\n varying in the range `[original + 20%, original + 30%]`. `factor=(-0.2,\n 0.3)` results in an output width varying in the range `[original - 20%,\n original + 30%]`. `factor=0.2` results in an output width varying in the\n range `[original - 20%, original + 20%]`.\n interpolation: String, the interpolation method. Defaults to `bilinear`.\n Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`,\n `gaussian`, `mitchellcubic`\n seed: Integer. 
Used to create a random seed.\n\n Input shape:\n 4D tensor with shape:\n `(samples, height, width, channels)` (data_format='channels_last').\n\n Output shape:\n 4D tensor with shape:\n `(samples, random_height, width, channels)`.\n \"\"\"\n\n def __init__(self, factor, interpolation='bilinear', seed=None, **kwargs):\n self.factor = factor\n if isinstance(factor, (tuple, list)):\n self.width_lower = -factor[0]\n self.width_upper = factor[1]\n else:\n self.width_lower = self.width_upper = factor\n if self.width_lower > 1.:\n raise ValueError('`factor` cannot have abs lower bound larger than 1.0, '\n 'got {}'.format(factor))\n self.interpolation = interpolation\n self._interpolation_method = get_interpolation(interpolation)\n self.input_spec = InputSpec(ndim=4)\n self.seed = seed\n self._rng = make_generator(self.seed)\n super(RandomWidth, self).__init__(**kwargs)\n\n def call(self, inputs, training=None):\n if training is None:\n training = K.learning_phase()\n\n def random_width_inputs():\n \"\"\"Inputs width-adjusted with random ops.\"\"\"\n inputs_shape = array_ops.shape(inputs)\n h_axis, w_axis = 1, 2\n img_hd = inputs_shape[h_axis]\n img_wd = math_ops.cast(inputs_shape[w_axis], dtypes.float32)\n width_factor = self._rng.uniform(\n shape=[],\n minval=(1.0 - self.width_lower),\n maxval=(1.0 + self.width_upper))\n adjusted_width = math_ops.cast(width_factor * img_wd, dtypes.int32)\n adjusted_size = array_ops.stack([img_hd, adjusted_width])\n output = image_ops.resize_images_v2(\n images=inputs, size=adjusted_size, method=self._interpolation_method)\n original_shape = inputs.shape.as_list()\n output_shape = original_shape[0:2] + [None] + [original_shape[3]]\n output.set_shape(output_shape)\n return output\n\n return tf_utils.smart_cond(training, random_width_inputs, lambda: inputs)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n return tensor_shape.TensorShape(\n [input_shape[0], input_shape[1], 
None, input_shape[3]])\n\n def get_config(self):\n config = {\n 'factor': self.factor,\n 'interpolation': self.interpolation,\n 'seed': self.seed,\n }\n base_config = super(RandomWidth, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\ndef make_generator(seed=None):\n if seed:\n return stateful_random_ops.Generator.from_seed(seed)\n else:\n return stateful_random_ops.Generator.from_non_deterministic_state()\n\n\ndef get_interpolation(interpolation):\n interpolation = interpolation.lower()\n if interpolation not in _RESIZE_METHODS:\n raise NotImplementedError(\n 'Value not recognized for `interpolation`: {}. Supported values '\n 'are: {}'.format(interpolation, _RESIZE_METHODS.keys()))\n return _RESIZE_METHODS[interpolation]\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TraceMe allows the profiler to trace python events.\n\nUsage:\n with profiler.TraceMe('name'):\n ...\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\n\nfrom tensorflow.python.profiler.internal import _pywrap_traceme\n\n\nclass TraceMe(object):\n \"\"\"Context manager that generates a trace event in the profiler.\"\"\"\n\n def __init__(self, name, **kwargs):\n if _pywrap_traceme.TraceMe.IsEnabled():\n if kwargs:\n name += '#' + ','.join(key + '=' + str(value)\n for key, value in six.iteritems(kwargs)) + '#'\n self._traceme = _pywrap_traceme.TraceMe(name)\n else:\n self._traceme = None\n\n def __enter__(self):\n if self._traceme:\n self._traceme.Enter()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self._traceme:\n self._traceme.Exit()\n\n\ndef traceme_wrapper(func):\n name = getattr(func, '__qualname__', None)\n if not name:\n name = func.__name__\n\n def wrapper(*args, **kwargs):\n with TraceMe(name):\n return func(*args, **kwargs)\n return wrapper\n\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for activity module.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gast\nimport six\n\nfrom tensorflow.python.autograph.pyct import anno\nfrom tensorflow.python.autograph.pyct import parser\nfrom tensorflow.python.autograph.pyct import qual_names\nfrom tensorflow.python.autograph.pyct import transformer\nfrom tensorflow.python.autograph.pyct.static_analysis import activity\nfrom tensorflow.python.autograph.pyct.static_analysis import annos\nfrom tensorflow.python.platform import test\n\n\nQN = qual_names.QN\nNodeAnno = annos.NodeAnno\n\nglobal_a = 7\nglobal_b = 17\n\n\nclass ScopeTest(test.TestCase):\n\n def assertMissing(self, qn, scope):\n self.assertNotIn(qn, scope.read)\n self.assertNotIn(qn, scope.modified)\n\n def assertReadOnly(self, qn, scope):\n self.assertIn(qn, scope.read)\n self.assertNotIn(qn, scope.modified)\n\n def assertWriteOnly(self, qn, scope):\n self.assertNotIn(qn, scope.read)\n self.assertIn(qn, scope.modified)\n\n def assertReadWrite(self, qn, scope):\n self.assertIn(qn, scope.read)\n self.assertIn(qn, scope.modified)\n\n def test_copy_from(self):\n scope = activity.Scope(None)\n scope.modified.add(QN('foo'))\n other = activity.Scope(None)\n 
other.copy_from(scope)\n\n self.assertWriteOnly(QN('foo'), other)\n\n scope.modified.add(QN('bar'))\n scope.copy_from(other)\n\n self.assertMissing(QN('bar'), scope)\n\n scope.modified.add(QN('bar'))\n scope.merge_from(other)\n\n self.assertWriteOnly(QN('bar'), scope)\n self.assertMissing(QN('bar'), other)\n\n def test_copy_of(self):\n scope = activity.Scope(None)\n scope.read.add(QN('foo'))\n other = activity.Scope.copy_of(scope)\n\n self.assertReadOnly(QN('foo'), other)\n\n child_scope = activity.Scope(scope)\n child_scope.read.add(QN('bar'))\n other = activity.Scope.copy_of(child_scope)\n\n self.assertReadOnly(QN('bar'), other)\n\n def test_referenced(self):\n scope = activity.Scope(None)\n scope.read.add(QN('a'))\n\n child = activity.Scope(scope)\n child.read.add(QN('b'))\n\n child2 = activity.Scope(child, isolated=False)\n child2.read.add(QN('c'))\n\n child2.finalize()\n child.finalize()\n scope.finalize()\n\n self.assertIn(QN('c'), child2.referenced)\n self.assertIn(QN('b'), child2.referenced)\n self.assertIn(QN('a'), child2.referenced)\n\n self.assertIn(QN('c'), child.referenced)\n self.assertIn(QN('b'), child.referenced)\n self.assertIn(QN('a'), child.referenced)\n\n\nclass ActivityAnalyzerTestBase(test.TestCase):\n\n def _parse_and_analyze(self, test_fn):\n node, source = parser.parse_entity(test_fn, future_features=())\n entity_info = transformer.EntityInfo(\n source_code=source, source_file=None, future_features=(), namespace={})\n node = qual_names.resolve(node)\n ctx = transformer.Context(entity_info)\n node = activity.resolve(node, ctx)\n return node, entity_info\n\n def assertSymbolSetsAre(self, expected, actual, name):\n expected = set(expected)\n actual = set(str(s) for s in actual)\n self.assertSetEqual(\n expected, actual, 'for symbol set: %s\\n'\n ' Expected: %s\\n'\n ' Got: %s\\n'\n ' Missing: %s\\n'\n ' Extra: %s\\n' % (name.upper(), expected, actual,\n expected - actual, actual - expected))\n\n def assertScopeIs(self, scope, used, 
modified):\n \"\"\"Assert the scope contains specific used, modified & created variables.\"\"\"\n self.assertSymbolSetsAre(used, scope.read, 'read')\n self.assertSymbolSetsAre(modified, scope.modified, 'modified')\n\n\nclass ActivityAnalyzerTest(ActivityAnalyzerTestBase):\n\n def test_import(self):\n\n def test_fn():\n import a, b.x, y as c, z.u as d # pylint:disable=g-multiple-import,g-import-not-at-top,unused-variable\n\n node, _ = self._parse_and_analyze(test_fn)\n scope = anno.getanno(node.body[0], anno.Static.SCOPE)\n self.assertScopeIs(scope, (), ('a', 'b', 'c', 'd'))\n\n def test_import_from(self):\n\n def test_fn():\n from x import a # pylint:disable=g-import-not-at-top,unused-variable\n from y import z as b # pylint:disable=g-import-not-at-top,unused-variable\n\n node, _ = self._parse_and_analyze(test_fn)\n scope = anno.getanno(node.body[0], anno.Static.SCOPE)\n self.assertScopeIs(scope, (), ('a',))\n scope = anno.getanno(node.body[1], anno.Static.SCOPE)\n self.assertScopeIs(scope, (), ('b',))\n\n def test_print_statement(self):\n\n def test_fn(a):\n b = 0\n c = 1\n print(a, b)\n return c\n\n node, _ = self._parse_and_analyze(test_fn)\n print_node = node.body[2]\n if isinstance(print_node, gast.Print):\n # Python 2\n print_args_scope = anno.getanno(print_node, NodeAnno.ARGS_SCOPE)\n else:\n # Python 3\n assert isinstance(print_node, gast.Expr)\n # The call node should be the one being annotated.\n print_node = print_node.value\n print_args_scope = anno.getanno(print_node, NodeAnno.ARGS_SCOPE)\n # We basically need to detect which variables are captured by the call\n # arguments.\n self.assertScopeIs(print_args_scope, ('a', 'b'), ())\n\n def test_call_args(self):\n\n def test_fn(a):\n b = 0\n c = 1\n foo(a, b) # pylint:disable=undefined-variable\n return c\n\n node, _ = self._parse_and_analyze(test_fn)\n call_node = node.body[2].value\n # We basically need to detect which variables are captured by the call\n # arguments.\n self.assertScopeIs(\n 
anno.getanno(call_node, NodeAnno.ARGS_SCOPE), ('a', 'b'), ())\n\n def test_call_args_attributes(self):\n\n def foo(*_):\n pass\n\n def test_fn(a):\n a.c = 0\n foo(a.b, a.c)\n return a.d\n\n node, _ = self._parse_and_analyze(test_fn)\n call_node = node.body[1].value\n self.assertScopeIs(\n anno.getanno(call_node, NodeAnno.ARGS_SCOPE), ('a', 'a.b', 'a.c'), ())\n\n def test_call_args_subscripts(self):\n\n def foo(*_):\n pass\n\n def test_fn(a):\n b = 1\n c = 2\n foo(a[0], a[b])\n return a[c]\n\n node, _ = self._parse_and_analyze(test_fn)\n call_node = node.body[2].value\n self.assertScopeIs(\n anno.getanno(call_node, NodeAnno.ARGS_SCOPE),\n ('a', 'a[0]', 'a[b]', 'b'), ())\n\n def test_while(self):\n\n def test_fn(a):\n b = a\n while b > 0:\n c = b\n b -= 1\n return b, c\n\n node, _ = self._parse_and_analyze(test_fn)\n while_node = node.body[1]\n self.assertScopeIs(\n anno.getanno(while_node, NodeAnno.BODY_SCOPE), ('b',), ('b', 'c'))\n self.assertScopeIs(\n anno.getanno(while_node, NodeAnno.BODY_SCOPE).parent, ('a', 'b', 'c'),\n ('b', 'c'))\n self.assertScopeIs(\n anno.getanno(while_node, NodeAnno.COND_SCOPE), ('b',), ())\n\n def test_for(self):\n\n def test_fn(a):\n b = a\n for _ in a:\n c = b\n b -= 1\n return b, c\n\n node, _ = self._parse_and_analyze(test_fn)\n for_node = node.body[1]\n self.assertScopeIs(\n anno.getanno(for_node, NodeAnno.ITERATE_SCOPE), (), ('_'))\n self.assertScopeIs(\n anno.getanno(for_node, NodeAnno.BODY_SCOPE), ('b',), ('b', 'c'))\n self.assertScopeIs(\n anno.getanno(for_node, NodeAnno.BODY_SCOPE).parent, ('a', 'b', 'c'),\n ('b', 'c', '_'))\n\n def test_if(self):\n\n def test_fn(x):\n if x > 0:\n x = -x\n y = 2 * x\n z = -y\n else:\n x = 2 * x\n y = -x\n u = -y\n return z, u\n\n node, _ = self._parse_and_analyze(test_fn)\n if_node = node.body[0]\n self.assertScopeIs(\n anno.getanno(if_node, NodeAnno.BODY_SCOPE), ('x', 'y'), ('x', 'y', 'z'))\n self.assertScopeIs(\n anno.getanno(if_node, NodeAnno.BODY_SCOPE).parent, ('x', 'y', 'z', 'u'),\n 
('x', 'y', 'z', 'u'))\n self.assertScopeIs(\n anno.getanno(if_node, NodeAnno.ORELSE_SCOPE), ('x', 'y'),\n ('x', 'y', 'u'))\n self.assertScopeIs(\n anno.getanno(if_node, NodeAnno.ORELSE_SCOPE).parent,\n ('x', 'y', 'z', 'u'), ('x', 'y', 'z', 'u'))\n\n def test_if_attributes(self):\n\n def test_fn(a):\n if a > 0:\n a.b = -a.c\n d = 2 * a\n else:\n a.b = a.c\n d = 1\n return d\n\n node, _ = self._parse_and_analyze(test_fn)\n if_node = node.body[0]\n self.assertScopeIs(\n anno.getanno(if_node, NodeAnno.BODY_SCOPE), ('a', 'a.c'), ('a.b', 'd'))\n self.assertScopeIs(\n anno.getanno(if_node, NodeAnno.ORELSE_SCOPE), ('a', 'a.c'),\n ('a.b', 'd'))\n self.assertScopeIs(\n anno.getanno(if_node, NodeAnno.BODY_SCOPE).parent, ('a', 'a.c', 'd'),\n ('a.b', 'd'))\n\n def test_if_subscripts(self):\n\n def test_fn(a, b, c, e):\n if a > 0:\n a[b] = -a[c]\n d = 2 * a\n else:\n a[0] = e\n d = 1\n return d\n\n node, _ = self._parse_and_analyze(test_fn)\n if_node = node.body[0]\n self.assertScopeIs(\n anno.getanno(if_node, NodeAnno.BODY_SCOPE), ('a', 'b', 'c', 'a[c]'),\n ('a[b]', 'd'))\n # TODO(mdan): Should subscript writes (a[0] = 1) be considered to read \"a\"?\n self.assertScopeIs(\n anno.getanno(if_node, NodeAnno.ORELSE_SCOPE), ('a', 'e'), ('a[0]', 'd'))\n self.assertScopeIs(\n anno.getanno(if_node, NodeAnno.ORELSE_SCOPE).parent,\n ('a', 'b', 'c', 'd', 'e', 'a[c]'), ('d', 'a[b]', 'a[0]'))\n\n def test_nested_if(self):\n\n def test_fn(b):\n if b > 0:\n if b < 5:\n a = b\n else:\n a = b * b\n return a\n\n node, _ = self._parse_and_analyze(test_fn)\n inner_if_node = node.body[0].body[0]\n self.assertScopeIs(\n anno.getanno(inner_if_node, NodeAnno.BODY_SCOPE), ('b',), ('a',))\n self.assertScopeIs(\n anno.getanno(inner_if_node, NodeAnno.ORELSE_SCOPE), ('b',), ('a',))\n\n def test_nested_function(self):\n\n def test_fn(a):\n\n def f(x):\n y = x * x\n return y\n\n b = a\n for i in a:\n c = b\n b -= f(i)\n return b, c\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_def_node = 
node.body[0]\n\n self.assertScopeIs(\n anno.getanno(fn_def_node, NodeAnno.BODY_SCOPE), ('x', 'y'), ('y',))\n\n def test_constructor_attributes(self):\n\n class TestClass(object):\n\n def __init__(self, a):\n self.b = a\n self.b.c = 1\n\n node, _ = self._parse_and_analyze(TestClass)\n init_node = node.body[0]\n self.assertScopeIs(\n anno.getanno(init_node, NodeAnno.BODY_SCOPE), ('self', 'a', 'self.b'),\n ('self', 'self.b', 'self.b.c'))\n\n def test_aug_assign_subscripts(self):\n\n def test_fn(a):\n a[0] += 1\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n self.assertScopeIs(\n anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('a', 'a[0]'), ('a[0]',))\n\n def test_return_vars_are_read(self):\n\n def test_fn(a, b, c): # pylint: disable=unused-argument\n return c\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n self.assertScopeIs(anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('c',), ())\n self.assertScopeIs(\n anno.getanno(node.body[0], anno.Static.SCOPE), ('c',), ())\n\n def test_raise_names_are_read(self):\n\n def test_fn(a, b, c): # pylint: disable=unused-argument\n raise b\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n self.assertScopeIs(anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('b',), ())\n self.assertScopeIs(\n anno.getanno(node.body[0], anno.Static.SCOPE), ('b',), ())\n\n def test_except_exposes_names(self):\n\n def test_fn(a, b, c): # pylint: disable=unused-argument\n try:\n pass\n except: # pylint: disable=bare-except\n b = c\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n self.assertScopeIs(\n anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('c',), ('b',))\n\n def test_except_hides_exception_var_name(self):\n\n def test_fn(a, b, c): # pylint: disable=unused-argument\n try:\n pass\n except a as e:\n b = e\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n self.assertScopeIs(\n anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('a',), ('b',))\n\n def test_aug_assign(self):\n\n def 
test_fn(a, b):\n a += b\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n self.assertScopeIs(\n anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('a', 'b'), ('a'))\n\n def test_aug_assign_rvalues(self):\n\n a = dict(bar=3)\n\n def foo():\n return a\n\n def test_fn(x):\n foo()['bar'] += x\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n self.assertScopeIs(\n anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('foo', 'x'), ())\n\n def test_params(self):\n\n def test_fn(a, b): # pylint: disable=unused-argument\n return b\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)\n self.assertScopeIs(body_scope, ('b',), ())\n self.assertScopeIs(body_scope.parent, ('b',), ())\n\n args_scope = anno.getanno(fn_node.args, anno.Static.SCOPE)\n self.assertSymbolSetsAre(('a', 'b'), args_scope.params.keys(), 'params')\n\n def test_lambda_captures_reads(self):\n\n def test_fn(a, b):\n return lambda: a + b\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)\n self.assertScopeIs(body_scope, ('a', 'b'), ())\n # Nothing local to the lambda is tracked.\n self.assertSymbolSetsAre((), body_scope.params.keys(), 'params')\n\n def test_lambda_params_are_isolated(self):\n\n def test_fn(a, b): # pylint: disable=unused-argument\n return lambda a: a + b\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)\n self.assertScopeIs(body_scope, ('b',), ())\n self.assertSymbolSetsAre((), body_scope.params.keys(), 'params')\n\n def test_lambda_complex(self):\n\n def test_fn(a, b, c, d): # pylint: disable=unused-argument\n a = (lambda a, b, c: a + b + c)(d, 1, 2) + b\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)\n self.assertScopeIs(body_scope, ('b', 'd'), ('a',))\n self.assertSymbolSetsAre((), 
body_scope.params.keys(), 'params')\n\n def test_lambda_nested(self):\n\n def test_fn(a, b, c, d, e): # pylint: disable=unused-argument\n a = lambda a, b: d(lambda b: a + b + c) # pylint: disable=undefined-variable\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)\n self.assertScopeIs(body_scope, ('c', 'd'), ('a',))\n self.assertSymbolSetsAre((), body_scope.params.keys(), 'params')\n\n def test_comprehension_targets_are_isolated(self):\n\n def test_fn(a):\n b = {c for c in a} # pylint:disable=unused-variable\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)\n self.assertScopeIs(body_scope, ('a',), ('b',))\n\n def test_comprehension_targets_are_isolated_list_function_w_generator(self):\n\n def test_fn(a):\n b = list(c for c in a) # pylint:disable=unused-variable\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)\n self.assertScopeIs(body_scope, ('a', 'list'), ('b',))\n\n def test_list_comprehension_targets_are_sometimes_isolated(self):\n\n def test_fn(a):\n b = [c for c in a] # pylint:disable=unused-variable\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)\n if six.PY2:\n self.assertScopeIs(body_scope, ('a',), ('b', 'c'))\n else:\n self.assertScopeIs(body_scope, ('a',), ('b',))\n\n def test_comprehension_targets_are_isolated_in_augassign(self):\n\n def test_fn(a, b):\n b += [c for c in a] # pylint:disable=unused-variable\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)\n if six.PY2:\n self.assertScopeIs(body_scope, ('a', 'b'), ('b', 'c'))\n else:\n self.assertScopeIs(body_scope, ('a', 'b'), ('b',))\n\n def test_comprehension_generator_order(self):\n\n def test_fn(a, b, c): # pylint:disable=unused-argument\n e = 
{d: (a, b) for (a, b) in c for d in b} # pylint:disable=unused-variable,g-complex-comprehension\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)\n self.assertScopeIs(body_scope, ('c',), ('e',))\n\n def test_global_symbol(self):\n\n def test_fn(c):\n global global_a\n global global_b\n global_a = global_b + c\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)\n self.assertScopeIs(body_scope, ('global_b', 'c'), ('global_a',))\n self.assertSetEqual(body_scope.globals, set(\n (QN('global_a'), QN('global_b'))))\n\n def test_class_definition_basic(self):\n\n def test_fn(a, b):\n class C(a(b)):\n d = 1\n return C\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)\n self.assertScopeIs(body_scope, ('a', 'b', 'C'), ('C',))\n\n def test_class_definition_isolates_method_writes(self):\n\n def test_fn(a, b, c):\n class C(a(b)):\n d = 1\n\n def e(self):\n f = c + 1\n return f\n return C\n\n node, _ = self._parse_and_analyze(test_fn)\n fn_node = node\n body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)\n self.assertScopeIs(body_scope, ('a', 'b', 'C', 'c'), ('C',))\n\n\nif __name__ == '__main__':\n test.main()\n"
] | [
[
"tensorflow.lite.testing.zip_test_utils.make_zip_of_tests",
"tensorflow.lite.testing.zip_test_utils.create_tensor_data",
"tensorflow.compat.v1.compat.v1.placeholder",
"tensorflow.compat.v1.matmul",
"tensorflow.lite.testing.zip_test_utils.register_make_test_function",
"tensorflow.compat.v1.compat.forward_compatibility_horizon"
],
[
"tensorflow.python.ops.image_ops.random_flip_up_down",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.math_ops.sin",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.framework.ops.convert_to_tensor_v2",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.stateful_random_ops.Generator.from_non_deterministic_state",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.image_ops.resize_images_v2",
"tensorflow.python.ops.math_ops.reduce_all",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"tensorflow.python.ops.image_ops.random_flip_left_right",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.array_ops.slice",
"tensorflow.python.ops.stateful_random_ops.Generator.from_seed",
"tensorflow.python.ops.image_ops.random_contrast",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.math_ops.cos",
"tensorflow.python.keras.utils.tf_utils.smart_cond",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.keras.engine.input_spec.InputSpec",
"tensorflow.python.keras.backend.learning_phase"
],
[
"tensorflow.python.profiler.internal._pywrap_traceme.TraceMe",
"tensorflow.python.profiler.internal._pywrap_traceme.TraceMe.IsEnabled"
],
[
"tensorflow.python.autograph.pyct.parser.parse_entity",
"tensorflow.python.autograph.pyct.transformer.EntityInfo",
"tensorflow.python.autograph.pyct.qual_names.resolve",
"tensorflow.python.autograph.pyct.transformer.Context",
"tensorflow.python.platform.test.main",
"tensorflow.python.autograph.pyct.static_analysis.activity.resolve",
"tensorflow.python.autograph.pyct.anno.getanno",
"tensorflow.python.autograph.pyct.static_analysis.activity.Scope",
"tensorflow.python.autograph.pyct.static_analysis.activity.Scope.copy_of"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.3",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.