repo_name (stringlengths 8–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence)
---|---|---|---|---
HastingsGreer/mermaid | [
"bd13c5fc427eb8cd9054973a8eaaeb302078182d",
"ba07883cc3cb5982e4655048a434b4495cb49c6d"
] | [
"mermaid/forward_models.py",
"mermaid/multiscale_optimizer.py"
] | [
"\"\"\"\nPackage defining various dynamic forward models as well as convenience methods to generate the\nright hand sides (RHS) of the related partial differential equations.\n\nCurrently, the following forward models are implemented:\n #. An advection equation for images\n #. An advection equation for maps\n #. The EPDiff-equation parameterized using the vector-valued momentum for images\n #. The EPDiff-equation parameterized using the vector-valued momentum for maps\n #. The EPDiff-equation parameterized using the scalar-valued momentum for images\n #. The EPDiff-equation parameterized using the scalar-valued momentum for maps\n \nThe images are expected to be tensors of dimension: BxCxXxYxZ (or BxCxX in 1D and BxCxXxY in 2D),\nwhere B is the batch-size, C the number of channels, and X, Y, and Z are the spatial coordinate indices.\n\nFuthermore the following (RHSs) are provided\n #. Image advection\n #. Map advection\n #. Scalar conservation law\n #. EPDiff\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nfrom builtins import range\nfrom builtins import object\nfrom abc import ABCMeta, abstractmethod\nimport numpy as np\nfrom . import finite_differences_multi_channel as fdm\nfrom . import utils\nfrom .data_wrapper import MyTensor\nfrom future.utils import with_metaclass\nimport torch.nn as nn\nimport torch\n\n\nclass RHSLibrary(object):\n \"\"\"\n Convenience class to quickly generate various right hand sides (RHSs) of popular partial differential \n equations. In this way new forward models can be written with minimal code duplication.\n \"\"\"\n\n def __init__(self, spacing, use_neumann_BC_for_map=False):\n \"\"\"\n Constructor\n \n :param spacing: Spacing for the images. This will be an array with 1, 2, or 3 entries in 1D, 2D, and 3D respectively. \n \"\"\"\n self.spacing = spacing\n \"\"\"spatial spacing\"\"\"\n self.spacing_min = np.min(spacing)\n \"\"\" min of the spacing\"\"\"\n self.spacing_ratio = spacing/self.spacing_min\n self.fdt_ne = fdm.FD_torch_multi_channel(spacing,mode='neumann_zero')\n \"\"\"torch finite differencing support neumann zero\"\"\"\n self.fdt_le = fdm.FD_torch_multi_channel( spacing, mode='linear')\n \"\"\"torch finite differencing support linear extrapolation\"\"\"\n self.fdt_di = fdm.FD_torch_multi_channel(spacing, mode='dirichlet_zero')\n \"\"\"torch finite differencing support dirichlet zero\"\"\"\n self.dim = len(self.spacing)\n \"\"\"spatial dimension\"\"\"\n self.use_neumann_BC_for_map = use_neumann_BC_for_map\n \"\"\"If True uses zero Neumann boundary conditions also for evolutions of the map, if False uses linear extrapolation\"\"\"\n\n def rhs_advect_image_multiNC(self,I,v):\n '''\n Advects a batch of images which can be multi-channel. 
Expected image format here, is \n BxCxXxYxZ, where B is the number of images (batch size), C, the number of channels\n per image and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)\n\n \n :math:`-\\\\nabla I^Tv`\n\n \n :param I: Image batch BxCIxXxYxZ\n :param v: Velocity fields (this will be one velocity field per image) BxCxXxYxZ\n :return: Returns the RHS of the advection equations involved BxCxXxYxZ\n '''\n\n rhs_ret= self._rhs_advect_image_multiN(I, v )\n return rhs_ret\n\n\n def _rhs_advect_image_multiN(self,I,v):\n \"\"\"\n :param I: One-channel input image: Bx1xXxYxZ\n :param v: velocity field BxCxXxYxZ\n :return: Returns the RHS of the advection equation for one channel BxXxYxZ\n \"\"\"\n\n if self.dim == 1:\n rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1]\n elif self.dim == 2:\n rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1] -self.fdt_ne.dYc(I)*v[:,1:2]\n elif self.dim == 3:\n rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1] -self.fdt_ne.dYc(I)*v[:,1:2]-self.fdt_ne.dZc(I)*v[:,2:3]\n else:\n raise ValueError('Only supported up to dimension 3')\n return rhs_ret\n\n\n def rhs_scalar_conservation_multiNC(self, I, v):\n \"\"\"\n Scalar conservation law for a batch of images which can be multi-channel. Expected image format here, is \n BxCxXxYxZ, where B is the number of images (batch size), C, the number of channels\n per image and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)\n\n :math:`-div(Iv)`\n\n :param I: Image batch BxCIxXxYxZ\n :param v: Velocity fields (this will be one velocity field per image) BxCxXxYxZ\n :return: Returns the RHS of the scalar conservation law equations involved BxCxXxYxZ\n \"\"\"\n\n rhs_ret=self._rhs_scalar_conservation_multiN(I, v)\n return rhs_ret\n\n\n\n def _rhs_scalar_conservation_multiN(self, I, v):\n \"\"\"\n :param I: One-channel input image: Bx1xXxYxZ\n :param v: velocity field BxCxXxYxZ\n :return: Returns the RHS of the scalar-conservation law equation for one channel BxXxYxZ\n \"\"\"\n\n if self.dim==1:\n rhs_ret = -self.fdt_ne.dXc(I*v[:,0:1])\n elif self.dim==2:\n rhs_ret = -self.fdt_ne.dXc(I*v[:,0:1]) -self.fdt_ne.dYc(I*v[:,1:2])\n elif self.dim==3:\n rhs_ret = -self.fdt_ne.dXc(I* v[:,0:1]) -self.fdt_ne.dYc(I*v[:,1:2])-self.fdt_ne.dZc(I*v[:,2:3])\n else:\n raise ValueError('Only supported up to dimension 3')\n return rhs_ret\n\n\n def rhs_lagrangian_evolve_map_multiNC(self, phi, v):\n \"\"\"\n Evolves a set of N maps (for N images). Expected format here, is\n BxCxXxYxZ, where B is the number of images/maps (batch size), C, the number of channels\n per (here the spatial dimension for the map coordinate functions),\n and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D).\n This is used to evolve the map going from source to target image. 
Requires interpolation,\n so it should, if at all possible, not be used as part of an optimization.\n The inverse map can be computed this way because the map is defined\n in the source space: it records where each point moves to (whereas a map in the target space records where a point came from).\n It therefore suffices to sample the velocity at the current map position and to accumulate it over the time steps.\n Since the advection moves the image (or the map-warped image) by one step of v, the same velocity field is shared by all coordinates,\n so computing the inverse map this way is safe.\n\n :math:`v\circ\phi`\n\n :param phi: map batch BxCxXxYxZ\n :param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ\n :return: Returns the RHS of the evolution equations involved BxCxXxYxZ\n \"\"\"\n\n rhs_ret = utils.compute_warped_image_multiNC(v, phi, spacing=self.spacing, spline_order=1,zero_boundary=False)\n return rhs_ret\n\n\n def rhs_advect_map_multiNC(self, phi, v):\n '''\n Advects a set of N maps (for N images). Expected format here is \n BxCxXxYxZ, where B is the number of images/maps (batch size), C, the number of channels\n per map (here the spatial dimension, i.e., one channel per map coordinate function), \n and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)\n\n\n :math:`-D\\phi v`\n\n :param phi: map batch BxCxXxYxZ\n :param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ\n :return: Returns the RHS of the advection equations involved BxCxXxYxZ\n '''\n\n sz = phi.size()\n rhs_ret = self._rhs_advect_map_call(phi, v)\n return rhs_ret\n\n def _rhs_advect_map_call(self,phi,v):\n \"\"\"\n\n :param phi: map batch BxCxXxYxZ\n :param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ\n :return rhsphi: Returns the RHS of the advection equations involved BxCxXxYxZ\n \"\"\"\n\n fdc = self.fdt_le # use linear extrapolation boundary conditions\n\n if self.dim==1:\n dxc_phi = -fdc.dXc(phi)\n rhsphi = v[:, 0:1] * dxc_phi\n elif self.dim==2:\n dxc_phi = -fdc.dXc(phi)\n dyc_phi = -fdc.dYc(phi)\n rhsphi = v[:, 0:1] * dxc_phi + v[:, 1:2] * dyc_phi\n elif self.dim==3:\n dxc_phi = -fdc.dXc(phi)\n dyc_phi = -fdc.dYc(phi)\n dzc_phi = -fdc.dZc(phi)\n rhsphi = v[:,0:1]*dxc_phi + v[:,1:2]*dyc_phi + v[:,2:3]*dzc_phi\n else:\n raise ValueError('Only supported up to dimension 3')\n return rhsphi\n\n\n def rhs_epdiff_multiNC(self, m, v):\n '''\n Computes the right hand side of the EPDiff equation for N momenta (for N images). 
\n Expected format here is BxCxXxYxZ, where B is the number of momenta (batch size), C, \n the number of channels per momentum (here the spatial dimension of the momenta), \n and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)\n\n This version no longer computes the batch entries separately.\n\n :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n\n :param m: momenta batch BxCxXxYxZ\n :param v: Velocity fields (this will be one velocity field per momentum) BxCxXxYxZ\n :return: Returns the RHS of the EPDiff equations involved BxCxXxYxZ\n '''\n\n sz = m.size()\n rhs_ret = MyTensor(sz).zero_()\n rhs_ret = self._rhs_epdiff_call(m, v, rhs_ret)\n return rhs_ret\n\n def _rhs_epdiff_call(self, m, v,rhsm):\n \"\"\"\n :param m: momenta batch BxCxXxYxZ\n :param v: Velocity fields (this will be one velocity field per momentum) BxCxXxYxZ\n :return rhsm: Returns the RHS of the EPDiff equations involved BxCxXxYxZ\n \"\"\"\n\n # if self.use_neumann_BC_for_map:\n # fdc = self.fdt_ne # use zero Neumann boundary conditions\n # else:\n # fdc = self.fdt_le # do linear extrapolation\n\n fdc = self.fdt_ne\n #fdc = self.fdt_le\n if self.dim == 1:\n dxc_mv0 = -fdc.dXc(m*v[:,0:1])\n dxc_v = -fdc.dXc(v)\n dxc_v_multi_m = dxc_v * m\n rhsm[:]= dxc_mv0 + dxc_v_multi_m\n\n elif self.dim == 2:\n # (m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm (EPDiff equation)\n dxc_mv0 = -fdc.dXc(m*v[:,0:1])\n dyc_mv1 = -fdc.dYc(m*v[:,1:2])\n dc_mv_sum = dxc_mv0 + dyc_mv1\n dxc_v = -fdc.dXc(v)\n dyc_v = -fdc.dYc(v)\n dxc_v_multi_m = dxc_v * m\n dyc_v_multi_m = dyc_v * m\n dxc_v_multi_m_sum = torch.sum(dxc_v_multi_m, 1)\n dyc_v_multi_m_sum = torch.sum(dyc_v_multi_m, 1)\n rhsm[:,0, :, :] = dc_mv_sum[:,0] + dxc_v_multi_m_sum\n\n rhsm[:,1, :, :] = dc_mv_sum[:,1] + dyc_v_multi_m_sum\n\n elif self.dim == 3:\n dxc_mv0 = -fdc.dXc(m*v[:,0:1])\n dyc_mv1 = -fdc.dYc(m*v[:,1:2])\n dzc_mv2 = -fdc.dZc(m*v[:,2:3])\n dc_mv_sum = dxc_mv0 + dyc_mv1 + dzc_mv2\n dxc_v = -fdc.dXc(v)\n dyc_v = -fdc.dYc(v)\n dzc_v = -fdc.dZc(v)\n dxc_v_multi_m = dxc_v*m\n dyc_v_multi_m = dyc_v*m\n dzc_v_multi_m = dzc_v*m\n dxc_v_multi_m_sum = torch.sum(dxc_v_multi_m,1)\n dyc_v_multi_m_sum = torch.sum(dyc_v_multi_m,1)\n dzc_v_multi_m_sum = torch.sum(dzc_v_multi_m,1)\n\n rhsm[:, 0] = dc_mv_sum[:,0] + dxc_v_multi_m_sum\n\n rhsm[:, 1] = dc_mv_sum[:,1] + dyc_v_multi_m_sum\n\n rhsm[:, 2] = dc_mv_sum[:,2] + dzc_v_multi_m_sum\n\n else:\n raise ValueError('Only supported up to dimension 3')\n return rhsm\n\n\n\n def rhs_adapt_epdiff_wkw_multiNC(self, m, v,w, sm_wm,smoother):\n '''\n Computes the right hand side of the EPDiff equation for N momenta (for N images).\n Expected format here is BxCxXxYxZ, where B is the number of momenta (batch size), C,\n the number of channels per momentum (here the spatial dimension of the momenta),\n and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)\n\n This version no longer computes the batch entries separately.\n\n :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n\n :param m: momenta batch BxCxXxYxZ\n :param v: Velocity fields (this will be one velocity field per momentum) BxCxXxYxZ\n :return: Returns the RHS of the EPDiff equations involved BxCxXxYxZ\n '''\n\n sz = m.size()\n rhs_ret = MyTensor(sz).zero_()\n rhs_ret = self._rhs_adapt_epdiff_wkw_call(m, v,w,sm_wm,smoother, rhs_ret)\n return rhs_ret\n\n def _rhs_adapt_epdiff_wkw_call(self, m, v,w,sm_wm, smoother, rhsm):\n \"\"\"\n :param m: momenta batch BxCxXxYxZ\n :param sm_wm: smoothed(wm) batch x K x dim x X x Y x ...\n :param w: weight map batch x K x X x Y x ...\n :param 
v: Velocity fields (this will be one velocity field per momentum) BxCxXxYxZ\n :return rhsm: Returns the RHS of the EPDiff equations involved BxCxXxYxZ\n \"\"\"\n # if self.use_neumann_BC_for_map:\n # fdc = self.fdt_ne # use zero Neumann boundary conditions\n # else:\n # fdc = self.fdt_le # do linear extrapolation\n\n fdc = self.fdt_ne\n rhs = self._rhs_epdiff_call(m,v,rhsm)\n ret_var = torch.empty_like(rhs)\n # ret_var, rhs should batch x dim x X x Yx ..\n dim = m.shape[1]\n sz = [m.shape[0]]+[1]+list(m.shape[1:]) # batchx1xdimx X x Y\n m = m.view(*sz)\n m_sm_wm = m* sm_wm\n m_sm_wm = m_sm_wm.sum(dim=2)\n sm_m_sm_wm = smoother.smooth(m_sm_wm) # batchx K x X xY...\n dxc_w = fdc.dXc(w)\n dc_w_list = [dxc_w]\n if dim == 2 or dim == 3:\n dyc_w = fdc.dYc(w)\n dc_w_list.append(dyc_w)\n if dim == 3:\n dzc_w = fdc.dZc(w) # batch x K x X xY ...\n dc_w_list.append(dzc_w)\n for i in range(dim):\n ret_var[:, i] = rhs[:, i] + (sm_m_sm_wm* dc_w_list[i]).sum(1)\n\n return ret_var\n\n\n\nclass ForwardModel(with_metaclass(ABCMeta, object)):\n \"\"\"\n Abstract forward model class. Should never be instantiated.\n Derived classes require the definition of f(self,t,x,u,pars) and u(self,t,pars).\n These functions will be used for integration: x'(t) = f(t,x(t),u(t))\n \"\"\"\n\n def __init__(self, sz, spacing, params=None):\n '''\n Constructor of abstract forward model class\n \n :param sz: size of images\n :param spacing: numpy array for spacing in x,y,z directions\n '''\n\n self.dim = spacing.size # spatial dimension of the problem\n \"\"\"spatial dimension\"\"\"\n self.spacing = spacing\n \"\"\"spatial spacing\"\"\"\n self.sz = sz\n \"\"\"image size (BxCxXxYxZ)\"\"\"\n self.params = params\n \"\"\"ParameterDict instance holding parameters\"\"\"\n self.rhs = RHSLibrary(self.spacing)\n \"\"\"rhs library support\"\"\"\n\n if self.dim>3 or self.dim<1:\n raise ValueError('Forward models are currently only supported in dimensions 1 to 3')\n\n self.debug_mode_on =False\n\n @abstractmethod\n def f(self,t,x,u,pars,variables_from_optimizer=None):\n \"\"\"\n Function to be integrated\n \n :param t: time\n :param x: state\n :param u: input\n :param pars: optional parameters\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: the function value, should return a list (to support easy concatenations of states)\n \"\"\"\n\n pass\n\n def u(self,t,pars,variables_from_optimizer=None):\n \"\"\"\n External input\n \n :param t: time\n :param pars: parameters\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: the external input\n \"\"\"\n\n return []\n\n\nclass AdvectMap(ForwardModel):\n \"\"\"\n Forward model to advect an n-D map using a transport equation: :math:`\\\\Phi_t + D\\\\Phi v = 0`.\n v is treated as an external argument and \\Phi is the state\n \"\"\"\n\n def __init__(self, sz, spacing, params=None,compute_inverse_map=False):\n super(AdvectMap,self).__init__(sz,spacing,params)\n self.compute_inverse_map = compute_inverse_map\n \"\"\"If True then computes the inverse map on the fly for a map-based solution\"\"\"\n\n def u(self,t, pars, variables_from_optimizer=None):\n \"\"\"\n External input, to hold the velocity field\n \n :param t: time (ignored; not time-dependent) \n :param pars: assumes an n-D velocity field is passed as the only input argument\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: Simply returns this velocity field\n \"\"\"\n\n return pars['v']\n\n def f(self,t, x, u, 
pars=None, variables_from_optimizer=None):\n \"\"\"\n Function to be integrated, i.e., right hand side of transport equation: \n \n :math:`-D\\\\phi v`\n \n :param t: time (ignored; not time-dependent) \n :param x: state, here the map, \\Phi, itself (assumes 3D-5D array; [nrI,0,:,:] x-coors; [nrI,1,:,:] y-coors; ...\n :param u: external input, will be the velocity field here\n :param pars: ignored (does not expect any additional inputs)\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: right hand side [phi]\n \"\"\"\n\n if self.compute_inverse_map:\n return [self.rhs.rhs_advect_map_multiNC(x[0], u),self.rhs.rhs_lagrangian_evolve_map_multiNC(x[1], u)]\n else:\n return [self.rhs.rhs_advect_map_multiNC(x[0],u)]\n\nclass AdvectImage(ForwardModel):\n \"\"\"\n Forward model to advect an image using a transport equation: :math:`I_t + \\\\nabla I^Tv = 0`.\n v is treated as an external argument and I is the state\n \"\"\"\n\n def __init__(self, sz, spacing, params=None):\n super(AdvectImage, self).__init__(sz, spacing,params)\n\n\n def u(self,t, pars, variables_from_optimizer=None):\n \"\"\"\n External input, to hold the velocity field\n \n :param t: time (ignored; not time-dependent) \n :param pars: assumes an n-D velocity field is passed as the only input argument\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: Simply returns this velocity field\n \"\"\"\n\n return pars['v']\n\n def f(self,t, x, u, pars=None, variables_from_optimizer=None):\n \"\"\"\n Function to be integrated, i.e., right hand side of transport equation: :math:`-\\\\nabla I^T v`\n \n :param t: time (ignored; not time-dependent) \n :param x: state, here the image, I, itself (supports multiple images and channels)\n :param u: external input, will be the velocity field here\n :param pars: ignored (does not expect any additional inputs)\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: right hand side [I]\n \"\"\"\n\n return [self.rhs.rhs_advect_image_multiNC(x[0],u)]\n\n\n\nclass EPDiffImage(ForwardModel):\n \"\"\"\n Forward model for the EPdiff equation. State is the momentum, m, and the image I:\n :math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n \n :math:`v=Km`\n \n :math:`I_t+\\\\nabla I^Tv=0`\n \"\"\"\n def __init__(self, sz, spacing, smoother, params=None):\n super(EPDiffImage, self).__init__(sz, spacing,params)\n self.smoother = smoother\n\n def f(self,t, x, u, pars=None, variables_from_optimizer=None):\n \"\"\"\n Function to be integrated, i.e., right hand side of the EPDiff equation: \n :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n \n :math:`-\\\\nabla I^Tv`\n \n :param t: time (ignored; not time-dependent) \n :param x: state, here the vector momentum, m, and the image, I\n :param u: ignored, no external input\n :param pars: ignored (does not expect any additional inputs)\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: right hand side [m,I]\n \"\"\"\n\n # assume x[0] is m and x[1] is I for the state\n m = x[0]\n I = x[1]\n v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I': I}),variables_from_optimizer)\n # print('max(|v|) = ' + str( v.abs().max() ))\n return [self.rhs.rhs_epdiff_multiNC(m,v), self.rhs.rhs_advect_image_multiNC(I,v)]\n\n\nclass EPDiffMap(ForwardModel):\n \"\"\"\n Forward model for the EPDiff equation. 
State is the momentum, m, and the transform, :math:`\\\\phi` \n (mapping the source image to the target image).\n\n :math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n \n :math:`v=Km`\n \n :math:`\\\\phi_t+D\\\\phi v=0`\n \"\"\"\n\n def __init__(self, sz, spacing, smoother, params=None,compute_inverse_map=False):\n super(EPDiffMap, self).__init__(sz,spacing,params)\n self.compute_inverse_map = compute_inverse_map\n \"\"\"If True then computes the inverse map on the fly for a map-based solution\"\"\"\n\n self.smoother = smoother\n self.use_net = True if self.params['smoother']['type'] == 'adaptiveNet' else False\n\n def debugging(self,input,t):\n x = utils.checkNan(input)\n if np.sum(x):\n print(\"find nan at {} step\".format(t))\n print(\"flag m: {}, \".format(x[0]))\n print(\"flag v: {},\".format(x[1]))\n print(\"flag phi: {},\".format(x[2]))\n print(\"flag new_m: {},\".format(x[3]))\n print(\"flag new_phi: {},\".format(x[4]))\n raise ValueError(\"nan error\")\n\n def f(self,t, x, u, pars=None, variables_from_optimizer=None):\n \"\"\"\n Function to be integrated, i.e., right hand side of the EPDiff equation:\n :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm'\n \n :math:`-D\\\\phi v`\n \n :param t: time (ignored; not time-dependent) \n :param x: state, here the image, vector momentum, m, and the map, :math:`\\\\phi`\n :param u: ignored, no external input\n :param pars: ignored (does not expect any additional inputs)\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: right hand side [m,phi]\n \"\"\"\n\n # assume x[0] is m and x[1] is phi for the state\n m = x[0]\n m = m.clamp(max=1., min=-1.)\n phi = x[1]\n\n if self.compute_inverse_map:\n phi_inv = x[2]\n\n if not self.use_net:\n v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'phi':phi}),variables_from_optimizer)\n else:\n v = self.smoother.adaptive_smooth(m, phi, using_map=True)\n\n # print('max(|v|) = ' + str( v.abs().max() ))\n\n if self.compute_inverse_map:\n ret_val= [self.rhs.rhs_epdiff_multiNC(m,v),\n self.rhs.rhs_advect_map_multiNC(phi,v),\n self.rhs.rhs_lagrangian_evolve_map_multiNC(phi_inv,v)]\n else:\n new_m = self.rhs.rhs_epdiff_multiNC(m,v)\n new_phi = self.rhs.rhs_advect_map_multiNC(phi,v)\n ret_val= [new_m, new_phi]\n return ret_val\n\n\n\nclass EPDiffAdaptMap(ForwardModel):\n \"\"\"\n Forward model for the EPDiff equation. State is the momentum, m, and the transform, :math:`\\\\phi`\n (mapping the source image to the target image).\n\n :math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n\n :math:`v=Km`\n\n :math:`\\\\phi_t+D\\\\phi v=0`\n \"\"\"\n\n def __init__(self, sz, spacing, smoother, params=None, compute_inverse_map=False, update_sm_by_advect= True, update_sm_with_interpolation=True,compute_on_initial_map=True):\n super(EPDiffAdaptMap, self).__init__(sz, spacing, params)\n from . import module_parameters as pars\n from . 
import smoother_factory as sf\n self.compute_inverse_map = compute_inverse_map\n \"\"\"If True then computes the inverse map on the fly for a map-based solution\"\"\"\n\n self.smoother = smoother\n self.update_sm_by_advect = update_sm_by_advect\n self.use_the_first_step_penalty = True\n self.update_sm_with_interpolation = update_sm_with_interpolation\n self.compute_on_initial_map=compute_on_initial_map\n self.update_sm_weight=None\n self.velocity_mask = None\n self.debug_mode_on = False\n s_m_params = pars.ParameterDict()\n s_m_params['smoother']['type'] = 'gaussian'\n s_m_params['smoother']['gaussian_std'] =self.params['smoother']['deep_smoother']['deep_network_local_weight_smoothing']\n self.embedded_smoother = sf.SmootherFactory(sz[2:], spacing).create_smoother(\n s_m_params)\n\n \"\"\" if only take the first step penalty as the total penalty, otherwise accumluate the penalty\"\"\"\n def debug_nan(self, input, t,name=''):\n x = utils.checkNan([input])\n if np.sum(x):\n # print(input[0])\n print(\"find nan at {} step, {} with number {}\".format(t,name,x[0]))\n\n raise ValueError(\"nan error\")\n def init_zero_sm_weight(self,sm_weight):\n self.update_sm_weight = torch.zeros_like(sm_weight).detach()\n\n\n def init_velocity_mask(self,velocity_mask):\n self.velocity_mask = velocity_mask\n\n\n def debug_distrib(self,var,name):\n var = var.detach().cpu().numpy()\n density,_= np.histogram(var,[-100,-10,-1,0,1,10,100],density=True)\n print(\"{} distri:{}\".format(name,density))\n\n\n def f(self, t, x, u, pars=None, variables_from_optimizer=None):\n \"\"\"\n Function to be integrated, i.e., right hand side of the EPDiff equation:\n :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm'\n\n :math:`-D\\\\phi v`\n\n :param t: time (ignored; not time-dependent)\n :param x: state, here the image, vector momentum, m, and the map, :math:`\\\\phi`\n :param u: ignored, no external input\n :param pars: ignored (does not expect any additional inputs)\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: right hand side [m,phi]\n \"\"\"\n\n # assume x[0] is m and x[1] is phi for the state\n m = x[0]\n m=m.clamp(max=1., min=-1.)\n phi = x[1]\n return_val_name = []\n sm_weight = None\n if self.update_sm_by_advect:\n if not self.update_sm_with_interpolation:\n sm_weight_pre = x[2]\n sm_weight = self.embedded_smoother.smooth(sm_weight_pre)\n\n v, extra_ret = self.smoother.smooth(m, None, {'w':sm_weight},multi_output=True)\n if self.velocity_mask is not None:\n v = v* self.velocity_mask\n new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)\n new_sm_weight_pre = self.rhs.rhs_advect_map_multiNC(sm_weight_pre, v)\n new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m, v, new_sm_weight_pre, extra_ret,\n self.embedded_smoother)\n\n ret_val = [new_m, new_phi,new_sm_weight_pre]\n return_val_name =['new_m','new_phi','new_sm_weight']\n else:\n if self.compute_on_initial_map:\n sm_weight = x[2]\n sm_phi = x[3]\n new_sm_weight = utils.compute_warped_image_multiNC(sm_weight, sm_phi, self.spacing, 1,\n zero_boundary=False)\n pre_weight = sm_weight\n new_sm_weight = self.embedded_smoother.smooth(new_sm_weight)\n #print('t{},m min, mean,max {} {} {}'.format(t,m.min().item(),m.mean().item(),m.max().item()))\n v,extra_ret = self.smoother.smooth(m,None,{'w': new_sm_weight},multi_output=True)\n if self.velocity_mask is not None:\n v = v * self.velocity_mask\n\n new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m,v,pre_weight,extra_ret,self.embedded_smoother)\n new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)\n 
new_sm_phi = self.rhs.rhs_advect_map_multiNC(sm_phi, v)\n new_sm_weight = self.update_sm_weight.detach()\n ret_val = [new_m, new_phi,new_sm_weight,new_sm_phi]\n return_val_name = ['new_m', 'new_phi', 'new_sm_weight','new_sm_phi']\n else: #todo just attention here is what we currently used\n sm_weight = x[2]\n new_sm_weight = utils.compute_warped_image_multiNC(sm_weight, phi, self.spacing, 1,\n zero_boundary=False)\n\n pre_weight = sm_weight\n new_sm_weight = self.embedded_smoother.smooth(new_sm_weight)\n\n v, extra_ret = self.smoother.smooth(m, None,{'w':new_sm_weight}, multi_output=True)\n\n if self.velocity_mask is not None:\n v = v * self.velocity_mask\n\n new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m,v,pre_weight,extra_ret,self.embedded_smoother)\n new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)\n new_sm_weight = self.update_sm_weight.detach()\n ret_val = [new_m, new_phi, new_sm_weight]\n return_val_name = ['new_m', 'new_phi', 'new_sm_weight']\n\n else:\n if not t==0:\n if self.use_the_first_step_penalty:\n self.smoother.disable_penalty_computation()\n else:\n self.smoother.enable_accumulated_penalty()\n\n I = utils.compute_warped_image_multiNC(pars['I0'], phi, self.spacing, 1,zero_boundary=True)\n pars['I'] = I.detach() # TODO check whether I should be detached here\n v = self.smoother.smooth(m, None, pars, variables_from_optimizer)\n if self.velocity_mask is not None:\n v = v * self.velocity_mask\n new_m = self.rhs.rhs_epdiff_multiNC(m, v)\n new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)\n ret_val = [new_m, new_phi]\n return_val_name =['new_m','new_phi']\n\n\n if self.debug_mode_on:\n toshows = [m, v,phi]+ret_val if sm_weight is None else [m, v,phi]+ret_val +[sm_weight]\n name = ['m', 'v','phi']+return_val_name if sm_weight is None else ['m', 'v','phi']+return_val_name +['sm_weight']\n for i, toshow in enumerate(toshows):\n print('t{},{} min, mean,max {} {} {}'.format(t, name[i], toshow.min().item(), toshow.mean().item(),\n toshow.max().item()))\n self.debug_distrib(toshow, name[i])\n self.debug_nan(toshow,t,name[i])\n return ret_val\n\n\n\n # print('max(|v|) = ' + str( v.abs().max() ))\n\n\n\nclass EPDiffScalarMomentum(ForwardModel):\n \"\"\"\n Base class for scalar momentum EPDiff solutions. Defines a smoother that can be commonly used.\n \"\"\"\n\n def __init__(self, sz, spacing, smoother, params):\n super(EPDiffScalarMomentum,self).__init__(sz,spacing,params)\n\n self.smoother = smoother\n\n\nclass EPDiffScalarMomentumImage(EPDiffScalarMomentum):\n \"\"\"\n Forward model for the scalar momentum EPdiff equation. 
State is the scalar momentum, lam, and the image I\n :math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n\n :math:`v=Km`\n\n :math:'m=\\\\lambda\\\\nabla I`\n\n :math:`I_t+\\\\nabla I^Tv=0`\n\n :math:`\\\\lambda_t + div(\\\\lambda v)=0`\n \"\"\"\n\n def __init__(self, sz, spacing, smoother, params=None):\n super(EPDiffScalarMomentumImage, self).__init__(sz, spacing, smoother, params)\n\n def f(self, t, x, u, pars=None, variables_from_optimizer=None):\n \"\"\"\n Function to be integrated, i.e., right hand side of the EPDiff equation:\n\n :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n\n :math:`-\\\\nabla I^Tv`\n\n :math: `-div(\\\\lambda v)`\n\n :param t: time (ignored; not time-dependent) \n :param x: state, here the scalar momentum, lam, and the image, I, itself\n :param u: no external input\n :param pars: ignored (does not expect any additional inputs)\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: right hand side [lam,I]\n \"\"\"\n\n # assume x[0] is \\lambda and x[1] is I for the state\n lam = x[0]\n I = x[1]\n\n # now compute the momentum\n m = utils.compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, self.sz, self.spacing)\n v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I':I}),variables_from_optimizer)\n\n # advection for I, scalar-conservation law for lam\n return [self.rhs.rhs_scalar_conservation_multiNC(lam, v), self.rhs.rhs_advect_image_multiNC(I, v)]\n\n\n\nclass EPDiffScalarMomentumMap(EPDiffScalarMomentum):\n \"\"\"\n Forward model for the scalar momentum EPDiff equation. State is the scalar momentum, lam, the image, I, and the transform, phi.\n :math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n \n :math:`v=Km`\n \n :math:`m=\\\\lambda\\\\nabla I`\n \n :math:`I_t+\\\\nabla I^Tv=0`\n \n :math:`\\\\lambda_t + div(\\\\lambda v)=0`\n \n :math:`\\\\Phi_t+D\\\\Phi v=0`\n \"\"\"\n\n def __init__(self, sz, spacing, smoother, params=None, compute_inverse_map=False):\n super(EPDiffScalarMomentumMap, self).__init__(sz,spacing, smoother, params)\n self.compute_inverse_map = compute_inverse_map\n \"\"\"If True then computes the inverse map on the fly for a map-based solution\"\"\"\n\n def f(self,t, x, u, pars=None, variables_from_optimizer=None):\n \"\"\"\n Function to be integrated, i.e., right hand side of the EPDiff equation:\n \n :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n \n :math:`-\\\\nabla I^Tv`\n \n :math:`-div(\\\\lambda v)`\n \n :math:`-D\\\\Phi v`\n \n :param t: time (ignored; not time-dependent) \n :param x: state, here the scalar momentum, lam, the image, I, and the transform, :math:`\\\\phi`\n :param u: ignored, no external input\n :param pars: ignored (does not expect any additional inputs)\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: right hand side [lam,I,phi]\n \"\"\"\n\n # assume x[0] is lam and x[1] is I and x[2] is phi for the state\n lam = x[0]\n I = x[1]\n phi = x[2]\n\n if self.compute_inverse_map:\n phi_inv = x[3]\n\n # now compute the momentum\n m = utils.compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, self.sz, self.spacing)\n # todo: replace this by phi again\n #v = self.smoother.smooth(m,None,[phi,True],variables_from_optimizer)\n v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I':I}),variables_from_optimizer)\n\n if self.compute_inverse_map:\n ret_val = [self.rhs.rhs_scalar_conservation_multiNC(lam,v),\n self.rhs.rhs_advect_image_multiNC(I,v),\n self.rhs.rhs_advect_map_multiNC(phi,v),\n 
self.rhs.rhs_lagrangian_evolve_map_multiNC(phi_inv,v)]\n else:\n ret_val = [self.rhs.rhs_scalar_conservation_multiNC(lam,v),\n self.rhs.rhs_advect_image_multiNC(I,v),\n self.rhs.rhs_advect_map_multiNC(phi,v)]\n\n return ret_val\n",
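The file above builds every right hand side from central finite differences; its `ForwardModel` docstring also states the integration contract `x'(t) = f(t,x(t),u(t))`. The following is a minimal standalone sketch of the 2D image-advection RHS, :math:`-\nabla I^T v`, together with a forward-Euler step. It uses plain PyTorch only: the function name `rhs_advect_image_2d` is ours, and the periodic `torch.roll` boundary handling is a simplification of (not a substitute for) mermaid's Neumann-zero finite-difference class.

```python
import torch

def rhs_advect_image_2d(I, v, spacing):
    """Sketch of the 2D advection RHS -grad(I)^T v.

    I: image batch  B x C x X x Y
    v: velocity     B x 2 x X x Y (one field per image)
    spacing: (dx, dy)

    Periodic boundaries via torch.roll are used for brevity;
    mermaid's RHSLibrary uses Neumann-zero boundary handling instead.
    """
    dx, dy = spacing
    # central differences along the spatial axes (dims 2 and 3)
    dIdx = (torch.roll(I, -1, dims=2) - torch.roll(I, 1, dims=2)) / (2.0 * dx)
    dIdy = (torch.roll(I, -1, dims=3) - torch.roll(I, 1, dims=3)) / (2.0 * dy)
    # -grad(I)^T v, broadcasting one velocity field over all image channels
    return -dIdx * v[:, 0:1] - dIdy * v[:, 1:2]

# toy check: a constant velocity field transports a smooth blob
B, C, X, Y = 1, 1, 64, 64
xs = torch.linspace(0.0, 1.0, X)
ys = torch.linspace(0.0, 1.0, Y)
gx, gy = torch.meshgrid(xs, ys, indexing='ij')
I = torch.exp(-((gx - 0.5) ** 2 + (gy - 0.5) ** 2) / 0.01).reshape(B, C, X, Y)
v = 0.1 * torch.ones(B, 2, X, Y)
dt = 0.01
for _ in range(10):  # forward Euler, mirroring x'(t) = f(t, x(t), u(t))
    I = I + dt * rhs_advect_image_2d(I, v, (1.0 / (X - 1), 1.0 / (Y - 1)))
```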
"\"\"\"\nThis package enables easy single-scale and multi-scale optimization support.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\n# from builtins import zip\n# from builtins import str\n# from builtins import range\n# from builtins import object\nfrom abc import ABCMeta, abstractmethod\nimport os\nimport time\nimport copy\nfrom . import utils\nfrom . import visualize_registration_results as vizReg\nfrom . import custom_optimizers as CO\nimport numpy as np\nimport torch\nfrom .data_wrapper import USE_CUDA, AdaptVal, MyTensor\nfrom . import model_factory as MF\nfrom . import image_sampling as IS\nfrom .metrics import get_multi_metric\nfrom .res_recorder import XlsxRecorder\nfrom .data_utils import make_dir\nfrom torch.utils.data import Dataset, DataLoader\nfrom . import optimizer_data_loaders as OD\nfrom . import fileio as FIO\nfrom . import model_evaluation\n\nfrom collections import defaultdict\nfrom future.utils import with_metaclass\n\nfrom termcolor import colored, cprint\n\n# add some convenience functionality\nclass SimpleRegistration(with_metaclass(ABCMeta, object)):\n \"\"\"\n Abstract optimizer base class.\n \"\"\"\n\n def __init__(self,ISource,ITarget,spacing,sz,params,compute_inverse_map=False, default_learning_rate=None):\n \"\"\"\n :param ISource: source image\n :param ITarget: target image\n :param spacing: image spacing\n :param params: parameters\n :param compute_inverse_map: for map-based method the inverse map can be computed on the fly\n \"\"\"\n self.params = params\n self.use_map = self.params['model']['deformation'][('use_map', True, '[True|False] either do computations via a map or directly using the image')]\n self.map_low_res_factor = self.params['model']['deformation'][('map_low_res_factor', 1.0, 'Set to a value in (0,1) if a map-based solution should be computed at a lower internal resolution (image matching is still at full resolution')]\n self.spacing = spacing\n self.ISource = ISource\n self.ITarget = ITarget\n self.sz = sz\n self.compute_inverse_map = compute_inverse_map\n self.default_learning_rate=default_learning_rate\n self.optimizer = None\n\n def get_history(self):\n \"\"\"\n Returns the optimization history as a dictionary. 
Keeps track of energies, iterations counts, and additonal custom measures.\n\n :return: history dictionary\n \"\"\"\n\n if self.optimizer is not None:\n return self.optimizer.get_history()\n else:\n return None\n\n def write_parameters_to_settings(self):\n \"\"\"\n Allows currently computed parameters (if they were optimized) to be written back to an output parameter file\n :return:\n \"\"\"\n if self.optimizer is not None:\n self.optimizer.write_parameters_to_settings()\n\n @abstractmethod\n def register(self):\n \"\"\"\n Abstract method to register the source to the target image\n :return: \n \"\"\"\n pass\n\n def get_optimizer(self):\n \"\"\"\n Returns the optimizer being used (can be used to customize the simple registration if desired)\n :return: optimizer\n \"\"\"\n return self.optimizer\n\n def get_energy(self):\n \"\"\"\n Returns the current energy\n :return: Returns a tuple (energy, similarity energy, regularization energy)\n \"\"\"\n if self.optimizer is not None:\n return self.optimizer.get_energy()\n else:\n return None\n\n def get_warped_label(self):\n \"\"\"\n Returns the warped label\n :return: the warped label\n \"\"\"\n if self.optimizer is not None:\n return self.optimizer.get_warped_label()\n else:\n return None\n\n def get_warped_image(self):\n \"\"\"\n Returns the warped image\n :return: the warped image\n \"\"\"\n if self.optimizer is not None:\n return self.optimizer.get_warped_image()\n else:\n return None\n\n def set_initial_map(self,map0,initial_inverse_map=None):\n \"\"\"\n Sets the initial map for the registrations; by default (w/o setting anything) this will be the identity\n map, but by setting it to a different initial condition one can concatenate transformations.\n\n :param map0:\n :return: n/a\n \"\"\"\n if self.optimizer is not None:\n self.optimizer.set_initial_map(map0, initial_inverse_map)\n # self.optimizer.set_initial_inverse_map(initial_inverse_map)\n\n def set_weight_map(self,weight_map):\n if self.optimizer is not None:\n self.optimizer.set_initial_map(weight_map)\n\n def get_initial_map(self):\n \"\"\"\n Returns the initial map; this will typically be the identity map, but can be set to a different initial\n condition using set_initial_map\n\n :return: returns the initial map (if applicable)\n \"\"\"\n\n if self.optimizer is not None:\n return self.optimizer.get_initial_map()\n else:\n return None\n\n def get_initial_inverse_map(self):\n \"\"\"\n Returns the initial inverse map; this will typically be the identity map, but can be set to a different initial\n condition using set_initial_map\n\n :return: returns the initial map (if applicable)\n \"\"\"\n\n if self.optimizer is not None:\n return self.optimizer.get_initial_inverse_map()\n else:\n return None\n\n def get_map(self):\n \"\"\"\n Returns the deformation map\n :return: deformation map\n \"\"\"\n if self.optimizer is not None:\n return self.optimizer.get_map()\n\n def get_inverse_map(self):\n \"\"\"\n Returns the inverse deformation map if available\n :return: deformation map\n \"\"\"\n if self.optimizer is not None:\n return self.optimizer.get_inverse_map()\n\n def get_model_parameters(self):\n \"\"\"\n Returns the parameters of the model\n\n :return: model parameters \n \"\"\"\n return self.optimizer.get_model_parameters()\n\n def set_model_parameters(self,p):\n \"\"\"\n Sets the parameters of a model\n\n :param p: model parameters\n :return:\n \"\"\"\n self.optimizer.set_model_parameters(p)\n\n def get_model_state_dict(self):\n \"\"\"\n Returns the state dictionary of the mode\n\n 
:return: state dictionary\n \"\"\"\n return self.optimizer.get_model_state_dict()\n\n def set_model_state_dict(self,sd):\n \"\"\"\n Sets the state dictionary of the model\n\n :param sd: state dictionary\n :return:\n \"\"\"\n self.optimizer.set_model_state_dict(sd)\n\n\n\nclass SimpleSingleScaleRegistration(SimpleRegistration):\n \"\"\"\n Simple single scale registration\n \"\"\"\n def __init__(self,ISource,ITarget,spacing,sz,params,compute_inverse_map=False, default_learning_rate=None):\n super(SimpleSingleScaleRegistration, self).__init__(ISource,ITarget,spacing,sz,params,compute_inverse_map=compute_inverse_map,default_learning_rate=default_learning_rate)\n self.optimizer = SingleScaleRegistrationOptimizer(self.sz,self.spacing,self.use_map,self.map_low_res_factor,self.params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n\n def register(self):\n \"\"\"\n Registers the source to the target image\n :return: n/a\n \"\"\"\n self.optimizer.register(self.ISource, self.ITarget)\n\n\nclass SimpleSingleScaleConsensusRegistration(SimpleRegistration):\n \"\"\"\n Single scale registration making use of consensus optimization (to allow for multiple independent registration\n that can share parameters).\n \"\"\"\n def __init__(self,ISource,ITarget,spacing,sz,params,compute_inverse_map=False, default_learning_rate=None):\n super(SimpleSingleScaleConsensusRegistration, self).__init__(ISource,ITarget,spacing,sz,params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n self.optimizer = SingleScaleConsensusRegistrationOptimizer(self.sz,self.spacing,self.use_map,self.map_low_res_factor,self.params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n\n def register(self):\n \"\"\"\n Registers the source to the target image\n :return: n/a\n \"\"\"\n self.optimizer.register(self.ISource, self.ITarget)\n\n\nclass SimpleSingleScaleBatchRegistration(SimpleRegistration):\n \"\"\"\n Single scale registration making use of batch optimization (to allow optimizing over many or large images).\n \"\"\"\n def __init__(self,ISource,ITarget,spacing,sz,params,compute_inverse_map=False, default_learning_rate=None):\n super(SimpleSingleScaleBatchRegistration, self).__init__(ISource,ITarget,spacing,sz,params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n self.optimizer = SingleScaleBatchRegistrationOptimizer(self.sz,self.spacing,self.use_map,self.map_low_res_factor,self.params,compute_inverse_map=compute_inverse_map,default_learning_rate=default_learning_rate)\n\n def register(self):\n \"\"\"\n Registers the source to the target image\n :return: n/a\n \"\"\"\n self.optimizer.register(self.ISource, self.ITarget)\n\n\nclass SimpleMultiScaleRegistration(SimpleRegistration):\n \"\"\"\n Simple multi scale registration\n \"\"\"\n def __init__(self,ISource,ITarget,spacing,sz,params,compute_inverse_map=False, default_learning_rate=None):\n super(SimpleMultiScaleRegistration, self).__init__(ISource, ITarget, spacing,sz,params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n self.optimizer = MultiScaleRegistrationOptimizer(self.sz,self.spacing,self.use_map,self.map_low_res_factor,self.params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n\n def register(self):\n \"\"\"\n Registers the source to the target image\n :return: n/a\n \"\"\"\n self.optimizer.register(self.ISource,self.ITarget)\n\n\nclass 
Optimizer(with_metaclass(ABCMeta, object)):\n \"\"\"\n Abstract optimizer base class.\n \"\"\"\n\n def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None):\n \"\"\"\n Constructor.\n \n :param sz: image size in BxCxXxYxZ format\n :param spacing: spatial spacing, e.g., [0.1,0.1,0.1] in 3D\n :param useMap: boolean, True if a coordinate map is evolved to warp images, False otherwise\n :param map_low_res_factor: if <1 evolutions happen at a lower resolution; >=1 ignored \n :param params: ParametersDict() instance to hold parameters\n :param compute_inverse_map: for map-based models the inverse map can be computed on the fly\n \"\"\"\n self.sz = sz\n \"\"\"image size\"\"\"\n self.spacing = spacing\n \"\"\"image spacing\"\"\"\n self.lowResSize = None\n \"\"\"low res image size\"\"\"\n self.lowResSpacing = None\n \"\"\"low res image spacing\"\"\"\n self.useMap = useMap\n \"\"\"makes use of map\"\"\"\n self.mapLowResFactor = mapLowResFactor\n \"\"\"if <1 then evolutions are at a lower resolution, but image is compared at the same resolution; >=1 ignored\"\"\"\n if self.mapLowResFactor is not None:\n if self.mapLowResFactor>1:\n print('mapLowResFactor needs to be <=1 but is set to ' + str( self.mapLowResFactor ) + '; ignoring it')\n self.mapLowResFactor = None\n elif self.mapLowResFactor==1:\n print('mapLowResFactor = 1: performing computations at original resolution.')\n self.mapLowResFactor = None\n\n self.compute_inverse_map = compute_inverse_map\n \"\"\"If set to True the inverse map is computed on the fly for map-based models\"\"\"\n self.default_learning_rate = default_learning_rate\n \"\"\"If set, this will be the learning rate that the optimizers used (otherwise, as specified in the json configuration, via params)\"\"\"\n\n self.params = params\n \"\"\"general parameters\"\"\"\n self.rel_ftol = 1e-4\n \"\"\"relative termination tolerance for optimizer\"\"\"\n self.last_successful_step_size_taken = None\n \"\"\"Records the last successful step size an optimizer took (possible use: propogate step size between multiscale levels\"\"\"\n\n self.external_optimizer_parameter_loss = None\n\n if (self.mapLowResFactor is not None):\n self.lowResSize = utils._get_low_res_size_from_size( sz, self.mapLowResFactor )\n self.lowResSpacing = utils._get_low_res_spacing_from_spacing(self.spacing,sz,self.lowResSize)\n self.sampler = IS.ResampleImage()\n\n self.params[('optimizer', {}, 'optimizer settings')]\n self.params[('model', {}, 'general model settings')]\n self.params['model'][('deformation', {}, 'model describing the desired deformation model')]\n self.params['model'][('registration_model', {}, 'general settings for the registration model')]\n\n self.params['model']['deformation']['use_map']= (useMap, '[True|False] either do computations via a map or directly using the image')\n self.params['model']['deformation']['map_low_res_factor'] = (mapLowResFactor, 'Set to a value in (0,1) if a map-based solution should be computed at a lower internal resolution (image matching is still at full resolution')\n\n self.compute_similarity_measure_at_low_res = self.params['model']['deformation'][('compute_similarity_measure_at_low_res',False,'If set to true map is not upsampled and the entire computations proceeds at low res')]\n\n self.rel_ftol = self.params['optimizer']['single_scale'][('rel_ftol',self.rel_ftol,'relative termination tolerance for optimizer')]\n\n self.spline_order = params['model']['registration_model'][('spline_order', 1, 'Spline 
interpolation order; 1 is linear interpolation (default); 3 is cubic spline')]\n \"\"\"order of the spline for interpolations\"\"\"\n\n self.show_iteration_output = True\n self.history = dict()\n\n self.optimizer_has_been_initialized = False\n \"\"\"\n Needs to be set before the actual optimization commences; allows to keep track if all parameters have been set\n and for example to delay external parameter settings\n \"\"\"\n\n def write_parameters_to_settings(self):\n \"\"\"\n Writes current state of optimized parameters back to the json setting file (for example to keep track of optimized weights)\n :return:\n \"\"\"\n pass\n\n def turn_iteration_output_on(self):\n self.show_iteration_output = True\n\n def turn_iteration_output_off(self):\n self.show_iteration_output = False\n\n def get_history(self):\n \"\"\"\n Returns the optimization history as a dictionary. Keeps track of energies, iterations counts, and additonal custom measures.\n\n :return: history dictionary\n \"\"\"\n return self.history\n\n def _add_to_history(self,key,value):\n \"\"\"\n Adds an element to the optimizer history\n\n :param key: history key\n :param value: value that is associated with it\n :return: n/a\n \"\"\"\n if key not in self.history:\n self.history[key] = [value]\n else:\n self.history[key].append(value)\n\n def set_last_successful_step_size_taken(self,lr):\n \"\"\"\n Function to let the optimizer know what step size has been successful previously.\n Useful for example to retain optimization \"memory\" across scales in a multi-scale implementation\n :param lr: step size\n :return: n/a\n \"\"\"\n self.last_successful_step_size_taken=lr\n\n def get_last_successful_step_size_taken(self):\n \"\"\"\n Returns the last successful step size the optimizer has taken (if the optimizer supports querying the step size)\n :return: last successful step size\n \"\"\"\n return self.last_successful_step_size_taken\n\n def set_rel_ftol(self, rel_ftol):\n \"\"\"Sets the relative termination tolerance: :math:`|f(x_i)-f(x_{i-1})|/f(x_i)<tol`\n \n :param rel_ftol: relative termination tolerance for optimizer\n \"\"\"\n self.rel_ftol = rel_ftol\n self.params['optimizer']['single_scale']['rel_ftol'] = (rel_ftol,'relative termination tolerance for optimizer')\n self.rel_ftol = self.params['optimizer']['single_scale']['rel_ftol']\n\n def get_rel_ftol(self):\n \"\"\"\n Returns the optimizer termination tolerance\n \"\"\"\n return self.rel_ftol\n\n\n\n @abstractmethod\n def set_model(self, modelName):\n \"\"\"\n Abstract method to select the model which should be optimized by name\n \n :param modelName: name (string) of the model that should be solved\n \"\"\"\n pass\n\n @abstractmethod\n def optimize(self):\n \"\"\"\n Abstract method to start the optimization\n \"\"\"\n pass\n\n def get_last_successful_step_size_taken(self):\n return self.last_successful_step_size_taken\n\n def get_checkpoint_dict(self):\n \"\"\"\n Returns a dict() object containing the information for the current checkpoint.\n :return: checpoint dictionary\n \"\"\"\n return dict()\n\n def load_checkpoint_dict(self,d,load_optimizer_state=False):\n \"\"\"\n Takes the dictionary from a checkpoint and loads it as the current state of optimizer and model\n\n :param d: dictionary\n :param load_optimizer_state: if set to True the optimizer state will be restored\n :return: n/a\n \"\"\"\n pass\n\n def save_checkpoint(self,filename):\n torch.save(self.get_checkpoint_dict(),filename)\n\n def load_checkpoint(self,filename):\n d = torch.load(filename)\n 
self.load_checkpoint_dict(d)\n\n def set_external_optimizer_parameter_loss(self,opt_parameter_loss):\n \"\"\"\n Allows to set an external method as an optimizer parameter loss\n :param opt_parameter_loss: method which takes shared_model_parameters as its only input\n :return: returns a scalar value which is the loss\n \"\"\"\n self.external_optimizer_parameter_loss = opt_parameter_loss\n\n def get_external_optimizer_parameter_loss(self):\n \"\"\"\n Returns the externally set method for parameter loss. Will be None if none was set.\n :return: method\n \"\"\"\n return self.external_optimizer_parameter_loss\n\n def compute_optimizer_parameter_loss(self,shared_model_parameters):\n \"\"\"\n Returns the optimizer parameter loss. This is the method that should be called to compute this loss.\n Will either evaluate the method optimizer_parameter_loss or if one was externally defined, the\n externally defined one will have priority.\n\n :param shared_model_parameters: paramters that have been declared shared in a model\n :return: parameter loss\n \"\"\"\n if self.external_optimizer_parameter_loss is not None:\n return self.external_optimizer_parameter_loss(shared_model_parameters)\n else:\n return self.optimizer_parameter_loss(shared_model_parameters)\n\n def optimizer_parameter_loss(self,shared_model_parameters):\n \"\"\"\n This allows to define additional terms for the loss which are based on parameters that are shared\n between models (for example for the smoother). Can be used to define a form of consensus optimization.\n :param shared_model_parameters: paramters that have been declared shared in a model\n :return: 0 by default, otherwise the corresponding penalty\n \"\"\"\n return MyTensor(1).zero_()\n\nclass ImageRegistrationOptimizer(Optimizer):\n \"\"\"\n Optimization class for image registration.\n \"\"\"\n\n def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None):\n super(ImageRegistrationOptimizer, self).__init__(sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n self.ISource = None\n \"\"\"source image\"\"\"\n self.lowResISource = None\n \"\"\"if mapLowResFactor <1, a lowres soure image needs to be created to parameterize some of the registration algorithms\"\"\"\n self.lowResITarget = None\n \"\"\"if mapLowResFactor <1, a lowres target image may need to be created to be used as additonal inputs for registration algorithms\"\"\"\n self.ITarget = None\n \"\"\"target image\"\"\"\n self.LSource = None\n \"\"\" source label \"\"\"\n self.LTarget = None\n \"\"\" target label \"\"\"\n self.lowResLSource = None\n \"\"\"if mapLowResFactor <1, a lowres soure label image needs to be created to parameterize some of the registration algorithms\"\"\"\n self.lowResLTarget = None\n \"\"\"if mapLowResFactor <1, a lowres target label image needs to be created to parameterize some of the registration algorithms\"\"\"\n self.initialMap = None\n \"\"\" initial map\"\"\"\n self.initialInverseMap = None\n \"\"\" initial inverse map\"\"\"\n self.weight_map =None\n \"\"\" initial weight map\"\"\"\n self.multi_scale_info_dic = None\n \"\"\" dicts containing full resolution image and label\"\"\"\n self.optimizer_name = None #''lbfgs_ls'\n \"\"\"name of the optimizer to use\"\"\"\n self.optimizer_params = {}\n \"\"\"parameters that should be passed to the optimizer\"\"\"\n self.optimizer = None\n \"\"\"optimizer object itself (to be instantiated)\"\"\"\n self.visualize 
= True\n \"\"\"if True figures are created during the run\"\"\"\n self.visualize_step = 10\n \"\"\"how often the figures are updated; each self.visualize_step-th iteration\"\"\"\n self.nrOfIterations = None\n \"\"\"the maximum number of iterations for the optimizer\"\"\"\n self.current_epoch = None\n \"\"\"Can be set externally, so the optimizer knows in which epoch we are\"\"\"\n self.save_fig=False\n \"\"\" save fig during the visualization\"\"\"\n self.save_fig_path=None\n \"\"\" the path for saving figures\"\"\"\n self.save_fig_num =-1\n \"\"\" the max num of the fig to be saved during one call, set -1 to save all\"\"\"\n self.pair_name=None\n \"\"\" name list of the registration pair \"\"\"\n self.iter_count = 0\n \"\"\" count of the iterations over multi-resolution\"\"\"\n self.recording_step = None\n \"\"\"sets the step-size for recording all intermediate results to the history\"\"\"\n\n def set_recording_step(self, step):\n assert step > 0, 'Recording step needs to be larger than 0'\n self.recording_step = step\n self.history['recording'] = []\n\n def set_current_epoch(self,current_epoch):\n self.current_epoch = current_epoch\n\n def get_current_epoch(self):\n return self.current_epoch\n\n\n\n def turn_visualization_on(self):\n \"\"\"\n Turns on visualization during the run\n \"\"\"\n self.visualize = True\n\n def turn_visualization_off(self):\n \"\"\"\n Turns off visualization during the run\n \"\"\"\n self.visualize = False\n\n def set_visualization(self, vis):\n \"\"\"\n Set if visualization should be on (True) or off (False)\n\n :param vis: visualization status on (True) or off (False)\n \"\"\"\n self.visualize = vis\n\n def get_visualization(self):\n \"\"\"\n Returns the visualization status\n\n :return: Returns True if visualizations will be displayed and False otherwise\n \"\"\"\n return self.visualize\n\n def set_visualize_step(self, nr_step):\n \"\"\"\n Set after how many steps a visualization should be updated\n\n :param nr_step:\n \"\"\"\n self.visualize_step = nr_step\n\n\n def get_visualize_step(self):\n \"\"\"\n Returns after how many steps visualizations are updated\n\n :return: after how many steps visualizations are updated\n \"\"\"\n return self.visualize_step\n\n def set_save_fig(self,save_fig):\n \"\"\"\n :param save_fig: True: save the visualized figs\n :return:\n \"\"\"\n self.save_fig = save_fig\n def get_save_fig(self):\n \"\"\"\n :param save_fig: True: get the visualized figs\n :return:\n \"\"\"\n return self.save_fig\n\n def set_save_fig_path(self, save_fig_path):\n \"\"\"\n the path of saved figures, default is the ../data/expr_name\n :param save_fig_path:\n :return:\n \"\"\"\n self.save_fig_path = save_fig_path\n\n\n\n def get_save_fig_path(self):\n \"\"\"\n the path of saved figures, default is the ../data/expr_name\n :param save_fig_path:\n :return:\n \"\"\"\n return self.save_fig_path\n\n\n def set_save_fig_num(self, save_fig_num=-1):\n \"\"\"\n set the num of the fig to save\n :param save_fig_num:\n :return:\n \"\"\"\n self.save_fig_num = save_fig_num\n\n def get_save_fig_num(self):\n \"\"\"\n set the num of the fig to save\n :param save_fig_num:\n :return:\n \"\"\"\n return self.save_fig_num\n\n def set_expr_name(self, expr_name):\n \"\"\"\n the name of experiments\n :param expr_name:\n :return:\n \"\"\"\n self.expr_name = expr_name\n\n def get_expr_name(self): \n \"\"\"\n the name of experiments\n :param expr_name:\n :return:\n \"\"\"\n return self.expr_name\n\n def set_pair_name(self, pair_name):\n self.pair_name = pair_name\n\n\n def 
get_pair_name(self):\n return self.pair_name\n\n\n def register(self, ISource, ITarget):\n \"\"\"\n Registers the source to the target image\n :param ISource: source image\n :param ITarget: target image\n :return: n/a\n \"\"\"\n self.set_source_image(ISource)\n self.set_target_image(ITarget)\n self.optimize()\n self.write_parameters_to_settings()\n\n def set_source_image(self, I):\n \"\"\"\n Setting the source image which should be deformed to match the target image\n\n :param I: source image\n \"\"\"\n self.ISource = I\n\n def set_multi_scale_info(self, ISource, ITarget, spacing, LSource=None, LTarget=None):\n \"\"\"provide full resolution of Image and Label\"\"\"\n self.multi_scale_info_dic = {'ISource': ISource, 'ITarget': ITarget, 'spacing': spacing, 'LSource': LSource,\n 'LTarget': LTarget}\n\n def _compute_low_res_image(self,I,params,spacing=None):\n low_res_image = None\n if self.mapLowResFactor is not None:\n low_res_image,_ = self.sampler.downsample_image_to_size(I,spacing,self.lowResSize[2::],self.spline_order)\n return low_res_image\n\n def _compute_low_res_label_map(self,label_map,params, spacing=None):\n low_res_label_map = None\n if self.mapLowResFactor is not None:\n # use 0-th order (nearest-neighbor) interpolation for label maps and\n # assign to low_res_label_map so that the result is actually returned\n low_res_label_map, _ = self.sampler.downsample_image_to_size(label_map, spacing, self.lowResSize[2::],\n 0)\n return low_res_label_map\n\n def compute_low_res_image_if_needed(self):\n \"\"\"To be called before the optimization starts\"\"\"\n if self.multi_scale_info_dic is None:\n ISource = self.ISource\n ITarget = self.ITarget\n LSource = self.LSource\n LTarget = self.LTarget\n spacing = self.spacing\n else:\n ISource, ITarget, LSource, LTarget, spacing = self.multi_scale_info_dic['ISource'], self.multi_scale_info_dic['ITarget'],\\\n self.multi_scale_info_dic['LSource'],self.multi_scale_info_dic['LTarget'],self.multi_scale_info_dic['spacing']\n if self.mapLowResFactor is not None:\n self.lowResISource = self._compute_low_res_image(ISource,self.params,spacing)\n # todo: can be removed to save memory; is more experimental at this point\n self.lowResITarget = self._compute_low_res_image(ITarget,self.params,spacing)\n if self.LSource is not None and self.LTarget is not None:\n self.lowResLSource = self._compute_low_res_label_map(LSource,self.params,spacing)\n self.lowResLTarget = self._compute_low_res_label_map(LTarget, self.params,spacing)\n\n def set_source_label(self, LSource):\n \"\"\"\n :param LSource: source label image\n :return:\n \"\"\"\n self.LSource = LSource\n\n\n def set_target_label(self, LTarget):\n \"\"\"\n :param LTarget: target label image\n :return:\n \"\"\"\n self.LTarget = LTarget\n\n\n def get_source_label(self):\n return self.LSource\n\n def get_target_label(self):\n return self.LTarget\n\n def set_target_image(self, I):\n \"\"\"\n Setting the target image which the source image should match after registration\n\n :param I: target image\n \"\"\"\n self.ITarget = I\n\n\n def set_optimizer_by_name(self, optimizer_name):\n \"\"\"\n Set the desired optimizer by name (only lbfgs and adam are currently supported)\n\n :param optimizer_name: name of the optimizer (string) to be used\n \"\"\"\n self.optimizer_name = optimizer_name\n self.params['optimizer']['name'] = optimizer_name\n\n def get_optimizer_by_name(self):\n \"\"\"\n Get the name (string) of the optimizer that was selected\n\n :return: name (string) of the optimizer\n \"\"\"\n return self.optimizer_name\n\n def set_optimizer(self, opt):\n \"\"\"\n Set the optimizer. 
Not by name, but instead by passing the optimizer object which should be instantiated.

        :param opt: optimizer object
        """
        self.optimizer = opt

    def get_optimizer(self):
        """
        Returns the optimizer object which was set to perform the optimization

        :return: optimizer object
        """
        return self.optimizer

    def set_optimizer_params(self, opt_params):
        """
        Set the desired parameters of the optimizer. This is done by passing a dictionary, for example, dict(lr=0.01)

        :param opt_params: dictionary holding the parameters of an optimizer
        """
        self.optimizer_params = opt_params


class SingleScaleRegistrationOptimizer(ImageRegistrationOptimizer):
    """
    Optimizer operating on a single scale. Typically this will be the full image resolution.

    .. todo::
        Check what the best way is to adapt the tolerances for the pre-defined optimizers;
        tying it to rel_ftol is not really correct.
    """

    def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None):
        super(SingleScaleRegistrationOptimizer, self).__init__(sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)

        if self.mapLowResFactor is not None:
            # computes model at a lower resolution than the image similarity
            if self.compute_similarity_measure_at_low_res:
                self.mf = MF.ModelFactory(self.lowResSize, self.lowResSpacing, self.lowResSize, self.lowResSpacing)
            else:
                self.mf = MF.ModelFactory(self.sz, self.spacing, self.lowResSize, self.lowResSpacing)
        else:
            # computes model and similarity at the same resolution
            self.mf = MF.ModelFactory(self.sz, self.spacing, self.sz, self.spacing)
        """model factory which will be used to create the model and its loss function"""

        self.model = None
        """the model itself"""
        self.criterion = None
        """the loss function"""

        self.initialMap = None
        """initial map, will be needed for map-based solutions; by default this will be the identity map, but can be set to something different externally"""
        self.initialInverseMap = None
        """initial inverse map; will be the same as the initial map, unless it was set externally"""
        self.map0_inverse_external = None
        """initial inverse map, set externally, will be needed for map-based solutions; by default this will be the identity map, but can be set to something different externally"""
        self.map0_external = None
        """initial map, set externally"""
        self.lowResInitialMap = None
        """low res initial map, by default the identity map, will be needed for map-based solutions which are computed at lower resolution"""
        self.lowResInitialInverseMap = None
        """low res initial inverse map, by default the identity map, will be needed for map-based solutions which are computed at lower resolution"""
        self.weight_map = None
        """initial weight map, which is only used by metric-learning models"""
        self.optimizer_instance = None
        """the optimizer instance to perform the actual optimization"""

        c_params = self.params[('optimizer', {}, 'optimizer settings')]
        self.weight_clipping_type = c_params[('weight_clipping_type','none','Type of weight clipping that should be used [l1|l2|l1_individual|l2_individual|l1_shared|l2_shared|None]')]
        self.weight_clipping_type = self.weight_clipping_type.lower()
        """Type of weight clipping; applied to weights and bias independently; norm restricted to weight_clipping_value"""
        if 
self.weight_clipping_type=='none':\n self.weight_clipping_type = None\n if self.weight_clipping_type!='pre_lsm_weights':\n self.weight_clipping_value = c_params[('weight_clipping_value', 1.0, 'Value to which the norm is being clipped')]\n \"\"\"Desired norm after clipping\"\"\"\n\n extent = self.spacing * self.sz[2:]\n max_extent = max(extent)\n\n clip_params = c_params[('gradient_clipping',{},'clipping settings for the gradient for optimization')]\n self.clip_display = clip_params[('clip_display', True, 'If set to True displays if clipping occurred')]\n self.clip_individual_gradient = clip_params[('clip_individual_gradient',False,'If set to True, the gradient for the individual parameters will be clipped')]\n self.clip_individual_gradient_value = clip_params[('clip_individual_gradient_value',max_extent,'Value to which the gradient for the individual parameters is clipped')]\n self.clip_shared_gradient = clip_params[('clip_shared_gradient', True, 'If set to True, the gradient for the shared parameters will be clipped')] # todo recover the clip gradient,or it may cause unstable\n self.clip_shared_gradient_value = clip_params[('clip_shared_gradient_value', 1.0, 'Value to which the gradient for the shared parameters is clipped')]\n\n self.scheduler = None # for the step size scheduler\n self.patience = None # for the step size scheduler\n self._use_external_scheduler = False\n\n self.rec_energy = None\n self.rec_similarityEnergy = None\n self.rec_regEnergy = None\n self.rec_opt_par_loss_energy = None\n self.rec_phiWarped = None\n self.rec_phiInverseWarped = None\n self.rec_IWarped = None\n self.last_energy = None\n self.rel_f = None\n self.rec_custom_optimizer_output_string = ''\n \"\"\"the evaluation information\"\"\"\n self.rec_custom_optimizer_output_values = None\n\n self.delayed_model_parameters = None\n self.delayed_model_parameters_still_to_be_set = False\n self.delayed_model_state_dict = None\n self.delayed_model_state_dict_still_to_be_set = False\n\n # to be able to transfer state and parameters\n self._sgd_par_list = None # holds the list of parameters\n self._sgd_par_names = None # holds the list of names associated with these parameters\n self._sgd_name_to_model_par = None # allows mapping from name to model parameter\n self._sgd_split_shared = None # keeps track if the shared states were split or not\n self._sgd_split_individual = None # keeps track if the individual states were split or not\n self.over_scale_iter_count = None #accumulated iter count over different scales\n self.n_scale = None #the index of current scale, torename and document todo\n\n\n def write_parameters_to_settings(self):\n if self.model is not None:\n self.model.write_parameters_to_settings()\n\n def get_sgd_split_shared(self):\n return self._sgd_split_shared\n\n def get_sgd_split_indvidual(self):\n return self._sgd_split_individual\n\n def get_checkpoint_dict(self):\n if self.model is not None and self.optimizer_instance is not None:\n d = super(SingleScaleRegistrationOptimizer, self).get_checkpoint_dict()\n d['model'] = dict()\n d['model']['parameters'] = self.model.get_registration_parameters_and_buffers()\n d['model']['size'] = self.model.sz\n d['model']['spacing'] = self.model.spacing\n d['optimizer_state'] = self.optimizer_instance.state_dict()\n return d\n else:\n raise ValueError('Unable to create checkpoint, because either the model or the optimizer have not been initialized')\n\n def load_checkpoint_dict(self,d,load_optimizer_state=False):\n if self.model is not None and self.optimizer_instance is 
not None:
            self.model.set_registration_parameters(d['model']['parameters'], d['model']['size'], d['model']['spacing'])
            if load_optimizer_state:
                try:
                    self.optimizer_instance.load_state_dict(d['optimizer_state'])
                    print('INFO: Was able to load the previous optimizer state from checkpoint data')
                except Exception:
                    print('INFO: Could not load the previous optimizer state')
            else:
                print('WARNING: Turned off the loading of the optimizer state')
        else:
            raise ValueError('Cannot load checkpoint dictionary, because either the model or the optimizer have not been initialized')

    def get_opt_par_energy(self):
        """
        Energy for the optimizer parameters

        :return: optimizer-parameter energy
        """
        return self.rec_opt_par_loss_energy.cpu().item()

    def get_custom_output_values(self):
        """
        Custom output values

        :return: custom output values
        """
        return self.rec_custom_optimizer_output_values

    def get_energy(self):
        """
        Returns the current energy

        :return: Returns a tuple (energy, similarity energy, regularization energy)
        """
        return self.rec_energy.cpu().item(), self.rec_similarityEnergy.cpu().item(), self.rec_regEnergy.cpu().item()

    def get_warped_image(self):
        """
        Returns the warped image

        :return: the warped image
        """
        if self.useMap:
            cmap = self.get_map()
            # and now warp it
            return utils.compute_warped_image_multiNC(self.ISource, cmap, self.spacing, self.spline_order, zero_boundary=True)
        else:
            return self.rec_IWarped

    def get_warped_label(self):
        """
        Returns the warped label

        :return: the warped label
        """
        if self.useMap:
            cmap = self.get_map()
            return utils.get_warped_label_map(self.LSource, cmap, self.spacing)
        else:
            return None

    def get_map(self):
        """
        Returns the deformation map

        :return: deformation map
        """
        return self.rec_phiWarped

    def get_inverse_map(self):
        """
        Returns the inverse deformation map

        :return: inverse deformation map
        """
        return self.rec_phiInverseWarped

    def set_n_scale(self, n_scale):
        """
        Sets the index of the current scale (used by the multi-scale solver)

        :param n_scale: scale index
        :return: n/a
        """
        self.n_scale = n_scale

    def set_over_scale_iter_count(self, iter_count):
        self.over_scale_iter_count = iter_count
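    # A hedged usage sketch for the checkpoint helpers above (illustrative only;
    # `opt` is an already-initialized optimizer and 'checkpoint.pt' is a
    # hypothetical filename):
    #
    #   import torch
    #   d = opt.get_checkpoint_dict()          # model parameters + optimizer state
    #   torch.save(d, 'checkpoint.pt')
    #   ...
    #   d = torch.load('checkpoint.pt')
    #   opt.load_checkpoint_dict(d, load_optimizer_state=True)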
    def _create_initial_maps(self):
        if self.useMap:
            # create the identity map [-1,1]^d, since we will use a map-based implementation
            if self.map0_external is not None:
                self.initialMap = self.map0_external
            else:
                id = utils.identity_map_multiN(self.sz, self.spacing)
                self.initialMap = AdaptVal(torch.from_numpy(id))

            if self.map0_inverse_external is not None:
                self.initialInverseMap = self.map0_inverse_external
            else:
                id = utils.identity_map_multiN(self.sz, self.spacing)
                self.initialInverseMap = AdaptVal(torch.from_numpy(id))

            if self.mapLowResFactor is not None:
                # create a lower resolution map for the computations
                if self.map0_external is None:
                    lowres_id = utils.identity_map_multiN(self.lowResSize, self.lowResSpacing)
                    self.lowResInitialMap = AdaptVal(torch.from_numpy(lowres_id))
                else:
                    sampler = IS.ResampleImage()
                    lowres_id, _ = sampler.downsample_image_to_size(self.initialMap, self.spacing, self.lowResSize[2::], 1, zero_boundary=False)
                    self.lowResInitialMap = AdaptVal(lowres_id)

                if self.map0_inverse_external is None:
                    lowres_id = utils.identity_map_multiN(self.lowResSize, self.lowResSpacing)
                    self.lowResInitialInverseMap = AdaptVal(torch.from_numpy(lowres_id))
                else:
                    sampler = IS.ResampleImage()
                    lowres_inverse_id, _ = sampler.downsample_image_to_size(self.initialInverseMap, self.spacing, self.lowResSize[2::],
                                                                            1, zero_boundary=False)
                    self.lowResInitialInverseMap = AdaptVal(lowres_inverse_id)

    def set_model(self, modelName):
        """
        Sets the model that should be solved

        :param modelName: name of the model that should be solved (string)
        """

        self.params['model']['registration_model']['type'] = ( modelName, "['svf'|'svf_quasi_momentum'|'svf_scalar_momentum'|'svf_vector_momentum'|'lddmm_shooting'|'lddmm_shooting_scalar_momentum'] all with '_map' or '_image' suffix" )

        self.model, self.criterion = self.mf.create_registration_model(modelName, self.params['model'], compute_inverse_map=self.compute_inverse_map)
        print(self.model)

        self._create_initial_maps()

    def set_initial_map(self, map0, map0_inverse=None):
        """
        Sets the initial map (overwrites the default identity map)

        :param map0: initial map
        :param map0_inverse: initial inverse map
        :return: n/a
        """

        self.map0_external = map0
        self.map0_inverse_external = map0_inverse

        if self.initialMap is not None:
            # was already set, so let's modify it
            self._create_initial_maps()

    def set_initial_weight_map(self, weight_map, freeze_weight=False):
        """
        Sets the initial weight map (used by metric-learning models with adaptive regularizers)

        :param weight_map: initial weight map
        :param freeze_weight: if True the weight map is frozen during the optimization
        :return: n/a
        """
        if self.mapLowResFactor is not None:
            sampler = IS.ResampleImage()
            weight_map, _ = sampler.downsample_image_to_size(weight_map, self.spacing, self.lowResSize[2::], 1,
                                                             zero_boundary=False)
        self.model.local_weights.data = weight_map
        if freeze_weight:
            self.model.freeze_adaptive_regularizer_param()

    def get_initial_map(self):
        """
        Returns the initial map

        :return: initial map
        """

        if self.initialMap is not None:
            return self.initialMap
        elif self.map0_external is not None:
            return self.map0_external
        else:
            return None

    def get_initial_inverse_map(self):
        """
        Returns the initial inverse map

        :return: initial inverse map
        """

        if self.initialInverseMap is not None:
            return self.initialInverseMap
        elif self.map0_inverse_external is not None:
            return self.map0_inverse_external
        else:
            return None

    def add_similarity_measure(self, sim_name, sim_measure):
        """
        Adds a custom similarity measure.

        :param sim_name: name of the similarity measure (string)
        :param sim_measure: similarity measure itself (class object that can be instantiated)
        """
        self.criterion.add_similarity_measure(sim_name, sim_measure)
        self.params['model']['registration_model']['similarity_measure']['type'] = (sim_name, 'was customized; needs to be explicitly instantiated, cannot be loaded')

    def add_model(self, model_name, model_network_class, model_loss_class, use_map, model_description='custom model'):
        """
        Adds a custom model and its loss function

        :param model_name: name of the model to be added (string)
        :param model_network_class: registration model itself (class object that can be instantiated)
        :param model_loss_class: registration loss (class object that can be instantiated)
        :param use_map: True/False: specifies if model uses a map or not
        :param model_description: optional model description
        """
        self.mf.add_model(model_name, model_network_class, model_loss_class, use_map, model_description)
        self.params['model']['registration_model']['type'] = (model_name, 'was customized; needs to be explicitly instantiated, cannot be loaded')
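    # Illustrative sketch only: hooking a hypothetical custom similarity measure
    # into the optimizer (MySSD stands for any class following the
    # similarity-measure interface; it is not part of this module):
    #
    #   opt.add_similarity_measure('mySSD', MySSD)
    #   opt.set_model('lddmm_shooting_map')
    #
    # The model name string follows the naming scheme documented in set_model.

    def 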
set_model_state_dict(self,sd):\n \"\"\"\n Sets the state dictionary of the model\n\n :param sd: state dictionary\n :return: n/a\n \"\"\"\n\n if self.optimizer_has_been_initialized:\n self.model.load_state_dict(sd)\n self.delayed_model_state_dict_still_to_be_set = False\n else:\n self.delayed_model_state_dict_still_to_be_set = True\n self.delayed_model_state_dict = sd\n\n def get_model_state_dict(self):\n \"\"\"\n Returns the state dictionary of the model\n\n :return: state dictionary\n \"\"\"\n return self.model.state_dict()\n\n def set_model_parameters(self, p):\n \"\"\"\n Set the parameters of the registration model\n\n :param p: parameters\n \"\"\"\n\n if self.optimizer_has_been_initialized:\n if (self.useMap) and (self.mapLowResFactor is not None):\n self.model.set_registration_parameters(p, self.lowResSize, self.lowResSpacing)\n else:\n self.model.set_registration_parameters(p, self.sz, self.spacing)\n self.delayed_model_parameters_still_to_be_set = False\n else:\n self.delayed_model_parameters_still_to_be_set = True\n self.delayed_model_parameters = p\n\n def _is_vector(self,d):\n sz = d.size()\n if len(sz)==1:\n return True\n else:\n return False\n\n def _is_tensor(self,d):\n sz = d.size()\n if len(sz)>1:\n return True\n else:\n return False\n\n def _aux_do_weight_clipping_norm(self,pars,desired_norm):\n \"\"\"does weight clipping but only for conv or bias layers (assuming they are named as such); be careful with the namimg here\"\"\"\n if self.weight_clipping_value > 0:\n for key in pars:\n # only do the clipping if it is a conv layer or a bias term\n if key.lower().find('conv')>0 or key.lower().find('bias')>0:\n p = pars[key]\n if self._is_vector(p.data):\n # just normalize this vector component-by-component, norm does not matter here as these are only scalars\n p.data = p.data.clamp_(-self.weight_clipping_value, self.weight_clipping_value)\n elif self._is_tensor(p.data):\n # normalize sample-by-sample individually\n for b in range(p.data.size()[0]):\n param_norm = p.data[b, ...].norm(desired_norm)\n if param_norm > self.weight_clipping_value:\n clip_coef = self.weight_clipping_value / param_norm\n p.data[b, ...].mul_(clip_coef)\n else:\n raise ValueError('Unknown data type; I do not know how to clip this')\n\n def _do_shared_weight_clipping_pre_lsm(self):\n multi_gaussian_weights = self.params['model']['registration_model']['forward_model']['smoother'][('multi_gaussian_weights', -1, 'the used multi gaussian weights')]\n if multi_gaussian_weights==-1:\n raise ValueError('The multi-gaussian weights should have been set before')\n multi_gaussian_weights = np.array(multi_gaussian_weights)\n\n sp = self.get_shared_model_parameters()\n for key in sp:\n if key.lower().find('pre_lsm_weights') > 0:\n p = sp[key]\n sz = p.size() #0 dim is weight dimension\n if sz[0]!=len(multi_gaussian_weights):\n raise ValueError('Number of multi-Gaussian weights needs to be {}, but got {}'.format(sz[0],len(multi_gaussian_weights)))\n for w in range(sz[0]):\n # this is to assure that the weights are always between 0 and 1 (when using the WeightedLinearSoftmax\n p[w,...].data.clamp_(0.0-multi_gaussian_weights[w],1.0-multi_gaussian_weights[w])\n \n def _do_individual_weight_clipping_l1(self):\n ip = self.get_individual_model_parameters()\n self._aux_do_weight_clipping_norm(pars=ip,desired_norm=1)\n\n def _do_shared_weight_clipping_l1(self):\n sp = self.get_shared_model_parameters()\n self._aux_do_weight_clipping_norm(pars=sp,desired_norm=1)\n\n def _do_individual_weight_clipping_l2(self):\n ip = 
self.get_individual_model_parameters()
        self._aux_do_weight_clipping_norm(pars=ip, desired_norm=2)

    def _do_shared_weight_clipping_l2(self):
        sp = self.get_shared_model_parameters()
        self._aux_do_weight_clipping_norm(pars=sp, desired_norm=2)

    def _do_weight_clipping(self):
        """performs weight clipping, if desired"""
        if self.weight_clipping_type is not None:
            possible_modes = ['l1', 'l2', 'l1_individual', 'l2_individual', 'l1_shared', 'l2_shared', 'pre_lsm_weights']
            if self.weight_clipping_type in possible_modes:
                if self.weight_clipping_type=='l1':
                    self._do_shared_weight_clipping_l1()
                    self._do_individual_weight_clipping_l1()
                elif self.weight_clipping_type=='l2':
                    self._do_shared_weight_clipping_l2()
                    self._do_individual_weight_clipping_l2()
                elif self.weight_clipping_type=='l1_individual':
                    self._do_individual_weight_clipping_l1()
                elif self.weight_clipping_type=='l2_individual':
                    self._do_individual_weight_clipping_l2()
                elif self.weight_clipping_type=='l1_shared':
                    self._do_shared_weight_clipping_l1()
                elif self.weight_clipping_type=='l2_shared':
                    self._do_shared_weight_clipping_l2()
                elif self.weight_clipping_type=='pre_lsm_weights':
                    self._do_shared_weight_clipping_pre_lsm()
                else:
                    raise ValueError('Illegal weight clipping type: {}'.format(self.weight_clipping_type))
            else:
                raise ValueError('Weight clipping needs to be: [None|l1|l2|l1_individual|l2_individual|l1_shared|l2_shared|pre_lsm_weights]')

    def get_model_parameters(self):
        """
        Returns the parameters of the model

        :return: model parameters
        """
        return self.model.get_registration_parameters()

    def set_shared_model_parameters(self, p):
        """
        Set only the shared parameters of the model

        :param p: shared registration parameters as an ordered dict
        :return: n/a
        """

        self.model.set_shared_registration_parameters(p)

    def get_shared_model_parameters_and_buffers(self):
        """
        Returns only the model parameters that are shared between models and the shared buffers associated with them.

        :return: shared model parameters and buffers
        """
        return self.model.get_shared_registration_parameters_and_buffers()

    def get_shared_model_parameters(self):
        """
        Returns only the model parameters that are shared between models.

        :return: shared model parameters
        """
        return self.model.get_shared_registration_parameters()

    def set_individual_model_parameters(self, p):
        """
        Set only the individual parameters of the model

        :param p: individual registration parameters as an ordered dict
        :return: n/a
        """

        self.model.set_individual_registration_parameters(p)

    def get_individual_model_parameters(self):
        """
        Returns only the model parameters that are individual to a model (i.e., not shared).

        :return: individual model parameters
        """
        return self.model.get_individual_registration_parameters()

    def _collect_individual_or_shared_parameters_in_list(self, pars):
        pl = []
        for p_key in pars:
            pl.append(pars[p_key])
        return pl

    def load_shared_state_dict(self, sd):
        """
        Loads the shared part of a state dictionary

        :param sd: shared state dictionary
        :return: n/a
        """
        self.model.load_shared_state_dict(sd)

    def shared_state_dict(self):
        """
        Returns the shared part of a state dictionary

        :return: shared state dictionary
        """
        return self.model.shared_state_dict()

    def load_individual_state_dict(self):
        raise ValueError('Not yet implemented')

    def individual_state_dict(self):
        raise ValueError('Not yet implemented')
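    # Illustrative sketch only: the shared and individual parameters above are
    # plain (ordered) dicts, so they can be inspected directly, e.g.:
    #
    #   for name, p in opt.get_shared_model_parameters().items():
    #       print(name, tuple(p.size()))
    #
    # get_individual_model_parameters() works the same way for the
    # per-registration parameters.

    def 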
upsample_model_parameters(self, desiredSize):\n \"\"\"\n Upsamples the model parameters\n\n :param desiredSize: desired size after upsampling, e.g., [100,20,50]\n :return: returns a tuple (upsampled_parameters,upsampled_spacing)\n \"\"\"\n return self.model.upsample_registration_parameters(desiredSize)\n\n def downsample_model_parameters(self, desiredSize):\n \"\"\"\n Downsamples the model parameters\n\n :param desiredSize: desired size after downsampling, e.g., [50,50,40]\n :return: returns a tuple (downsampled_parameters,downsampled_spacing)\n \"\"\"\n return self.model.downsample_registration_parameters(desiredSize)\n\n def _set_number_of_iterations_from_multi_scale(self, nrIter):\n \"\"\"\n Same as set_number_of_iterations with the exception that this is not recored in the parameter structure since it comes from the multi-scale setting\n :param nrIter: number of iterations\n \"\"\"\n self.nrOfIterations = nrIter\n\n def set_number_of_iterations(self, nrIter):\n \"\"\"\n Set the number of iterations of the optimizer\n\n :param nrIter: number of iterations\n \"\"\"\n self.params['optimizer'][('single_scale', {}, 'single scale settings')]\n self.params['optimizer']['single_scale']['nr_of_iterations'] = (nrIter, 'number of iterations')\n\n self.nrOfIterations = nrIter\n\n def get_number_of_iterations(self):\n \"\"\"\n Returns the number of iterations of the solver\n\n :return: number of set iterations\n \"\"\"\n return self.nrOfIterations\n\n def _closure(self):\n self.optimizer_instance.zero_grad()\n # 1) Forward pass: Compute predicted y by passing x to the model\n # 2) Compute loss\n\n # first define variables that will be passed to the model and the criterion (for further use)\n\n over_scale_iter_count = self.iter_count if self.over_scale_iter_count is None else self.over_scale_iter_count + self.iter_count\n opt_variables = {'iter': self.iter_count, 'epoch': self.current_epoch, 'scale': self.n_scale,\n 'over_scale_iter_count': over_scale_iter_count}\n\n self.rec_IWarped, self.rec_phiWarped, self.rec_phiInverseWarped = model_evaluation.evaluate_model_low_level_interface(\n model=self.model,\n I_source=self.ISource,\n opt_variables=opt_variables,\n use_map=self.useMap,\n initial_map=self.initialMap,\n compute_inverse_map=self.compute_inverse_map,\n initial_inverse_map=self.initialInverseMap,\n map_low_res_factor=self.mapLowResFactor,\n sampler=self.sampler,\n low_res_spacing=self.lowResSpacing,\n spline_order=self.spline_order,\n low_res_I_source=self.lowResISource,\n low_res_initial_map=self.lowResInitialMap,\n low_res_initial_inverse_map=self.lowResInitialInverseMap,\n compute_similarity_measure_at_low_res=self.compute_similarity_measure_at_low_res)\n\n # compute the respective losses\n if self.useMap:\n if self.mapLowResFactor is not None and self.compute_similarity_measure_at_low_res:\n loss_overall_energy, sim_energy, reg_energy = self.criterion(self.lowResInitialMap, self.rec_phiWarped,\n self.lowResISource, self.lowResITarget,\n self.lowResISource,\n self.model.get_variables_to_transfer_to_loss_function(),\n opt_variables)\n else:\n loss_overall_energy,sim_energy,reg_energy = self.criterion(self.initialMap, self.rec_phiWarped, self.ISource, self.ITarget, self.lowResISource,\n self.model.get_variables_to_transfer_to_loss_function(),\n opt_variables)\n else:\n loss_overall_energy,sim_energy,reg_energy = self.criterion(self.rec_IWarped, self.ISource, self.ITarget,\n self.model.get_variables_to_transfer_to_loss_function(),\n opt_variables )\n\n # to support consensus optimization we 
have the option of adding a penalty term\n # based on shared parameters\n opt_par_loss_energy = self.compute_optimizer_parameter_loss(self.model.get_shared_registration_parameters())\n loss_overall_energy = loss_overall_energy + opt_par_loss_energy\n loss_overall_energy.backward()\n\n # do gradient clipping\n if self.clip_individual_gradient:\n current_individual_grad_norm = torch.nn.utils.clip_grad_norm_(\n self._collect_individual_or_shared_parameters_in_list(self.get_individual_model_parameters()),\n self.clip_individual_gradient_value)\n\n if self.clip_display:\n if current_individual_grad_norm>self.clip_individual_gradient_value:\n print('INFO: Individual gradient was clipped: {} -> {}'.format(current_individual_grad_norm,self.clip_individual_gradient_value))\n\n if self.clip_shared_gradient:\n current_shared_grad_norm = torch.nn.utils.clip_grad_norm_(\n self._collect_individual_or_shared_parameters_in_list(self.get_shared_model_parameters()),\n self.clip_shared_gradient_value)\n\n if self.clip_display:\n if current_shared_grad_norm > self.clip_shared_gradient_value:\n print('INFO: Shared gradient was clipped: {} -> {}'.format(current_shared_grad_norm,\n self.clip_shared_gradient_value))\n\n self.rec_custom_optimizer_output_string = self.model.get_custom_optimizer_output_string()\n self.rec_custom_optimizer_output_values = self.model.get_custom_optimizer_output_values()\n\n self.rec_energy = loss_overall_energy\n self.rec_similarityEnergy = sim_energy\n self.rec_regEnergy = reg_energy\n self.rec_opt_par_loss_energy = opt_par_loss_energy\n\n # if self.useMap:\n #\n # if self.iter_count % 1 == 0:\n # self.rec_energy, self.rec_similarityEnergy, self.rec_regEnergy = self.criterion.get_energy(\n # self.identityMap, self.rec_phiWarped, self.ISource, self.ITarget, self.lowResISource, self.model.get_variables_to_transfer_to_loss_function())\n # else:\n # if self.iter_count % 1 == 0:\n # self.rec_energy, self.rec_similarityEnergy, self.rec_regEnergy = self.criterion.get_energy(\n # self.rec_IWarped, self.ISource, self.ITarget, self.model.get_variables_to_transfer_to_loss_function())\n\n return loss_overall_energy\n\n def analysis(self, energy, similarityEnergy, regEnergy, opt_par_energy, phi_or_warped_image, custom_optimizer_output_string ='', custom_optimizer_output_values=None, force_visualization=False):\n \"\"\"\n print out the and visualize the result\n :param energy:\n :param similarityEnergy:\n :param regEnergy:\n :param opt_par_energy\n :param phi_or_warped_image:\n :return: returns tuple: first entry True if termination tolerance was reached, otherwise returns False; second entry if the image was visualized\n \"\"\"\n\n current_batch_size = phi_or_warped_image.size()[0]\n\n was_visualized = False\n reached_tolerance = False\n\n cur_energy = utils.t2np(energy.float())\n # energy analysis\n\n self._add_to_history('iter', self.iter_count)\n self._add_to_history('energy', cur_energy[0])\n self._add_to_history('similarity_energy', utils.t2np(similarityEnergy.float()))\n self._add_to_history('regularization_energy', utils.t2np(regEnergy.float()))\n self._add_to_history('opt_par_energy', utils.t2np(opt_par_energy.float())[0])\n\n if custom_optimizer_output_values is not None:\n for key in custom_optimizer_output_values:\n self._add_to_history(key,custom_optimizer_output_values[key])\n\n if self.last_energy is not None:\n\n # relative function tolerance: |f(xi)-f(xi+1)|/(1+|f(xi)|)\n self.rel_f = abs(self.last_energy - cur_energy) / (1 + abs(cur_energy))\n self._add_to_history('relF', 
self.rel_f[0])

            if self.show_iteration_output:
                cprint('{iter:5d}-Tot: E={energy:08.4f} | simE={similarityE:08.4f} | regE={regE:08.4f} | optParE={optParE:08.4f} | relF={relF:08.4f} | {cos}'
                       .format(iter=self.iter_count,
                               energy=utils.get_scalar(cur_energy),
                               similarityE=utils.get_scalar(utils.t2np(similarityEnergy.float())),
                               regE=utils.get_scalar(utils.t2np(regEnergy.float())),
                               optParE=utils.get_scalar(utils.t2np(opt_par_energy.float())),
                               relF=utils.get_scalar(self.rel_f),
                               cos=custom_optimizer_output_string), 'red')
                cprint('{iter:5d}-Img: E={energy:08.4f} | simE={similarityE:08.4f} | regE={regE:08.4f} |'
                       .format(iter=self.iter_count,
                               energy=utils.get_scalar(cur_energy) / current_batch_size,
                               similarityE=utils.get_scalar(utils.t2np(similarityEnergy.float())) / current_batch_size,
                               regE=utils.get_scalar(utils.t2np(regEnergy.float())) / current_batch_size), 'blue')

            # check if relative convergence tolerance is reached
            if self.rel_f < self.rel_ftol:
                if self.show_iteration_output:
                    print('Reached relative function tolerance of ' + str(self.rel_ftol))
                reached_tolerance = True

        else:
            self._add_to_history('relF', None)
            if self.show_iteration_output:
                cprint('{iter:5d}-Tot: E={energy:08.4f} | simE={similarityE:08.4f} | regE={regE:08.4f} | optParE={optParE:08.4f} | relF= n/a | {cos}'
                       .format(iter=self.iter_count,
                               energy=utils.get_scalar(cur_energy),
                               similarityE=utils.get_scalar(utils.t2np(similarityEnergy.float())),
                               regE=utils.get_scalar(utils.t2np(regEnergy.float())),
                               optParE=utils.get_scalar(utils.t2np(opt_par_energy.float())),
                               cos=custom_optimizer_output_string), 'red')
                cprint('{iter:5d}-Img: E={energy:08.4f} | simE={similarityE:08.4f} | regE={regE:08.4f} |'
                       .format(iter=self.iter_count,
                               energy=utils.get_scalar(cur_energy)/current_batch_size,
                               similarityE=utils.get_scalar(utils.t2np(similarityEnergy.float()))/current_batch_size,
                               regE=utils.get_scalar(utils.t2np(regEnergy.float()))/current_batch_size), 'blue')

        iter_count = self.iter_count
        self.last_energy = cur_energy

        if self.recording_step is not None:
            if iter_count % self.recording_step == 0 or iter_count == 0:
                if self.useMap:
                    if self.compute_similarity_measure_at_low_res:
                        I1Warped = utils.compute_warped_image_multiNC(self.lowResISource,
                                                                      phi_or_warped_image,
                                                                      self.lowResSpacing,
                                                                      self.spline_order,
                                                                      zero_boundary=False)
                        lowResLWarped = utils.get_warped_label_map(self.lowResLSource,
                                                                   phi_or_warped_image,
                                                                   self.spacing)
                        self.history['recording'].append({
                            'iter': iter_count,
                            'iS': utils.t2np(self.ISource),
                            'iT': utils.t2np(self.ITarget),
                            'iW': utils.t2np(I1Warped),
                            'iSL': utils.t2np(self.lowResLSource) if self.lowResLSource is not None else None,
                            'iTL': utils.t2np(self.lowResLTarget) if self.lowResLTarget is not None else None,
                            'iWL': utils.t2np(lowResLWarped) if lowResLWarped is not None else None,
                            'phiWarped': utils.t2np(phi_or_warped_image)
                        })
                    else:
                        I1Warped = utils.compute_warped_image_multiNC(self.ISource,
                                                                      phi_or_warped_image,
                                                                      self.spacing,
                                                                      self.spline_order,
                                                                      zero_boundary=False)
                        LWarped = None
                        if self.LSource is not None and self.LTarget is not None:
                            LWarped = utils.get_warped_label_map(self.LSource,
                                                                 phi_or_warped_image,
                                                                 self.spacing)
                        self.history['recording'].append({
                            'iter': iter_count,
                            'iS': utils.t2np(self.ISource),
                            'iT': utils.t2np(self.ITarget),
                            'iW': utils.t2np(I1Warped),
                            'iSL': utils.t2np(self.LSource) if self.LSource is not None else None,
                            'iTL': utils.t2np(self.LTarget) if 
self.LTarget is not None else None,\n 'iWL': utils.t2np(LWarped) if LWarped is not None else None,\n 'phiWarped': utils.t2np(phi_or_warped_image)\n })\n else:\n self.history['recording'].append({\n 'iter': iter_count,\n 'iS': utils.t2np(self.ISource),\n 'iT': utils.t2np(self.ITarget),\n 'iW': utils.t2np(phi_or_warped_image)\n })\n\n if self.visualize or self.save_fig:\n visual_param = {}\n visual_param['visualize'] = self.visualize\n visual_param['save_fig'] = self.save_fig\n visual_param['save_fig_num'] = self.save_fig_num\n if self.save_fig:\n visual_param['save_fig_path'] = self.save_fig_path\n visual_param['save_fig_path_byname'] = os.path.join(self.save_fig_path, 'byname')\n visual_param['save_fig_path_byiter'] = os.path.join(self.save_fig_path, 'byiter')\n visual_param['pair_name'] = self.pair_name\n visual_param['iter'] = 'scale_'+str(self.n_scale) + '_iter_' + str(self.iter_count)\n\n if self.visualize_step and (iter_count % self.visualize_step == 0) or (iter_count == self.nrOfIterations-1) or force_visualization:\n was_visualized = True\n if self.useMap and self.mapLowResFactor is not None:\n vizImage, vizName = self.model.get_parameter_image_and_name_to_visualize(self.lowResISource)\n else:\n vizImage, vizName = self.model.get_parameter_image_and_name_to_visualize(self.ISource)\n\n if self.useMap:\n if self.compute_similarity_measure_at_low_res:\n I1Warped = utils.compute_warped_image_multiNC(self.lowResISource,\n phi_or_warped_image,\n self.lowResSpacing,\n self.spline_order,\n zero_boundary=False)\n lowResLWarped = utils.get_warped_label_map(self.lowResLSource,\n phi_or_warped_image,\n self.spacing)\n vizReg.show_current_images(iter=iter_count,\n iS=self.lowResISource,\n iT=self.lowResITarget,\n iW=I1Warped,\n iSL=self.lowResLSource,\n iTL=self.lowResLTarget,\n iWL=lowResLWarped,\n vizImages=vizImage,\n vizName=vizName,\n phiWarped=phi_or_warped_image,\n visual_param=visual_param)\n\n else:\n I1Warped = utils.compute_warped_image_multiNC(self.ISource,\n phi_or_warped_image,\n self.spacing,\n self.spline_order,\n zero_boundary=False)\n vizImage = vizImage if len(vizImage)>2 else None\n LWarped = None\n if self.LSource is not None and self.LTarget is not None:\n LWarped = utils.get_warped_label_map(self.LSource,\n phi_or_warped_image,\n self.spacing)\n\n vizReg.show_current_images(iter=iter_count,\n iS=self.ISource,\n iT=self.ITarget,\n iW=I1Warped,\n iSL=self.LSource,\n iTL=self.LTarget,\n iWL=LWarped,\n vizImages=vizImage,\n vizName=vizName,\n phiWarped=phi_or_warped_image,\n visual_param=visual_param)\n else:\n vizReg.show_current_images(iter=iter_count,\n iS=self.ISource,\n iT=self.ITarget,\n iW=phi_or_warped_image,\n vizImages=vizImage,\n vizName=vizName,\n phiWarped=None,\n visual_param=visual_param)\n\n return reached_tolerance, was_visualized\n\n def _debugging_saving_intermid_img(self,img=None,is_label_map=False, append=''):\n folder_path = os.path.join(self.save_fig_path,'debugging')\n folder_path = os.path.join(folder_path, self.pair_name[0])\n make_dir(folder_path)\n file_name = 'scale_'+str(self.n_scale) + '_iter_' + str(self.iter_count)+append\n file_name=file_name.replace('.','_')\n if is_label_map:\n file_name += '_label'\n path = os.path.join(folder_path,file_name+'.nii.gz')\n im_io = FIO.ImageIO()\n im_io.write(path, np.squeeze(img.detach().cpu().numpy()))\n\n # todo: write these parameter/optimizer functions also for shared parameters and all parameters\n def set_sgd_shared_model_parameters_and_optimizer_states(self, pars):\n \"\"\"\n Set the individual model 
parameters and states that may be stored by the optimizer such as the momentum.\n Expects as input what get_sgd_individual_model_parameters_and_optimizer_states creates as output,\n but potentially multiple copies of it (as generated by a pyTorch dataloader). I.e., it takes in a dataloader sample.\n NOTE: currently only supports SGD\n\n :param pars: parameter list as produced by get_sgd_individual_model_parameters_and_optimizer_states\n :return: n/a\n \"\"\"\n if self.optimizer_instance is None:\n raise ValueError('Optimizer not yet created')\n\n if (self._sgd_par_list is None) or (self._sgd_par_names is None):\n raise ValueError(\n 'sgd par list and/or par names not available; needs to be created before passing it to the optimizer')\n\n if len(pars) == 0:\n print('WARNING: found no values')\n return\n\n # the optimizer (if properly initialized) already holds pointers to the model parameters and the optimizer states\n # so we can set everything in one swoop here\n\n # loop over the SGD parameter groups (this is modeled after the code in the SGD optimizer)\n # this input will represent a sample from a pytorch dataloader\n\n # wrap the parameters in a list if needed (so we can mirror the setup from get_sgd_...\n if type(pars) == list:\n use_pars = pars\n else:\n use_pars = [pars]\n\n for p in use_pars:\n if 'is_shared' in p:\n if p['is_shared']:\n current_name = p['name']\n\n assert (torch.is_tensor(p['model_params']))\n current_model_params = p['model_params']\n\n if 'momentum_buffer' in p:\n assert (torch.is_tensor(p['momentum_buffer']))\n current_momentum_buffer = p['momentum_buffer']\n else:\n current_momentum_buffer = None\n\n # now we need to match this with the parameters and the state of the SGD optimizer\n model_par = self._sgd_name_to_model_par[current_name]\n model_par.data.copy_(current_model_params)\n\n # and now do the same with the state\n param_state = self.optimizer_instance.state[model_par]\n if 'momentum_buffer' in param_state:\n param_state['momentum_buffer'].copy_(current_momentum_buffer)\n\n def set_sgd_individual_model_parameters_and_optimizer_states(self, pars):\n \"\"\"\n Set the individual model parameters and states that may be stored by the optimizer such as the momentum.\n Expects as input what get_sgd_individual_model_parameters_and_optimizer_states creates as output,\n but potentially multiple copies of it (as generated by a pyTorch dataloader). 
I.e., it takes in a dataloader sample.\n NOTE: currently only supports SGD\n\n :param pars: parameter list as produced by get_sgd_individual_model_parameters_and_optimizer_states\n :return: n/a\n \"\"\"\n if self.optimizer_instance is None:\n raise ValueError('Optimizer not yet created')\n\n if (self._sgd_par_list is None) or (self._sgd_par_names is None):\n raise ValueError(\n 'sgd par list and/or par names not available; needs to be created before passing it to the optimizer')\n\n if len(pars) == 0:\n print('WARNING: found no values')\n return\n\n # the optimizer (if properly initialized) already holds pointers to the model parameters and the optimizer states\n # so we can set everything in one swoop here\n\n # loop over the SGD parameter groups (this is modeled after the code in the SGD optimizer)\n # this input will represent a sample from a pytorch dataloader\n\n # wrap the parameters in a list if needed (so we can mirror the setup from get_sgd_...\n if type(pars)==list:\n use_pars = pars\n else:\n use_pars = [pars]\n\n for p in use_pars:\n if 'is_shared' in p:\n if not p['is_shared'][0]: # need to grab the first one, because the dataloader replicated these entries\n current_name = p['name'][0]\n\n assert( torch.is_tensor(p['model_params']))\n current_model_params = p['model_params']\n\n if 'momentum_buffer' in p:\n assert( torch.is_tensor(p['momentum_buffer']) )\n current_momentum_buffer = p['momentum_buffer']\n else:\n current_momentum_buffer = None\n\n # now we need to match this with the parameters and the state of the SGD optimizer\n model_par = self._sgd_name_to_model_par[current_name]\n model_par.data.copy_(current_model_params)\n\n # and now do the same with the state\n param_state = self.optimizer_instance.state[model_par]\n if 'momentum_buffer' in param_state:\n param_state['momentum_buffer'].copy_(current_momentum_buffer)\n\n def _convert_obj_with_parameters_to_obj_with_tensors(self, p):\n \"\"\"\n Converts structures that consist of lists and dictionaries with parameters to tensors\n\n :param p: parameter structure\n :return: object with parameters converted to tensors\n \"\"\"\n\n if type(p) == list:\n ret_p = []\n for e in p:\n ret_p.append(self._convert_obj_with_parameters_to_obj_with_tensors(e))\n return ret_p\n elif type(p) == dict:\n ret_p = dict()\n for key in p:\n ret_p[key] = self._convert_obj_with_parameters_to_obj_with_tensors((p[key]))\n return ret_p\n elif type(p) == torch.nn.parameter.Parameter:\n return p.data\n else:\n return p\n\n def get_sgd_shared_model_parameters(self):\n \"\"\"\n Gets the model parameters that are shared.\n\n :return:\n \"\"\"\n\n if self.optimizer_instance is None:\n raise ValueError('Optimizer not yet created')\n\n if (self._sgd_par_list is None) or (self._sgd_par_names is None):\n raise ValueError(\n 'sgd par list and/or par names not available; needs to be created before passing it to the optimizer')\n\n d = []\n\n # loop over the SGD parameter groups (this is modeled after the code in the SGD optimizer)\n for group in self.optimizer_instance.param_groups:\n\n group_dict = dict()\n group_dict['params'] = []\n\n for p in group['params']:\n current_group_params = dict()\n # let's first see if this is a shared state\n if self._sgd_par_names[p]['is_shared']:\n # keep track of the names so we can and batch, so we can read it back in\n current_group_params.update(self._sgd_par_names[p])\n # now deal with the optimizer state if available\n current_group_params['model_params'] = self._convert_obj_with_parameters_to_obj_with_tensors(p)\n\n 
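                # for a shared parameter, current_group_params now holds its name,
                # the 'is_shared' flag, and a tensor copy of its data (it stays
                # empty for individual parameters), so it can later be collated by
                # a dataloader and read back in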
group_dict['params'].append(current_group_params)\n\n d.append(group_dict)\n\n return d\n\n\n def get_sgd_individual_model_parameters_and_optimizer_states(self):\n \"\"\"\n Gets the individual model parameters and states that may be stored by the optimizer such as the momentum.\n NOTE: currently only supports SGD\n\n :return:\n \"\"\"\n if self.optimizer_instance is None:\n raise ValueError('Optimizer not yet created')\n\n if (self._sgd_par_list is None) or (self._sgd_par_names is None):\n raise ValueError(\n 'sgd par list and/or par names not available; needs to be created before passing it to the optimizer')\n\n d = []\n\n # loop over the SGD parameter groups (this is modeled after the code in the SGD optimizer)\n for group in self.optimizer_instance.param_groups:\n\n group_dict = dict()\n group_dict['weight_decay'] = group['weight_decay']\n group_dict['momentum'] = group['momentum']\n group_dict['dampening'] = group['dampening']\n group_dict['nesterov'] = group['nesterov']\n group_dict['lr'] = group['lr']\n\n group_dict['params'] = []\n\n for p in group['params']:\n current_group_params = dict()\n # let's first see if this is a shared state\n if not self._sgd_par_names[p]['is_shared']:\n # keep track of the names so we can and batch, so we can read it back in\n current_group_params.update(self._sgd_par_names[p])\n # now deal with the optimizer state if available\n current_group_params['model_params'] = self._convert_obj_with_parameters_to_obj_with_tensors(p)\n if group['momentum'] != 0:\n param_state = self.optimizer_instance.state[p]\n if 'momentum_buffer' in param_state:\n current_group_params['momentum_buffer'] = self._convert_obj_with_parameters_to_obj_with_tensors(param_state['momentum_buffer'])\n\n group_dict['params'].append(current_group_params)\n\n d.append(group_dict)\n\n return d\n\n def _remove_state_variables_for_individual_parameters(self,individual_pars):\n \"\"\"\n Removes the optimizer state for individual parameters.\n This is required at the beginning as we do not want to reuse the SGD momentum for example for an unrelated registration.\n\n :param individual_pars: individual parameters are returned by get_sgd_individual_model_parameters_and_optimizer_states\n :return: n/a\n \"\"\"\n\n if self.optimizer_instance is None:\n raise ValueError('Optimizer not yet created')\n\n if (self._sgd_par_list is None) or (self._sgd_par_names is None):\n raise ValueError(\n 'sgd par list and/or par names not available; needs to be created before passing it to the optimizer')\n\n # loop over the SGD parameter groups (this is modeled after the code in the SGD optimizer)\n for group in self.optimizer_instance.param_groups:\n\n for p in group['params']:\n # let's first see if this is a shared state\n if not self._sgd_par_names[p]['is_shared']:\n # we want to delete the state of this one\n self.optimizer_instance.state.pop(p)\n\n\n def _create_optimizer_parameter_dictionary(self,individual_pars, shared_pars,\n settings_individual=dict(), settings_shared=dict()):\n\n par_list = []\n \"\"\"List of parameters that can directly be passed to an optimizer; different list elements define different parameter groups\"\"\"\n par_names = dict()\n \"\"\"dictionary which maps from a parameters id (i.e., memory) to its description: name/is_shared\"\"\"\n # name is the name of the variable\n # is_shared keeps track of if a parameter was declared shared (opposed to individual, which we need for registrations)\n\n names_to_par = dict()\n \"\"\"dictionary which maps from a parameter name back to the 
parameter\"\"\"\n\n # first deal with the individual parameters\n pl_ind, par_to_name_ind = utils.get_parameter_list_and_par_to_name_dict_from_parameter_dict(individual_pars)\n #cd = {'params': pl_ind}\n cd = {'params': [p for p in pl_ind if p.requires_grad]}\n cd.update(settings_individual)\n par_list.append(cd)\n # add all the names\n for current_par, key in zip(pl_ind, par_to_name_ind):\n par_names[key] = {'name': par_to_name_ind[key], 'is_shared': False}\n names_to_par[par_to_name_ind[key]] = current_par\n\n # now deal with the shared parameters\n pl_shared, par_to_name_shared = utils.get_parameter_list_and_par_to_name_dict_from_parameter_dict(shared_pars)\n #cd = {'params': pl_shared}\n cd = {'params': [p for p in pl_shared if p.requires_grad]}\n cd.update(settings_shared)\n par_list.append(cd)\n for current_par, key in zip(pl_shared, par_to_name_shared):\n par_names[key] = {'name': par_to_name_shared[key], 'is_shared': True}\n names_to_par[par_to_name_shared[key]] = current_par\n\n return par_list, par_names, names_to_par\n\n def _write_out_shared_parameters(self, model_pars, filename):\n\n # just write out the ones that are shared\n for group in model_pars:\n if 'params' in group:\n was_shared_group = False # there can only be one\n # create lists that will hold the information for the different batches\n cur_pars = []\n\n # now iterate through the current parameter list\n for p in group['params']:\n needs_to_be_saved = True\n if 'is_shared' in p:\n if not p['is_shared']:\n needs_to_be_saved = False\n\n if needs_to_be_saved:\n # we found a shared entry\n was_shared_group = True\n cur_pars.append(p)\n\n # now we have the parameter list for one of the elements of the batch and we can write it out\n if was_shared_group: # otherwise will be overwritten by a later parameter group\n torch.save(cur_pars, filename)\n\n\n def _write_out_individual_parameters(self, model_pars, filenames):\n\n batch_size = len(filenames)\n\n # just write out the ones that are individual\n for group in model_pars:\n if 'params' in group:\n was_individual_group = False # there can only be one\n # create lists that will hold the information for the different batches\n for b in range(batch_size):\n cur_pars = []\n\n # now iterate through the current parameter list\n for p in group['params']:\n if 'is_shared' in p:\n # we found an individual entry\n if not p['is_shared']:\n was_individual_group = True\n # now go through this dictionary, extract the current batch info in it,\n # and append it to the current batch parameter list\n cur_dict = dict()\n for p_el in p:\n if p_el == 'name':\n cur_dict['name'] = p[p_el]\n elif p_el == 'is_shared':\n cur_dict['is_shared'] = p[p_el]\n else:\n # this will be a tensor so we need to extract the information for the current batch\n cur_dict[p_el] = p[p_el][b, ...]\n\n cur_pars.append(cur_dict)\n\n # now we have the parameter list for one of the elements of the batch and we can write it out\n if was_individual_group: # otherwise will be overwritten by a later parameter group\n torch.save(cur_pars, filenames[b])\n\n def _get_optimizer_instance(self):\n\n if (self.model is None) or (self.criterion is None):\n raise ValueError('Please specify a model to solve with set_model first')\n\n # first check if an optimizer was specified externally\n\n if self.optimizer is not None:\n # simply instantiate it\n if self.optimizer_name is not None:\n print('Warning: optimizer name = ' + str(self.optimizer_name) +\n ' specified, but ignored since optimizer was set explicitly')\n opt_instance = 
self.optimizer(self.model.parameters(), **self.optimizer_params)\n return opt_instance\n else:\n # select it by name\n # TODO: Check what the best way to adapt the tolerances is here; tying it to rel_ftol is not really correct\n if self.optimizer_name is None:\n raise ValueError('Need to select an optimizer')\n elif self.optimizer_name == 'lbfgs_ls':\n if self.last_successful_step_size_taken is not None:\n desired_lr = self.last_successful_step_size_taken\n else:\n desired_lr = 1.0\n max_iter = self.params['optimizer']['lbfgs'][('max_iter',1,'maximum number of iterations')]\n max_eval = self.params['optimizer']['lbfgs'][('max_eval',5,'maximum number of evaluation')]\n history_size = self.params['optimizer']['lbfgs'][('history_size',5,'Size of the optimizer history')]\n line_search_fn = self.params['optimizer']['lbfgs'][('line_search_fn','backtracking','Type of line search function')]\n\n opt_instance = CO.LBFGS_LS(self.model.parameters(),\n lr=desired_lr, max_iter=max_iter, max_eval=max_eval,\n tolerance_grad=self.rel_ftol * 10, tolerance_change=self.rel_ftol,\n history_size=history_size, line_search_fn=line_search_fn)\n return opt_instance\n elif self.optimizer_name == 'sgd':\n #if self.last_successful_step_size_taken is not None:\n # desired_lr = self.last_successful_step_size_taken\n #else:\n\n if self.default_learning_rate is not None:\n current_default_learning_rate = self.default_learning_rate\n self.params['optimizer']['sgd']['individual']['lr'] = current_default_learning_rate\n self.params['optimizer']['sgd']['shared']['lr'] = current_default_learning_rate\n\n else:\n current_default_learning_rate = 0.01\n\n desired_lr_individual = self.params['optimizer']['sgd']['individual'][('lr',current_default_learning_rate,'desired learning rate')]\n sgd_momentum_individual = self.params['optimizer']['sgd']['individual'][('momentum',0.9,'sgd momentum')]\n sgd_dampening_individual = self.params['optimizer']['sgd']['individual'][('dampening',0.0,'sgd dampening')]\n sgd_weight_decay_individual = self.params['optimizer']['sgd']['individual'][('weight_decay',0.0,'sgd weight decay')]\n sgd_nesterov_individual = self.params['optimizer']['sgd']['individual'][('nesterov',True,'use Nesterove scheme')]\n\n desired_lr_shared = self.params['optimizer']['sgd']['shared'][('lr', current_default_learning_rate, 'desired learning rate')]\n sgd_momentum_shared = self.params['optimizer']['sgd']['shared'][('momentum', 0.9, 'sgd momentum')]\n sgd_dampening_shared = self.params['optimizer']['sgd']['shared'][('dampening', 0.0, 'sgd dampening')]\n sgd_weight_decay_shared = self.params['optimizer']['sgd']['shared'][('weight_decay', 0.0, 'sgd weight decay')]\n sgd_nesterov_shared = self.params['optimizer']['sgd']['shared'][('nesterov', True, 'use Nesterove scheme')]\n\n settings_shared = {'momentum': sgd_momentum_shared,\n 'dampening': sgd_dampening_shared,\n 'weight_decay': sgd_weight_decay_shared,\n 'nesterov': sgd_nesterov_shared,\n 'lr': desired_lr_shared}\n\n settings_individual = {'momentum': sgd_momentum_individual,\n 'dampening': sgd_dampening_individual,\n 'weight_decay': sgd_weight_decay_individual,\n 'nesterov': sgd_nesterov_individual,\n 'lr': desired_lr_individual}\n\n self._sgd_par_list, self._sgd_par_names, self._sgd_name_to_model_par = self._create_optimizer_parameter_dictionary(\n self.model.get_individual_registration_parameters(),\n self.model.get_shared_registration_parameters(),\n settings_individual=settings_individual,\n settings_shared=settings_shared)\n\n opt_instance = 
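None  # placeholder; assigned immediately below
            # The parameter list assembled above contains two groups (individual
            # and shared), each carrying its own lr/momentum/weight-decay settings,
            # so a single SGD instance can drive both with different hyperparameters.
            opt_instance = 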
torch.optim.SGD(self._sgd_par_list)\n\n return opt_instance\n elif self.optimizer_name == 'adam':\n if self.last_successful_step_size_taken is not None:\n desired_lr = self.last_successful_step_size_taken\n else:\n if self.default_learning_rate is not None:\n current_default_learning_rate = self.default_learning_rate\n self.params['optimizer']['adam']['lr'] = current_default_learning_rate\n else:\n current_default_learning_rate = 0.01\n desired_lr = self.params['optimizer']['adam'][('lr',current_default_learning_rate,'desired learning rate')]\n\n adam_betas = self.params['optimizer']['adam'][('betas',[0.9,0.999],'adam betas')]\n adam_eps = self.params['optimizer']['adam'][('eps',self.rel_ftol,'adam eps')]\n adam_weight_decay = self.params['optimizer']['adam'][('weight_decay',0.0,'adam weight decay')]\n opt_instance = torch.optim.Adam(self.model.parameters(), lr=desired_lr,\n betas=adam_betas,\n eps=adam_eps,\n weight_decay=adam_weight_decay)\n return opt_instance\n else:\n raise ValueError('Optimizer = ' + str(self.optimizer_name) + ' not yet supported')\n\n def _set_all_still_missing_parameters(self):\n\n if self.optimizer_name is None:\n self.optimizer_name = self.params['optimizer'][('name','lbfgs_ls','Optimizer (lbfgs|adam|sgd)')]\n\n if self.model is None:\n model_name = self.params['model']['registration_model'][('type', 'lddmm_shooting_map', \"['svf'|'svf_quasi_momentum'|'svf_scalar_momentum'|'svf_vector_momentum'|'lddmm_shooting'|'lddmm_shooting_scalar_momentum'] all with suffix '_map' or '_image'\")]\n self.params['model']['deformation'][('use_map', True, 'use a map for the solution or not True/False' )]\n self.set_model( model_name )\n\n if self.nrOfIterations is None: # not externally set, so this will not be a multi-scale solution\n self.params['optimizer'][('single_scale', {}, 'single scale settings')]\n self.nrOfIterations = self.params['optimizer']['single_scale'][('nr_of_iterations', 10, 'number of iterations')]\n\n # get the optimizer\n if self.optimizer_instance is None:\n self.optimizer_instance = self._get_optimizer_instance()\n\n if USE_CUDA:\n self.model = self.model.cuda()\n\n self.compute_low_res_image_if_needed()\n self.optimizer_has_been_initialized = True\n\n def set_scheduler_patience(self,patience):\n self.params['optimizer']['scheduler']['patience'] = patience\n self.scheduler_patience = patience\n\n def set_scheduler_patience_silent(self,patience):\n self.scheduler_patience = patience\n\n def get_scheduler_patience(self):\n return self.scheduler_patience\n\n def _set_use_external_scheduler(self):\n self._use_external_scheduler = True\n\n def _set_use_internal_scheduler(self):\n self._use_external_scheduler = False\n\n def _get_use_external_scheduler(self):\n return self._use_external_scheduler\n\n def _get_dictionary_to_pass_to_integrator(self):\n \"\"\"\n This is experimental to allow passing additional parameters to integrators/smoothers, etc.\n\n :return: dictionary\n \"\"\"\n\n d = dict()\n\n if self.mapLowResFactor is not None:\n d['I0'] = self.lowResISource\n d['I1'] = self.lowResITarget\n else:\n d['I0'] = self.ISource\n d['I1'] = self.ITarget\n\n return d\n\n def optimize(self):\n \"\"\"\n Do the single scale optimization\n \"\"\"\n\n self._set_all_still_missing_parameters()\n\n # in this way model parameters can be \"set\" before the optimizer has been properly initialized\n if self.delayed_model_parameters_still_to_be_set:\n print('Setting model parameters, delayed')\n self.set_model_parameters(self.delayed_model_parameters)\n\n if 
self.delayed_model_state_dict_still_to_be_set:\n print('Setting model state dict, delayed')\n self.set_model_state_dict(self.delayed_model_state_dict)\n\n # this allows passing addtional parameters to the smoothers for all models and smoothers\n self.model.set_dictionary_to_pass_to_integrator(self._get_dictionary_to_pass_to_integrator())\n self.criterion.set_dictionary_to_pass_to_smoother(self._get_dictionary_to_pass_to_integrator())\n\n # optimize for a few steps\n start = time.time()\n\n self.last_energy = None\n could_not_find_successful_step = False\n\n if not self._use_external_scheduler:\n self.use_step_size_scheduler = self.params['optimizer'][('use_step_size_scheduler',True,'If set to True the step sizes are reduced if no progress is made')]\n\n if self.use_step_size_scheduler:\n self.params['optimizer'][('scheduler', {}, 'parameters for the ReduceLROnPlateau scheduler')]\n self.scheduler_verbose = self.params['optimizer']['scheduler'][\n ('verbose', True, 'if True prints out changes in learning rate')]\n self.scheduler_factor = self.params['optimizer']['scheduler'][('factor', 0.5, 'reduction factor')]\n self.scheduler_patience = self.params['optimizer']['scheduler'][\n ('patience', 10, 'how many steps without reduction before LR is changed')]\n\n if self.use_step_size_scheduler and self.scheduler is None:\n self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer_instance, 'min',\n verbose=self.scheduler_verbose,\n factor=self.scheduler_factor,\n patience=self.scheduler_patience)\n\n self.iter_count = 0\n for iter in range(self.nrOfIterations):\n\n # take a step of the optimizer\n # for p in self.optimizer_instance._params:\n # p.data = p.data.float()\n\n current_loss = self.optimizer_instance.step(self._closure)\n\n # do weight clipping if it is desired\n self._do_weight_clipping()\n\n # an external scheduler may for example be used in batch optimization\n if not self._use_external_scheduler:\n if self.use_step_size_scheduler:\n self.scheduler.step(current_loss.data[0])\n\n if hasattr(self.optimizer_instance,'last_step_size_taken'):\n self.last_successful_step_size_taken = self.optimizer_instance.last_step_size_taken()\n\n if self.last_successful_step_size_taken==0.0:\n print('Optimizer was not able to find a successful step. 
Stopping iterations.')\n                could_not_find_successful_step = True\n                if iter==0:\n                    print('The gradient was likely too large or the optimization started from an optimal point.')\n                    print('If this behavior is unexpected try adjusting the settings of the similarity measure or allow the optimizer to try out smaller steps.')\n\n            # to make sure warped images and the map are correct, call closure once more\n            self._closure()\n\n            if self.useMap:\n                vis_arg = self.rec_phiWarped\n            else:\n                vis_arg = self.rec_IWarped\n\n            tolerance_reached, was_visualized = self.analysis(self.rec_energy, self.rec_similarityEnergy,\n                                                              self.rec_regEnergy, self.rec_opt_par_loss_energy,\n                                                              vis_arg,\n                                                              self.rec_custom_optimizer_output_string,\n                                                              self.rec_custom_optimizer_output_values)\n\n            if tolerance_reached or could_not_find_successful_step:\n                if tolerance_reached:\n                    print('Terminating optimization, because the desired tolerance was reached.')\n\n                # force the output of the last image in this case, if it has not been visualized previously\n                if not was_visualized and (self.visualize or self.save_fig):\n                    _, _ = self.analysis(self.rec_energy, self.rec_similarityEnergy,\n                                         self.rec_regEnergy, self.rec_opt_par_loss_energy,\n                                         vis_arg,\n                                         self.rec_custom_optimizer_output_string,\n                                         self.rec_custom_optimizer_output_values,\n                                         force_visualization=True)\n                break\n\n            self.iter_count = iter+1\n\n        if self.show_iteration_output:\n            cprint('-->Elapsed time {:.5f}[s]'.format(time.time() - start), 'green')\n\n\nclass SingleScaleBatchRegistrationOptimizer(ImageRegistrationOptimizer):\n\n    def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None):\n\n        super(SingleScaleBatchRegistrationOptimizer, self).__init__(sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n\n        self.params[('optimizer', {}, 'optimizer settings')]\n        cparams = self.params['optimizer']\n        cparams[('batch_settings', {}, 'settings for the batch optimizer')]\n        cparams = cparams['batch_settings']\n\n        self.batch_size = cparams[('batch_size',2,'how many images per batch (if set larger or equal to the number of images, it will be processed as one batch)')]\n        \"\"\"how many images per batch\"\"\"\n\n        self.shuffle = cparams[('shuffle', True, 'if batches should be shuffled between epochs')]\n        \"\"\"shuffle batches between epochs\"\"\"\n\n        self.num_workers = cparams[('num_workers',0,'Number of workers to read the data.
Set it to zero on the GPU or use >0 at your own risk.')]\n \"\"\"number of workers to read the data\"\"\"\n\n self.nr_of_epochs = cparams[('nr_of_epochs', 1,'how many epochs')]\n \"\"\"how many iterations for batch; i.e., how often to iterate over the entire dataset = epochs\"\"\"\n\n self.parameter_output_dir = cparams[('parameter_output_dir','parameters','output directory to store the shared and the individual parameters during the iterations')]\n \"\"\"output directory to store the shared and the individual parameters during the iterations\"\"\"\n\n self.individual_parameter_output_dir = os.path.join(self.parameter_output_dir,'individual')\n self.shared_parameter_output_dir = os.path.join(self.parameter_output_dir,'shared')\n\n self.start_from_previously_saved_parameters = cparams[('start_from_previously_saved_parameters',True,'If set to true checks already for the first batch of files in the output directories exist and uses them if they do.')]\n \"\"\"If true then checks if previously saved parameter files exists and load them at the beginning already\"\"\"\n\n self.individual_checkpoint_output_directory = os.path.join(self.individual_parameter_output_dir,'checkpoints')\n self.shared_checkpoint_output_directory = os.path.join(self.shared_parameter_output_dir,'checkpoints')\n\n self.checkpoint_interval = cparams[('checkpoint_interval',0,'after how many epochs, checkpoints are saved; if set to 0, checkpoint will not be saved')]\n \"\"\"after how many epochs checkpoints are saved\"\"\"\n\n self.verbose_output = cparams[('verbose_output',False,'turns on verbose output')]\n\n self.show_sample_optimizer_output = cparams[('show_sample_optimizer_output',False,'If true shows the energies during optimizaton of a sample')]\n \"\"\"Shows iterations for each sample being optimized\"\"\"\n\n self.also_eliminate_shared_state_between_samples_during_first_epoch = \\\n self.params['optimizer']['sgd'][('also_eliminate_shared_state_between_samples_during_first_epoch', False,\n 'if set to true all states are eliminated, otherwise only the individual ones')]\n\n self.use_step_size_scheduler = self.params['optimizer'][('use_step_size_scheduler', True, 'If set to True the step sizes are reduced if no progress is made')]\n self.scheduler = None\n\n if self.use_step_size_scheduler:\n self.params['optimizer'][('scheduler', {}, 'parameters for the ReduceLROnPlateau scheduler')]\n self.scheduler_verbose = self.params['optimizer']['scheduler'][\n ('verbose', True, 'if True prints out changes in learning rate')]\n self.scheduler_factor = self.params['optimizer']['scheduler'][('factor', 0.75, 'reduction factor')]\n self.scheduler_patience = self.params['optimizer']['scheduler'][\n ('patience', 5, 'how many steps without reduction before LR is changed')]\n\n self.model_name = None\n self.add_model_name = None\n self.add_model_networkClass = None\n self.add_model_lossClass = None\n self.addSimName = None\n self.addSimMeasure = None\n\n self.ssOpt = None\n\n def write_parameters_to_settings(self):\n if self.ssOpt is not None:\n self.ssOpt.write_parameters_to_settings()\n\n def add_similarity_measure(self, simName, simMeasure):\n \"\"\"\n Adds a custom similarity measure\n\n :param simName: name of the similarity measure (string)\n :param simMeasure: the similarity measure itself (an object that can be instantiated)\n \"\"\"\n self.addSimName = simName\n self.addSimMeasure = simMeasure\n\n\n def set_model(self, modelName):\n \"\"\"\n Sets the model that should be solved\n\n :param modelName: name of the model that 
should be solved (string)\n        \"\"\"\n\n        self.model_name = modelName\n\n    def add_model(self, add_model_name, add_model_networkClass, add_model_lossClass):\n        \"\"\"\n        Adds a custom model to be optimized over\n\n        :param add_model_name: name of the model (string)\n        :param add_model_networkClass: network model itself (as an object that can be instantiated)\n        :param add_model_lossClass: loss of the model (as an object that can be instantiated)\n        \"\"\"\n        self.add_model_name = add_model_name\n        self.add_model_networkClass = add_model_networkClass\n        self.add_model_lossClass = add_model_lossClass\n\n\n    def get_checkpoint_dict(self):\n        d = super(SingleScaleBatchRegistrationOptimizer, self).get_checkpoint_dict()\n        if self.ssOpt is not None:\n            d['shared_parameters'] = self.ssOpt.get_shared_model_parameters_and_buffers()\n        return d\n\n    def load_checkpoint_dict(self, d, load_optimizer_state=False):\n        super(SingleScaleBatchRegistrationOptimizer, self).load_checkpoint_dict(d)\n        if 'shared_parameters' in d:\n            if self.ssOpt is not None:\n                self.ssOpt.set_shared_model_parameters(d['shared_parameters'])\n        else:\n            raise ValueError('checkpoint does not contain: shared_parameters')\n\n    def get_warped_image(self):\n        \"\"\"\n        Returns the warped image\n        :return: the warped image\n        \"\"\"\n\n        p = dict()\n        p['warped_images'] = []\n\n        print('get_warped_image: not yet implemented')\n\n        return p\n\n    def get_map(self):\n        \"\"\"\n        Returns the deformation map\n        :return: deformation map\n        \"\"\"\n\n        p = dict()\n        p['phi'] = []\n\n        print('get_map: not yet implemented')\n\n        return p\n\n    def get_inverse_map(self):\n        \"\"\"\n        Returns the inverse deformation map\n        :return: deformation map\n        \"\"\"\n\n        p = dict()\n        p['phi_inv'] = []\n\n        print('get_inverse_map: not yet implemented')\n\n        return p\n\n\n    def get_model_parameters(self):\n        \"\"\"\n        Returns the parameters of the model\n\n        :return: model parameters\n        \"\"\"\n        p = dict()\n        if self.ssOpt is not None:\n            p['shared_parameters'] = self.ssOpt.get_shared_model_parameters_and_buffers()\n\n        return p\n\n    def set_model_parameters(self, p):\n        raise ValueError('Setting model parameters not yet supported by batch optimizer')\n\n    def _set_all_still_missing_parameters(self):\n        if self.model_name is None:\n            model_name = self.params['model']['registration_model'][('type', 'lddmm_shooting_map',\n                                                                     \"['svf'|'svf_quasi_momentum'|'svf_scalar_momentum'|'svf_vector_momentum'|'lddmm_shooting'|'lddmm_shooting_scalar_momentum'] all with suffix '_map' or '_image'\")]\n            self.params['model']['deformation'][('use_map', True, 'use a map for the solution or not True/False')]\n            self.set_model(model_name)\n\n        if self.optimizer_name is None:\n            self.optimizer_name = self.params['optimizer'][('name', 'sgd', 'Optimizer (lbfgs|adam|sgd)')]\n\n        self.optimizer_has_been_initialized = True\n\n    def _create_single_scale_optimizer(self,batch_size):\n        ssOpt = SingleScaleRegistrationOptimizer(batch_size, self.spacing, self.useMap, self.mapLowResFactor, self.params, compute_inverse_map=self.compute_inverse_map, default_learning_rate=self.default_learning_rate)\n\n        if ((self.add_model_name is not None) and\n                (self.add_model_networkClass is not None) and\n                (self.add_model_lossClass is not None)):\n            ssOpt.add_model(self.add_model_name, self.add_model_networkClass, self.add_model_lossClass)\n\n        # now set the actual model we want to solve\n        ssOpt.set_model(self.model_name)\n\n        if (self.addSimName is not None) and (self.addSimMeasure is not None):\n            ssOpt.add_similarity_measure(self.addSimName, self.addSimMeasure)\n\n        if
self.optimizer_name is not None:\n            ssOpt.set_optimizer_by_name(self.optimizer_name)\n        else:\n            raise ValueError('Optimizers need to be specified by name for batch optimization at the moment.')\n\n        ssOpt.set_rel_ftol(self.get_rel_ftol())\n\n        ssOpt.set_visualization(self.get_visualization())\n        ssOpt.set_visualize_step(self.get_visualize_step())\n\n        return ssOpt\n\n    def _get_individual_checkpoint_filenames(self,output_directory,idx,epoch_iter):\n        filenames = []\n        for v in idx:\n            filenames.append(os.path.join(output_directory,'checkpoint_individual_parameter_pair_{:05d}_epoch_{:05d}.pt'.format(v,epoch_iter)))\n        return filenames\n\n    def _get_shared_checkpoint_filename(self,output_directory,epoch_iter):\n\n        filename = os.path.join(output_directory,'checkpoint_shared_parameters_epoch_{:05d}.pt'.format(epoch_iter))\n        return filename\n\n    def _create_all_output_directories(self):\n\n        if not os.path.exists(self.parameter_output_dir):\n            os.makedirs(self.parameter_output_dir)\n            print('Creating directory: ' + self.parameter_output_dir)\n\n        if not os.path.exists(self.individual_parameter_output_dir):\n            os.makedirs(self.individual_parameter_output_dir)\n            print('Creating directory: ' + self.individual_parameter_output_dir)\n\n        if not os.path.exists(self.shared_parameter_output_dir):\n            os.makedirs(self.shared_parameter_output_dir)\n            print('Creating directory: ' + self.shared_parameter_output_dir)\n\n        if not os.path.exists(self.individual_checkpoint_output_directory):\n            os.makedirs(self.individual_checkpoint_output_directory)\n            print('Creating directory: ' + self.individual_checkpoint_output_directory)\n\n        if not os.path.exists(self.shared_checkpoint_output_directory):\n            os.makedirs(self.shared_checkpoint_output_directory)\n            print('Creating directory: ' + self.shared_checkpoint_output_directory)\n\n\n    def _get_shared_parameter_filename(self,output_dir):\n        return os.path.join(output_dir,'shared_parameters.pt')\n\n    def optimize(self):\n        \"\"\"\n        The optimizer to optimize over batches of images\n\n        :return: n/a\n        \"\"\"\n\n        #todo: maybe switch loading and writing individual parameters to individual states; this would assure that all states (such as running averages, etc.) are included and not only parameters\n\n        if self.optimizer is not None:\n            raise ValueError('Custom optimizers are currently not supported for batch optimization.\\\n                Set the optimizer by name (e.g., in the json configuration) instead. Should be some form of stochastic gradient descent.')\n\n\n        self._set_all_still_missing_parameters()\n        self._create_all_output_directories()\n\n        iter_offset = 0\n\n        if torch.is_tensor(self.ISource) or torch.is_tensor(self.ITarget):\n            raise ValueError('Batch optimizer expects lists of filenames as inputs for the source and target images')\n\n        registration_data_set = OD.PairwiseRegistrationDataset(output_directory=self.individual_parameter_output_dir,\n                                                               source_image_filenames=self.ISource,\n                                                               target_image_filenames=self.ITarget,\n                                                               params=self.params)\n\n        nr_of_datasets = len(registration_data_set)\n        if nr_of_datasets<self.batch_size:\n            print('INFO: nr of datasets is smaller than batch-size.
Reducing batch size to ' + str(nr_of_datasets))\n self.batch_size=nr_of_datasets\n\n if nr_of_datasets%self.batch_size!=0:\n raise ValueError('nr_of_datasets = {}; batch_size = {}: Number of registration pairs needs to be divisible by the batch size.'.format(nr_of_datasets,self.batch_size))\n\n dataloader = DataLoader(registration_data_set, batch_size=self.batch_size,\n shuffle=self.shuffle, num_workers=self.num_workers)\n\n self.ssOpt = None\n last_batch_size = None\n\n nr_of_samples = nr_of_datasets//self.batch_size\n\n last_energy = None\n last_sim_energy = None\n last_reg_energy = None\n last_opt_energy = None\n\n shared_parameter_filename = self._get_shared_parameter_filename(self.shared_parameter_output_dir)\n\n load_individual_parameters_during_first_epoch = False\n load_shared_parameters_before_first_epoch = False\n\n if self.start_from_previously_saved_parameters:\n # check if there are files in the output_directory\n has_all_filenames = True\n for idx in range(len(self.ISource)):\n cur_filename = registration_data_set._get_parameter_filename(idx)\n if not os.path.isfile(cur_filename):\n has_all_filenames = False\n break\n\n load_individual_parameters_during_first_epoch = has_all_filenames\n load_shared_parameters_before_first_epoch = os.path.isfile(shared_parameter_filename)\n\n if load_individual_parameters_during_first_epoch:\n print('INFO: Will load the individual parameters from the previous run in directory ' + self.individual_parameter_output_dir + ' for initialization.')\n else:\n print('INFO: Will NOT load the individual parameters from the previous run in directory ' + self.individual_parameter_output_dir + ' for initialization.')\n\n if load_shared_parameters_before_first_epoch:\n print('INFO: Will load the shared parameter file ' + shared_parameter_filename + ' before computing the first epoch')\n else:\n print('INFO: Will NOT load the shared parameter file ' + shared_parameter_filename + ' before computing the first epoch')\n\n for iter_epoch in range(iter_offset,self.nr_of_epochs+iter_offset):\n if self.verbose_output:\n print('Computing epoch ' + str(iter_epoch + 1) + ' of ' + str(iter_offset+self.nr_of_epochs))\n\n cur_running_energy = 0.0\n cur_running_sim_energy = 0.0\n cur_running_reg_energy = 0.0\n cur_running_opt_energy = 0.0\n\n cur_min_energy = None\n cur_max_energy = None\n cur_min_sim_energy = None\n cur_max_sim_energy = None\n cur_min_reg_energy = None\n cur_max_reg_energy = None\n cur_min_opt_energy = None\n cur_max_opt_energy = None\n\n for i, sample in enumerate(dataloader, 0):\n\n # get the data from the dataloader\n current_source_batch = AdaptVal(sample['ISource'])\n current_target_batch = AdaptVal(sample['ITarget'])\n\n # create the optimizer\n batch_size = current_source_batch.size()\n if (batch_size != last_batch_size) and (last_batch_size is not None):\n raise ValueError('Ooops, this should not have happened.')\n\n initialize_optimizer = False\n if (batch_size != last_batch_size) or (self.ssOpt is None):\n initialize_optimizer = True\n # we need to create a new optimizer; otherwise optimizer already exists\n self.ssOpt = self._create_single_scale_optimizer(batch_size)\n\n # images need to be set before calling _set_all_still_missing_parameters\n self.ssOpt.set_source_image(current_source_batch)\n self.ssOpt.set_target_image(current_target_batch)\n self.ssOpt.set_current_epoch(iter_epoch)\n\n if initialize_optimizer:\n # to make sure we have the model initialized, force parameter installation\n self.ssOpt._set_all_still_missing_parameters()\n # 
since this is chunked-up we increase the patience\n self.ssOpt._set_use_external_scheduler()\n\n if self.show_sample_optimizer_output:\n self.ssOpt.turn_iteration_output_on()\n else:\n self.ssOpt.turn_iteration_output_off()\n\n if self.use_step_size_scheduler and self.scheduler is None:\n self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.ssOpt.optimizer_instance, 'min',\n verbose=self.scheduler_verbose,\n factor=self.scheduler_factor,\n patience=self.scheduler_patience)\n\n if load_shared_parameters_before_first_epoch:\n print('Loading the shared parameters/state.')\n self.ssOpt.load_shared_state_dict(torch.load(shared_parameter_filename))\n\n last_batch_size = batch_size\n\n if iter_epoch!=0 or load_individual_parameters_during_first_epoch: # only load the individual parameters after the first epoch\n if 'individual_parameter' in sample:\n current_individual_parameters = sample['individual_parameter']\n if current_individual_parameters is not None:\n if self.verbose_output:\n print('INFO: loading current individual optimizer state')\n self.ssOpt.set_sgd_individual_model_parameters_and_optimizer_states(current_individual_parameters)\n else:\n print('WARNING: could not find previous parameter file')\n else:\n # this is the case when optimization is run for the first time for a batch or if previous results should not be used\n # In this case we want to have a fresh start for the initial conditions\n par_file = os.path.join(self.individual_parameter_output_dir,'default_init.pt')\n if i==0:\n # this is the first time, so we store the individual parameters\n torch.save(self.ssOpt.get_individual_model_parameters(),par_file)\n else:\n # now we load them\n if self.verbose_output:\n print('INFO: forcing the initial individual parameters to default')\n self.ssOpt.set_individual_model_parameters(torch.load(par_file))\n # and we need to kill the optimizer state (to get rid of the previous momentum)\n if self.also_eliminate_shared_state_between_samples_during_first_epoch:\n if self.verbose_output:\n print('INFO: discarding the entire optimizer state')\n self.ssOpt.optimizer_instance.state = defaultdict(dict)\n else:\n if self.verbose_output:\n print('INFO: discarding current *individual* optimizer states only')\n self.ssOpt._remove_state_variables_for_individual_parameters(self.ssOpt.get_sgd_individual_model_parameters_and_optimizer_states())\n\n\n if self.visualize:\n if i == 0:\n # to avoid excessive graphical output\n self.ssOpt.turn_visualization_on()\n else:\n self.ssOpt.turn_visualization_off()\n else:\n self.ssOpt.turn_visualization_off()\n\n self.ssOpt.optimize()\n\n cur_energy,cur_sim_energy,cur_reg_energy = self.ssOpt.get_energy()\n cur_opt_energy = self.ssOpt.get_opt_par_energy()\n\n cur_running_energy += 1./nr_of_samples*cur_energy\n cur_running_sim_energy += 1./nr_of_samples*cur_sim_energy\n cur_running_reg_energy += 1./nr_of_samples*cur_reg_energy\n cur_running_opt_energy += 1./nr_of_samples*cur_opt_energy\n\n if i==0:\n cur_min_energy = cur_energy\n cur_max_energy = cur_energy\n cur_min_sim_energy = cur_sim_energy\n cur_max_sim_energy = cur_sim_energy\n cur_min_reg_energy = cur_reg_energy\n cur_max_reg_energy = cur_reg_energy\n cur_min_opt_energy = cur_opt_energy\n cur_max_opt_energy = cur_opt_energy\n else:\n cur_min_energy = min(cur_energy,cur_min_energy)\n cur_max_energy = max(cur_energy,cur_max_energy)\n cur_min_sim_energy = min(cur_sim_energy,cur_min_sim_energy)\n cur_max_sim_energy = max(cur_sim_energy,cur_max_sim_energy)\n cur_min_reg_energy = 
min(cur_reg_energy,cur_min_reg_energy)\n cur_max_reg_energy = max(cur_reg_energy,cur_max_reg_energy)\n cur_min_opt_energy = min(cur_opt_energy,cur_min_opt_energy)\n cur_max_opt_energy = max(cur_opt_energy,cur_max_opt_energy)\n\n # need to save this index by index so we can shuffle\n self.ssOpt._write_out_individual_parameters(self.ssOpt.get_sgd_individual_model_parameters_and_optimizer_states(),sample['individual_parameter_filename'])\n\n if self.checkpoint_interval>0:\n if (iter_epoch%self.checkpoint_interval==0) or (iter_epoch==self.nr_of_epochs+iter_offset-1):\n if self.verbose_output:\n print('Writing out individual checkpoint data for epoch ' + str(iter_epoch) + ' for sample ' + str(i+1) + '/' + str(nr_of_samples))\n individual_filenames = self._get_individual_checkpoint_filenames(self.individual_checkpoint_output_directory,sample['idx'],iter_epoch)\n self.ssOpt._write_out_individual_parameters(self.ssOpt.get_sgd_individual_model_parameters_and_optimizer_states(),individual_filenames)\n\n if i==nr_of_samples-1:\n if self.verbose_output:\n print('Writing out shared checkpoint data for epoch ' + str(iter_epoch))\n shared_filename = self._get_shared_checkpoint_filename(self.shared_checkpoint_output_directory,iter_epoch)\n self.ssOpt._write_out_shared_parameters(self.ssOpt.get_sgd_shared_model_parameters(),shared_filename)\n\n if self.show_sample_optimizer_output:\n if (last_energy is not None) and (last_sim_energy is not None) and (last_reg_energy is not None):\n print('\\n\\nEpoch {:05d}: Last energies : E=[{:2.5f}], simE=[{:2.5f}], regE=[{:2.5f}], optE=[{:2.5f}]'\\\n .format(iter_epoch-1,last_energy,last_sim_energy,last_reg_energy,last_opt_energy))\n print(' / image: Last energies : E=[{:2.5f}], simE=[{:2.5f}], regE=[{:2.5f}]' \\\n .format(last_energy/batch_size[0], last_sim_energy/batch_size[0], last_reg_energy/batch_size[0]))\n else:\n print('\\n\\n')\n\n last_energy = cur_running_energy\n last_sim_energy = cur_running_sim_energy\n last_reg_energy = cur_running_reg_energy\n last_opt_energy = cur_running_opt_energy\n\n if self.show_sample_optimizer_output:\n print('Epoch {:05d}: Current energies: E=[{:2.5f}], simE=[{:2.5f}], regE=[{:2.5f}], optE=[{:2.5f}]'\\\n .format(iter_epoch,last_energy, last_sim_energy,last_reg_energy,last_opt_energy))\n print(' / image: Current energies: E=[{:2.5f}], simE=[{:2.5f}], regE=[{:2.5f}]' \\\n .format(last_energy/batch_size[0], last_sim_energy/batch_size[0], last_reg_energy/batch_size[0]))\n else:\n print('Epoch {:05d}: Current energies: E={:2.5f}:[{:1.2f},{:1.2f}], simE={:2.5f}:[{:1.2f},{:1.2f}], regE={:2.5f}:[{:1.2f},{:1.2f}], optE={:1.2f}:[{:1.2f},{:1.2f}]'\\\n .format(iter_epoch, last_energy, cur_min_energy, cur_max_energy,\n last_sim_energy, cur_min_sim_energy, cur_max_sim_energy,\n last_reg_energy, cur_min_reg_energy, cur_max_reg_energy,\n last_opt_energy, cur_min_opt_energy, cur_max_opt_energy))\n print(' / image: Current energies: E={:2.5f}:[{:1.2f},{:1.2f}], simE={:2.5f}:[{:1.2f},{:1.2f}], regE={:2.5f}:[{:1.2f},{:1.2f}]' \\\n .format(last_energy/batch_size[0], cur_min_energy/batch_size[0], cur_max_energy/batch_size[0],\n last_sim_energy/batch_size[0], cur_min_sim_energy/batch_size[0], cur_max_sim_energy/batch_size[0],\n last_reg_energy/batch_size[0], cur_min_reg_energy/batch_size[0], cur_max_reg_energy/batch_size[0]))\n\n if self.show_sample_optimizer_output:\n print('\\n\\n')\n\n if self.use_step_size_scheduler:\n self.scheduler.step(last_energy)\n\n print('Writing out shared parameter/state file to ' + shared_parameter_filename )\n 
torch.save(self.ssOpt.shared_state_dict(),shared_parameter_filename)\n\n\nclass SingleScaleConsensusRegistrationOptimizer(ImageRegistrationOptimizer):\n\n def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None):\n\n super(SingleScaleConsensusRegistrationOptimizer, self).__init__(sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n\n self.params[('optimizer', {}, 'optimizer settings')]\n cparams = self.params['optimizer']\n cparams[('consensus_settings', {}, 'settings for the consensus optimizer')]\n cparams = cparams['consensus_settings']\n\n self.sigma = cparams[('sigma', 1.0, 'sigma/2 is multiplier for squared augmented Lagrangian penalty')]\n \"\"\"Multiplier for squared augmented Lagrangian penalty\"\"\"\n\n self.nr_of_epochs = cparams[('nr_of_epochs', 1, 'how many iterations for consensus; i.e., how often to iterate over the entire dataset')]\n \"\"\"how many iterations for consensus; i.e., how often to iterate over the entire dataset\"\"\"\n self.batch_size = cparams[('batch_size',1,'how many images per batch (if set larger or equal to the number of images, it will be processed as one batch')]\n \"\"\"how many images per batch\"\"\"\n self.save_intermediate_checkpoints = cparams[('save_intermediate_checkpoints',False,'when set to True checkpoints are retained for each batch iterations')]\n \"\"\"when set to True checkpoints are retained for each batch iterations\"\"\"\n\n self.checkpoint_output_directory = cparams[('checkpoint_output_directory','checkpoints','directory where the checkpoints will be stored')]\n \"\"\"output directory where the checkpoints will be saved\"\"\"\n\n self.save_consensus_state_checkpoints = cparams[('save_consensus_state_checkpoints',True,'saves the current consensus state; typically only the individual states are saved as checkpoints')]\n \"\"\"saves the current consensus state; typically only the individual states are saved as checkpoints\"\"\"\n\n self.continue_from_last_checkpoint = cparams[('continue_from_last_checkpoint',False,'If true then iterations are resumed from last checkpoint. Allows restarting an optimization')]\n \"\"\"allows restarting an optimization by continuing from the last checkpoint\"\"\"\n\n self.load_optimizer_state_from_checkpoint = cparams[('load_optimizer_state_from_checkpoint',True,'If set to False only the state of the model is loaded when resuming from a checkpoint')]\n \"\"\"If set to False only the state of the model is loaded when resuming from a checkpoint\"\"\"\n\n self.nr_of_batches = None\n self.nr_of_images = None\n\n self.current_consensus_state = None\n self.current_consensus_dual = None\n self.next_consensus_state = None\n self.last_shared_state = None\n\n self.model_name = None\n self.add_model_name = None\n self.add_model_networkClass = None\n self.add_model_lossClass = None\n self.addSimName = None\n self.addSimMeasure = None\n\n self.iter_offset = None\n\n self.ssOpt = None\n\n def write_parameters_to_settings(self):\n if self.ssOpt is not None:\n self.ssOpt.write_parameters_to_settings()\n\n def _consensus_penalty_loss(self,shared_model_parameters):\n \"\"\"\n This allows to define additional terms for the loss which are based on parameters that are shared\n between models (for example for the smoother). 
Can be used to define a form of consensus optimization.\n :param shared_model_parameters: parameters that have been declared shared in a model\n :return: 0 by default, otherwise the corresponding penalty\n \"\"\"\n additional_loss = MyTensor(1).zero_()\n total_number_of_parameters = 1\n for k in shared_model_parameters:\n total_number_of_parameters += shared_model_parameters[k].numel()\n additional_loss += ((shared_model_parameters[k]\\\n -self.current_consensus_state[k]\\\n -self.current_consensus_dual[k])**2).sum()\n\n\n additional_loss *= self.sigma/(2.0*total_number_of_parameters)\n\n #print('sigma=' + str(self.sigma) + '; additional loss = ' + str( additional_loss.data.cpu().numpy()))\n\n return additional_loss\n\n def _set_state_to_zero(self,state):\n # set all the individual parameters to zero\n for k in state:\n state[k].zero_()\n\n def _add_scaled_difference_to_state(self,state,model_shared_state,current_dual,scaling_factor):\n for k in state:\n state[k] += scaling_factor*(model_shared_state[k]-current_dual[k])\n\n def _create_single_scale_optimizer(self,batch_size,consensus_penalty):\n\n ssOpt = SingleScaleRegistrationOptimizer(batch_size, self.spacing, self.useMap, self.mapLowResFactor, self.params, compute_inverse_map=self.compute_inverse_map, default_learning_rate=self.default_learning_rate)\n\n if ((self.add_model_name is not None) and\n (self.add_model_networkClass is not None) and\n (self.add_model_lossClass is not None)):\n ssOpt.add_model(self.add_model_name, self.add_model_networkClass, self.add_model_lossClass)\n\n # now set the actual model we want to solve\n ssOpt.set_model(self.model_name)\n\n if (self.addSimName is not None) and (self.addSimMeasure is not None):\n ssOpt.add_similarity_measure(self.addSimName, self.addSimMeasure)\n\n # setting the optimizer\n #if self.optimizer is not None:\n # ssOpt.set_optimizer(self.optimizer)\n # ssOpt.set_optimizer_params(self.optimizer_params)\n #elif self.optimizer_name is not None:\n if self.optimizer_name is not None:\n ssOpt.set_optimizer_by_name(self.optimizer_name)\n else:\n raise ValueError('Optimizers need to be specified by name of consensus optimization at the moment.')\n\n ssOpt.set_rel_ftol(self.get_rel_ftol())\n\n ssOpt.set_visualization(self.get_visualization())\n ssOpt.set_visualize_step(self.get_visualize_step())\n\n if consensus_penalty:\n ssOpt.set_external_optimizer_parameter_loss(self._consensus_penalty_loss)\n\n return ssOpt\n\n def _initialize_consensus_variables_if_needed(self,ssOpt):\n if self.current_consensus_state is None:\n self.current_consensus_state = copy.deepcopy(ssOpt.get_shared_model_parameters())\n self._set_state_to_zero(self.current_consensus_state)\n\n if self.current_consensus_dual is None:\n self.current_consensus_dual = copy.deepcopy(self.current_consensus_state)\n self._set_state_to_zero(self.current_consensus_dual)\n\n if self.last_shared_state is None:\n self.last_shared_state = copy.deepcopy(self.current_consensus_state)\n self._set_state_to_zero(self.last_shared_state)\n\n if self.next_consensus_state is None:\n self.next_consensus_state = copy.deepcopy(self.current_consensus_dual) # also make it zero\n self._set_state_to_zero(self.next_consensus_state)\n\n def add_similarity_measure(self, simName, simMeasure):\n \"\"\"\n Adds a custom similarity measure\n\n :param simName: name of the similarity measure (string)\n :param simMeasure: the similarity measure itself (an object that can be instantiated)\n \"\"\"\n self.addSimName = simName\n self.addSimMeasure = simMeasure\n\n\n def 
set_model(self, modelName):\n \"\"\"\n Sets the model that should be solved\n\n :param modelName: name of the model that should be solved (string)\n \"\"\"\n\n self.model_name = modelName\n\n def add_model(self, add_model_name, add_model_networkClass, add_model_lossClass):\n \"\"\"\n Adds a custom model to be optimized over\n\n :param add_model_name: name of the model (string)\n :param add_model_networkClass: network model itself (as an object that can be instantiated)\n :param add_model_lossClass: loss of the model (as an object that can be instantiated)\n \"\"\"\n self.add_model_name = add_model_name\n self.add_model_networkClass = add_model_networkClass\n self.add_model_lossClass = add_model_lossClass\n\n def get_checkpoint_dict(self):\n d = super(SingleScaleConsensusRegistrationOptimizer, self).get_checkpoint_dict()\n d['consensus_dual'] = self.current_consensus_dual\n return d\n\n def load_checkpoint_dict(self, d, load_optimizer_state=False):\n super(SingleScaleConsensusRegistrationOptimizer, self).load_checkpoint_dict(d)\n if 'consensus_dual' in d:\n self.current_consensus_dual = d['consensus_dual']\n else:\n raise ValueError('checkpoint does not contain: consensus_dual')\n\n def _custom_load_checkpoint(self,ssOpt,filename):\n d = torch.load(filename)\n ssOpt.load_checkpoint_dict(d)\n self.load_checkpoint_dict(d)\n\n def _custom_single_batch_load_checkpoint(self,ssOpt,filename):\n d = torch.load(filename)\n if self.load_optimizer_state_from_checkpoint:\n ssOpt.load_checkpoint_dict(d,load_optimizer_state=True)\n\n def _custom_save_checkpoint(self,ssOpt,filename):\n sd = ssOpt.get_checkpoint_dict()\n\n # todo: maybe make this optional to save storage\n sd['res'] = dict()\n sd['res']['Iw'] = ssOpt.get_warped_image()\n sd['res']['phi'] = ssOpt.get_map()\n\n cd = self.get_checkpoint_dict()\n # now merge these two dictionaries\n sd.update(cd)\n # and now save it\n torch.save(sd,filename)\n\n def _copy_state(self,state_to,state_from):\n\n for key in state_to:\n if key in state_from:\n state_to[key].copy_(state_from[key])\n else:\n raise ValueError('Could not copy key ' + key)\n\n\n def _set_all_still_missing_parameters(self):\n\n if self.model_name is None:\n model_name = self.params['model']['registration_model'][('type', 'lddmm_shooting_map', \"['svf'|'svf_quasi_momentum'|'svf_scalar_momentum'|'svf_vector_momentum'|'lddmm_shooting'|'lddmm_shooting_scalar_momentum'] all with suffix '_map' or '_image'\")]\n self.params['model']['deformation'][('use_map', True, 'use a map for the solution or not True/False' )]\n self.set_model( model_name )\n\n if self.optimizer_name is None:\n self.optimizer_name = self.params['optimizer'][('name','lbfgs_ls','Optimizer (lbfgs|adam|sgd)')]\n\n self.optimizer_has_been_initialized = True\n\n def get_warped_image(self):\n \"\"\"\n Returns the warped image\n :return: the warped image\n \"\"\"\n\n p = dict()\n p['warped_images'] = []\n for current_batch in range(self.nr_of_batches):\n current_checkpoint_filename = self._get_checkpoint_filename(current_batch, self.iter_offset+self.nr_of_epochs - 1)\n dc = torch.load(current_checkpoint_filename)\n p['warped_images'].append(dc['res']['Iw'])\n\n return p\n\n\n def get_map(self):\n \"\"\"\n Returns the deformation map\n :return: deformation map\n \"\"\"\n\n p = dict()\n p['phi'] = []\n for current_batch in range(self.nr_of_batches):\n current_checkpoint_filename = self._get_checkpoint_filename(current_batch, self.iter_offset+self.nr_of_epochs - 1)\n dc = torch.load(current_checkpoint_filename)\n 
p['phi'].append(dc['res']['phi'])\n\n return p\n\n def get_model_parameters(self):\n \"\"\"\n Returns the parameters of the model\n\n :return: model parameters\n \"\"\"\n p = dict()\n p['consensus_state'] = self.current_consensus_state\n p['registration_pars'] = []\n for current_batch in range(self.nr_of_batches):\n current_checkpoint_filename = self._get_checkpoint_filename(current_batch,self.iter_offset+self.nr_of_epochs-1)\n dc = torch.load(current_checkpoint_filename)\n d = dict()\n d['model'] = dc['model']\n d['consensus_dual'] = dc['consensus_dual']\n p['registration_pars'].append(d)\n\n return p\n\n def set_model_parameters(self, p):\n raise ValueError('Setting model parameters not yet supported by consensus optimizer')\n\n def _get_checkpoint_filename(self,batch_nr,batch_iter):\n if self.save_intermediate_checkpoints:\n return os.path.join(self.checkpoint_output_directory,\n \"checkpoint_batch{:05d}_iter{:05d}.pt\".format(batch_nr,batch_iter))\n else:\n return os.path.join(self.checkpoint_output_directory,\n \"checkpoint_batch{:05d}.pt\".format(batch_nr))\n\n def _get_consensus_checkpoint_filename(self,batch_iter):\n return os.path.join(self.checkpoint_output_directory,\n \"consensus_state_iter{:05d}.pt\".format(batch_iter))\n\n def _optimize_as_single_batch(self,resume_from_iter=None):\n \"\"\"\n Does optimization where everything is represented as a single batch. This is essentially like an individual\n optimization, but supports checkpointing.\n\n :param resume_from_iter: resumes computations from this iteration (assumes the corresponding checkpoint exists here)\n :return: n/a\n \"\"\"\n\n if resume_from_iter is not None:\n self.iter_offset = resume_from_iter+1\n print('Resuming from checkpoint iteration: ' + str(resume_from_iter))\n else:\n self.iter_offset = 0\n\n for iter_batch in range(self.iter_offset,self.nr_of_epochs+self.iter_offset):\n print('Computing epoch ' + str(iter_batch + 1) + ' of ' + str(self.iter_offset+self.nr_of_epochs))\n\n all_histories = []\n current_batch = 0 # there is only one batch, this one\n\n current_source_batch = self.ISource[:, ...].data\n current_target_batch = self.ITarget[:, ...].data\n current_batch_image_size = np.array(current_source_batch.size())\n\n # there is not consensus penalty here as this is technically not consensus optimization\n # todo: could ultimately replace the single scale optimizer; here used to write out checkpoints\n self.ssOpt = self._create_single_scale_optimizer(current_batch_image_size, consensus_penalty=False)\n\n # needs to be set before calling _set_all_still_missing_parameters\n self.ssOpt.set_source_image(current_source_batch)\n self.ssOpt.set_target_image(current_target_batch)\n\n # to make sure we have the model initialized, force parameter installation\n self.ssOpt._set_all_still_missing_parameters()\n\n # this loads the optimizer state and the model state, but here not the self.current_consensus_dual\n if iter_batch>0:\n previous_checkpoint_filename = self._get_checkpoint_filename(current_batch, iter_batch - 1)\n self._custom_single_batch_load_checkpoint(self.ssOpt, previous_checkpoint_filename)\n\n self.ssOpt.optimize()\n\n if (current_batch == self.nr_of_batches - 1) and (iter_batch == self.nr_of_epochs - 1):\n # the last time we run this\n all_histories.append(self.ssOpt.get_history())\n\n current_checkpoint_filename = self._get_checkpoint_filename(current_batch, iter_batch)\n self._custom_save_checkpoint(self.ssOpt, current_checkpoint_filename)\n\n self._add_to_history('batch_history', 
copy.deepcopy(all_histories))\n\n\n def _optimize_with_multiple_batches(self, resume_from_iter=None):\n \"\"\"\n Does consensus optimization over multiple batches.\n\n :param resume_from_iter: resumes computations from this iteration (assumes the corresponding checkpoint exists here)\n :return: n/a\n \"\"\"\n\n if resume_from_iter is not None:\n iter_offset = resume_from_iter+1\n print('Resuming from checkpoint iteration: ' + str(resume_from_iter))\n else:\n iter_offset = 0\n\n for iter_batch in range(iter_offset,self.nr_of_epochs+iter_offset):\n print('Computing epoch ' + str(iter_batch+1) + ' of ' + str(iter_offset+self.nr_of_epochs))\n\n next_consensus_initialized = False\n all_histories = []\n\n for current_batch in range(self.nr_of_batches):\n\n from_image = current_batch*self.batch_size\n to_image = min(self.nr_of_images,(current_batch+1)*self.batch_size)\n\n nr_of_images_in_batch = to_image-from_image\n\n current_source_batch = self.ISource[from_image:to_image, ...].data\n current_target_batch = self.ITarget[from_image:to_image, ...].data\n current_batch_image_size = np.array(current_source_batch.size())\n\n print('Computing image pair batch ' + str(current_batch+1) + ' of ' + str(self.nr_of_batches) +\n ' of batch iteration ' + str(iter_batch+1) + ' of ' + str(iter_offset+self.nr_of_epochs))\n print('Image range: [' + str(from_image) + ',' + str(to_image) + ')')\n\n # create new optimizer\n if iter_batch==0:\n # do not apply the penalty the first time around\n self.ssOpt = self._create_single_scale_optimizer(current_batch_image_size,consensus_penalty=False)\n else:\n self.ssOpt = self._create_single_scale_optimizer(current_batch_image_size,consensus_penalty=True)\n\n # to make sure we have the model initialized, force parameter installation\n self.ssOpt._set_all_still_missing_parameters()\n\n if iter_batch==0:\n # in the first round just initialize the shared state with what was computed previously\n if self.last_shared_state is not None:\n self.ssOpt.set_shared_model_parameters(self.last_shared_state)\n\n self._initialize_consensus_variables_if_needed(self.ssOpt)\n\n if not next_consensus_initialized:\n self._set_state_to_zero(self.next_consensus_state)\n next_consensus_initialized = True\n\n if iter_batch==0:\n # for the first time, just set the dual to zero\n self._set_state_to_zero(self.current_consensus_dual)\n # load the last\n else:\n # this loads the optimizer state and the model state and also self.current_consensus_dual\n previous_checkpoint_filename = self._get_checkpoint_filename(current_batch, iter_batch-1)\n self._custom_load_checkpoint(self.ssOpt,previous_checkpoint_filename)\n\n # first update the dual variable (we do this now that we have the consensus state still\n self._add_scaled_difference_to_state(self.current_consensus_dual,\n self.ssOpt.get_shared_model_parameters(),\n self.current_consensus_state,-1.0)\n\n\n self.ssOpt.set_source_image(current_source_batch)\n self.ssOpt.set_target_image(current_target_batch)\n\n self.ssOpt.optimize()\n\n self._copy_state(self.last_shared_state,self.ssOpt.get_shared_model_parameters())\n\n if (current_batch==self.nr_of_batches-1) and (iter_batch==self.nr_of_epochs-1):\n # the last time we run this\n all_histories.append( self.ssOpt.get_history() )\n\n # update the consensus state (is done via next_consensus_state as\n # self.current_consensus_state is used as part of the optimization for all optimizations in the batch\n self._add_scaled_difference_to_state(self.next_consensus_state,\n 
self.ssOpt.get_shared_model_parameters(),\n self.current_consensus_dual,float(nr_of_images_in_batch)/float(self.nr_of_images))\n\n current_checkpoint_filename = self._get_checkpoint_filename(current_batch, iter_batch)\n self._custom_save_checkpoint(self.ssOpt,current_checkpoint_filename)\n\n self._add_to_history('batch_history', copy.deepcopy(all_histories))\n self._copy_state(self.current_consensus_state, self.next_consensus_state)\n\n if self.save_consensus_state_checkpoints:\n consensus_filename = self._get_consensus_checkpoint_filename(iter_batch)\n torch.save({'consensus_state':self.current_consensus_state},consensus_filename)\n\n\n def _get_checkpoint_iter_with_complete_batch(self,start_at_iter):\n\n if start_at_iter<0:\n print('Could NOT find a complete checkpoint batch.')\n return None\n\n is_complete_batch = True\n for current_batch in range(self.nr_of_batches):\n cfilename = self._get_checkpoint_filename(current_batch, start_at_iter)\n if os.path.isfile(cfilename):\n print('Checkpoint file: ' + cfilename + \" exists.\")\n else:\n print('Checkpoint file: ' + cfilename + \" does NOT exist.\")\n is_complete_batch = False\n break\n\n if is_complete_batch:\n print('Found complete batch for batch iteration ' + str(start_at_iter))\n return start_at_iter\n else:\n return self._get_checkpoint_iter_with_complete_batch(start_at_iter-1)\n\n\n def _get_last_checkpoint_iteration_from_checkpoint_files(self):\n \"\"\"\n Looks through the checkpoint files and checks which ones were the last saved ones.\n This allows for picking up the iterations after a completed or terminated optimization.\n Also checks that the same number of batches are used, otherwise an optimization cannot be resumed\n from a checkpoint.\n\n :return: last iteration performed for complete batch\n \"\"\"\n\n print('Attempting to resume optimization from checkpoint data.')\n print('Searching for existing checkpoint data ...')\n\n # first find all the computed iters\n largest_found_iter = None\n\n if self.save_intermediate_checkpoints:\n current_iter_batch = 0\n while os.path.isfile(self._get_checkpoint_filename(0,current_iter_batch)):\n print('Found checkpoint iteration: ' + str(current_iter_batch) + ' : ' + self._get_checkpoint_filename(0,current_iter_batch))\n largest_found_iter = current_iter_batch\n current_iter_batch +=1\n\n else:\n if os.path.isfile(self._get_checkpoint_filename(0,0)):\n print('Found checkpoint: ' + str(self._get_checkpoint_filename(0,0)))\n largest_found_iter = 0\n \n if largest_found_iter is None:\n print('Could not find any checkpoint data from which to resume.')\n return None\n else:\n largest_iter_with_complete_batch = self._get_checkpoint_iter_with_complete_batch(largest_found_iter)\n return largest_iter_with_complete_batch\n\n def optimize(self):\n\n \"\"\"\n This optimizer performs consensus optimization:\n\n 1) (u_i_shared,u_i_individual)^{k+1} = argmin \\sum_i f_i(u_i_shared,u_i_individual) + \\sigma/2\\|u_i_shared-u_consensus^k-z_i^k\\|^2\n 2) (u_consensus)^{k+1} = 1/n\\sum_{i=1}^n ((u_i_shared)^{k+1}-z_i^k)\n 3) z_i^{k+1} = z_i^k-((u_i_shared)^{k+1}-u_consensus_{k+1})\n\n :return: n/a\n \"\"\"\n\n if self.optimizer is not None:\n raise ValueError('Custom optimizers are currently not supported for consensus optimization.\\\n Set the optimizer by name (e.g., in the json configuration) instead.')\n\n self._set_all_still_missing_parameters()\n\n # todo: support reading images from file\n self.nr_of_images = self.ISource.size()[0]\n self.nr_of_batches = 
np.ceil(float(self.nr_of_images)/float(self.batch_size)).astype('int')\n\n if self.continue_from_last_checkpoint:\n last_checkpoint_iteration = self._get_last_checkpoint_iteration_from_checkpoint_files()\n else:\n last_checkpoint_iteration = None\n\n if self.nr_of_batches==1:\n compute_as_single_batch = True\n else:\n compute_as_single_batch = False\n\n if not os.path.exists(self.checkpoint_output_directory):\n os.makedirs(self.checkpoint_output_directory)\n\n if compute_as_single_batch:\n self._optimize_as_single_batch(resume_from_iter=last_checkpoint_iteration)\n else:\n self._optimize_with_multiple_batches(resume_from_iter=last_checkpoint_iteration)\n\n\nclass MultiScaleRegistrationOptimizer(ImageRegistrationOptimizer):\n \"\"\"\n Class to perform multi-scale optimization. Essentially puts a loop around multiple calls of the\n single scale optimizer and starts with the registration of downsampled images. When moving up\n the hierarchy, the registration parameters are upsampled from the solution at the previous lower resolution\n \"\"\"\n\n def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None ):\n super(MultiScaleRegistrationOptimizer, self).__init__(sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n self.scaleFactors = None\n \"\"\"At what image scales optimization should be computed\"\"\"\n self.scaleIterations = None\n \"\"\"number of iterations per scale\"\"\"\n\n self.addSimName = None\n \"\"\"name of the similarity measure to be added\"\"\"\n self.addSimMeasure = None\n \"\"\"similarity measure itself that should be added\"\"\"\n self.add_model_name = None\n \"\"\"name of the model that should be added\"\"\"\n self.add_model_networkClass = None\n \"\"\"network object of the model to be added\"\"\"\n self.add_model_lossClass = None\n \"\"\"loss object of the model to be added\"\"\"\n self.model_name = None\n \"\"\"name of the model to be added (if specified by name; gets dominated by specifying an optimizer directly\"\"\"\n self.ssOpt = None\n \"\"\"Single scale optimizer\"\"\"\n self.params['optimizer'][('multi_scale', {}, 'multi scale settings')]\n\n def write_parameters_to_settings(self):\n if self.ssOpt is not None:\n self.ssOpt.write_parameters_to_settings()\n\n def add_similarity_measure(self, simName, simMeasure):\n \"\"\"\n Adds a custom similarity measure\n\n :param simName: name of the similarity measure (string)\n :param simMeasure: the similarity measure itself (an object that can be instantiated)\n \"\"\"\n self.addSimName = simName\n self.addSimMeasure = simMeasure\n\n def set_model(self, modelName):\n \"\"\"\n Set the model to be optimized over by name\n\n :param modelName: the name of the model (string)\n \"\"\"\n self.model_name = modelName\n\n def set_initial_map(self, map0, map0_inverse=None):\n \"\"\"\n Sets the initial map (overwrites the default identity map)\n :param map0: intial map\n :return: n/a\n \"\"\"\n if self.ssOpt is None:\n self.initialMap = map0\n self.initialInverseMap = map0_inverse\n\n def set_initial_weight_map(self,weight_map,freeze_weight=False):\n if self.ssOpt is None:\n self.weight_map = weight_map\n self.freeze_weight = freeze_weight\n\n def set_pair_name(self,pair_name):\n # f = lambda name: os.path.split(name)\n # get_in = lambda x: os.path.splitext(f(x)[1])[0]\n # get_fn = lambda x: f(f(x)[0])[1]\n # get_img_name = lambda x: get_fn(x)+'_'+get_in(x)\n # img_pair_name = 
[get_img_name(pair_name[0])+'_'+get_img_name(pair_name[1]) for pair_name in pair_names]\n        self.pair_name = pair_name\n\n    def set_save_fig_path(self, save_fig_path):\n        \"\"\"\n        the path for saved figures; default is ../data/expr_name\n        :param save_fig_path:\n        :return:\n        \"\"\"\n        self.save_fig_path = os.path.join(save_fig_path, self.expr_name)\n\n\n\n    def add_model(self, add_model_name, add_model_networkClass, add_model_lossClass, use_map):\n        \"\"\"\n        Adds a custom model to be optimized over\n\n        :param add_model_name: name of the model (string)\n        :param add_model_networkClass: network model itself (as an object that can be instantiated)\n        :param add_model_lossClass: loss of the model (as an object that can be instantiated)\n        :param use_map: if set to True, the model uses a map, otherwise it works directly with the image\n        \"\"\"\n        self.add_model_name = add_model_name\n        self.add_model_networkClass = add_model_networkClass\n        self.add_model_lossClass = add_model_lossClass\n        self.add_model_use_map = use_map\n\n    def set_scale_factors(self, scaleFactors):\n        \"\"\"\n        Set the scale factors for the solution. Should be in descending order, e.g., [1.0, 0.5, 0.25]\n\n        :param scaleFactors: scale factors for the multi-scale solution hierarchy\n        \"\"\"\n\n        self.params['optimizer']['multi_scale']['scale_factors'] = (scaleFactors, 'how images are scaled')\n        self.scaleFactors = scaleFactors\n\n    def set_number_of_iterations_per_scale(self, scaleIterations):\n        \"\"\"\n        Sets the number of iterations that will be performed per scale of the multi-resolution hierarchy. E.g., [50,100,200]\n\n        :param scaleIterations: number of iterations per scale (array)\n        \"\"\"\n\n        self.params['optimizer']['multi_scale']['scale_iterations'] = (scaleIterations, 'number of iterations per scale')\n        self.scaleIterations = scaleIterations\n\n    def _get_desired_size_from_scale(self, origSz, scale):\n\n        osz = np.array(list(origSz))\n        dsz = osz\n        dsz[2::] = (np.round( scale*osz[2::] )).astype('int')\n\n        return dsz\n\n    def get_energy(self):\n        \"\"\"\n        Returns the current energy\n        :return: Returns a tuple (energy, similarity energy, regularization energy)\n        \"\"\"\n        if self.ssOpt is not None:\n            return self.ssOpt.get_energy()\n        else:\n            return None\n\n    def get_warped_image(self):\n        \"\"\"\n        Returns the warped image\n        :return: the warped image\n        \"\"\"\n        if self.ssOpt is not None:\n            return self.ssOpt.get_warped_image()\n        else:\n            return None\n\n\n    def get_warped_label(self):\n        \"\"\"\n        Returns the warped label\n        :return: the warped label\n        \"\"\"\n        if self.ssOpt is not None:\n            return self.ssOpt.get_warped_label()\n        else:\n            return None\n\n    def get_map(self):\n        \"\"\"\n        Returns the deformation map\n        :return: deformation map\n        \"\"\"\n        if self.ssOpt is not None:\n            return self.ssOpt.get_map()\n        else:\n            return None\n\n    def get_inverse_map(self):\n        \"\"\"\n        Returns the inverse deformation map\n        :return: deformation map\n        \"\"\"\n        if self.ssOpt is not None:\n            return self.ssOpt.get_inverse_map()\n        else:\n            return None\n\n    def get_model_parameters(self):\n        \"\"\"\n        Returns the parameters of the model\n\n        :return: model parameters\n        \"\"\"\n        if self.ssOpt is not None:\n            return self.ssOpt.get_model_parameters()\n        else:\n            return None\n\n    def set_model_parameters(self,p):\n        raise ValueError('Setting model parameters not yet supported for multi-scale optimizer')\n\n    def _set_all_still_missing_parameters(self):\n\n        self.scaleFactors = self.params['optimizer']['multi_scale'][('scale_factors', [1.0, 0.5, 0.25], 'how images are scaled')]\n        self.scaleIterations =
self.params['optimizer']['multi_scale'][('scale_iterations', [10, 20, 20], 'number of iterations per scale')]\n\n if (self.optimizer is None) and (self.optimizer_name is None):\n self.optimizer_name = self.params['optimizer'][('name','lbfgs_ls','Optimizer (lbfgs|adam|sgd)')]\n\n if self.model_name is None:\n model_name = self.params['model']['registration_model'][('type', 'lddmm_shooting_map', \"['svf'|'svf_quasi_momentum'|'svf_scalar_momentum'|'svf_vector_momentum'|'lddmm_shooting'|'lddmm_shooting_scalar_momentum'] all with suffix '_map' or '_image'\")]\n self.params['model']['deformation'][('use_map', True, 'use a map for the solution or not True/False' )]\n self.set_model( model_name )\n\n self.optimizer_has_been_initialized = True\n\n def optimize(self):\n \"\"\"\n Perform the actual multi-scale optimization\n \"\"\"\n self._set_all_still_missing_parameters()\n\n if (self.ISource is None) or (self.ITarget is None):\n raise ValueError('Source and target images need to be set first')\n\n upsampledParameters = None\n upsampledParameterSpacing = None\n upsampledSz = None\n lastSuccessfulStepSizeTaken = None\n\n nrOfScales = len(self.scaleFactors)\n\n # check that we have the right number of iteration parameters\n assert (nrOfScales == len(self.scaleIterations))\n\n print('Performing multiscale optmization with scales: ' + str(self.scaleFactors))\n\n # go from lowest to highest scale\n reverseScales = self.scaleFactors[-1::-1]\n reverseIterations = self.scaleIterations[-1::-1]\n over_scale_iter_count = 0\n\n for en_scale in enumerate(reverseScales):\n print('Optimizing for scale = ' + str(en_scale[1]))\n\n # create the images\n currentScaleFactor = en_scale[1]\n currentScaleNumber = en_scale[0]\n\n currentDesiredSz = self._get_desired_size_from_scale(self.ISource.size(), currentScaleFactor)\n\n currentNrOfIteratons = reverseIterations[currentScaleNumber]\n\n ISourceC, spacingC = self.sampler.downsample_image_to_size(self.ISource, self.spacing, currentDesiredSz[2::],self.spline_order)\n ITargetC, spacingC = self.sampler.downsample_image_to_size(self.ITarget, self.spacing, currentDesiredSz[2::],self.spline_order)\n LSourceC = None\n LTargetC = None\n if self.LSource is not None and self.LTarget is not None:\n LSourceC, spacingC = self.sampler.downsample_image_to_size(self.LSource, self.spacing, currentDesiredSz[2::],0)\n LTargetC, spacingC = self.sampler.downsample_image_to_size(self.LTarget, self.spacing, currentDesiredSz[2::],0)\n initialMap = None\n initialInverseMap = None\n weight_map=None\n if self.initialMap is not None:\n initialMap,_ = self.sampler.downsample_image_to_size(self.initialMap,self.spacing, currentDesiredSz[2::],1,zero_boundary=False)\n if self.initialInverseMap is not None:\n initialInverseMap,_ = self.sampler.downsample_image_to_size(self.initialInverseMap,self.spacing, currentDesiredSz[2::],1,zero_boundary=False)\n if self.weight_map is not None:\n weight_map,_ =self.sampler.downsample_image_to_size(self.weight_map,self.spacing, currentDesiredSz[2::],1,zero_boundary=False)\n szC = np.array(ISourceC.size()) # this assumes the BxCxXxYxZ format\n mapLowResFactor = None if currentScaleNumber==0 else self.mapLowResFactor\n self.ssOpt = SingleScaleRegistrationOptimizer(szC, spacingC, self.useMap, mapLowResFactor, self.params, compute_inverse_map=self.compute_inverse_map,default_learning_rate=self.default_learning_rate)\n print('Setting learning rate to ' + str( lastSuccessfulStepSizeTaken ))\n self.ssOpt.set_last_successful_step_size_taken( lastSuccessfulStepSizeTaken )\n 
self.ssOpt.set_initial_map(initialMap,initialInverseMap)\n\n if ((self.add_model_name is not None) and\n (self.add_model_networkClass is not None) and\n (self.add_model_lossClass is not None)):\n self.ssOpt.add_model(self.add_model_name, self.add_model_networkClass, self.add_model_lossClass, use_map=self.add_model_use_map)\n\n # now set the actual model we want to solve\n self.ssOpt.set_model(self.model_name)\n if weight_map is not None:\n self.ssOpt.set_initial_weight_map(weight_map,self.freeze_weight)\n\n\n if (self.addSimName is not None) and (self.addSimMeasure is not None):\n self.ssOpt.add_similarity_measure(self.addSimName, self.addSimMeasure)\n\n # setting the optimizer\n if self.optimizer is not None:\n self.ssOpt.set_optimizer(self.optimizer)\n self.ssOpt.set_optimizer_params(self.optimizer_params)\n elif self.optimizer_name is not None:\n self.ssOpt.set_optimizer_by_name(self.optimizer_name)\n\n self.ssOpt.set_rel_ftol(self.get_rel_ftol())\n\n self.ssOpt.set_visualization(self.get_visualization())\n self.ssOpt.set_visualize_step(self.get_visualize_step())\n self.ssOpt.set_n_scale(en_scale[1])\n self.ssOpt.set_over_scale_iter_count(over_scale_iter_count)\n\n if self.get_save_fig():\n self.ssOpt.set_expr_name(self.get_expr_name())\n self.ssOpt.set_save_fig(self.get_save_fig())\n self.ssOpt.set_save_fig_path(self.get_save_fig_path())\n self.ssOpt.set_save_fig_num(self.get_save_fig_num())\n self.ssOpt.set_pair_name(self.get_pair_name())\n self.ssOpt.set_n_scale(en_scale[1])\n self.ssOpt.set_source_label(self.get_source_label())\n self.ssOpt.set_target_label(self.get_target_label())\n\n\n self.ssOpt.set_source_image(ISourceC)\n self.ssOpt.set_target_image(ITargetC)\n self.ssOpt.set_multi_scale_info(self.ISource,self.ITarget,self.spacing,self.LSource,self.LTarget)\n if self.LSource is not None and self.LTarget is not None:\n self.ssOpt.set_source_label(LSourceC)\n self.ssOpt.set_target_label(LTargetC)\n\n if upsampledParameters is not None:\n # check that the upsampled parameters are consistent with the downsampled images\n spacingError = False\n expectedSpacing = None\n\n if mapLowResFactor is not None:\n expectedSpacing = utils._get_low_res_spacing_from_spacing(spacingC, szC, upsampledSz)\n # the spacing of the upsampled parameters will be different\n if not (abs(expectedSpacing - upsampledParameterSpacing) < 0.000001).all():\n spacingError = True\n elif not (abs(spacingC - upsampledParameterSpacing) < 0.000001).all():\n expectedSpacing = spacingC\n spacingError = True\n\n if spacingError:\n print(expectedSpacing)\n print(upsampledParameterSpacing)\n raise ValueError('Upsampled parameters and downsampled images are of inconsistent dimension')\n\n # now that everything is fine, we can use the upsampled parameters\n print('Explicitly setting the optimization parameters')\n self.ssOpt.set_model_parameters(upsampledParameters)\n\n # do the actual optimization\n print('Optimizing for at most ' + str(currentNrOfIteratons) + ' iterations')\n self.ssOpt._set_number_of_iterations_from_multi_scale(currentNrOfIteratons)\n self.ssOpt.optimize()\n\n self._add_to_history('scale_nr',currentScaleNumber)\n self._add_to_history('scale_factor',currentScaleFactor)\n self._add_to_history('ss_history',self.ssOpt.get_history())\n\n lastSuccessfulStepSizeTaken = self.ssOpt.get_last_successful_step_size_taken()\n over_scale_iter_count += currentNrOfIteratons\n\n # if we are not at the very last scale, then upsample the parameters\n if currentScaleNumber != nrOfScales - 1:\n # we need to revert the 
downsampling to the next higher level\n scaleTo = reverseScales[currentScaleNumber + 1]\n upsampledSz = self._get_desired_size_from_scale(self.ISource.size(), scaleTo)\n print('Before')\n print(upsampledSz)\n if self.useMap:\n if self.mapLowResFactor is not None:\n # parameters are upsampled differently here, because they are computed at low res\n upsampledSz = utils._get_low_res_size_from_size(upsampledSz,self.mapLowResFactor)\n print(self.mapLowResFactor)\n print('After')\n print(upsampledSz)\n upsampledParameters, upsampledParameterSpacing = self.ssOpt.upsample_model_parameters(upsampledSz[2::])\n"
] | [
[
"torch.sum",
"numpy.sum",
"torch.empty_like",
"numpy.histogram",
"torch.zeros_like",
"numpy.min"
],
[
"torch.utils.data.DataLoader",
"torch.optim.SGD",
"torch.load",
"torch.save",
"torch.from_numpy",
"torch.is_tensor",
"numpy.array",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"numpy.round"
]
] |
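The multiscale optimizer in the entry above follows a standard coarse-to-fine schedule: iterate scales from lowest to highest, downsample source and target images at each scale, optimize there, and upsample the resulting model parameters onto the next (finer) grid, checking that spacings stay consistent. A minimal sketch of that control flow, assuming a user-supplied `optimize_single_scale` callable in place of `SingleScaleRegistrationOptimizer` and `scipy.ndimage.zoom` in place of the sampler used in the source:

```python
import numpy as np
from scipy.ndimage import zoom

def multiscale_optimize(I_source, I_target, optimize_single_scale,
                        scale_factors=(0.25, 0.5, 1.0), iterations=(20, 20, 10)):
    """Coarse-to-fine sketch: optimize at each scale, then upsample parameters."""
    params = None
    for s, n_iter in zip(scale_factors, iterations):   # lowest resolution first
        src = zoom(I_source, s, order=1)               # downsample inputs to this scale
        tgt = zoom(I_target, s, order=1)
        if params is not None:
            # upsample the previous scale's parameters onto the current grid
            factors = np.array(src.shape) / np.array(params.shape)
            params = zoom(params, factors, order=1)
        params = optimize_single_scale(src, tgt, params, n_iter)
    return params
```

As in the quoted code, only the parameters survive across scales; the images are always re-derived from the full-resolution inputs rather than repeatedly resampled.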
bekirduran/AI_Puzzle_Solver | [
"7e8c007802d1e4596dd09edd97bafeb7a4ff7f61"
] | [
"com/puzzlesolver/cross_over.py"
] | [
"import numpy as np\n\n\n# This class generating new list item given first of list item row and second of list item row\nclass Crossover:\n\n @staticmethod\n def crossover(best):\n row_begin_index = 0\n row_half = 2\n\n cross_list = []\n for i in range(len(best) - 1):\n first_part1 = best[i][row_begin_index:row_half, :]\n first_part2 = best[i + 1][row_half:, :]\n\n cross_list.append(np.concatenate((first_part1, first_part2)))\n\n second_part1 = best[i][row_half:, :]\n second_part2 = best[i + 1][row_begin_index:row_half, :]\n\n cross_list.append(np.concatenate((second_part2, second_part1)))\n return cross_list\n"
] | [
[
"numpy.concatenate"
]
] |
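The crossover above splits each parent at `row_half = 2` and concatenates complementary row blocks of consecutive parents, so each parent pair yields two children. A standalone worked example of the same slicing on two 4x4 parents (no import of the module needed):

```python
import numpy as np

# Two 4x4 parent boards with distinct values so the swap is visible.
a = np.arange(16).reshape(4, 4)
b = np.arange(16, 32).reshape(4, 4)

child1 = np.concatenate((a[0:2, :], b[2:, :]))  # a's top rows + b's bottom rows
child2 = np.concatenate((b[0:2, :], a[2:, :]))  # b's top rows + a's bottom rows

assert child1.shape == (4, 4) and child2.shape == (4, 4)
print(child1)
print(child2)
```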
huonw/tensorflow | [
"85f47254af7cc230a4a031998dffe770b7edbb9d"
] | [
"tensorflow/python/compiler/tensorrt/test/trt_mode_test.py"
] | [
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Model script to test TF-TensorRT integration.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom unittest import SkipTest # pylint: disable=g-importing-member\n\nfrom tensorflow.compiler.tf2tensorrt.wrap_py_utils import get_linked_tensorrt_version\nfrom tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nclass TrtModeTestBase(trt_test.TfTrtIntegrationTestBase):\n \"\"\"Test squeeze on batch dim and some unary operations in TF-TRT.\"\"\"\n\n def GraphFn(self, x1):\n q = math_ops.abs(x1)\n q = q + 1.0\n q = q * 3.0\n q = array_ops.squeeze(q, 0)\n q = math_ops.abs(q)\n q = q + 5.0\n return array_ops.identity(q, name=\"output_0\")\n\n def GetParams(self):\n \"\"\"The input has 1 as a first dimension, which is removed by the squeeze.\n\n op in the graph.\n\n In explicit batch mode, TensorRT can convert the whole graph. In this mode\n it is possible to manipulate the batch dimension using the squeeze op.\n\n In implicit batch mode TensorRT cannot convert the whole graph. 
We are not\n allowed to manipulate (squeeze) the first dimension in implicit batch mode.\n Therefore the graph will be converted using multiple segments.\n \"\"\"\n return self.BuildParams(self.GraphFn, dtypes.float32, [[1, 12, 5]],\n [[12, 5]])\n\n def GetConversionParams(self, run_params, implicit_batch=False):\n \"\"\"Return a TrtConversionParams for test.\"\"\"\n\n conversion_params = super(TrtModeTestBase,\n self).GetConversionParams(run_params)\n rewriter_config = self.GetTrtRewriterConfig(\n run_params=run_params,\n conversion_params=conversion_params,\n use_implicit_batch=implicit_batch)\n return conversion_params._replace(rewriter_config_template=rewriter_config)\n\n @classmethod\n def setUpClass(cls):\n if cls is TrtModeTestBase:\n raise SkipTest(\"TrtModeTestBase defines base class for other test.\")\n super(TrtModeTestBase, cls).setUpClass()\n\n\nclass ImplicitBatchTest(TrtModeTestBase):\n\n def GetConversionParams(self, run_params):\n \"\"\"Return a TrtConversionParams for test using implicit batch mdoe.\"\"\"\n return super(ImplicitBatchTest, self).GetConversionParams(run_params, True)\n\n def ExpectedEnginesToBuild(self, run_params):\n \"\"\"Check that the expected engine is built.\n\n Args:\n run_params: the run parameters.\n\n Returns:\n the expected engines to build.\n\n The squeeze op is not converted by TensorRT in implicit batch mode.\n Because of this we have two TRTEngineOp in the graphs: one for the\n subgraph before 'squeeze(q,0)', and another one for the rest of the ops\n after the 'squeeze(q,0)'.\n \"\"\"\n return [\"TRTEngineOp_0\", \"TRTEngineOp_1\"]\n\n\nclass ExplicitBatchTest(TrtModeTestBase):\n\n def GetParams(self):\n \"\"\"We specify input/output masks with static (known) shapes.\"\"\"\n return self.BuildParamsWithMask(\n self.GraphFn,\n dtypes.float32, [[1, 12, 5]], [[12, 5]],\n input_mask=[[True, True, True]],\n output_mask=[[True, True]])\n\n def GetConversionParams(self, run_params):\n \"\"\"Return a TrtConversionParams for test that enables explicit batch.\"\"\"\n return super(ExplicitBatchTest, self).GetConversionParams(run_params, False)\n\n def ExpectedEnginesToBuild(self, run_params):\n \"\"\"Check that the expected engine is built.\n\n Args:\n run_params: the run parameters.\n\n Returns:\n the expected engines to build.\n\n In explicit batch mode the whole graph is converted using a single engine.\n \"\"\"\n return [\"TRTEngineOp_0\"]\n\n def ShouldRunTest(self, run_params):\n # Only run for TRT 6 and above.\n ver = get_linked_tensorrt_version()\n return ver[0] >= 6 and (not run_params.use_calibration)\n\n\nclass DynamicShapesTest(TrtModeTestBase):\n \"\"\"Test with dynamic input shapes.\n\n DynamicShapesTest is different from ExplicitBatchTest in that it uses input\n and output masks to change the input and output shapes to unknown shapes.\n \"\"\"\n\n def GetParams(self):\n \"\"\"We specify input/output mask with dynamic (unknown) shapes.\"\"\"\n return self.BuildParamsWithMask(\n self.GraphFn,\n dtypes.float32, [[1, 12, 5]], [[12, 5]],\n input_mask=[[False, False, False]],\n output_mask=[[False, False]])\n\n def GetConversionParams(self, run_params):\n \"\"\"Return a TrtConversionParams for test that enables explicit batch.\"\"\"\n return super(DynamicShapesTest, self).GetConversionParams(run_params, False)\n\n def ExpectedEnginesToBuild(self, run_params):\n \"\"\"Return the expected engines to build.\"\"\"\n return [\"TRTEngineOp_0\"]\n\n def ShouldRunTest(self, run_params):\n # Only run for TRT 6 and above.\n ver = 
get_linked_tensorrt_version()\n return ver[0] >= 6 and (not run_params.use_calibration)\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.compiler.tf2tensorrt.wrap_py_utils.get_linked_tensorrt_version",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.platform.test.main"
]
] |
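The test's `GraphFn` can be restated with public TF 2.x ops to see why implicit-batch conversion must split the graph: `squeeze(q, 0)` removes the size-1 leading dimension, and implicit batch mode treats that dimension as untouchable, so TensorRT has to leave the squeeze to TensorFlow and convert the ops before and after it as separate engines. A small standalone sketch (plain TensorFlow, no TensorRT involved):

```python
import tensorflow as tf

x1 = tf.ones([1, 12, 5], dtype=tf.float32)
q = tf.abs(x1)
q = q + 1.0
q = q * 3.0
q = tf.squeeze(q, 0)   # [1, 12, 5] -> [12, 5]: manipulates the batch dimension
q = tf.abs(q)
q = q + 5.0
out = tf.identity(q, name="output_0")
print(out.shape)       # (12, 5)
```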
hlahkar/transformers | [
"c19d04623eacfbc2c452397a5eda0fde42db3fc5"
] | [
"src/transformers/training_args.py"
] | [
"# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport dataclasses\nimport json\nimport os\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nfrom typing import Any, Dict, List, Optional, Tuple\n\nfrom .file_utils import cached_property, is_torch_available, is_torch_tpu_available, torch_required\nfrom .trainer_utils import EvaluationStrategy\nfrom .utils import logging\n\n\nif is_torch_available():\n import torch\n\nif is_torch_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\nlogger = logging.get_logger(__name__)\n\n\ndef default_logdir() -> str:\n \"\"\"\n Same default as PyTorch\n \"\"\"\n import socket\n from datetime import datetime\n\n current_time = datetime.now().strftime(\"%b%d_%H-%M-%S\")\n return os.path.join(\"runs\", current_time + \"_\" + socket.gethostname())\n\n\n@dataclass\nclass TrainingArguments:\n \"\"\"\n TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop\n itself**.\n\n Using :class:`~transformers.HfArgumentParser` we can turn this class into argparse arguments to be able to specify\n them on the command line.\n\n\n\n\n Parameters:\n output_dir (:obj:`str`):\n The output directory where the model predictions and checkpoints will be written.\n overwrite_output_dir (:obj:`bool`, `optional`, defaults to :obj:`False`):\n If :obj:`True`, overwrite the content of the output directory. Use this to continue training if\n :obj:`output_dir` points to a checkpoint directory.\n do_train (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to run training or not. This argument is not directly used by :class:`~transformers.Trainer`, it's\n intended to be used by your training/evaluation scripts instead. See the `example scripts\n <https://github.com/huggingface/transformers/tree/master/examples>`__ for more details.\n do_eval (:obj:`bool`, `optional`):\n Whether to run evaluation on the dev set or not. Will be set to :obj:`True` if :obj:`evaluation_strategy`\n is different from :obj:`\"no\"`. This argument is not directly used by :class:`~transformers.Trainer`, it's\n intended to be used by your training/evaluation scripts instead. See the `example scripts\n <https://github.com/huggingface/transformers/tree/master/examples>`__ for more details.\n do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to run predictions on the test set or not. This argument is not directly used by\n :class:`~transformers.Trainer`, it's intended to be used by your training/evaluation scripts instead. See\n the `example scripts <https://github.com/huggingface/transformers/tree/master/examples>`__ for more\n details.\n evaluation_strategy (:obj:`str` or :class:`~transformers.trainer_utils.EvaluationStrategy`, `optional`, defaults to :obj:`\"no\"`):\n The evaluation strategy to adopt during training. 
Possible values are:\n\n * :obj:`\"no\"`: No evaluation is done during training.\n * :obj:`\"steps\"`: Evaluation is done (and logged) every :obj:`eval_steps`.\n * :obj:`\"epoch\"`: Evaluation is done at the end of each epoch.\n\n prediction_loss_only (:obj:`bool`, `optional`, defaults to `False`):\n When performing evaluation and predictions, only returns the loss.\n per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8):\n The batch size per GPU/TPU core/CPU for training.\n per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8):\n The batch size per GPU/TPU core/CPU for evaluation.\n gradient_accumulation_steps (:obj:`int`, `optional`, defaults to 1):\n Number of updates steps to accumulate the gradients for, before performing a backward/update pass.\n\n .. warning::\n\n When using gradient accumulation, one step is counted as one step with backward pass. Therefore,\n logging, evaluation, save will be conducted every ``gradient_accumulation_steps * xxx_step`` training\n examples.\n eval_accumulation_steps (:obj:`int`, `optional`):\n Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. If\n left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but\n requires more memory).\n learning_rate (:obj:`float`, `optional`, defaults to 5e-5):\n The initial learning rate for Adam.\n weight_decay (:obj:`float`, `optional`, defaults to 0):\n The weight decay to apply (if not zero).\n adam_beta1 (:obj:`float`, `optional`, defaults to 0.9):\n The beta1 for the Adam optimizer.\n adam_beta2 (:obj:`float`, `optional`, defaults to 0.999):\n The beta2 for the Adam optimizer.\n adam_epsilon (:obj:`float`, `optional`, defaults to 1e-8):\n Epsilon for the Adam optimizer.\n max_grad_norm (:obj:`float`, `optional`, defaults to 1.0):\n Maximum gradient norm (for gradient clipping).\n num_train_epochs(:obj:`float`, `optional`, defaults to 3.0):\n Total number of training epochs to perform (if not an integer, will perform the decimal part percents of\n the last epoch before stopping training).\n max_steps (:obj:`int`, `optional`, defaults to -1):\n If set to a positive number, the total number of training steps to perform. Overrides\n :obj:`num_train_epochs`.\n warmup_steps (:obj:`int`, `optional`, defaults to 0):\n Number of steps used for a linear warmup from 0 to :obj:`learning_rate`.\n logging_dir (:obj:`str`, `optional`):\n Tensorboard log directory. Will default to `runs/**CURRENT_DATETIME_HOSTNAME**`.\n logging_first_step (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to log and evaluate the first :obj:`global_step` or not.\n logging_steps (:obj:`int`, `optional`, defaults to 500):\n Number of update steps between two logs.\n save_steps (:obj:`int`, `optional`, defaults to 500):\n Number of updates steps before two checkpoint saves.\n save_total_limit (:obj:`int`, `optional`):\n If a value is passed, will limit the total amount of checkpoints. 
Deletes the older checkpoints in\n :obj:`output_dir`.\n no_cuda (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to not use CUDA even when it is available or not.\n seed (:obj:`int`, `optional`, defaults to 42):\n Random seed for initialization.\n fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to use 16-bit (mixed) precision training (through NVIDIA apex) instead of 32-bit training.\n fp16_opt_level (:obj:`str`, `optional`, defaults to 'O1'):\n For :obj:`fp16` training, apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details\n on the `apex documentation <https://nvidia.github.io/apex/amp.html>`__.\n local_rank (:obj:`int`, `optional`, defaults to -1):\n During distributed training, the rank of the process.\n tpu_num_cores (:obj:`int`, `optional`):\n When training on TPU, the number of TPU cores (automatically passed by launcher script).\n debug (:obj:`bool`, `optional`, defaults to :obj:`False`):\n When training on TPU, whether to print debug metrics or not.\n dataloader_drop_last (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size)\n or not.\n eval_steps (:obj:`int`, `optional`):\n Number of update steps between two evaluations if :obj:`evaluation_strategy=\"steps\"`. Will default to the\n same value as :obj:`logging_steps` if not set.\n dataloader_num_workers (:obj:`int`, `optional`, defaults to 0):\n Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the\n main process.\n past_index (:obj:`int`, `optional`, defaults to -1):\n Some models like :doc:`TransformerXL <../model_doc/transformerxl>` or :doc`XLNet <../model_doc/xlnet>` can\n make use of the past hidden states for their predictions. If this argument is set to a positive int, the\n ``Trainer`` will use the corresponding output (usually index 2) as the past state and feed it to the model\n at the next training step under the keyword argument ``mems``.\n run_name (:obj:`str`, `optional`):\n A descriptor for the run. Notably used for wandb logging.\n disable_tqdm (:obj:`bool`, `optional`):\n Whether or not to disable the tqdm progress bars. Will default to :obj:`True` if the logging level is set\n to warn or lower (default), :obj:`False` otherwise.\n remove_unused_columns (:obj:`bool`, `optional`, defaults to :obj:`True`):\n If using `nlp.Dataset` datasets, whether or not to automatically remove the columns unused by the model\n forward method.\n\n (Note that this behavior is not implemented for :class:`~transformers.TFTrainer` yet.)\n label_names (:obj:`List[str]`, `optional`):\n The list of keys in your dictionary of inputs that correspond to the labels.\n\n Will eventually default to :obj:`[\"labels\"]` except if the model used is one of the\n :obj:`XxxForQuestionAnswering` in which case it will default to :obj:`[\"start_positions\",\n \"end_positions\"]`.\n load_best_model_at_end (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not to load the best model found during training at the end of training.\n\n .. note::\n\n When set to :obj:`True`, the parameters :obj:`save_steps` will be ignored and the model will be saved\n after each evaluation.\n metric_for_best_model (:obj:`str`, `optional`):\n Use in conjunction with :obj:`load_best_model_at_end` to specify the metric to use to compare two different\n models. 
Must be the name of a metric returned by the evaluation with or without the prefix :obj:`\"eval_\"`.\n Will default to :obj:`\"loss\"` if unspecified and :obj:`load_best_model_at_end=True` (to use the evaluation\n loss).\n\n If you set this value, :obj:`greater_is_better` will default to :obj:`True`. Don't forget to set it to\n :obj:`False` if your metric is better when lower.\n greater_is_better (:obj:`bool`, `optional`):\n Use in conjunction with :obj:`load_best_model_at_end` and :obj:`metric_for_best_model` to specify if better\n models should have a greater metric or not. Will default to:\n\n - :obj:`True` if :obj:`metric_for_best_model` is set to a value that isn't :obj:`\"loss\"` or\n :obj:`\"eval_loss\"`.\n - :obj:`False` if :obj:`metric_for_best_model` is not set, or set to :obj:`\"loss\"` or :obj:`\"eval_loss\"`.\n model_parallel (:obj:`bool`, `optional`, defaults to :obj:`False`):\n If there are more than one devices, whether to use model parallelism to distribute the model's modules\n across devices or not.\n ignore_data_skip (:obj:`bool`, `optional`, defaults to :obj:`False`):\n When resuming training, whether or not to skip the epochs and batches to get the data loading at the same\n stage as in the previous training. If set to :obj:`True`, the training will begin faster (as that skipping\n step can take a long time) but will not yield the same results as the interrupted training would have.\n \"\"\"\n\n output_dir: str = field(\n metadata={\"help\": \"The output directory where the model predictions and checkpoints will be written.\"}\n )\n overwrite_output_dir: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Overwrite the content of the output directory.\"\n \"Use this to continue training if output_dir points to a checkpoint directory.\"\n )\n },\n )\n\n do_train: bool = field(default=False, metadata={\"help\": \"Whether to run training.\"})\n do_eval: bool = field(default=None, metadata={\"help\": \"Whether to run eval on the dev set.\"})\n do_predict: bool = field(default=False, metadata={\"help\": \"Whether to run predictions on the test set.\"})\n model_parallel: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"If there are more than one devices, whether to use model parallelism to distribute the \"\n \"model's modules across devices.\"\n )\n },\n )\n evaluation_strategy: EvaluationStrategy = field(\n default=\"no\",\n metadata={\"help\": \"Run evaluation during training at each logging step.\"},\n )\n prediction_loss_only: bool = field(\n default=False,\n metadata={\"help\": \"When performing evaluation and predictions, only returns the loss.\"},\n )\n\n per_device_train_batch_size: int = field(\n default=8, metadata={\"help\": \"Batch size per GPU/TPU core/CPU for training.\"}\n )\n per_device_eval_batch_size: int = field(\n default=8, metadata={\"help\": \"Batch size per GPU/TPU core/CPU for evaluation.\"}\n )\n\n per_gpu_train_batch_size: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Deprecated, the use of `--per_device_train_batch_size` is preferred. 
\"\n \"Batch size per GPU/TPU core/CPU for training.\"\n },\n )\n per_gpu_eval_batch_size: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Deprecated, the use of `--per_device_eval_batch_size` is preferred.\"\n \"Batch size per GPU/TPU core/CPU for evaluation.\"\n },\n )\n\n gradient_accumulation_steps: int = field(\n default=1,\n metadata={\"help\": \"Number of updates steps to accumulate before performing a backward/update pass.\"},\n )\n eval_accumulation_steps: Optional[int] = field(\n default=None,\n metadata={\"help\": \"Number of predictions steps to accumulate before moving the tensors to the CPU.\"},\n )\n\n learning_rate: float = field(default=5e-5, metadata={\"help\": \"The initial learning rate for Adam.\"})\n weight_decay: float = field(default=0.0, metadata={\"help\": \"Weight decay if we apply some.\"})\n adam_beta1: float = field(default=0.9, metadata={\"help\": \"Beta1 for Adam optimizer\"})\n adam_beta2: float = field(default=0.999, metadata={\"help\": \"Beta2 for Adam optimizer\"})\n adam_epsilon: float = field(default=1e-8, metadata={\"help\": \"Epsilon for Adam optimizer.\"})\n max_grad_norm: float = field(default=1.0, metadata={\"help\": \"Max gradient norm.\"})\n\n num_train_epochs: float = field(default=3.0, metadata={\"help\": \"Total number of training epochs to perform.\"})\n max_steps: int = field(\n default=-1,\n metadata={\"help\": \"If > 0: set total number of training steps to perform. Override num_train_epochs.\"},\n )\n warmup_steps: int = field(default=0, metadata={\"help\": \"Linear warmup over warmup_steps.\"})\n\n logging_dir: Optional[str] = field(default_factory=default_logdir, metadata={\"help\": \"Tensorboard log dir.\"})\n logging_first_step: bool = field(default=False, metadata={\"help\": \"Log the first global_step\"})\n logging_steps: int = field(default=500, metadata={\"help\": \"Log every X updates steps.\"})\n save_steps: int = field(default=500, metadata={\"help\": \"Save checkpoint every X updates steps.\"})\n save_total_limit: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"Limit the total amount of checkpoints.\"\n \"Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints\"\n )\n },\n )\n no_cuda: bool = field(default=False, metadata={\"help\": \"Do not use CUDA even when it is available\"})\n seed: int = field(default=42, metadata={\"help\": \"random seed for initialization\"})\n\n fp16: bool = field(\n default=False,\n metadata={\"help\": \"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\"},\n )\n fp16_opt_level: str = field(\n default=\"O1\",\n metadata={\n \"help\": (\n \"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\"\n )\n },\n )\n local_rank: int = field(default=-1, metadata={\"help\": \"For distributed training: local_rank\"})\n\n tpu_num_cores: Optional[int] = field(\n default=None, metadata={\"help\": \"TPU: Number of TPU cores (automatically passed by launcher script)\"}\n )\n tpu_metrics_debug: bool = field(\n default=False,\n metadata={\"help\": \"Deprecated, the use of `--debug` is preferred. 
TPU: Whether to print debug metrics\"},\n )\n debug: bool = field(default=False, metadata={\"help\": \"Whether to print debug metrics on TPU\"})\n\n dataloader_drop_last: bool = field(\n default=False, metadata={\"help\": \"Drop the last incomplete batch if it is not divisible by the batch size.\"}\n )\n eval_steps: int = field(default=None, metadata={\"help\": \"Run an evaluation every X steps.\"})\n dataloader_num_workers: int = field(\n default=0,\n metadata={\n \"help\": \"Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process.\"\n },\n )\n\n past_index: int = field(\n default=-1,\n metadata={\"help\": \"If >=0, uses the corresponding part of the output as the past state for next step.\"},\n )\n\n run_name: Optional[str] = field(\n default=None, metadata={\"help\": \"An optional descriptor for the run. Notably used for wandb logging.\"}\n )\n disable_tqdm: Optional[bool] = field(\n default=None, metadata={\"help\": \"Whether or not to disable the tqdm progress bars.\"}\n )\n\n remove_unused_columns: Optional[bool] = field(\n default=True, metadata={\"help\": \"Remove columns not required by the model when using an nlp.Dataset.\"}\n )\n label_names: Optional[List[str]] = field(\n default=None, metadata={\"help\": \"The list of keys in your dictionary of inputs that correspond to the labels.\"}\n )\n\n load_best_model_at_end: Optional[bool] = field(\n default=False,\n metadata={\"help\": \"Whether or not to load the best model found during training at the end of training.\"},\n )\n metric_for_best_model: Optional[str] = field(\n default=None, metadata={\"help\": \"The metric to use to compare two different models.\"}\n )\n greater_is_better: Optional[bool] = field(\n default=None, metadata={\"help\": \"Whether the `metric_for_best_model` should be maximized or not.\"}\n )\n ignore_data_skip: bool = field(\n default=False,\n metadata={\n \"help\": \"When resuming training, whether or not to skip the first epochs and batches to get to the same training data.\"\n },\n )\n\n def __post_init__(self):\n if self.disable_tqdm is None:\n self.disable_tqdm = logger.getEffectiveLevel() > logging.WARN\n self.evaluation_strategy = EvaluationStrategy(self.evaluation_strategy)\n if self.do_eval is False and self.evaluation_strategy != EvaluationStrategy.NO:\n self.do_eval = True\n if self.eval_steps is None:\n self.eval_steps = self.logging_steps\n\n if self.load_best_model_at_end and self.metric_for_best_model is None:\n self.metric_for_best_model = \"loss\"\n if self.greater_is_better is None and self.metric_for_best_model is not None:\n self.greater_is_better = self.metric_for_best_model not in [\"loss\", \"eval_loss\"]\n if self.run_name is None:\n self.run_name = self.output_dir\n\n if is_torch_available() and self.device.type != \"cuda\" and self.fp16:\n raise ValueError(\"AMP (`--fp16`) can only be used on CUDA devices.\")\n\n @property\n def train_batch_size(self) -> int:\n \"\"\"\n The actual batch size for training (may differ from :obj:`per_gpu_train_batch_size` in distributed training).\n \"\"\"\n if self.per_gpu_train_batch_size:\n logger.warning(\n \"Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future \"\n \"version. 
Using `--per_device_train_batch_size` is preferred.\"\n )\n per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size\n if not self.model_parallel:\n train_batch_size = per_device_batch_size * max(1, self.n_gpu)\n else:\n train_batch_size = per_device_batch_size\n return train_batch_size\n\n @property\n def eval_batch_size(self) -> int:\n \"\"\"\n The actual batch size for evaluation (may differ from :obj:`per_gpu_eval_batch_size` in distributed training).\n \"\"\"\n if self.per_gpu_eval_batch_size:\n logger.warning(\n \"Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future \"\n \"version. Using `--per_device_eval_batch_size` is preferred.\"\n )\n per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size\n if not self.model_parallel:\n eval_batch_size = per_device_batch_size * max(1, self.n_gpu)\n else:\n eval_batch_size = per_device_batch_size\n return eval_batch_size\n\n @cached_property\n @torch_required\n def _setup_devices(self) -> Tuple[\"torch.device\", int]:\n logger.info(\"PyTorch: setting up devices\")\n if self.no_cuda:\n device = torch.device(\"cpu\")\n n_gpu = 0\n elif is_torch_tpu_available():\n device = xm.xla_device()\n n_gpu = 0\n elif self.local_rank == -1:\n # if n_gpu is > 1 we'll use nn.DataParallel.\n # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`\n # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will\n # trigger an error that a device index is missing. Index 0 takes into account the\n # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`\n # will use the first GPU in that env, i.e. GPU#1\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n # Here, we'll use torch.distributed.\n # Initializes the distributed backend which will take care of synchronizing nodes/GPUs\n torch.distributed.init_process_group(backend=\"nccl\")\n device = torch.device(\"cuda\", self.local_rank)\n n_gpu = 1\n\n if device.type == \"cuda\":\n torch.cuda.set_device(device)\n\n return device, n_gpu\n\n @property\n @torch_required\n def device(self) -> \"torch.device\":\n \"\"\"\n The device used by this process.\n \"\"\"\n return self._setup_devices[0]\n\n @property\n @torch_required\n def n_gpu(self):\n \"\"\"\n The number of GPUs used by this process.\n\n Note:\n This will only be greater than one when you have multiple GPUs available but are not using distributed\n training. For distributed training, it will always be 1.\n \"\"\"\n return self._setup_devices[1]\n\n @property\n @torch_required\n def parallel_mode(self):\n \"\"\"\n The current mode used for parallelism if multiple GPUs/TPU cores are available. 
One of:\n\n - :obj:`ParallelMode.NOT_PARALLEL`: no parallelism (CPU or one GPU).\n - :obj:`ParallelMode.NOT_DISTRIBUTED`: several GPUs in one single process (uses :obj:`torch.nn.DataParallel`).\n - :obj:`ParallelMode.DISTRIBUTED`: several GPUs, each ahving its own process (uses\n :obj:`torch.nn.DistributedDataParallel`).\n - :obj:`ParallelMode.TPU`: several TPU cores.\n \"\"\"\n if is_torch_tpu_available():\n return ParallelMode.TPU\n elif self.local_rank != -1:\n return ParallelMode.DISTRIBUTED\n elif self.n_gpu > 1:\n return ParallelMode.NOT_DISTRIBUTED\n else:\n return ParallelMode.NOT_PARALLEL\n\n def to_dict(self):\n \"\"\"\n Serializes this instance while replace `Enum` by their values (for JSON serialization support).\n \"\"\"\n d = dataclasses.asdict(self)\n for k, v in d.items():\n if isinstance(v, Enum):\n d[k] = v.value\n return d\n\n def to_json_string(self):\n \"\"\"\n Serializes this instance to a JSON string.\n \"\"\"\n return json.dumps(self.to_dict(), indent=2)\n\n def to_sanitized_dict(self) -> Dict[str, Any]:\n \"\"\"\n Sanitized serialization to use with TensorBoard’s hparams\n \"\"\"\n d = self.to_dict()\n d = {**d, **{\"train_batch_size\": self.train_batch_size, \"eval_batch_size\": self.eval_batch_size}}\n\n valid_types = [bool, int, float, str]\n if is_torch_available():\n valid_types.append(torch.Tensor)\n\n return {k: v if type(v) in valid_types else str(v) for k, v in d.items()}\n\n\nclass ParallelMode(Enum):\n NOT_PARALLEL = \"not_parallel\"\n NOT_DISTRIBUTED = \"not_distributed\"\n DISTRIBUTED = \"distributed\"\n TPU = \"tpu\"\n"
] | [
[
"torch.distributed.init_process_group",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.device",
"torch.cuda.set_device"
]
] |
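A minimal usage sketch of the `TrainingArguments` dataclass quoted above, setting only fields that appear in this version of the file; values are illustrative, and the `evaluation_strategy` string is coerced to an `EvaluationStrategy` enum in `__post_init__`:

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="./runs/demo",
    do_train=True,
    per_device_train_batch_size=8,
    gradient_accumulation_steps=2,
    learning_rate=5e-5,
    evaluation_strategy="steps",   # also flips do_eval to True in __post_init__
    eval_steps=500,
    load_best_model_at_end=True,   # defaults metric_for_best_model to "loss"
)
print(args.to_json_string())
```

Note that `train_batch_size` and `eval_batch_size` are derived properties here, scaled by `n_gpu` unless `model_parallel` is set.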
vedanthpadigelwar/AI_projects | [
"885bbe76800f9a449414b3735ab4a4c4bd2e7aa0"
] | [
"test method/tensorflow2.0/deep-sort-yolov4/demo.py"
] | [
"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, absolute_import\n\nfrom timeit import time\nimport warnings\nimport cv2\nimport numpy as np\nfrom PIL import Image\nfrom yolo import YOLO\n\nfrom deep_sort import preprocessing\nfrom deep_sort import nn_matching\nfrom deep_sort.detection import Detection\nfrom deep_sort.detection_yolo import Detection_YOLO\nfrom deep_sort.tracker import Tracker\nfrom tools import generate_detections as gdet\nimport imutils.video\nfrom videocaptureasync import VideoCaptureAsync\n\nwarnings.filterwarnings('ignore')\n\n\ndef main(yolo):\n\n # Definition of the parameters\n max_cosine_distance = 0.3\n nn_budget = None\n nms_max_overlap = 1.0\n\n # Deep SORT\n model_filename = 'model_data/mars-small128.pb'\n encoder = gdet.create_box_encoder(model_filename, batch_size=1)\n\n metric = nn_matching.NearestNeighborDistanceMetric(\n \"cosine\", max_cosine_distance, nn_budget)\n tracker = Tracker(metric)\n\n tracking = True\n writeVideo_flag = True\n asyncVideo_flag = False\n\n file_path = 'video.webm'\n if asyncVideo_flag:\n video_capture = VideoCaptureAsync(file_path)\n else:\n video_capture = cv2.VideoCapture(file_path)\n\n if asyncVideo_flag:\n video_capture.start()\n\n if writeVideo_flag:\n if asyncVideo_flag:\n w = int(video_capture.cap.get(3))\n h = int(video_capture.cap.get(4))\n else:\n w = int(video_capture.get(3))\n h = int(video_capture.get(4))\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter('output_yolov4.avi', fourcc, 30, (w, h))\n frame_index = -1\n\n fps = 0.0\n fps_imutils = imutils.video.FPS().start()\n\n while True:\n ret, frame = video_capture.read() # frame shape 640*480*3\n if ret != True:\n break\n\n t1 = time.time()\n\n image = Image.fromarray(frame[..., ::-1]) # bgr to rgb\n boxes, confidence, classes = yolo.detect_image(image)\n\n if tracking:\n features = encoder(frame, boxes)\n\n detections = [Detection(bbox, confidence, cls, feature) for bbox, confidence, cls, feature in\n zip(boxes, confidence, classes, features)]\n else:\n detections = [Detection_YOLO(bbox, confidence, cls) for bbox, confidence, cls in\n zip(boxes, confidence, classes)]\n\n # Run non-maxima suppression.\n boxes = np.array([d.tlwh for d in detections])\n scores = np.array([d.confidence for d in detections])\n indices = preprocessing.non_max_suppression(\n boxes, nms_max_overlap, scores)\n detections = [detections[i] for i in indices]\n\n if tracking:\n # Call the tracker\n tracker.predict()\n tracker.update(detections)\n\n for track in tracker.tracks:\n if not track.is_confirmed() or track.time_since_update > 1:\n continue\n bbox = track.to_tlbr()\n cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(\n bbox[2]), int(bbox[3])), (255, 255, 255), 2)\n cv2.putText(frame, \"ID: \" + str(track.track_id), (int(bbox[0]), int(bbox[1])), 0,\n 1.5e-3 * frame.shape[0], (0, 255, 0), 1)\n\n for det in detections:\n bbox = det.to_tlbr()\n score = \"%.2f\" % round(det.confidence * 100, 2) + \"%\"\n cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(\n bbox[2]), int(bbox[3])), (255, 0, 0), 2)\n if len(classes) > 0:\n cls = det.cls\n cv2.putText(frame, str(cls) + \" \" + score, (int(bbox[0]), int(bbox[3])), 0,\n 1.5e-3 * frame.shape[0], (0, 255, 0), 1)\n\n cv2.imshow('', frame)\n\n if writeVideo_flag: # and not asyncVideo_flag:\n # save a frame\n out.write(frame)\n frame_index = frame_index + 1\n\n fps_imutils.update()\n\n if not asyncVideo_flag:\n fps = (fps + (1./(time.time()-t1))) / 2\n print(\"FPS = %f\" % 
(fps))\n\n # Press Q to stop!\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n fps_imutils.stop()\n print('imutils FPS: {}'.format(fps_imutils.fps()))\n\n if asyncVideo_flag:\n video_capture.stop()\n else:\n video_capture.release()\n\n if writeVideo_flag:\n out.release()\n\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main(YOLO())\n"
] | [
[
"numpy.array"
]
] |
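Stripped of the video/YOLO plumbing, the per-frame tracking logic in the demo above reduces to: non-max suppression over detections, then a predict/update cycle on the tracker, then filtering for confirmed, recently updated tracks. A skeleton of that loop, assuming the `deep_sort` package from the quoted repo is importable:

```python
import numpy as np
from deep_sort import preprocessing  # assumed available, as in the quoted demo

def track_frame(tracker, detections, nms_max_overlap=1.0):
    """One Deep SORT step: suppress overlapping detections, then update tracks."""
    boxes = np.array([d.tlwh for d in detections])
    scores = np.array([d.confidence for d in detections])
    keep = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
    detections = [detections[i] for i in keep]
    tracker.predict()            # propagate each track's motion state
    tracker.update(detections)   # associate detections with existing tracks
    return [t for t in tracker.tracks
            if t.is_confirmed() and t.time_since_update <= 1]
```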
ktpolanski/scirpy | [
"2d6e3a6347ad54425a8dea635fa04609aaf33c57"
] | [
"scirpy/tests/test_util.py"
] | [
"from scirpy.util import (\n _is_na,\n _is_false,\n _is_true,\n _normalize_counts,\n _is_symmetric,\n _reduce_nonzero,\n _translate_dna_to_protein,\n)\nfrom scirpy.util.graph import layout_components\nfrom itertools import combinations\nimport igraph as ig\nimport numpy as np\nimport pandas as pd\nimport numpy.testing as npt\nimport pytest\nimport scipy.sparse\nfrom .fixtures import adata_tra\n\nimport warnings\n\n\ndef test_reduce_nonzero():\n A = np.array([[0, 0, 3], [1, 2, 5], [7, 0, 0]])\n B = np.array([[1, 0, 3], [2, 1, 0], [6, 0, 5]])\n A_csr = scipy.sparse.csr_matrix(A)\n B_csr = scipy.sparse.csr_matrix(B)\n A_csc = scipy.sparse.csc_matrix(A)\n B_csc = scipy.sparse.csc_matrix(B)\n\n expected = np.array([[1, 0, 3], [1, 1, 5], [6, 0, 5]])\n\n with pytest.raises(ValueError):\n _reduce_nonzero(A, B)\n npt.assert_equal(_reduce_nonzero(A_csr, B_csr).toarray(), expected)\n npt.assert_equal(_reduce_nonzero(A_csc, B_csc).toarray(), expected)\n npt.assert_equal(_reduce_nonzero(A_csr, A_csr.copy()).toarray(), A_csr.toarray())\n\n\ndef test_is_symmatric():\n M = np.array([[1, 2, 2], [2, 1, 3], [2, 3, 1]])\n S_csr = scipy.sparse.csr_matrix(M)\n S_csc = scipy.sparse.csc_matrix(M)\n S_lil = scipy.sparse.lil_matrix(M)\n assert _is_symmetric(M)\n assert _is_symmetric(S_csr)\n assert _is_symmetric(S_csc)\n assert _is_symmetric(S_lil)\n\n M = np.array([[1, 2, 2], [2, 1, np.nan], [2, np.nan, np.nan]])\n S_csr = scipy.sparse.csr_matrix(M)\n S_csc = scipy.sparse.csc_matrix(M)\n S_lil = scipy.sparse.lil_matrix(M)\n assert _is_symmetric(M)\n assert _is_symmetric(S_csr)\n assert _is_symmetric(S_csc)\n assert _is_symmetric(S_lil)\n\n M = np.array([[1, 2, 2], [2, 1, 3], [3, 2, 1]])\n S_csr = scipy.sparse.csr_matrix(M)\n S_csc = scipy.sparse.csc_matrix(M)\n S_lil = scipy.sparse.lil_matrix(M)\n assert not _is_symmetric(M)\n assert not _is_symmetric(S_csr)\n assert not _is_symmetric(S_csc)\n assert not _is_symmetric(S_lil)\n\n\ndef test_is_na():\n warnings.filterwarnings(\"error\")\n assert _is_na(None)\n assert _is_na(np.nan)\n assert _is_na(\"nan\")\n assert not _is_na(42)\n assert not _is_na(\"Foobar\")\n assert not _is_na(dict())\n array_test = np.array([\"None\", \"nan\", None, np.nan, \"foobar\"])\n array_expect = np.array([True, True, True, True, False])\n array_test_bool = np.array([True, False, True])\n array_expect_bool = np.array([False, False, False])\n\n npt.assert_equal(_is_na(array_test), array_expect)\n npt.assert_equal(_is_na(pd.Series(array_test)), array_expect)\n\n npt.assert_equal(_is_na(array_test_bool), array_expect_bool)\n npt.assert_equal(_is_na(pd.Series(array_test_bool)), array_expect_bool)\n\n\ndef test_is_false():\n warnings.filterwarnings(\"error\")\n assert _is_false(False)\n assert _is_false(0)\n assert _is_false(\"\")\n assert _is_false(\"False\")\n assert _is_false(\"false\")\n assert not _is_false(42)\n assert not _is_false(True)\n assert not _is_false(\"true\")\n assert not _is_false(\"foobar\")\n assert not _is_false(np.nan)\n assert not _is_false(None)\n assert not _is_false(\"nan\")\n assert not _is_false(\"None\")\n array_test = np.array(\n [\"False\", \"false\", 0, 1, True, False, \"true\", \"Foobar\", np.nan, \"nan\"],\n dtype=object,\n )\n array_test_str = array_test.astype(\"str\")\n array_expect = np.array(\n [True, True, True, False, False, True, False, False, False, False]\n )\n array_test_bool = np.array([True, False, True])\n array_expect_bool = np.array([False, True, False])\n\n npt.assert_equal(_is_false(array_test), array_expect)\n 
npt.assert_equal(_is_false(array_test_str), array_expect)\n npt.assert_equal(_is_false(pd.Series(array_test)), array_expect)\n npt.assert_equal(_is_false(pd.Series(array_test_str)), array_expect)\n npt.assert_equal(_is_false(array_test_bool), array_expect_bool)\n npt.assert_equal(_is_false(pd.Series(array_test_bool)), array_expect_bool)\n\n\ndef test_is_true():\n warnings.filterwarnings(\"error\")\n assert not _is_true(False)\n assert not _is_true(0)\n assert not _is_true(\"\")\n assert not _is_true(\"False\")\n assert not _is_true(\"false\")\n assert not _is_true(\"0\")\n assert not _is_true(np.nan)\n assert not _is_true(None)\n assert not _is_true(\"nan\")\n assert not _is_true(\"None\")\n assert _is_true(42)\n assert _is_true(True)\n assert _is_true(\"true\")\n assert _is_true(\"foobar\")\n assert _is_true(\"True\")\n array_test = np.array(\n [\"False\", \"false\", 0, 1, True, False, \"true\", \"Foobar\", np.nan, \"nan\"],\n dtype=object,\n )\n array_test_str = array_test.astype(\"str\")\n array_expect = np.array(\n [False, False, False, True, True, False, True, True, False, False]\n )\n array_test_bool = np.array([True, False, True])\n array_expect_bool = np.array([True, False, True])\n\n npt.assert_equal(_is_true(array_test), array_expect)\n npt.assert_equal(_is_true(array_test_str), array_expect)\n npt.assert_equal(_is_true(pd.Series(array_test)), array_expect)\n npt.assert_equal(_is_true(pd.Series(array_test_str)), array_expect)\n npt.assert_equal(_is_true(array_test_bool), array_expect_bool)\n npt.assert_equal(_is_true(pd.Series(array_test_bool)), array_expect_bool)\n\n\[email protected]\ndef group_df():\n return pd.DataFrame().assign(\n cell=[\"c1\", \"c2\", \"c3\", \"c4\", \"c5\", \"c6\"],\n sample=[\"s2\", \"s1\", \"s2\", \"s2\", \"s2\", \"s1\"],\n )\n\n\ndef test_normalize_counts(group_df):\n with pytest.raises(ValueError):\n _normalize_counts(group_df, True, None)\n\n npt.assert_equal(_normalize_counts(group_df, False), [1] * 6)\n npt.assert_equal(\n _normalize_counts(group_df, \"sample\"), [0.25, 0.5, 0.25, 0.25, 0.25, 0.5]\n )\n npt.assert_equal(\n _normalize_counts(group_df, True, \"sample\"), [0.25, 0.5, 0.25, 0.25, 0.25, 0.5]\n )\n\n\ndef test_layout_components():\n g = ig.Graph()\n\n # add 100 unconnected nodes\n g.add_vertices(100)\n\n # add 50 2-node components\n g.add_vertices(100)\n g.add_edges([(ii, ii + 1) for ii in range(100, 200, 2)])\n\n # add 33 3-node components\n g.add_vertices(100)\n for ii in range(200, 299, 3):\n g.add_edges([(ii, ii + 1), (ii, ii + 2), (ii + 1, ii + 2)])\n\n # add a couple of larger components\n n = 300\n for ii in np.random.randint(4, 30, size=10):\n g.add_vertices(ii)\n g.add_edges(combinations(range(n, n + ii), 2))\n n += ii\n\n layout_components(g, arrange_boxes=\"size\", component_layout=\"fr\")\n try:\n layout_components(g, arrange_boxes=\"rpack\", component_layout=\"fr\")\n except ImportError:\n warnings.warn(\n \"The 'rpack' layout-test was skipped because rectangle \"\n \"packer is not installed. \"\n )\n layout_components(g, arrange_boxes=\"squarify\", component_layout=\"fr\")\n\n\ndef test_translate_dna_to_protein(adata_tra):\n for nt, aa in zip(adata_tra.obs[\"IR_VJ_1_cdr3_nt\"], adata_tra.obs[\"IR_VJ_1_cdr3\"]):\n assert _translate_dna_to_protein(nt) == aa\n"
] | [
[
"numpy.array",
"pandas.Series",
"numpy.random.randint",
"pandas.DataFrame"
]
] |
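The tests above pin down the expected semantics of `_is_symmetric`: mirrored NaNs count as equal, sparse matrices (CSR, CSC, LIL) are accepted, and any asymmetric value fails the check. An illustrative reimplementation (not scirpy's actual source) that satisfies all three test cases:

```python
import numpy as np
import scipy.sparse

def is_symmetric(M, tol=1e-8):
    """NaN-aware symmetry check for dense or sparse square matrices."""
    A = M.toarray() if scipy.sparse.issparse(M) else np.asarray(M, dtype=float)
    if A.shape[0] != A.shape[1]:
        return False
    nan_a, nan_t = np.isnan(A), np.isnan(A.T)
    if not np.array_equal(nan_a, nan_t):
        return False                 # NaNs must mirror each other
    mask = ~nan_a                    # compare only non-NaN positions
    return bool(np.all(np.abs(A[mask] - A.T[mask]) <= tol))

M = np.array([[1, 2, 2], [2, 1, np.nan], [2, np.nan, np.nan]])
assert is_symmetric(M)               # matches the second case in the test
```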
mauicv/alibi | [
"30fea76391c255963c8818c2b54aa615b0d6f858"
] | [
"alibi/explainers/anchors/anchor_image.py"
] | [
"import copy\nimport logging\nfrom functools import partial\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union\n\nimport numpy as np\nfrom skimage.segmentation import felzenszwalb, quickshift, slic\n\nfrom alibi.api.defaults import DEFAULT_DATA_ANCHOR_IMG, DEFAULT_META_ANCHOR\nfrom alibi.api.interfaces import Explainer, Explanation\nfrom alibi.exceptions import (AlibiPredictorCallException,\n AlibiPredictorReturnTypeError)\nfrom alibi.utils.wrappers import ArgmaxTransformer\n\nfrom .anchor_base import AnchorBaseBeam\nfrom .anchor_explanation import AnchorExplanation\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_SEGMENTATION_KWARGS = {\n 'felzenszwalb': {},\n 'quickshift': {},\n 'slic': {'n_segments': 10, 'compactness': 10, 'sigma': .5}\n} # type: Dict[str, Dict]\n\n\ndef scale_image(image: np.ndarray, scale: tuple = (0, 255)) -> np.ndarray:\n \"\"\"\n Scales an image in a specified range.\n\n Parameters\n ----------\n image\n Image to be scale.\n scale\n The scaling interval.\n\n Returns\n -------\n img_scaled\n Scaled image.\n \"\"\"\n\n img_max, img_min = image.max(), image.min()\n img_std = (image - img_min) / (img_max - img_min)\n img_scaled = img_std * (scale[1] - scale[0]) + scale[0]\n\n return img_scaled\n\n\nclass AnchorImageSampler:\n def __init__(\n self,\n predictor: Callable,\n segmentation_fn: Callable,\n custom_segmentation: bool,\n image: np.ndarray,\n images_background: Optional[np.ndarray] = None,\n p_sample: float = 0.5,\n n_covered_ex: int = 10,\n ):\n \"\"\"\n Initialize anchor image sampler.\n\n Parameters\n ----------\n predictor\n A callable that takes a `numpy` array of `N` data points as inputs and returns `N` outputs.\n segmentation_fn\n Function used to segment the images.\n image\n Image to be explained.\n images_background\n Images to overlay superpixels on.\n p_sample\n Probability for a pixel to be represented by the average value of its superpixel.\n n_covered_ex\n How many examples where anchors apply to store for each anchor sampled during search\n (both examples where prediction on samples agrees/disagrees with `desired_label` are stored).\n \"\"\"\n self.predictor = predictor\n self.segmentation_fn = segmentation_fn\n self.custom_segmentation = custom_segmentation\n self.image = image\n self.images_background = images_background\n self.n_covered_ex = n_covered_ex\n self.p_sample = p_sample\n self.segments = self.generate_superpixels(image)\n self.segment_labels = list(np.unique(self.segments))\n self.instance_label = self.predictor(image[np.newaxis, ...])[0]\n\n def __call__(\n self, anchor: Tuple[int, tuple], num_samples: int, compute_labels: bool = True\n ) -> List[Union[np.ndarray, float, int]]:\n \"\"\"\n Sample images from a perturbation distribution by masking randomly chosen superpixels\n from the original image and replacing them with pixel values from superimposed images\n if background images are provided to the explainer. 
Otherwise, the superpixels from the\n original image are replaced with their average values.\n\n Parameters\n ----------\n anchor\n - ``int`` - order of anchor in the batch.\n - ``tuple`` - features (= superpixels) present in the proposed anchor.\n num_samples\n Number of samples used.\n compute_labels\n If ``True``, an array of comparisons between predictions on perturbed samples and\n instance to be explained is returned.\n\n Returns\n -------\n If ``compute_labels=True``, a list containing the following is returned\n\n - `covered_true` - perturbed examples where the anchor applies and the model prediction on perturbed is the \\\n same as the instance prediction.\n\n - `covered_false` - perturbed examples where the anchor applies and the model prediction on pertrurbed sample \\\n is NOT the same as the instance prediction.\n\n - `labels` - `num_samples` ints indicating whether the prediction on the perturbed sample matches (1) \\\n the label of the instance to be explained or not (0).\n\n - `data` - Matrix with 1s and 0s indicating whether the values in a superpixel will remain unchanged (1) or \\\n will be perturbed (0), for each sample.\n\n - `1.0` - indicates exact coverage is not computed for this algorithm.\n\n - `anchor[0]` - position of anchor in the batch request\n\n Otherwise, a list containing the data matrix only is returned.\n \"\"\"\n\n if compute_labels:\n raw_data, data = self.perturbation(anchor[1], num_samples)\n labels = self.compare_labels(raw_data)\n covered_true = raw_data[labels][: self.n_covered_ex]\n covered_true = [scale_image(img) for img in covered_true]\n covered_false = raw_data[np.logical_not(labels)][: self.n_covered_ex]\n covered_false = [scale_image(img) for img in covered_false]\n # coverage set to -1.0 as we can't compute 'true'coverage for this model\n\n return [covered_true, covered_false, labels.astype(int), data, -1.0, anchor[0]] # type: ignore\n\n else:\n data = self._choose_superpixels(num_samples)\n data[:, anchor[1]] = 1 # superpixels in candidate anchor are not perturbed\n\n return [data]\n\n def compare_labels(self, samples: np.ndarray) -> np.ndarray:\n \"\"\"\n Compute the agreement between a classifier prediction on an instance to be explained\n and the prediction on a set of samples which have a subset of perturbed superpixels.\n\n Parameters\n ----------\n samples\n Samples whose labels are to be compared with the instance label.\n\n Returns\n -------\n A boolean array indicating whether the prediction was the same as the instance label.\n \"\"\"\n\n return self.predictor(samples) == self.instance_label\n\n def _choose_superpixels(\n self, num_samples: int, p_sample: float = 0.5\n ) -> np.ndarray:\n \"\"\"\n Generates a binary mask of dimension [num_samples, M] where M is the number of\n image superpixels (segments).\n\n Parameters\n ----------\n num_samples\n Number of perturbed images to be generated\n p_sample:\n The probability that a superpixel is perturbed\n\n Returns\n -------\n data\n Binary 2D mask, where each non-zero entry in a row indicates that\n the values of the particular image segment will not be perturbed.\n \"\"\"\n\n n_features = len(self.segment_labels)\n data = np.random.choice(\n [0, 1], num_samples * n_features, p=[p_sample, 1 - p_sample]\n )\n data = data.reshape((num_samples, n_features))\n\n return data\n\n def perturbation(\n self, anchor: tuple, num_samples: int\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Perturbs an image by altering the values of selected superpixels. 
If a dataset of image\n backgrounds is provided to the explainer, then the superpixels are replaced with the\n equivalent superpixels from the background image. Otherwise, the superpixels are replaced\n by their average value.\n\n Parameters\n ----------\n anchor:\n Contains the superpixels whose values are not going to be perturbed.\n num_samples:\n Number of perturbed samples to be returned.\n\n Returns\n -------\n imgs\n A `[num_samples, H, W, C]` array of perturbed images.\n segments_mask\n A `[num_samples, M]` binary mask, where `M` is the number of image superpixels\n segments. 1 indicates the values in that particular superpixels are not\n perturbed.\n \"\"\"\n\n image = self.image\n segments = self.segments\n backgrounds: Union[np.ndarray, List[None]]\n\n # choose superpixels to be perturbed\n segments_mask = self._choose_superpixels(num_samples, p_sample=self.p_sample)\n segments_mask[:, anchor] = 1\n\n # for each sample, need to sample one of the background images if provided\n if self.images_background is not None:\n backgrounds = np.random.choice(\n range(len(self.images_background)),\n segments_mask.shape[0],\n replace=True,\n )\n else:\n backgrounds = [None] * segments_mask.shape[0]\n # create fudged image where the pixel value in each superpixel is set to the\n # average over the superpixel for each channel\n fudged_image = image.copy()\n n_channels = image.shape[-1]\n for x in np.unique(segments):\n fudged_image[segments == x] = [\n np.mean(image[segments == x][:, i]) for i in range(n_channels)\n ]\n\n pert_imgs = []\n for mask, background_idx in zip(segments_mask, backgrounds):\n temp = copy.deepcopy(image)\n to_perturb = np.where(mask == 0)[0]\n # create mask for each superpixel not present in the sample\n mask = np.zeros(segments.shape).astype(bool)\n for superpixel in to_perturb:\n mask[segments == superpixel] = True\n if background_idx is not None:\n # replace values with those of background image\n temp[mask] = self.images_background[background_idx][mask] # type: ignore[index]\n else:\n # ... or with the averaged superpixel value\n temp[mask] = fudged_image[mask]\n pert_imgs.append(temp)\n\n return np.array(pert_imgs), segments_mask\n\n def generate_superpixels(self, image: np.ndarray) -> np.ndarray:\n \"\"\"\n Generates superpixels from (i.e., segments) an image.\n\n Parameters\n ----------\n image\n A grayscale or RGB image.\n\n Returns\n -------\n A `[H, W]` array of integers. 
Each integer is a segment (superpixel) label.\n \"\"\"\n\n image_preproc = self._preprocess_img(image)\n\n return self.segmentation_fn(image_preproc)\n\n def _preprocess_img(self, image: np.ndarray) -> np.ndarray:\n \"\"\"\n Applies necessary transformations to the image prior to segmentation.\n\n Parameters\n ----------\n image\n A grayscale or RGB image.\n\n Returns\n -------\n A preprocessed image.\n \"\"\"\n\n # Grayscale images are repeated across channels\n if not self.custom_segmentation and image.shape[-1] == 1:\n image_preproc = np.repeat(image, 3, axis=2)\n else:\n image_preproc = image.copy()\n\n return image_preproc\n\n\nclass AnchorImage(Explainer):\n def __init__(self,\n predictor: Callable[[np.ndarray], np.ndarray],\n image_shape: tuple,\n dtype: Type[np.generic] = np.float32,\n segmentation_fn: Any = 'slic',\n segmentation_kwargs: Optional[dict] = None,\n images_background: Optional[np.ndarray] = None,\n seed: Optional[int] = None) -> None:\n \"\"\"\n Initialize anchor image explainer.\n\n Parameters\n ----------\n predictor\n A callable that takes a `numpy` array of `N` data points as inputs and returns `N` outputs.\n image_shape\n Shape of the image to be explained. The channel axis is expected to be last.\n dtype\n A `numpy` scalar type that corresponds to the type of input array expected by `predictor`. This may be\n used to construct arrays of the given type to be passed through the `predictor`. For most use cases\n this argument should have no effect, but it is exposed for use with predictors that would break when\n called with an array of unsupported type.\n segmentation_fn\n Any of the built in segmentation function strings: ``'felzenszwalb'``, ``'slic'`` or ``'quickshift'`` or\n a custom segmentation function (callable) which returns an image mask with labels for each superpixel.\n See http://scikit-image.org/docs/dev/api/skimage.segmentation.html for more info.\n segmentation_kwargs\n Keyword arguments for the built in segmentation functions.\n images_background\n Images to overlay superpixels on.\n seed\n If set, ensures different runs with the same input will yield same explanation.\n\n Raises\n ------\n :py:class:`alibi.exceptions.AlibiPredictorCallException`\n If calling `predictor` fails at runtime.\n :py:class:`alibi.exceptions.AlibiPredictorReturnTypeError`\n If the return type of `predictor` is not `np.ndarray`.\n \"\"\"\n super().__init__(meta=copy.deepcopy(DEFAULT_META_ANCHOR))\n np.random.seed(seed)\n\n # TODO: this logic needs improvement. We should check against a fixed set of strings\n # for built-ins instead of any `str`.\n if isinstance(segmentation_fn, str) and segmentation_kwargs is None:\n try:\n segmentation_kwargs = DEFAULT_SEGMENTATION_KWARGS[segmentation_fn]\n except KeyError:\n logger.warning(\n 'DEFAULT_SEGMENTATION_KWARGS did not contain any entry'\n 'for segmentation method {}. No kwargs will be passed to'\n 'the segmentation function!'.format(segmentation_fn)\n )\n segmentation_kwargs = {}\n elif callable(segmentation_fn) and segmentation_kwargs:\n logger.warning(\n 'Specified both a segmentation function to create superpixels and '\n 'keyword arguments for built-in segmentation functions. 
By default '\n 'the specified segmentation function will be used.'\n )\n\n # set the predictor\n self.image_shape = tuple(image_shape) # coerce lists\n self.dtype = dtype\n self.predictor = self._transform_predictor(predictor)\n\n # segmentation function is either a user-defined function or one of the values in\n fn_options = {'felzenszwalb': felzenszwalb, 'slic': slic, 'quickshift': quickshift}\n if callable(segmentation_fn):\n self.custom_segmentation = True\n self.segmentation_fn = segmentation_fn\n else:\n self.custom_segmentation = False\n self.segmentation_fn = partial(fn_options[segmentation_fn], **segmentation_kwargs) # type: ignore[arg-type]\n\n self.images_background = images_background\n # a superpixel is perturbed with prob 1 - p_sample\n self.p_sample = 0.5 # type: float\n\n # update metadata\n self.meta['params'].update(\n custom_segmentation=self.custom_segmentation,\n segmentation_kwargs=segmentation_kwargs,\n p_sample=self.p_sample,\n seed=seed,\n image_shape=self.image_shape,\n images_background=self.images_background\n )\n if not self.custom_segmentation:\n self.meta['params'].update(segmentation_fn=segmentation_fn)\n else:\n self.meta['params'].update(segmentation_fn='custom')\n\n def generate_superpixels(self, image: np.ndarray) -> np.ndarray:\n \"\"\"\n Generates superpixels from (i.e., segments) an image.\n\n Parameters\n ----------\n image\n A grayscale or RGB image.\n\n Returns\n -------\n A `[H, W]` array of integers. Each integer is a segment (superpixel) label.\n \"\"\"\n\n image_preproc = self._preprocess_img(image)\n\n return self.segmentation_fn(image_preproc)\n\n def _preprocess_img(self, image: np.ndarray) -> np.ndarray:\n \"\"\"\n Applies necessary transformations to the image prior to segmentation.\n\n Parameters\n ----------\n image\n A grayscale or RGB image.\n\n Returns\n -------\n A preprocessed image.\n \"\"\"\n\n # Grayscale images are repeated across channels\n if not self.custom_segmentation and image.shape[-1] == 1:\n image_preproc = np.repeat(image, 3, axis=2)\n else:\n image_preproc = image.copy()\n\n return image_preproc\n\n def explain(self, # type: ignore[override]\n image: np.ndarray,\n p_sample: float = 0.5,\n threshold: float = 0.95,\n delta: float = 0.1,\n tau: float = 0.15,\n batch_size: int = 100,\n coverage_samples: int = 10000,\n beam_size: int = 1,\n stop_on_first: bool = False,\n max_anchor_size: Optional[int] = None,\n min_samples_start: int = 100,\n n_covered_ex: int = 10,\n binary_cache_size: int = 10000,\n cache_margin: int = 1000,\n verbose: bool = False,\n verbose_every: int = 1,\n **kwargs: Any) -> Explanation:\n \"\"\"\n Explain instance and return anchor with metadata.\n\n Parameters\n ----------\n image\n Image to be explained.\n p_sample\n Probability for a pixel to be represented by the average value of its superpixel.\n threshold\n Minimum precision threshold.\n delta\n Used to compute `beta`.\n tau\n Margin between lower confidence bound and minimum precision of upper bound.\n batch_size\n Batch size used for sampling.\n coverage_samples\n Number of samples used to estimate coverage from during result search.\n beam_size\n The number of anchors extended at each step of new anchors construction.\n stop_on_first\n If ``True``, the beam search algorithm will return the first anchor that has satisfies the\n probability constraint.\n max_anchor_size\n Maximum number of features in result.\n min_samples_start\n Min number of initial samples.\n n_covered_ex\n How many examples where anchors apply to store for each anchor 
sampled during search\n (both examples where prediction on samples agrees/disagrees with `desired_label` are stored).\n binary_cache_size\n The result search pre-allocates `binary_cache_size` batches for storing the binary arrays\n returned during sampling.\n cache_margin\n When only ``max(cache_margin, batch_size)`` positions in the binary cache remain empty, a new cache\n of the same size is pre-allocated to continue buffering samples.\n verbose\n Display updates during the anchor search iterations.\n verbose_every\n Frequency of displayed iterations during anchor search process.\n\n Returns\n -------\n explanation\n `Explanation` object containing the anchor explaining the instance with additional metadata as attributes.\n See usage at `AnchorImage examples`_ for details.\n\n .. _AnchorImage examples:\n https://docs.seldon.io/projects/alibi/en/stable/methods/Anchors.html\n \"\"\"\n # get params for storage in meta\n params = locals()\n remove = ['image', 'self']\n for key in remove:\n params.pop(key)\n\n sampler = AnchorImageSampler(\n predictor=self.predictor,\n segmentation_fn=self.segmentation_fn,\n custom_segmentation=self.custom_segmentation,\n image=image,\n images_background=self.images_background,\n p_sample=p_sample,\n n_covered_ex=n_covered_ex,\n )\n\n # get anchors and add metadata\n mab = AnchorBaseBeam(\n samplers=[sampler],\n sample_cache_size=binary_cache_size,\n cache_margin=cache_margin,\n **kwargs)\n result = mab.anchor_beam(\n desired_confidence=threshold,\n delta=delta,\n epsilon=tau,\n batch_size=batch_size,\n coverage_samples=coverage_samples,\n beam_size=beam_size,\n stop_on_first=stop_on_first,\n max_anchor_size=max_anchor_size,\n min_samples_start=min_samples_start,\n verbose=verbose,\n verbose_every=verbose_every,\n **kwargs,\n ) # type: Any\n\n return self._build_explanation(\n image, result, sampler.instance_label, params, sampler\n )\n\n def _build_explanation(\n self,\n image: np.ndarray,\n result: dict,\n predicted_label: int,\n params: dict,\n sampler: AnchorImageSampler,\n ) -> Explanation:\n \"\"\"\n Uses the metadata returned by the anchor search algorithm together with\n the instance to be explained to build an explanation object.\n\n Parameters\n ----------\n image\n Instance to be explained.\n result\n Dictionary containing the search anchor and metadata.\n predicted_label\n Label of the instance to be explained.\n params\n Parameters passed to `:py:meth:alibi.explainers.anchor_image.AnchorImage.explain`.\n \"\"\"\n\n result['instance'] = image\n result['instances'] = np.expand_dims(image, 0)\n result['prediction'] = np.array([predicted_label])\n\n # overlay image with anchor mask\n anchor = self.overlay_mask(image, sampler.segments, result['feature'])\n exp = AnchorExplanation('image', result)\n\n # output explanation dictionary\n data = copy.deepcopy(DEFAULT_DATA_ANCHOR_IMG)\n data.update(\n anchor=anchor,\n segments=sampler.segments,\n precision=exp.precision(),\n coverage=exp.coverage(),\n raw=exp.exp_map\n )\n\n # create explanation object\n explanation = Explanation(meta=copy.deepcopy(self.meta), data=data)\n\n # params passed to explain\n explanation.meta['params'].update(params)\n return explanation\n\n def overlay_mask(self, image: np.ndarray, segments: np.ndarray, mask_features: list,\n scale: tuple = (0, 255)) -> np.ndarray:\n \"\"\"\n Overlay image with mask described by the mask features.\n\n Parameters\n ----------\n image\n Image to be explained.\n segments\n Superpixels.\n mask_features\n List with superpixels present in mask.\n 
scale\n            Pixel scale for masked image.\n\n        Returns\n        -------\n        masked_image\n            Image overlaid with mask.\n        \"\"\"\n\n        mask = np.zeros(segments.shape)\n        for f in mask_features:\n            mask[segments == f] = 1\n        image = scale_image(image, scale=scale)\n        masked_image = (image * np.expand_dims(mask, 2)).astype(int)\n\n        return masked_image\n\n    def _transform_predictor(self, predictor: Callable) -> Callable:\n        # check if predictor returns predicted class or prediction probabilities for each class\n        # if needed adjust predictor so it returns the predicted class\n        x = np.zeros((1,) + self.image_shape, dtype=self.dtype)\n        try:\n            prediction = predictor(x)\n        except Exception as e:\n            msg = f\"Predictor failed to be called on {type(x)} of shape {x.shape} and dtype {x.dtype}. \" \\\n                  f\"Check that the parameter `image_shape` is correctly specified.\"\n            raise AlibiPredictorCallException(msg) from e\n\n        if not isinstance(prediction, np.ndarray):\n            msg = f\"Expected predictor return type to be {np.ndarray} but got {type(prediction)}.\"\n            raise AlibiPredictorReturnTypeError(msg)\n\n        if np.argmax(prediction.shape) == 0:\n            return predictor\n        else:\n            transformer = ArgmaxTransformer(predictor)\n            return transformer\n\n    def reset_predictor(self, predictor: Callable) -> None:\n        \"\"\"\n        Resets the predictor function.\n\n        Parameters\n        ----------\n        predictor\n            New predictor function.\n        \"\"\"\n        self.predictor = self._transform_predictor(predictor)\n"
] | [
[
"numpy.zeros",
"numpy.random.seed",
"numpy.random.choice",
"numpy.repeat",
"numpy.argmax",
"numpy.logical_not",
"numpy.expand_dims",
"numpy.array",
"numpy.where",
"numpy.unique",
"numpy.mean"
]
] |
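A minimal usage sketch for the `AnchorImage` explainer quoted above, assuming the class is importable; the constructor and `explain` arguments follow the signatures shown in the source, while `predict_fn`, the image shape, and the segmentation kwargs are illustrative assumptions, not part of the original file.

import numpy as np

def predict_fn(x: np.ndarray) -> np.ndarray:
    # stand-in predictor (hypothetical): fake class probabilities for N inputs
    return np.random.rand(x.shape[0], 10)

explainer = AnchorImage(
    predictor=predict_fn,
    image_shape=(224, 224, 3),
    segmentation_fn='slic',
    segmentation_kwargs={'n_segments': 15, 'compactness': 20, 'sigma': 0.5},
    seed=0,
)
image = np.random.rand(224, 224, 3).astype(np.float32)
explanation = explainer.explain(image, threshold=0.95, p_sample=0.5)
print(explanation.data['precision'], explanation.data['coverage'])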
kpe/tensor2tensor | [
"453c473030c354a3d9a4c27b12bcec8942334bf4"
] | [
"tensor2tensor/models/research/moe.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Mixture-of-experts code.\n\nInterfaces and algorithms are under development and subject to rapid change\nwithout notice.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport mesh_tensorflow as mtf\nimport tensorflow as tf\n\n\ndef transformer_moe_layer_v1(inputs, output_dim, hparams, train,\n master_dtype=tf.bfloat16,\n slice_dtype=tf.float32):\n \"\"\"Local mixture of experts that works well on TPU.\n\n Adapted from the paper https://arxiv.org/abs/1701.06538\n\n Note: until the algorithm and inferface solidify, we pass in a hyperparameters\n dictionary in order not to complicate the interface in mtf_transformer.py .\n Once this code moves out of \"research\", we should pass the hyperparameters\n separately.\n\n Hyperparameters used:\n hparams.moe_num_experts: number of experts\n hparams.moe_hidden_size: size of hidden layer in each expert\n hparams.moe_group_size: size of each \"group\" for gating purposes\n hparams.moe_capacity_factor_train: a float\n hparams.moe_capacity_factor_eval: a float\n hparams.moe_gating: a string\n + all hyperparmeters used by _top_2_gating()\n\n The number of parameters in the gating network is:\n (input_dim.size * hparams.num_experts) +\n\n The number of parameters in the experts themselves is:\n (hparams.num_experts\n * (input_dim.size + output_dim.size)\n * hparams.moe_hidden_size)\n\n The input is n-dimensional: [<batch_and_length_dims>, input_dim], consisting\n of the representations of all positions in a batch of sequences.\n\n Each position of each sequence is sent to 0-2 experts. The expert\n choices and the combination weights are determined by a learned gating\n function.\n\n This function returns a small auxiliary loss that should be added to the\n training loss of the model. This loss helps to balance expert usage.\n Without the loss, it is very likely that a few experts will be trained and\n the rest will starve.\n\n Several hacks are necessary to get around current TPU limitations:\n\n - To ensure static shapes, we enforce (by truncation/padding)\n that each sequence send the same number of elements to each expert.\n\n It would make more sense to enforce this equality over the entire batch,\n but due to our hacked-up gather-by-matmul implementation, we need to divide\n the batch into \"groups\". For each group, the same number of elements\n are sent to each expert.\n\n TODO(noam): Factor this code better. 
We want to be able to substitute\n different code for the experts themselves.\n\n Args:\n inputs: a mtf.Tensor with shape [<batch_dims...>, length_dim, input_dim]\n output_dim: a mtf.Dimension (for Transformer, this is input_dim)\n hparams: model hyperparameters\n train: a boolean\n master_dtype: a tf.dtype\n slice_dtype: a tf.dtype\n\n Returns:\n outputs: a Tensor with shape [<batch_dims...>, length_dim, output_dim]\n loss: a mtf scalar\n\n Raises:\n ValueError: on unrecognized hparams.moe_gating\n \"\"\"\n orig_inputs = inputs\n input_dim = inputs.shape.dims[-1]\n hidden_dim = mtf.Dimension(\"expert_hidden\", hparams.moe_hidden_size)\n experts_dim = mtf.Dimension(\"experts\", hparams.moe_num_experts)\n group_size_dim = mtf.Dimension(\"group\", hparams.moe_group_size)\n batch_dim = mtf.Dimension(\n orig_inputs.shape[0].name,\n orig_inputs.shape.size // (group_size_dim.size * input_dim.size))\n inputs = mtf.reshape(inputs, [batch_dim, group_size_dim, input_dim])\n\n # Each sequence sends expert_capacity positions to each expert.\n capacity_factor = (\n hparams.moe_capacity_factor_train if train else\n hparams.moe_capacity_factor_eval)\n expert_capacity = min(\n group_size_dim.size,\n int((group_size_dim.size * capacity_factor) / experts_dim.size))\n expert_capacity_dim = mtf.Dimension(\"expert_capacity\", expert_capacity)\n\n experts_dim_unsplit = mtf.Dimension(\"expert_unsplit\", experts_dim.size)\n batch_dim_unsplit = mtf.Dimension(\"batch_unsplit\", batch_dim.size)\n\n if hparams.moe_gating == \"top_2\":\n dispatch_tensor, combine_tensor, loss = _top_2_gating(\n inputs=inputs,\n outer_expert_dims=None,\n experts_dim=experts_dim_unsplit,\n expert_capacity_dim=expert_capacity_dim,\n hparams=hparams,\n train=train)\n else:\n raise ValueError(\"unknown hparams.moe_gating=%s\" % hparams.moe_gating)\n\n # put num_experts dimension first to make split easier in alltoall\n expert_inputs = mtf.einsum([inputs, dispatch_tensor], mtf.Shape(\n [experts_dim_unsplit, batch_dim, expert_capacity_dim, input_dim]))\n\n expert_inputs = mtf.reshape(expert_inputs, mtf.Shape(\n [experts_dim, batch_dim_unsplit, expert_capacity_dim, input_dim]))\n\n # Now feed the expert inputs through the experts.\n h = mtf.layers.dense(\n expert_inputs, hidden_dim, expert_dims=[experts_dim],\n activation=mtf.relu, use_bias=False, master_dtype=master_dtype,\n slice_dtype=slice_dtype, name=\"x0\")\n expert_output = mtf.layers.dense(\n h, output_dim, expert_dims=[experts_dim], use_bias=False,\n master_dtype=master_dtype, slice_dtype=slice_dtype, name=\"x1\")\n\n expert_output = mtf.reshape(expert_output, mtf.Shape(\n [experts_dim_unsplit, batch_dim, expert_capacity_dim, input_dim]))\n\n output = mtf.einsum([expert_output, combine_tensor], mtf.Shape(\n [batch_dim, group_size_dim, output_dim]))\n\n output = mtf.reshape(output, orig_inputs.shape.dims[:-1] + [output_dim])\n\n return output, loss * hparams.moe_loss_coef\n\n\ndef transformer_moe_layer_v2(inputs, output_dim, hparams, train,\n master_dtype=tf.bfloat16, slice_dtype=tf.float32):\n \"\"\"2-level mixture of experts.\n\n Adapted from the paper https://arxiv.org/abs/1701.06538\n\n Note: until the algorithm and inferface solidify, we pass in a hyperparameters\n dictionary in order not to complicate the interface in mtf_transformer.py .\n Once this code moves out of \"research\", we should pass the hyperparameters\n separately.\n\n Hyperparameters used:\n hparams.moe_num_experts: number of experts\n hparams.moe_hidden_size: size of hidden layer in each expert\n 
hparams.moe_group_size: size of each \"group\" for gating purposes\n hparams.moe_capacity_factor_train: a float\n hparams.moe_capacity_factor_eval: a float\n hparams.moe_capacity_factor_second_level: a float\n hparams.moe_gating: a string\n + all hyperparmeters used by _top_2_gating()\n\n One set of params for experts in first level and different of hparams\n per expert in the second level.\n The number of parameters in the gating network is:\n (input_dim.size * (hparams.num_experts) +\n (moe_hidden_size * hparams.num_experts) * hparams.num_experts\n\n\n The number of parameters in the experts themselves is:\n (hparams.num_experts\n * (input_dim.size + output_dim.size)\n * hparams.moe_hidden_size)\n\n The input is n-dimensional: [<batch_and_length_dims>, input_dim], consisting\n of the representations of all positions in a batch of sequences.\n\n Each position of each sequence is sent to 0-3 experts. The expert\n choices and the combination weights are determined by a learned gating\n function.\n\n This function returns a small auxiliary loss that should be added to the\n training loss of the model. This loss helps to balance expert usage.\n Without the loss, it is very likely that a few experts will be trained and\n the rest will starve.\n\n Several hacks are necessary to get around current TPU limitations:\n\n - To ensure static shapes, we enforce (by truncation/padding)\n that each sequence send the same number of elements to each expert.\n\n It would make more sense to enforce this equality over the entire batch,\n but due to our hacked-up gather-by-matmul implementation, we need to divide\n the batch into \"groups\". For each group, the same number of elements\n are sent to each expert.\n\n TODO(noam): Factor this code better. We want to be able to substitute\n different code for the experts themselves.\n\n Dimensions cheat sheet:\n a, b: batch size\n l: original sequence length\n m: input depth\n n: output depth\n g, h: number of groups\n s, t: group size\n x, y: number of experts\n c, d: expert capacity\n\n input: [a0, b1, l, m]\n input: [a0, g1, s, m]\n dispatch_tensor_x: [a0, g1, s, x, c]\n expert_input: [a0, g1, x, c, m]\n alltoall: [a0, g, x1, c, m]\n alltoall: [a0, g, x1, c, m]\n transpose: [x1, a0, g, c, m]\n reshape: [x1, h0, s, m]\n assignment2: [x1, h0, t, y, d]\n expert_input2: [x1, h0, y, d, m]\n alltoall: [x1, h, y0, d, m]\n ...\n reverse of that\n\n gating params 0: [m, x]\n gating params 1: [x1, m, y]\n\n expert params:\n [x1, y0, m, hidden]\n [x1, y0, hidden, n]\n\n Args:\n inputs: a mtf.Tensor with shape [a, b, l, m]\n output_dim: a mtf.Dimension (for Transformer, this is input_dim)\n hparams: model hyperparameters\n train: a boolean\n master_dtype: a tf.dtype\n slice_dtype: a tf.dtype\n\n Returns:\n outputs: a Tensor with shape [a, b, l, n]\n loss: a mtf scalar\n\n Raises:\n ValueError: on unrecognized hparams.moe_gating\n \"\"\"\n insert_outer_batch_dim = (len(inputs.shape.dims) == 3)\n if insert_outer_batch_dim:\n inputs = mtf.reshape(\n inputs, [mtf.Dimension(\"outer_batch\", 1)] + inputs.shape.dims)\n\n assert len(hparams.moe_num_experts) == 2\n a0, b1, l, m = inputs.shape.dims\n hidden_dim = mtf.Dimension(\"expert_hidden\", hparams.moe_hidden_size)\n x1 = mtf.Dimension(\"expert_x\", hparams.moe_num_experts[0])\n y0 = mtf.Dimension(\"expert_y\", hparams.moe_num_experts[1])\n x = mtf.Dimension(\"expert_x_unsplit\", hparams.moe_num_experts[0])\n y = mtf.Dimension(\"expert_y_unsplit\", hparams.moe_num_experts[1])\n n = output_dim\n\n # We \"cheat\" here and look at 
the mesh shape and layout. This is to ensure\n # that the number of groups (g.size) is a multiple of the mesh dimension\n # over which those groups are split.\n num_groups, group_size = _split_into_groups(\n b1.size * l.size, hparams.moe_group_size,\n mtf.tensor_dim_to_mesh_dim_size(hparams.layout, hparams.mesh_shape, b1))\n g1 = mtf.Dimension(b1.name, num_groups)\n g = mtf.Dimension(b1.name + \"_unsplit\", g1.size)\n s = mtf.Dimension(\"group_size_x\", group_size)\n\n # Each sequence sends (at most?) expert_capacity positions to each expert.\n # Static expert_capacity dimension is needed for expert batch sizes\n capacity_factor = (\n hparams.moe_capacity_factor_train if train else\n hparams.moe_capacity_factor_eval)\n expert_capacity = min(s.size, int((s.size * capacity_factor) / x.size))\n expert_capacity = max(expert_capacity, 4)\n c = mtf.Dimension(\"expert_capacity_x\", expert_capacity)\n\n # We \"cheat\" here and look at the mesh shape and layout. This is to ensure\n # that the number of groups (h.size) is a multiple of the mesh dimension\n # over which those groups are split.\n num_groups, group_size = _split_into_groups(\n a0.size * g.size * c.size,\n hparams.moe_group_size,\n mtf.tensor_dim_to_mesh_dim_size(hparams.layout, hparams.mesh_shape, a0))\n t = mtf.Dimension(\"group_size_y\", group_size)\n h0 = mtf.Dimension(a0.name, num_groups)\n h = mtf.Dimension(a0.name + \"_unsplit\", h0.size)\n\n expert_capacity = min(\n t.size,\n int((t.size * hparams.moe_capacity_factor_second_level) / y.size))\n expert_capacity = max(expert_capacity, 4)\n d = mtf.Dimension(\"expert_capacity_y\", expert_capacity)\n\n # First level of expert routing\n # Reshape the inner batch size to a multiple of group_dim g1 and\n # group_size_dim s.\n inputs = mtf.reshape(inputs, [a0, g1, s, m])\n\n # Get the assignments for the first level.\n # dispatch_tensor_x has shape [a0, g1, s, x, c]\n if hparams.moe_gating == \"top_2\":\n dispatch_tensor_x, combine_tensor_x, loss_outer = _top_2_gating(\n inputs=inputs,\n outer_expert_dims=None,\n experts_dim=x,\n expert_capacity_dim=c,\n hparams=hparams,\n train=train)\n else:\n raise ValueError(\"unknown hparams.moe_gating=%s\" % hparams.moe_gating)\n\n # Now create expert_inputs based on the assignments.\n # put num_experts dimension first to make split easier in alltoall\n expert_inputs_x = mtf.einsum([inputs, dispatch_tensor_x], [x, a0, g1, c, m])\n\n # we construct an \"importance\" Tensor for the inputs to the second-level\n # gating. The importance of an input is 1.0 if it represents the\n # first-choice expert-group and 0.5 if it represents the second-choice expert\n # group. This is used by the second-level gating.\n importance = mtf.reduce_sum(combine_tensor_x, output_shape=[x, a0, g1, c])\n importance = 0.5 * (\n mtf.to_float(mtf.greater(importance, 0.5)) +\n mtf.to_float(mtf.greater(importance, 0.0)))\n\n # First level, all to all. 
Here we change the split dimension from g1 to x1.\n expert_inputs_x = mtf.reshape(expert_inputs_x, mtf.Shape(\n [x1, a0, g, c, m]))\n importance = mtf.reshape(importance, [x1, a0, g, c])\n\n # Second level of expert routing\n # Reshape the expert_inputs outer batch dim to be a multiple of group_dim h0\n # and group_size_dim t.\n inputs_y = mtf.reshape(expert_inputs_x, [x1, h0, t, m])\n importance = mtf.reshape(importance, [x1, h0, t])\n\n # Get the assignments for the second level.\n # dispatch_tensor_y has shape [x1, h0, t, y, d]\n if hparams.moe_gating == \"top_2\":\n dispatch_tensor_y, combine_tensor_y, loss_inner = _top_2_gating(\n inputs=inputs_y,\n outer_expert_dims=[x1],\n experts_dim=y,\n expert_capacity_dim=d,\n hparams=hparams,\n train=train,\n importance=importance)\n else:\n raise ValueError(\"unknown hparams.moe_gating=%s\" % hparams.moe_gating)\n\n # Now create expert_inputs based on the assignments.\n # put num_experts dimension first to make split easier in alltoall\n expert_inputs_y = mtf.einsum([inputs_y, dispatch_tensor_y], [y, x1, h0, d, m])\n\n # Second level, all to all. Here we change the split dimension from h0 to y0.\n expert_inputs_y = mtf.reshape(expert_inputs_y, mtf.Shape(\n [y0, x1, h, d, m]))\n\n hidden_output = mtf.layers.dense(\n expert_inputs_y, hidden_dim, expert_dims=[y0, x1],\n activation=mtf.relu, use_bias=False, master_dtype=master_dtype,\n slice_dtype=slice_dtype, name=\"expert0\")\n expert_output = mtf.layers.dense(\n hidden_output, output_dim, expert_dims=[y0, x1],\n use_bias=False, master_dtype=master_dtype, slice_dtype=slice_dtype,\n name=\"expert1\")\n\n # NOW COMBINE EXPERT OUTPUTS (reversing everything we have done)\n # expert_output has shape [y0, x1, h, d, n]\n\n # alltoall\n expert_output = mtf.reshape(expert_output, mtf.Shape(\n [y, x1, h0, d, n]))\n\n # combine results from inner level\n output_y = mtf.einsum([expert_output, combine_tensor_y], [x1, h0, t, n])\n\n # Reshape the combined tensor from inner level to now contain outer_batch_dim\n # a0 and group_dim g\n output = mtf.reshape(output_y, [x1, a0, g, c, n])\n\n # alltoall from expert_dim x to group_dim g1\n expert_output_x = mtf.reshape(output, mtf.Shape([x, a0, g1, c, n]))\n\n # combine results from outer level\n output_x = mtf.einsum([expert_output_x, combine_tensor_x], [a0, g1, s, n])\n\n # Reshape the combined tensor to now contain inner_batch_dim\n # b1 and the original sequence length\n output = mtf.reshape(output_x, [a0, b1, l, n])\n if insert_outer_batch_dim:\n output = mtf.reshape(output, [b1, l, n])\n return output, (loss_outer + loss_inner) * hparams.moe_loss_coef\n\n\ndef _top_2_gating(\n inputs, outer_expert_dims, experts_dim, expert_capacity_dim,\n hparams, train, importance=None):\n \"\"\"Compute gating for mixture-of-experts in TensorFlow.\n\n Note: until the algorithm and inferface solidify, we pass in a hyperparameters\n dictionary in order not to complicate the interface in mtf_transformer.py .\n Once this code moves out of \"research\", we should pass the hyperparameters\n separately.\n\n Hyperparameters used:\n hparams.moe_use_second_place_loss: a boolean\n hparams.moe_second_policy_train: a string\n hparams.moe_second_policy_eval: a string\n hparams.moe_second_threshold: a float\n\n The returned forward assignment is a tensor used to map (via einsum) from the\n inputs to the expert_inputs. Likewise, the returned combine_tensor is\n used to map (via einsum) from the expert outputs to the outputs. Both the\n forward and backward assignments are mostly zeros. 
The shapes of the tensors\n are as follows.\n\n inputs: [<batch_dims>, group_size_dim, input_dim]\n importance: [<batch_dims>, group_size_dim]\n dispatch_tensor:\n [<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]\n expert_inputs:\n [<batch_dims>, experts_dim, expert_capacity_dim, input_dim]\n\n expert_outputs: [<batch_dims>, experts_dim, expert_capacity_dim, output_dim]\n combine_tensor:\n [<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]\n outputs: [<batch_dims>, group_size_dim, output_dim]\n\n \"importance\" is an optional tensor with one floating-point value for each\n input vector. If the importance of an input is 1.0, then we send it to\n up to 2 experts. If 0.0 < importance < 1.0, then we send it to at most\n one expert. If importance == 0.0, then we send it to no experts.\n\n We use \"importance\" at the second-level gating function of a hierarchical\n mixture of experts. Inputs to the first-choice expert-group get importance\n 1.0. Inputs to the second-choice expert group get importance 0.5.\n Inputs that represent padding get importance 0.0.\n\n Args:\n inputs: a mtf.Tensor with shape [<batch_dims>, group_size_dim, input_dim]\n outer_expert_dims: an optional list of dimensions. This is for the case\n where we are at an inner level of a hierarchical MoE.\n experts_dim: a Dimension (the number of experts)\n expert_capacity_dim: a Dimension (number of examples per group per expert)\n hparams: model hyperparameters.\n train: a boolean\n importance: an optional tensor with shape [<batch_dims>, group_size_dim]\n\n Returns:\n dispatch_tensor: a Tensor with shape\n [<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]\n combine_tensor: a Tensor with shape\n [<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]\n loss: a mtf scalar\n\n Raises:\n ValueError: on illegal hyperparameters\n \"\"\"\n group_size_dim, unused_input_dim = inputs.shape.dims[-2:]\n\n raw_gates = mtf.softmax(mtf.layers.dense(\n inputs, experts_dim, use_bias=False,\n expert_dims=outer_expert_dims), experts_dim)\n\n # The internals of this function run in float32.\n # bfloat16 seems to reduce quality.\n raw_gates = mtf.to_float(raw_gates)\n\n expert_capacity_f = float(expert_capacity_dim.size)\n\n # FIND TOP 2 EXPERTS PER POSITON\n # Find the top expert for each position. 
shape=[batch, group]\n index_1, gate_1 = mtf.top_1(raw_gates, experts_dim)\n # [batch, group, experts]\n mask_1 = mtf.one_hot(index_1, experts_dim, dtype=raw_gates.dtype)\n density_1_proxy = raw_gates\n if importance is not None:\n mask_1 *= mtf.to_float(mtf.equal(importance, 1.0))\n gate_1 *= mtf.to_float(mtf.equal(importance, 1.0))\n density_1_proxy *= mtf.to_float(mtf.equal(importance, 1.0))\n gates_without_top_1 = raw_gates * (1.0 - mask_1)\n # [batch, group]\n index_2, gate_2 = mtf.top_1(gates_without_top_1, experts_dim)\n # [batch, group, experts]\n mask_2 = mtf.one_hot(index_2, experts_dim, dtype=raw_gates.dtype)\n if importance is not None:\n mask_2 *= mtf.to_float(mtf.greater(importance, 0.0))\n\n denom = gate_1 + gate_2 + 1e-9\n gate_1 /= denom\n gate_2 /= denom\n\n # BALANCING LOSSES\n # shape = [batch, experts]\n # We want to equalize the fraction of the batch assigned to each expert\n density_1 = mtf.reduce_mean(mask_1, reduced_dim=group_size_dim)\n # Something continuous that is correlated with what we want to equalize.\n density_1_proxy = mtf.reduce_mean(density_1_proxy, reduced_dim=group_size_dim)\n density_1 = mtf.Print(\n density_1, [mtf.reduce_mean(density_1, output_shape=[experts_dim])],\n \"density_1\", summarize=1000)\n loss = (mtf.reduce_mean(density_1_proxy * density_1)\n * float(experts_dim.size * experts_dim.size))\n\n if hparams.moe_use_second_place_loss:\n # Also add a loss to encourage all experts to be used equally also as the\n # second-place expert. Experimentally, this seems to be a wash.\n # We want to equalize the fraction of the batch assigned to each expert:\n density_2 = mtf.reduce_mean(mask_2, reduced_dim=group_size_dim)\n # As a proxy for density_2, we renormalize the raw gates after the top one\n # has been removed.\n normalized = gates_without_top_1 / (\n mtf.reduce_sum(gates_without_top_1, reduced_dim=experts_dim) + 1e-9)\n density_2_proxy = mtf.reduce_mean(normalized, reduced_dim=group_size_dim)\n loss_2 = (mtf.reduce_mean(density_2_proxy * density_2)\n * float(experts_dim.size * experts_dim.size))\n loss += loss_2 * 0.5\n\n # Depending on the policy in the hparams, we may drop out some of the\n # second-place experts.\n policy = (\n hparams.moe_second_policy_train if train else\n hparams.moe_second_policy_eval)\n threshold = (\n hparams.moe_second_threshold_train if train else\n hparams.moe_second_threshold_eval)\n if policy == \"all\":\n # Use second-place experts for all examples.\n pass\n elif policy == \"none\":\n # Never use second-place experts for all examples.\n mask_2 = mtf.zeros_like(mask_2)\n elif policy == \"threshold\":\n # Use second-place experts if gate_2 > threshold.\n mask_2 *= mtf.to_float(mtf.greater(gate_2, threshold))\n elif policy == \"random\":\n # Use second-place experts with probablity min(1.0, gate_2 / threshold).\n mask_2 *= mtf.to_float(\n mtf.less(mtf.random_uniform(gate_2.mesh, gate_2.shape),\n gate_2 / max(threshold, 1e-9)))\n else:\n raise ValueError(\"Unknown policy %s\" % policy)\n mask_2 = mtf.Print(\n mask_2, [mtf.reduce_mean(mask_2, output_shape=[experts_dim])],\n \"density_2\", summarize=1000)\n\n # COMPUTE ASSIGNMENT TO EXPERTS\n # [batch, group, experts]\n # This is the position within the expert's mini-batch for this sequence\n position_in_expert_1 = mtf.cumsum(\n mask_1, group_size_dim, exclusive=True) * mask_1\n # Remove the elements that don't fit. 
[batch, group, experts]\n mask_1 *= mtf.to_float(mtf.less(position_in_expert_1, expert_capacity_f))\n # [batch, experts]\n # How many examples in this sequence go to this expert\n mask_1_count = mtf.reduce_sum(mask_1, reduced_dim=group_size_dim)\n # [batch, group] - mostly ones, but zeros where something didn't fit\n mask_1_flat = mtf.reduce_sum(mask_1, reduced_dim=experts_dim)\n # [batch, group]\n position_in_expert_1 = mtf.reduce_sum(\n position_in_expert_1, reduced_dim=experts_dim)\n # Weight assigned to first expert. [batch, group]\n gate_1 *= mask_1_flat\n\n # [batch, group, experts]\n position_in_expert_2 = (\n mtf.cumsum(mask_2, group_size_dim, exclusive=True) + mask_1_count)\n position_in_expert_2 *= mask_2\n mask_2 *= mtf.to_float(mtf.less(position_in_expert_2, expert_capacity_f))\n # mask_2_count = mtf.reduce_sum(mask_2, reduced_dim=experts_dim)\n mask_2_flat = mtf.reduce_sum(mask_2, reduced_dim=experts_dim)\n gate_2 *= mask_2_flat\n position_in_expert_2 = mtf.reduce_sum(\n position_in_expert_2, reduced_dim=experts_dim)\n\n # [batch, group, experts, expert_capacity]\n combine_tensor = (\n gate_1 * mask_1_flat\n * mtf.one_hot(index_1, experts_dim)\n * mtf.one_hot(mtf.to_int32(position_in_expert_1), expert_capacity_dim) +\n gate_2 * mask_2_flat\n * mtf.one_hot(index_2, experts_dim)\n * mtf.one_hot(mtf.to_int32(position_in_expert_2), expert_capacity_dim))\n\n combine_tensor = mtf.cast(combine_tensor, inputs.dtype)\n loss = mtf.cast(loss, inputs.dtype)\n\n dispatch_tensor = mtf.cast(\n mtf.cast(combine_tensor, tf.bool), combine_tensor.dtype)\n\n return dispatch_tensor, combine_tensor, loss\n\n\ndef set_default_moe_hparams(hparams):\n \"\"\"Add necessary hyperparameters for mixture-of-experts.\"\"\"\n hparams.moe_num_experts = 16\n hparams.moe_loss_coef = 1e-2\n hparams.add_hparam(\"moe_gating\", \"top_2\")\n # Experts have fixed capacity per batch. 
We need some extra capacity\n # in case gating is not perfectly balanced.\n # moe_capacity_factor_* should be set to a value >=1.\n hparams.add_hparam(\"moe_capacity_factor_train\", 1.25)\n hparams.add_hparam(\"moe_capacity_factor_eval\", 2.0)\n hparams.add_hparam(\"moe_capacity_factor_second_level\", 1.0)\n # Each expert has a hidden layer with this size.\n hparams.add_hparam(\"moe_hidden_size\", 4096)\n # For gating, divide inputs into groups of this size before gating.\n # Each group sends the same number of inputs to each expert.\n # Ideally, the group size would be the whole batch, but this is expensive\n # due to our use of matrix multiplication for reordering.\n hparams.add_hparam(\"moe_group_size\", 1024)\n # For top_2 gating, whether to impose an additional loss in order to make\n # the experts equally used as the second-place expert.\n hparams.add_hparam(\"moe_use_second_place_loss\", 0)\n # In top_2 gating, policy for whether to use a second-place expert.\n # Legal values are:\n # \"all\": always\n # \"none\": never\n # \"threshold\": if gate value > the given threshold\n # \"random\": if gate value > threshold*random_uniform(0,1)\n hparams.add_hparam(\"moe_second_policy_train\", \"random\")\n hparams.add_hparam(\"moe_second_policy_eval\", \"random\")\n hparams.add_hparam(\"moe_second_threshold_train\", 0.2)\n hparams.add_hparam(\"moe_second_threshold_eval\", 0.2)\n\n\ndef _split_into_groups(n, max_group_size, mesh_dim_size):\n \"\"\"Helper function for figuring out how to split a dimensino into groups.\n\n We have a dimension with size n and we want to split it into\n two dimensions: n = num_groups * group_size\n\n group_size should be the largest possible value meeting the constraints:\n group_size <= max_group_size\n (num_groups = n/group_size) is a multiple of mesh_dim_size\n\n Args:\n n: an integer\n max_group_size: an integer\n mesh_dim_size: an integer\n\n Returns:\n num_groups: an integer\n group_size: an integer\n\n Raises:\n ValueError: if n is not a multiple of mesh_dim_size\n \"\"\"\n if n % mesh_dim_size != 0:\n raise ValueError(\n \"n=%d is not a multiple of mesh_dim_size=%d\" % (n, mesh_dim_size))\n num_groups = max(1, n // max_group_size)\n while (num_groups % mesh_dim_size != 0 or n % num_groups != 0):\n num_groups += 1\n group_size = n // num_groups\n tf.logging.info(\n \"_split_into_groups(n=%d, max_group_size=%d, mesh_dim_size=%d)\"\n \" = (num_groups=%d group_size=%d)\" %\n (n, max_group_size, mesh_dim_size, num_groups, group_size))\n return num_groups, group_size\n"
] | [
[
"tensorflow.logging.info"
]
] |
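The `_top_2_gating` routine above is the heart of both MoE layers. A plain NumPy sketch of the same top-2-with-capacity scheme may help orientation; it handles a single group, omits the mesh/TPU machinery, the second-place policies, and the auxiliary balancing loss, and all names are ours.

import numpy as np

def top2_gating(gates: np.ndarray, expert_capacity: int):
    """gates: [group_size, num_experts] softmax outputs; returns masks and weights."""
    group_size, num_experts = gates.shape
    index_1 = gates.argmax(axis=-1)                # top expert per position
    mask_1 = np.eye(num_experts)[index_1]          # one-hot [group, experts]
    gates_wo_top1 = gates * (1.0 - mask_1)
    index_2 = gates_wo_top1.argmax(axis=-1)        # second-choice expert
    mask_2 = np.eye(num_experts)[index_2]
    # position of each token in its expert's buffer (exclusive cumulative sum)
    pos_1 = (np.cumsum(mask_1, axis=0) - mask_1) * mask_1
    mask_1 = mask_1 * (pos_1 < expert_capacity)    # drop tokens that overflow
    count_1 = mask_1.sum(axis=0)                   # tokens kept per expert
    pos_2 = (np.cumsum(mask_2, axis=0) - mask_2 + count_1) * mask_2
    mask_2 = mask_2 * (pos_2 < expert_capacity)
    # renormalized combination weights, as in the mtf version
    gate_1 = (gates * mask_1).sum(axis=-1)
    gate_2 = (gates * mask_2).sum(axis=-1)
    denom = gate_1 + gate_2 + 1e-9
    return mask_1, mask_2, gate_1 / denom, gate_2 / denom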
Kotwic4/SCOTR | [
"6afabedb672641a9777d8aa9d7b75f998e53c0c9"
] | [
"generator/mnistGenerator.py"
] | [
"import random\n\nfrom sklearn.datasets import fetch_mldata\n\nfrom util import open_file_in_directory\n\nMNIST_DIR = './tmp/mnist'\nMNIST_TRAIN_DIR = './mnist/train'\nMNIST_TEST_DIR = './mnist/test'\nMNIST_SAMPLE_DIR = './mnist/sample'\nTEST_CASES = 60000\n\n\ndef mnist_img_to_file(mnist_img, file):\n for x in range(28):\n for y in range(28):\n file.write(str(mnist_img[x * 28 + y]) + \" \")\n file.write('\\n')\n\n\ndef generate_samples(data, labels, directory='.', filename='results.txt', sampleNumber=100):\n result = open_file_in_directory(directory, filename)\n for i in range(sampleNumber):\n index = random.randrange(data.shape[0])\n label = labels[index]\n img = data[index]\n img_filename = str(index) + \".txt\"\n line = img_filename + ' ' + str(label) + '\\n'\n result.write(line)\n file = open_file_in_directory(directory, img_filename)\n mnist_img_to_file(img, file)\n file.close()\n result.close()\n\n\ndef generate_test_file(data, labels, directory='.', filename='results.txt'):\n result = open_file_in_directory(directory, filename)\n result.write(str(data.shape[0]) + '\\n')\n indexes = [i for i in range(data.shape[0])]\n random.shuffle(indexes)\n for i in indexes:\n label = labels[i]\n img = data[i]\n line = str(label) + '\\n'\n result.write(line)\n mnist_img_to_file(img, result)\n result.close()\n\n\ndef generate_test_data(data, labels):\n test_data = data[TEST_CASES:]\n test_labels = labels[TEST_CASES:]\n generate_test_file(test_data, test_labels, MNIST_TEST_DIR)\n\n\ndef generate_train_data(data, labels):\n train_data = data[:TEST_CASES]\n train_labels = labels[:TEST_CASES]\n generate_test_file(train_data, train_labels, MNIST_TRAIN_DIR)\n\n\ndef main():\n mnist = fetch_mldata('MNIST original', data_home=MNIST_DIR)\n labels = mnist.target.astype(int)\n data = mnist.data\n generate_train_data(data, labels)\n generate_test_data(data, labels)\n generate_samples(data, labels, MNIST_SAMPLE_DIR)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"sklearn.datasets.fetch_mldata"
]
] |
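`fetch_mldata`, used in `main()` above, was deprecated in scikit-learn 0.20 and removed in 0.22, so this script no longer runs on current scikit-learn. A drop-in sketch using `fetch_openml` instead; the dataset name maps to the same 70k-digit corpus, and the `version`/`as_frame` arguments are our assumptions about a reasonably recent scikit-learn.

from sklearn.datasets import fetch_openml

def load_mnist(data_home='./tmp/mnist'):
    # 'mnist_784' is the OpenML name of the same MNIST dataset
    mnist = fetch_openml('mnist_784', version=1, data_home=data_home, as_frame=False)
    labels = mnist.target.astype(int)   # OpenML returns string labels
    data = mnist.data.astype(int)       # pixel values in 0..255
    return data, labels

data, labels = load_mnist(MNIST_DIR)  # MNIST_DIR as defined in the script above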
SwagJ/SuperPoint | [
"ecbf1d6e809ea8c7c832078ad26d2a74ed2fae29"
] | [
"superpoint/models/simple_classifier.py"
] | [
"import tensorflow as tf\nfrom tensorflow import layers as tfl\n\nfrom .base_model import BaseModel, Mode\n\n\nclass SimpleClassifier(BaseModel):\n input_spec = {\n 'image': {'shape': [None, None, None, 1], 'type': tf.float32}\n }\n required_config_keys = []\n default_config = {'data_format': 'channels_first'}\n\n def _model(self, inputs, mode, **config):\n x = inputs['image']\n if config['data_format'] == 'channels_first':\n x = tf.transpose(x, [0, 3, 1, 2])\n\n params = {'padding': 'SAME', 'data_format': config['data_format']}\n\n x = tfl.conv2d(x, 32, 5, activation=tf.nn.relu, name='conv1', **params)\n x = tfl.max_pooling2d(x, 2, 2, name='pool1', **params)\n\n x = tfl.conv2d(x, 64, 5, activation=tf.nn.relu, name='conv2', **params)\n x = tfl.max_pooling2d(x, 2, 2, name='pool2', **params)\n\n x = tfl.flatten(x)\n x = tfl.dense(x, 1024, activation=tf.nn.relu, name='fc1')\n x = tfl.dense(x, 10, name='fc2')\n\n if mode == Mode.TRAIN:\n return {'logits': x}\n else:\n return {'logits': x, 'prob': tf.nn.softmax(x), 'pred': tf.argmax(x, axis=-1)}\n\n def _loss(self, outputs, inputs, **config):\n with tf.name_scope('loss'):\n loss = tf.reduce_mean(tf.compat.v1.losses.sparse_softmax_cross_entropy(\n labels=inputs['label'], logits=outputs['logits']))\n return loss\n\n def _metrics(self, outputs, inputs, **config):\n metrics = {}\n with tf.name_scope('metrics'):\n correct_count = tf.equal(outputs['pred'], inputs['label'])\n correct_count = tf.cast(correct_count, tf.float32)\n metrics['accuracy'] = tf.reduce_mean(correct_count)\n return metrics\n"
] | [
[
"tensorflow.layers.conv2d",
"tensorflow.equal",
"tensorflow.compat.v1.losses.sparse_softmax_cross_entropy",
"tensorflow.layers.flatten",
"tensorflow.reduce_mean",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.layers.max_pooling2d",
"tensorflow.name_scope",
"tensorflow.cast",
"tensorflow.argmax",
"tensorflow.layers.dense"
]
] |
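The `_model` above is written against the TF1-era `tf.layers` API. For orientation, a rough `tf.keras` equivalent of the same stack (channels-last only, so without the `data_format` transpose); layer names and sizes are copied from the code, everything else is our assumption.

import tensorflow as tf

def build_simple_classifier() -> tf.keras.Model:
    return tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, 5, padding='same', activation='relu', name='conv1'),
        tf.keras.layers.MaxPool2D(2, 2, padding='same', name='pool1'),
        tf.keras.layers.Conv2D(64, 5, padding='same', activation='relu', name='conv2'),
        tf.keras.layers.MaxPool2D(2, 2, padding='same', name='pool2'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(1024, activation='relu', name='fc1'),
        tf.keras.layers.Dense(10, name='fc2'),  # logits, as in the Mode.TRAIN branch above
    ])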
robertdstein/flarestack | [
"2ce7e67da336514f6f38f06126a1fbd82131e441"
] | [
"flarestack/core/astro.py"
] | [
"\"\"\"\nFunction taken from IceCube astro package.\n\"\"\"\nimport numpy as np\n\n\ndef angular_distance(lon1, lat1, lon2, lat2):\n \"\"\"\n calculate the angular distince along the great circle\n on the surface of a shpere between the points\n (`lon1`,`lat1`) and (`lon2`,`lat2`)\n This function Works for equatorial coordinates\n with right ascension as longitude and declination\n as latitude. This function uses the Vincenty formula\n for calculating the distance.\n Parameters\n ----------\n lon1 : array_like\n longitude of first point in radians\n lat1 : array_like\n latitude of the first point in radians\n lon2 : array_like\n longitude of second point in radians\n lat2 : array_like\n latitude of the second point in radians\n \"\"\"\n c1 = np.cos(lat1)\n c2 = np.cos(lat2)\n s1 = np.sin(lat1)\n s2 = np.sin(lat2)\n sd = np.sin(lon2 - lon1)\n cd = np.cos(lon2 - lon1)\n\n return np.arctan2(np.hypot(c2 * sd, c1 * s2 - s1 * c2 * cd), s1 * s2 + c1 * c2 * cd)\n"
] | [
[
"numpy.sin",
"numpy.cos",
"numpy.hypot"
]
] |
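A quick sanity check of `angular_distance` above: two points on the celestial equator separated by 90 degrees in right ascension should come out 90 degrees (pi/2 rad) apart.

import numpy as np

d = angular_distance(0.0, 0.0, np.pi / 2, 0.0)  # lon1, lat1, lon2, lat2 in radians
print(np.degrees(d))  # -> 90.0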
aypan17/value_learning | [
"240a67ecf99b178fe0c4ced2bfd1dd50453fbdfe"
] | [
"bgp/simglucose/controller/basal_bolus_ctrller.py"
] | [
"from .base import Controller\nfrom .base import Action\nimport numpy as np\nimport pandas as pd\nimport pkg_resources\nimport logging\nfrom collections import namedtuple\n\nlogger = logging.getLogger(__name__)\nCONTROL_QUEST = '/source/dir/simglucose/params/Quest.csv'\nPATIENT_PARA_FILE = '/source/dir/simglucose/params/vpatient_params.csv'\nParamTup = namedtuple('ParamTup', ['basal', 'cf', 'cr'])\n\nclass BBController(Controller):\n def __init__(self, target=140):\n self.quest = pd.read_csv(CONTROL_QUEST)\n self.patient_params = pd.read_csv(\n PATIENT_PARA_FILE)\n self.target = target\n\n def policy(self, observation, reward, done, **kwargs):\n sample_time = kwargs.get('sample_time', 1)\n pname = kwargs.get('patient_name')\n\n meal = kwargs.get('meal')\n\n action = self._bb_policy(\n pname,\n meal,\n observation.CGM,\n sample_time)\n return action\n\n def _bb_policy(self, name, meal, glucose, env_sample_time):\n if any(self.quest.Name.str.match(name)):\n q = self.quest[self.quest.Name.str.match(name)]\n params = self.patient_params[self.patient_params.Name.str.match(\n name)]\n u2ss = np.asscalar(params.u2ss.values)\n BW = np.asscalar(params.BW.values)\n else:\n q = pd.DataFrame([['Average', 13.5, 23.52, 50, 30]],\n columns=['Name', 'CR', 'CF', 'TDI', 'Age'])\n u2ss = 1.43\n BW = 57.0\n\n basal = u2ss * BW / 6000\n if meal > 0:\n logger.info('Calculating bolus ...')\n logger.debug('glucose = {}'.format(glucose))\n bolus = np.asscalar(meal / q.CR.values + (glucose > 150)\n * (glucose - self.target) / q.CF.values)\n else:\n bolus = 0\n\n bolus = bolus / env_sample_time\n action = Action(basal=basal, bolus=bolus)\n return action\n\n def reset(self):\n pass\n\n\nclass ManualBBController(Controller):\n def __init__(self, target, cr, cf, basal, sample_rate=5, use_cf=True, use_bol=True, cooldown=0,\n corrected=True, use_low_lim=False, low_lim=70):\n super().__init__(self)\n self.target = target\n self.orig_cr = self.cr = cr\n self.orig_cf = self.cf = cf\n self.orig_basal = self.basal = basal\n self.sample_rate = sample_rate\n self.use_cf = use_cf\n self.use_bol = use_bol\n self.cooldown = cooldown\n self.last_cf = np.inf\n self.corrected = corrected\n self.use_low_lim = low_lim\n self.low_lim = low_lim\n\n def increment(self, cr_incr=0, cf_incr=0, basal_incr=0):\n self.cr += cr_incr\n self.cf += cf_incr\n self.basal += basal_incr\n\n def policy(self, observation, reward, done, **kwargs):\n carbs = kwargs.get('carbs')\n glucose = kwargs.get('glucose')\n action = self.manual_bb_policy(carbs, glucose)\n return action\n\n def manual_bb_policy(self, carbs, glucose, log=False):\n if carbs > 0:\n if self.corrected:\n carb_correct = carbs / self.cr\n else:\n # assuming carbs are already multiplied by sampling rate\n carb_correct = (carbs/self.sample_rate) / self.cr # TODO: not sure about this\n hyper_correct = (glucose > self.target) * (glucose - self.target) / self.cf\n hypo_correct = (glucose < self.low_lim) * (self.low_lim - glucose) / self.cf\n bolus = 0\n if self.use_low_lim:\n bolus -= hypo_correct\n if self.use_cf:\n if self.last_cf > self.cooldown and hyper_correct > 0:\n bolus += hyper_correct\n self.last_cf = 0\n if self.use_bol:\n bolus += carb_correct\n bolus = bolus / self.sample_rate\n else:\n bolus = 0\n carb_correct = 0\n hyper_correct = 0\n hypo_correct = 0\n self.last_cf += self.sample_rate\n if log:\n return Action(basal=self.basal, bolus=bolus), hyper_correct, hypo_correct, carb_correct\n else:\n return Action(basal=self.basal, bolus=bolus)\n\n def get_params(self):\n return 
ParamTup(basal=self.basal, cf=self.cf, cr=self.cr)\n\n def adjust(self, basal_adj, cr_adj):\n self.basal += self.orig_basal * basal_adj\n self.cr += self.orig_cr * cr_adj\n\n def reset(self):\n self.cr = self.orig_cr\n self.cf = self.orig_cf\n self.basal = self.orig_basal\n self.last_cf = np.inf\n\nclass MyController(Controller):\n def __init__(self, init_state):\n self.init_state = init_state\n self.state = init_state\n\n def policy(self, observation, reward, done, **info):\n '''\n Every controller must have this implementation!\n ----\n Inputs:\n observation - a namedtuple defined in simglucose.simulation.env. For\n now, it only has one entry: blood glucose level measured\n by CGM sensor.\n reward - current reward returned by environment\n done - True, game over. False, game continues\n info - additional information as key word arguments,\n simglucose.simulation.env.T1DSimEnv returns patient_name\n and sample_time\n ----\n Output:\n action - a namedtuple defined at the beginning of this file. The\n controller action contains two entries: basal, bolus\n '''\n self.state = observation\n action = Action(basal=0, bolus=0)\n return action\n\n def reset(self):\n '''\n Reset the controller state to inital state, must be implemented\n '''\n self.state = self.init_state\n"
] | [
[
"pandas.read_csv",
"numpy.asscalar",
"pandas.DataFrame"
]
] |
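Worked numbers for `ManualBBController.manual_bb_policy` above (all parameter values made up for illustration). Note one apparent typo in the quoted `__init__`: `self.use_low_lim = low_lim` assigns the limit itself rather than the `use_low_lim` flag, so with the default `low_lim=70` the hypoglycemia correction is effectively always enabled.

# made-up parameters; sample_rate divides the bolus exactly as in the code above
ctrl = ManualBBController(target=140, cr=10.0, cf=40.0, basal=0.01,
                          sample_rate=5, use_cf=True, use_bol=True)
# 50 g carbs at glucose 180 mg/dL:
#   carb_correct  = 50 / 10          = 5.0 U
#   hyper_correct = (180 - 140) / 40 = 1.0 U
#   bolus         = (5.0 + 1.0) / 5  = 1.2 U per sample step
action = ctrl.manual_bb_policy(carbs=50, glucose=180)
print(action.basal, action.bolus)  # -> 0.01 1.2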
ai-di/Brancher | [
"01d51137b0e6fc81512994c21cc3a19287353767"
] | [
"tests/test_autoregressive.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom brancher.variables import RootVariable, RandomVariable, ProbabilisticModel\nfrom brancher.standard_variables import NormalVariable, LogNormalVariable, BetaVariable\nfrom brancher import inference\nimport brancher.functions as BF\n\n# Probabilistic model #\nT = 100\n\nnu = LogNormalVariable(0.3, 1., 'nu')\nx0 = NormalVariable(0., 1., 'x0')\nb = BetaVariable(0.5, 1.5, 'b')\n\nx = [x0]\nnames = [\"x0\"]\nfor t in range(1,T):\n names.append(\"x{}\".format(t))\n x.append(NormalVariable(b * x[t - 1], nu, names[t]))\nAR_model = ProbabilisticModel(x)\n\n# Generate data #\ndata = AR_model._get_sample(number_samples=1)\ntime_series = [float(data[xt].cpu().detach().numpy()) for xt in x]\ntrue_b = data[b].cpu().detach().numpy()\ntrue_nu = data[nu].cpu().detach().numpy()\nprint(\"The true coefficient is: {}\".format(float(true_b)))\n\n# Observe data #\n[xt.observe(data[xt][:, 0, :]) for xt in x]\n\n# Variational distribution #\nQnu = LogNormalVariable(0.5, 1., \"nu\", learnable=True)\nQb = BetaVariable(0.5, 0.5, \"b\", learnable=True)\nvariational_posterior = ProbabilisticModel([Qb, Qnu])\nAR_model.set_posterior_model(variational_posterior)\n\n# Inference #\ninference.perform_inference(AR_model,\n number_iterations=200,\n number_samples=300,\n optimizer='Adam',\n lr=0.05)\nloss_list = AR_model.diagnostics[\"loss curve\"]\n\n\n# Statistics\nposterior_samples = AR_model._get_posterior_sample(2000)\nnu_posterior_samples = posterior_samples[nu].cpu().detach().numpy().flatten()\nb_posterior_samples = posterior_samples[b].cpu().detach().numpy().flatten()\nb_mean = np.mean(b_posterior_samples)\nb_sd = np.sqrt(np.var(b_posterior_samples))\nprint(\"The estimated coefficient is: {} +- {}\".format(b_mean, b_sd))\n\n# Two subplots, unpack the axes array immediately\nf, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)\nax1.plot(time_series)\nax1.set_title(\"Time series\")\nax2.plot(np.array(loss_list))\nax2.set_title(\"Convergence\")\nax2.set_xlabel(\"Iteration\")\nax3.hist(b_posterior_samples, 25)\nax3.axvline(x=true_b, lw=2, c=\"r\")\nax3.set_title(\"Posterior samples (b)\")\nax3.set_xlim(0,1)\nax4.hist(nu_posterior_samples, 25)\nax4.axvline(x=true_nu, lw=2, c=\"r\")\nax4.set_title(\"Posterior samples (nu)\")\nplt.show()"
] | [
[
"numpy.var",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.mean"
]
] |
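The probabilistic model in this test is a plain AR(1) process, x_t ~ N(b * x_{t-1}, nu). For reference, the same generative process in bare NumPy; the coefficients here are illustrative, not the sampled `true_b`/`true_nu` from the script.

import numpy as np

def simulate_ar1(T: int = 100, b: float = 0.7, nu: float = 1.0, seed: int = 0) -> np.ndarray:
    rng = np.random.default_rng(seed)
    x = np.zeros(T)
    x[0] = rng.normal(0.0, 1.0)              # x0 ~ N(0, 1), as in the test
    for t in range(1, T):
        x[t] = rng.normal(b * x[t - 1], nu)  # x_t ~ N(b * x_{t-1}, nu)
    return x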
AyazSaiyed/hub | [
"597c5726fd72d17f562bffec25e114115dadcac5"
] | [
"tensorflow_hub/tools/module_search/search.py"
] | [
"# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tool to rank modules to use in a downstream classification task.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\n\nimport pandas as pd\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_hub.tools.module_search import utils\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"dataset\", None,\n \"Specification of a dataset. E.g. use `cifar10#1000` to \"\n \"perform search using 1000 examples from tfds `cifar10` \"\n \"dataset.\")\n\nflags.DEFINE_multi_string(\"module\", None, \"Module to consider in the search\")\n\nflags.DEFINE_string(\"module_list\", None,\n \"Path to text file with a module per line to be considered in the search.\"\n \"Empty lines and lines starting with # are ignored\")\n\n\ndef load_data(data_spec):\n return utils.load_data(**data_spec)\n\n\ndef load_raw_features(data_spec):\n data = load_data(data_spec=data_spec)\n return data.map(lambda x: tf.image.resize(x[\"image\"], (224, 224)))\n\n\ndef load_labels(data_spec):\n data = load_data(data_spec=data_spec)\n return np.array([x for x in data.map(lambda x: x[\"label\"])])\n\n\ndef compute_embeddings(module_spec, data_spec):\n raw_features = load_raw_features(data_spec=data_spec)\n embedding_fn = utils.load_embedding_fn(\n module=module_spec)\n outputs = []\n for batch in raw_features.batch(10):\n outputs.extend(embedding_fn(batch))\n return np.array(outputs)\n\n\ndef compute_score(module_spec, data_spec):\n embeddings = compute_embeddings(module_spec=module_spec,\n data_spec=data_spec)\n distances = utils.compute_distance_matrix_loo(embeddings)\n labels = load_labels(data_spec=data_spec)\n error_rate = utils.knn_errorrate_loo(distances, labels, k=1)\n return np.array(error_rate)\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n if not FLAGS.dataset:\n raise app.UsageError(\"--dataset is a required argument.\")\n\n module_list = []\n if FLAGS.module:\n module_list.extend(FLAGS.module)\n\n if FLAGS.module_list:\n with tf.io.gfile.GFile(FLAGS.module_list) as f:\n lines = f.read().split(\"\\n\")\n module_list.extend([l for l in lines if l != \"\" and not l.startswith(\"#\")])\n\n ds_sections = FLAGS.dataset.split(\"#\")\n dataset = ds_sections[0]\n train_examples = int(ds_sections[1]) if len(ds_sections) != 0 else None\n data_spec = {\n \"dataset\": dataset,\n \"split\": \"train\",\n \"num_examples\": train_examples,\n }\n\n results = []\n for module in module_list:\n results.append((\n module, data_spec,\n compute_score(module_spec=module, data_spec=data_spec)))\n\n df = pd.DataFrame(results, columns=[\"module\", \"data\", \"1nn\"])\n df = df.filter([\"module\", \"1nn\"])\n df.sort_values([\"1nn\"])\n df.reset_index(drop=True)\n 
df.set_index(\"module\")\n\n with pd.option_context(\n \"display.max_rows\", None,\n \"display.max_columns\", None,\n \"display.precision\", 3,\n \"max_colwidth\", -1, # Don't truncate columns (e.g. module name).\n \"display.expand_frame_repr\", False, # Don't wrap output.\n ):\n print(\"# Module ranking for %s\" % data_spec)\n print(df)\n\n\nif __name__ == '__main__':\n app.run(main)\n"
] | [
[
"tensorflow.compat.v2.io.gfile.GFile",
"pandas.DataFrame",
"tensorflow.compat.v2.image.resize",
"numpy.array",
"pandas.option_context"
]
] |
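Two small fragilities in `main()` above are worth flagging: `str.split("#")` always returns at least one element, so the check `len(ds_sections) != 0` is always true and a bare `--dataset=cifar10` would raise an `IndexError`; and the `df.sort_values(...)`/`df.reset_index(...)` results are discarded, because `pandas` returns new frames rather than mutating in place. A corrected parsing sketch (the helper name is ours):

def parse_dataset_spec(spec: str) -> dict:
    # "cifar10#1000" -> 1000 examples of tfds cifar10; "cifar10" -> all examples
    name, _, num = spec.partition("#")
    return {
        "dataset": name,
        "split": "train",
        "num_examples": int(num) if num else None,
    }

# and the pandas chain should reassign, e.g.:
# df = df.sort_values(["1nn"]).reset_index(drop=True).set_index("module")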
tbose20/D-Ref | [
"eda6170a72838b89637df241dd5619e001f3afdb"
] | [
"captum/captum/_utils/gradient.py"
] | [
"#!/usr/bin/env python3\nimport threading\nimport typing\nimport warnings\nfrom collections import defaultdict\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast\n\nimport torch\nfrom captum._utils.common import (\n _reduce_list,\n _run_forward,\n _sort_key_list,\n _verify_select_neuron,\n)\nfrom captum._utils.typing import (\n Literal,\n ModuleOrModuleList,\n TargetType,\n TensorOrTupleOfTensorsGeneric,\n)\nfrom torch import Tensor, device\nfrom torch.nn import Module\n\n\ndef apply_gradient_requirements(\n inputs: Tuple[Tensor, ...], warn: bool = True\n) -> List[bool]:\n \"\"\"\n Iterates through tuple on input tensors and sets requires_grad to be true on\n each Tensor, and ensures all grads are set to zero. To ensure that the input\n is returned to its initial state, a list of flags representing whether or not\n a tensor originally required grad is returned.\n \"\"\"\n assert isinstance(\n inputs, tuple\n ), \"Inputs should be wrapped in a tuple prior to preparing for gradients\"\n grad_required = []\n for index, input in enumerate(inputs):\n assert isinstance(input, torch.Tensor), \"Given input is not a torch.Tensor\"\n grad_required.append(input.requires_grad)\n inputs_dtype = input.dtype\n # Note: torch 1.2 doesn't support is_complex for dtype that's why we check\n # on the existance of is_complex method.\n if not inputs_dtype.is_floating_point and not (\n hasattr(inputs_dtype, \"is_complex\") and inputs_dtype.is_complex\n ):\n if warn:\n warnings.warn(\n \"\"\"Input Tensor %d has a dtype of %s.\n Gradients cannot be activated\n for these data types.\"\"\"\n % (index, str(inputs_dtype))\n )\n elif not input.requires_grad:\n if warn:\n warnings.warn(\n \"Input Tensor %d did not already require gradients, \"\n \"required_grads has been set automatically.\" % index\n )\n input.requires_grad_()\n return grad_required\n\n\ndef undo_gradient_requirements(\n inputs: Tuple[Tensor, ...], grad_required: List[bool]\n) -> None:\n \"\"\"\n Iterates through list of tensors, zeros each gradient, and sets required\n grad to false if the corresponding index in grad_required is False.\n This method is used to undo the effects of prepare_gradient_inputs, making\n grads not required for any input tensor that did not initially require\n gradients.\n \"\"\"\n\n assert isinstance(\n inputs, tuple\n ), \"Inputs should be wrapped in a tuple prior to preparing for gradients.\"\n assert len(inputs) == len(\n grad_required\n ), \"Input tuple length should match gradient mask.\"\n for index, input in enumerate(inputs):\n assert isinstance(input, torch.Tensor), \"Given input is not a torch.Tensor\"\n if not grad_required[index]:\n input.requires_grad_(False)\n\n\ndef compute_gradients(\n forward_fn: Callable,\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n target_ind: TargetType = None,\n additional_forward_args: Any = None,\n) -> Tuple[Tensor, ...]:\n r\"\"\"\n Computes gradients of the output with respect to inputs for an\n arbitrary forward function.\n\n Args:\n\n forward_fn: forward function. This can be for example model's\n forward function.\n input: Input at which gradients are evaluated,\n will be passed to forward_fn.\n target_ind: Index of the target class for which gradients\n must be computed (classification only).\n additional_forward_args: Additional input arguments that forward\n function requires. 
It takes an empty tuple (no additional\n arguments) if no additional arguments are required\n \"\"\"\n with torch.autograd.set_grad_enabled(True):\n # runs forward pass\n outputs = _run_forward(forward_fn, inputs, target_ind, additional_forward_args)\n assert outputs[0].numel() == 1, (\n \"Target not provided when necessary, cannot\"\n \" take gradient with respect to multiple outputs.\"\n )\n # torch.unbind(forward_out) is a list of scalar tensor tuples and\n # contains batch_size * #steps elements\n grads = torch.autograd.grad(torch.unbind(outputs), inputs,create_graph=True, retain_graph=True) #create_graph True, allow_unused is added TB\n return grads\n\n\ndef _neuron_gradients(\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n saved_layer: Dict[device, Tuple[Tensor, ...]],\n key_list: List[device],\n gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],\n) -> Tuple[Tensor, ...]:\n with torch.autograd.set_grad_enabled(True):\n gradient_tensors = []\n for key in key_list:\n current_out_tensor = _verify_select_neuron(\n saved_layer[key], gradient_neuron_selector\n )\n gradient_tensors.append(\n torch.autograd.grad(\n torch.unbind(current_out_tensor)\n if current_out_tensor.numel() > 1\n else current_out_tensor,\n inputs,\n )\n )\n _total_gradients = _reduce_list(gradient_tensors, sum)\n return _total_gradients\n\n\[email protected]\ndef _forward_layer_eval(\n forward_fn: Callable,\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n layer: Module,\n additional_forward_args: Any = None,\n device_ids: Union[None, List[int]] = None,\n attribute_to_layer_input: bool = False,\n grad_enabled: bool = False,\n) -> Tuple[Tensor, ...]:\n ...\n\n\[email protected]\ndef _forward_layer_eval(\n forward_fn: Callable,\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n layer: List[Module],\n additional_forward_args: Any = None,\n device_ids: Union[None, List[int]] = None,\n attribute_to_layer_input: bool = False,\n grad_enabled: bool = False,\n) -> List[Tuple[Tensor, ...]]:\n ...\n\n\ndef _forward_layer_eval(\n forward_fn: Callable,\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n layer: ModuleOrModuleList,\n additional_forward_args: Any = None,\n device_ids: Union[None, List[int]] = None,\n attribute_to_layer_input: bool = False,\n grad_enabled: bool = False,\n) -> Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]:\n return _forward_layer_eval_with_neuron_grads(\n forward_fn,\n inputs,\n layer,\n additional_forward_args=additional_forward_args,\n gradient_neuron_selector=None,\n grad_enabled=grad_enabled,\n device_ids=device_ids,\n attribute_to_layer_input=attribute_to_layer_input,\n )\n\n\[email protected]\ndef _forward_layer_distributed_eval(\n forward_fn: Callable,\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n layer: ModuleOrModuleList,\n target_ind: TargetType = None,\n additional_forward_args: Any = None,\n attribute_to_layer_input: bool = False,\n forward_hook_with_return: Literal[False] = False,\n require_layer_grads: bool = False,\n) -> Dict[Module, Dict[device, Tuple[Tensor, ...]]]:\n ...\n\n\[email protected]\ndef _forward_layer_distributed_eval(\n forward_fn: Callable,\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n layer: ModuleOrModuleList,\n target_ind: TargetType = None,\n additional_forward_args: Any = None,\n attribute_to_layer_input: bool = False,\n *,\n forward_hook_with_return: Literal[True],\n require_layer_grads: bool = False,\n) -> Tuple[Dict[Module, Dict[device, Tuple[Tensor, ...]]], Tensor]:\n ...\n\n\ndef _forward_layer_distributed_eval(\n forward_fn: Callable,\n 
inputs: Union[Tensor, Tuple[Tensor, ...]],\n layer: ModuleOrModuleList,\n target_ind: TargetType = None,\n additional_forward_args: Any = None,\n attribute_to_layer_input: bool = False,\n forward_hook_with_return: bool = False,\n require_layer_grads: bool = False,\n) -> Union[\n Tuple[Dict[Module, Dict[device, Tuple[Tensor, ...]]], Tensor],\n Dict[Module, Dict[device, Tuple[Tensor, ...]]],\n]:\n r\"\"\"\n A helper function that allows to set a hook on model's `layer`, run the forward\n pass and returns intermediate layer results, stored in a dictionary,\n and optionally also the output of the forward function. The keys in the\n dictionary are the device ids and the values are corresponding intermediate layer\n results, either the inputs or the outputs of the layer depending on whether we set\n `attribute_to_layer_input` to True or False.\n This is especially useful when we execute forward pass in a distributed setting,\n using `DataParallel`s for example.\n \"\"\"\n saved_layer: Dict[Module, Dict[device, Tuple[Tensor, ...]]] = defaultdict(dict)\n lock = threading.Lock()\n all_layers: List[Module] = [layer] if isinstance(layer, Module) else layer\n\n # Set a forward hook on specified module and run forward pass to\n # get layer output tensor(s).\n # For DataParallel models, each partition adds entry to dictionary\n # with key as device and value as corresponding Tensor.\n def hook_wrapper(original_module):\n def forward_hook(module, inp, out=None):\n eval_tsrs = inp if attribute_to_layer_input else out\n is_eval_tuple = isinstance(eval_tsrs, tuple)\n\n if not is_eval_tuple:\n eval_tsrs = (eval_tsrs,)\n if require_layer_grads:\n apply_gradient_requirements(eval_tsrs, warn=False)\n with lock:\n nonlocal saved_layer\n # Note that cloning behaviour of `eval_tsr` is different\n # when `forward_hook_with_return` is set to True. This is because\n # otherwise `backward()` on the last output layer won't execute.\n if forward_hook_with_return:\n saved_layer[original_module][eval_tsrs[0].device] = eval_tsrs\n eval_tsrs_to_return = tuple(\n eval_tsr.clone() for eval_tsr in eval_tsrs\n )\n if not is_eval_tuple:\n eval_tsrs_to_return = eval_tsrs_to_return[0]\n return eval_tsrs_to_return\n else:\n saved_layer[original_module][eval_tsrs[0].device] = tuple(\n eval_tsr.clone() for eval_tsr in eval_tsrs\n )\n\n return forward_hook\n\n all_hooks = []\n try:\n for single_layer in all_layers:\n if attribute_to_layer_input:\n all_hooks.append(\n single_layer.register_forward_pre_hook(hook_wrapper(single_layer))\n )\n else:\n all_hooks.append(\n single_layer.register_forward_hook(hook_wrapper(single_layer))\n )\n output = _run_forward(\n forward_fn,\n inputs,\n target=target_ind,\n additional_forward_args=additional_forward_args,\n )\n finally:\n for hook in all_hooks:\n hook.remove()\n\n if len(saved_layer) == 0:\n raise AssertionError(\"Forward hook did not obtain any outputs for given layer\")\n\n if forward_hook_with_return:\n return saved_layer, output\n return saved_layer\n\n\ndef _gather_distributed_tensors(\n saved_layer: Dict[device, Tuple[Tensor, ...]],\n device_ids: Union[None, List[int]] = None,\n key_list: Union[None, List[device]] = None,\n) -> Tuple[Tensor, ...]:\n r\"\"\"\n A helper function to concatenate intermediate layer results stored on\n different devices in `saved_layer`. 
`saved_layer` is a dictionary that\n contains `device_id` as a key and intermediate layer results (either\n the input or the output of the layer) stored on the device corresponding to\n the key.\n `key_list` is a list of devices in appropriate ordering for concatenation\n and if not provided, keys are sorted based on device ids.\n\n If only one key exists (standard model), key list simply has one element.\n \"\"\"\n if key_list is None:\n key_list = _sort_key_list(list(saved_layer.keys()), device_ids)\n return _reduce_list([saved_layer[device_id] for device_id in key_list])\n\n\ndef _extract_device_ids(\n forward_fn: Callable,\n saved_layer: Dict[Module, Dict[device, Tuple[Tensor, ...]]],\n device_ids: Union[None, List[int]],\n) -> Union[None, List[int]]:\n r\"\"\"\n A helper function to extract device_ids from `forward_function` in case it is\n provided as part of a `DataParallel` model or if is accessible from\n `forward_fn`.\n In case input device_ids is not None, this function returns that value.\n \"\"\"\n # Multiple devices / keys implies a DataParallel model, so we look for\n # device IDs if given or available from forward function\n # (DataParallel model object).\n if (\n max(len(saved_layer[single_layer]) for single_layer in saved_layer) > 1\n and device_ids is None\n ):\n if (\n hasattr(forward_fn, \"device_ids\")\n and cast(Any, forward_fn).device_ids is not None\n ):\n device_ids = cast(Any, forward_fn).device_ids\n else:\n raise AssertionError(\n \"Layer tensors are saved on multiple devices, however unable to access\"\n \" device ID list from the `forward_fn`. Device ID list must be\"\n \" accessible from `forward_fn`. For example, they can be retrieved\"\n \" if `forward_fn` is a model of type `DataParallel`. It is used\"\n \" for identifying device batch ordering.\"\n )\n return device_ids\n\n\[email protected]\ndef _forward_layer_eval_with_neuron_grads(\n forward_fn: Callable,\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n layer: Module,\n additional_forward_args: Any = None,\n *,\n gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],\n grad_enabled: bool = False,\n device_ids: Union[None, List[int]] = None,\n attribute_to_layer_input: bool = False,\n) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:\n ...\n\n\[email protected]\ndef _forward_layer_eval_with_neuron_grads(\n forward_fn: Callable,\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n layer: Module,\n additional_forward_args: Any = None,\n gradient_neuron_selector: None = None,\n grad_enabled: bool = False,\n device_ids: Union[None, List[int]] = None,\n attribute_to_layer_input: bool = False,\n) -> Tuple[Tensor, ...]:\n ...\n\n\[email protected]\ndef _forward_layer_eval_with_neuron_grads(\n forward_fn: Callable,\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n layer: List[Module],\n additional_forward_args: Any = None,\n gradient_neuron_selector: None = None,\n grad_enabled: bool = False,\n device_ids: Union[None, List[int]] = None,\n attribute_to_layer_input: bool = False,\n) -> List[Tuple[Tensor, ...]]:\n ...\n\n\ndef _forward_layer_eval_with_neuron_grads(\n forward_fn: Callable,\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n layer: ModuleOrModuleList,\n additional_forward_args: Any = None,\n gradient_neuron_selector: Union[\n None, int, Tuple[Union[int, slice], ...], Callable\n ] = None,\n grad_enabled: bool = False,\n device_ids: Union[None, List[int]] = None,\n attribute_to_layer_input: bool = False,\n) -> Union[\n Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]],\n Tuple[Tensor, ...],\n 
List[Tuple[Tensor, ...]],\n]:\n \"\"\"\n This method computes forward evaluation for a particular layer using a\n forward hook. If a gradient_neuron_selector is provided, then gradients with\n respect to that neuron in the layer output are also returned.\n\n These functionalities are combined due to the behavior of DataParallel models\n with hooks, in which hooks are executed once per device. We need to internally\n combine the separated tensors from devices by concatenating based on device_ids.\n Any necessary gradients must be taken with respect to each independent batched\n tensor, so the gradients are computed and combined appropriately.\n\n More information regarding the behavior of forward hooks with DataParallel models\n can be found in the PyTorch data parallel documentation. We maintain the separate\n evals in a dictionary protected by a lock, analogous to the gather implementation\n for the core PyTorch DataParallel implementation.\n \"\"\"\n grad_enabled = True if gradient_neuron_selector is not None else grad_enabled\n\n with torch.autograd.set_grad_enabled(grad_enabled):\n saved_layer = _forward_layer_distributed_eval(\n forward_fn,\n inputs,\n layer,\n additional_forward_args=additional_forward_args,\n attribute_to_layer_input=attribute_to_layer_input,\n )\n device_ids = _extract_device_ids(forward_fn, saved_layer, device_ids)\n # Identifies correct device ordering based on device ids.\n # key_list is a list of devices in appropriate ordering for concatenation.\n # If only one key exists (standard model), key list simply has one element.\n key_list = _sort_key_list(list(next(iter(saved_layer.values())).keys()), device_ids)\n if gradient_neuron_selector is not None:\n assert isinstance(\n layer, Module\n ), \"Cannot compute neuron gradients for multiple layers simultaneously!\"\n inp_grads = _neuron_gradients(\n inputs, saved_layer[layer], key_list, gradient_neuron_selector\n )\n return (\n _gather_distributed_tensors(saved_layer[layer], key_list=key_list),\n inp_grads,\n )\n else:\n if isinstance(layer, Module):\n return _gather_distributed_tensors(saved_layer[layer], key_list=key_list)\n else:\n return [\n _gather_distributed_tensors(saved_layer[curr_layer], key_list=key_list)\n for curr_layer in layer\n ]\n\n\[email protected]\ndef compute_layer_gradients_and_eval(\n forward_fn: Callable,\n layer: Module,\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n target_ind: TargetType = None,\n additional_forward_args: Any = None,\n *,\n gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],\n device_ids: Union[None, List[int]] = None,\n attribute_to_layer_input: bool = False,\n output_fn: Union[None, Callable] = None,\n) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...], Tuple[Tensor, ...]]:\n ...\n\n\[email protected]\ndef compute_layer_gradients_and_eval(\n forward_fn: Callable,\n layer: List[Module],\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n target_ind: TargetType = None,\n additional_forward_args: Any = None,\n gradient_neuron_selector: None = None,\n device_ids: Union[None, List[int]] = None,\n attribute_to_layer_input: bool = False,\n output_fn: Union[None, Callable] = None,\n) -> Tuple[List[Tuple[Tensor, ...]], List[Tuple[Tensor, ...]]]:\n ...\n\n\[email protected]\ndef compute_layer_gradients_and_eval(\n forward_fn: Callable,\n layer: Module,\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n target_ind: TargetType = None,\n additional_forward_args: Any = None,\n gradient_neuron_selector: None = None,\n device_ids: Union[None, List[int]] = None,\n 
attribute_to_layer_input: bool = False,\n output_fn: Union[None, Callable] = None,\n) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:\n ...\n\n\ndef compute_layer_gradients_and_eval(\n forward_fn: Callable,\n layer: ModuleOrModuleList,\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n target_ind: TargetType = None,\n additional_forward_args: Any = None,\n gradient_neuron_selector: Union[\n None, int, Tuple[Union[int, slice], ...], Callable\n ] = None,\n device_ids: Union[None, List[int]] = None,\n attribute_to_layer_input: bool = False,\n output_fn: Union[None, Callable] = None,\n) -> Union[\n Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]],\n Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...], Tuple[Tensor, ...]],\n Tuple[List[Tuple[Tensor, ...]], List[Tuple[Tensor, ...]]],\n]:\n r\"\"\"\n Computes gradients of the output with respect to a given layer as well\n as the output evaluation of the layer for an arbitrary forward function\n and given input.\n\n For data parallel models, hooks are executed once per device ,so we\n need to internally combine the separated tensors from devices by\n concatenating based on device_ids. Any necessary gradients must be taken\n with respect to each independent batched tensor, so the gradients are\n computed and combined appropriately.\n\n More information regarding the behavior of forward hooks with DataParallel\n models can be found in the PyTorch data parallel documentation. We maintain\n the separate inputs in a dictionary protected by a lock, analogous to the\n gather implementation for the core PyTorch DataParallel implementation.\n\n NOTE: To properly handle inplace operations, a clone of the layer output\n is stored. This structure inhibits execution of a backward hook on the last\n module for the layer output when computing the gradient with respect to\n the input, since we store an intermediate clone, as\n opposed to the true module output. If backward module hooks are necessary\n for the final module when computing input gradients, utilize\n _forward_layer_eval_with_neuron_grads instead.\n\n Args:\n\n forward_fn: forward function. 
This can be for example model's\n forward function.\n layer: Layer for which gradients / output will be evaluated.\n inputs: Input at which gradients are evaluated,\n will be passed to forward_fn.\n target_ind: Index of the target class for which gradients\n must be computed (classification only).\n output_fn: An optional function that is applied to the layer inputs or\n outputs depending whether the `attribute_to_layer_input` is\n set to `True` or `False`\n args: Additional input arguments that forward function requires.\n It takes an empty tuple (no additional arguments) if no\n additional arguments are required\n\n\n Returns:\n 2-element tuple of **gradients**, **evals**:\n - **gradients**:\n Gradients of output with respect to target layer output.\n - **evals**:\n Target layer output for given input.\n \"\"\"\n with torch.autograd.set_grad_enabled(True):\n # saved_layer is a dictionary mapping device to a tuple of\n # layer evaluations on that device.\n saved_layer, output = _forward_layer_distributed_eval(\n forward_fn,\n inputs,\n layer,\n target_ind=target_ind,\n additional_forward_args=additional_forward_args,\n attribute_to_layer_input=attribute_to_layer_input,\n forward_hook_with_return=True,\n require_layer_grads=True,\n )\n assert output[0].numel() == 1, (\n \"Target not provided when necessary, cannot\"\n \" take gradient with respect to multiple outputs.\"\n )\n\n device_ids = _extract_device_ids(forward_fn, saved_layer, device_ids)\n\n # Identifies correct device ordering based on device ids.\n # key_list is a list of devices in appropriate ordering for concatenation.\n # If only one key exists (standard model), key list simply has one element.\n key_list = _sort_key_list(\n list(next(iter(saved_layer.values())).keys()), device_ids\n )\n all_outputs: Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]\n if isinstance(layer, Module):\n all_outputs = _reduce_list(\n [\n saved_layer[layer][device_id]\n if output_fn is None\n else output_fn(saved_layer[layer][device_id])\n for device_id in key_list\n ]\n )\n else:\n all_outputs = [\n _reduce_list(\n [\n saved_layer[single_layer][device_id]\n if output_fn is None\n else output_fn(saved_layer[single_layer][device_id])\n for device_id in key_list\n ]\n )\n for single_layer in layer\n ]\n all_layers: List[Module] = [layer] if isinstance(layer, Module) else layer\n grad_inputs = tuple(\n layer_tensor\n for single_layer in all_layers\n for device_id in key_list\n for layer_tensor in saved_layer[single_layer][device_id]\n )\n saved_grads = torch.autograd.grad(torch.unbind(output), grad_inputs)\n\n offset = 0\n all_grads: List[Tuple[Tensor, ...]] = []\n for single_layer in all_layers:\n num_tensors = len(next(iter(saved_layer[single_layer].values())))\n curr_saved_grads = [\n saved_grads[i : i + num_tensors]\n for i in range(\n offset, offset + len(key_list) * num_tensors, num_tensors\n )\n ]\n offset += len(key_list) * num_tensors\n if output_fn is not None:\n curr_saved_grads = [\n output_fn(curr_saved_grad) for curr_saved_grad in curr_saved_grads\n ]\n\n all_grads.append(_reduce_list(curr_saved_grads))\n\n layer_grads: Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]\n layer_grads = all_grads\n if isinstance(layer, Module):\n layer_grads = all_grads[0]\n\n if gradient_neuron_selector is not None:\n assert isinstance(\n layer, Module\n ), \"Cannot compute neuron gradients for multiple layers simultaneously!\"\n inp_grads = _neuron_gradients(\n inputs, saved_layer[layer], key_list, gradient_neuron_selector\n )\n return (\n 
cast(Tuple[Tensor, ...], layer_grads),\n cast(Tuple[Tensor, ...], all_outputs),\n inp_grads,\n )\n return layer_grads, all_outputs # type: ignore\n\n\ndef construct_neuron_grad_fn(\n layer: Module,\n neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],\n device_ids: Union[None, List[int]] = None,\n attribute_to_neuron_input: bool = False,\n) -> Callable:\n def grad_fn(\n forward_fn: Callable,\n inputs: TensorOrTupleOfTensorsGeneric,\n target_ind: TargetType = None,\n additional_forward_args: Any = None,\n ) -> Tuple[Tensor, ...]:\n _, grads = _forward_layer_eval_with_neuron_grads(\n forward_fn,\n inputs,\n layer,\n additional_forward_args,\n gradient_neuron_selector=neuron_selector,\n device_ids=device_ids,\n attribute_to_layer_input=attribute_to_neuron_input,\n )\n return grads\n\n return grad_fn\n\n\ndef _compute_jacobian_wrt_params(\n model: Module,\n inputs: Union[Tuple[Tensor], Tensor],\n labels: Optional[Tensor] = None,\n loss_fn: Optional[Union[Module, Callable]] = None,\n) -> Tuple[Tensor, ...]:\n r\"\"\"\n Computes the Jacobian of a batch of test examples given a model, and optional\n loss function and target labels. This method is equivalent to calculating the\n gradient for every individual example in the minibatch.\n\n Args:\n model (torch.nn.Module): The trainable model providing the forward pass\n inputs (Tensor): The minibatch for which the forward pass is computed.\n The dimensions of input are (N, *) where N is the batch_size.\n The input must have a batch dimension, even if batch_size = 1.\n labels (Tensor or None): Labels for input if computing a loss function.\n loss_fn (torch.nn.Module or Callable or None): The loss function. If a library\n defined loss function is provided, it would be expected to be a\n torch.nn.Module. If a custom loss is provided, it can be either type,\n but must behave as a library loss function would if `reduction='none'`.\n\n Returns:\n grads (Tuple of Tensor): Returns the Jacobian for the minibatch as a\n tuple of gradients corresponding to the tuple of trainable parameters\n returned by `model.parameters()`. Each object grads[i] references to the\n gradients for the parameters in the i-th trainable layer of the model.\n Each grads[i] object is a tensor with the gradients for the `inputs`\n batch. For example, grads[i][j] would reference the gradients for the\n parameters of the i-th layer, for the j-th member of the minibatch.\n \"\"\"\n with torch.autograd.set_grad_enabled(True):\n out = model(inputs)\n assert out.dim() != 0, \"Please ensure model output has at least one dimension.\"\n\n if labels is not None and loss_fn is not None:\n loss = loss_fn(out, labels)\n if hasattr(loss_fn, \"reduction\"):\n msg0 = \"Please ensure loss_fn.reduction is set to `none`\"\n assert loss_fn.reduction == \"none\", msg0 # type: ignore\n else:\n msg1 = (\n \"Loss function is applying a reduction. 
Please ensure \"\n f\"Output shape: {out.shape} and Loss shape: {loss.shape} \"\n \"are matching.\"\n )\n assert loss.dim() != 0, msg1\n assert out.shape[0] == loss.shape[0], msg1\n out = loss\n\n grads_list = [\n torch.autograd.grad(\n outputs=out[i],\n inputs=model.parameters(), # type: ignore\n grad_outputs=torch.ones_like(out[i]),\n retain_graph=True,\n )\n for i in range(out.shape[0])\n ]\n\n grads = tuple([torch.stack(x) for x in zip(*grads_list)])\n\n return tuple(grads)\n\n\ndef _compute_jacobian_wrt_params_autograd_hacks(\n model: Module,\n inputs: Union[Tuple[Tensor], Tensor],\n labels: Optional[Tensor] = None,\n loss_fn: Optional[Module] = None,\n reduction_type: Optional[str] = \"sum\",\n) -> Tuple[Any, ...]:\n r\"\"\"\n NOT SUPPORTED FOR OPEN SOURCE. This method uses an internal 'hack` and is currently\n not supported.\n\n Computes the Jacobian of a batch of test examples given a model, and optional\n loss function and target labels. This method uses autograd_hacks to fully vectorize\n the Jacobian calculation. Currently, only linear and conv2d layers are supported.\n\n User must `add_hooks(model)` before calling this function.\n\n Args:\n model (torch.nn.Module): The trainable model providing the forward pass\n inputs (Tensor): The minibatch for which the forward pass is computed.\n The dimensions of input are (N, *) where N is the batch_size.\n The input must have a batch dimension, even if batch_size = 1.\n labels (Tensor or None): Labels for input if computing a loss function.\n loss_fn (torch.nn.Module or Callable or None): The loss function. If a library\n defined loss function is provided, it would be expected to be a\n torch.nn.Module. If a custom loss is provided, it can be either type,\n but must behave as a library loss function would if `reduction='sum'` or\n `reduction='mean'`.\n reduction_type (str): The type of reduction applied. If a loss_fn is passed,\n this should match `loss_fn.reduction`. Else if gradients are being\n computed on direct model outputs (scores), then 'sum' should be used.\n Defaults to 'sum'.\n\n Returns:\n grads (Tuple of Tensor): Returns the Jacobian for the minibatch as a\n tuple of gradients corresponding to the tuple of trainable parameters\n returned by `model.parameters()`. Each object grads[i] references to the\n gradients for the parameters in the i-th trainable layer of the model.\n Each grads[i] object is a tensor with the gradients for the `inputs`\n batch. For example, grads[i][j] would reference the gradients for the\n parameters of the i-th layer, for the j-th member of the minibatch.\n \"\"\"\n from captum._utils.fb import autograd_hacks\n\n with torch.autograd.set_grad_enabled(True):\n autograd_hacks.add_hooks(model)\n\n out = model(inputs)\n assert out.dim() != 0, \"Please ensure model output has at least one dimension.\"\n\n if labels is not None and loss_fn is not None:\n loss = loss_fn(out, labels)\n if hasattr(loss_fn, \"reduction\"):\n msg0 = \"Please ensure loss_fn.reduction is set to `sum` or `mean`\"\n assert loss_fn.reduction != \"none\", msg0\n msg1 = (\n f\"loss_fn.reduction ({loss_fn.reduction}) does not match reduction \"\n f\"type ({reduction_type}). 
Please ensure they are matching.\"\n )\n assert loss_fn.reduction == reduction_type, msg1\n msg2 = (\n \"Please ensure custom loss function is applying either a \"\n \"sum or mean reduction.\"\n )\n assert out.shape != loss.shape, msg2\n\n if reduction_type != \"sum\" and reduction_type != \"mean\":\n raise ValueError(\n f\"{reduction_type} is not a valid value for reduction_type. \"\n \"Must be either 'sum' or 'mean'.\"\n )\n out = loss\n\n model.zero_grad()\n out.backward(gradient=torch.ones_like(out))\n autograd_hacks.compute_grad1(model, loss_type=reduction_type)\n\n grads = tuple(\n param.grad1 # type: ignore\n for param in model.parameters()\n if hasattr(param, \"grad1\")\n )\n\n autograd_hacks.clear_backprops(model)\n autograd_hacks.remove_hooks(model)\n\n return grads\n"
] | [
[
"torch.stack",
"torch.unbind",
"torch.autograd.set_grad_enabled",
"torch.ones_like"
]
] |
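Illustrative note (editorial, not part of the dataset row above): `_compute_jacobian_wrt_params` in this file obtains per-example parameter gradients by calling `torch.autograd.grad` once per batch element and stacking the results per parameter. A minimal, self-contained sketch of that pattern; the toy linear model and batch shapes are assumptions:

```python
import torch

model = torch.nn.Linear(4, 1)                 # stand-in for an arbitrary model
inputs = torch.randn(8, 4)                    # minibatch of 8 examples
out = model(inputs).squeeze(-1)               # one scalar output per example

# One grad call per example, retaining the graph between calls.
grads_list = [
    torch.autograd.grad(out[i], tuple(model.parameters()), retain_graph=True)
    for i in range(out.shape[0])
]
# Regroup so grads[k][j] is the gradient of example j w.r.t. parameter k.
grads = tuple(torch.stack(g) for g in zip(*grads_list))
print(grads[0].shape)                         # torch.Size([8, 1, 4]) for the weight
```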
BonnerLab/model-tools | [
"ac90617cd79bb70a308e34a1e834971498329fb0"
] | [
"model_tools/activations/hooks.py"
] | [
"from abc import ABC, abstractmethod\nimport logging\nimport os\nfrom typing import Optional, Union, Iterable, Dict\n\nimport h5py\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom tqdm import tqdm\n\nfrom brainio.stimuli import StimulusSet\nfrom model_tools.activations import ActivationsModel\nfrom model_tools.activations.core import flatten, change_dict\nfrom model_tools.utils import fullname, s3\nfrom model_tools.utils.pca import IncrementalPCAPytorch, PCAPytorch\nfrom result_caching import store_dict\n\nStimuli = Union[Iterable[str], StimulusSet, Iterable[os.PathLike]]\nBasePCA = Union[IncrementalPCAPytorch, PCAPytorch]\n\n\nclass LayerHookBase(ABC):\n\n def __init__(self, activations_extractor: ActivationsModel, identifier: Optional[str] = None):\n self._extractor = activations_extractor\n self.identifier = identifier\n self.handle = None\n\n def __call__(self, batch_activations: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:\n self.setup(batch_activations)\n return change_dict(batch_activations, self.layer_apply, keep_name=True,\n multithread=os.getenv('MT_MULTITHREAD', '1') == '1')\n\n @classmethod\n def hook(cls, activations_extractor: ActivationsModel, identifier: Optional[str] = None, **kwargs):\n hook = cls(activations_extractor=activations_extractor, identifier=identifier, **kwargs)\n assert not cls.is_hooked(activations_extractor), f\"{cls.__name__} is already hooked\"\n handle = activations_extractor.register_batch_activations_hook(hook)\n hook.handle = handle\n return handle\n\n @classmethod\n def is_hooked(cls, activations_extractor: ActivationsModel) -> bool:\n return any(isinstance(hook, cls) for hook in\n activations_extractor._extractor._batch_activations_hooks.values())\n\n def setup(self, batch_activations: Dict[str, np.ndarray]) -> None:\n pass\n\n @abstractmethod\n def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:\n pass\n\n\nclass LayerGlobalMaxPool2d(LayerHookBase):\n\n def __init__(self, *args, identifier: Optional[str] = None, **kwargs):\n if identifier is None:\n identifier = 'maxpool'\n\n super(LayerGlobalMaxPool2d, self).__init__(*args, **kwargs, identifier=identifier)\n \n def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:\n if activations.ndim != 4:\n return activations\n return np.max(activations, axis=(2, 3))\n\n\nclass LayerRandomProjection(LayerHookBase):\n\n def __init__(self, *args,\n n_components: int = 1000,\n force: bool = False,\n identifier: Optional[str] = None,\n **kwargs):\n if identifier is None:\n identifier = f'randproj_ncomponents={n_components}_force={force}'\n\n super(LayerRandomProjection, self).__init__(*args, **kwargs, identifier=identifier)\n self._n_components = n_components\n self._force = force\n self._layer_ws = {}\n\n def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:\n activations = flatten(activations)\n if activations.shape[1] <= self._n_components and not self._force:\n return activations\n if layer not in self._layer_ws:\n w = np.random.normal(size=(activations.shape[-1], self._n_components)) / np.sqrt(self._n_components)\n self._layer_ws[layer] = w\n else:\n w = self._layer_ws[layer]\n activations = activations @ w\n return activations\n\n\nclass LayerPCA(LayerHookBase):\n\n def __init__(self, *args,\n n_components: int = 1000,\n force: bool = False,\n stimuli: Optional[Stimuli] = None,\n stimuli_identifier: Optional[str] = None,\n identifier: Optional[str] = None,\n batch_size: Optional[int] = None,\n device: Optional[Union[str, 
torch.device]] = None,\n **kwargs):\n if stimuli is None:\n # Default to ImageNet validation with 1 image per class\n stimuli = _get_imagenet_val(n_components)\n stimuli_identifier = 'brainscore-imagenetval'\n if isinstance(stimuli, StimulusSet) and stimuli_identifier is None and hasattr(stimuli, 'identifier'):\n stimuli_identifier = stimuli.identifier\n if stimuli_identifier is None:\n raise ValueError('If passing a list of paths for stimuli '\n 'or a StimulusSet without an identifier attribute, '\n 'you must provide a stimuli_identifier')\n\n if identifier is None:\n identifier = f'pca_ncomponents={n_components}_force={force}_stimuli_identifier={stimuli_identifier}'\n\n super(LayerPCA, self).__init__(*args, **kwargs, identifier=identifier)\n self._n_components = n_components\n self._force = force\n self._stimuli_identifier = stimuli_identifier\n self._stimuli = stimuli\n self._batch_size = batch_size\n self._device = device\n self._logger = logging.getLogger(fullname(self))\n self._layer_pcas = {}\n\n def setup(self, batch_activations) -> None:\n layers = batch_activations.keys()\n missing_layers = [layer for layer in layers if layer not in self._layer_pcas]\n if len(missing_layers) == 0:\n return\n layer_pcas = self._pcas(identifier=self._extractor.identifier,\n layers=missing_layers,\n n_components=self._n_components,\n force=self._force,\n stimuli_identifier=self._stimuli_identifier)\n self._layer_pcas = {**self._layer_pcas, **layer_pcas}\n \n def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:\n pca = self._layer_pcas[layer]\n activations = flatten(activations)\n if pca is None:\n return activations\n return pca.transform(torch.from_numpy(activations).to(self._device))\n\n @store_dict(dict_key='layers', identifier_ignore=['layers'])\n def _pcas(self, identifier, layers, n_components, force, stimuli_identifier) -> Dict[str, BasePCA]:\n self._logger.debug(f'Retrieving {stimuli_identifier} activations')\n self.handle.disable()\n activations = self._extractor(self._stimuli, layers=layers, stimuli_identifier=False)\n activations = {layer: activations.sel(layer=layer).values\n for layer in np.unique(activations['layer'])}\n assert len(set(layer_activations.shape[0] for layer_activations in activations.values())) == 1, \"stimuli differ\"\n self.handle.enable()\n\n self._logger.debug(f'Computing {stimuli_identifier} principal components')\n progress = tqdm(total=len(activations), desc=\"layer principal components\", leave=False)\n\n def init_and_progress(layer, activations):\n activations = flatten(activations)\n if activations.shape[1] <= n_components and not force:\n self._logger.debug(f\"Not computing principal components for {layer} \"\n f\"activations {activations.shape} as shape is small enough already\")\n progress.update(1)\n return None\n n_components_ = n_components if activations.shape[1] > n_components else activations.shape[1]\n if self._batch_size is None:\n pca = PCAPytorch(n_components_, device=self._device)\n pca.fit(torch.from_numpy(activations).to(self._device))\n else:\n pca = IncrementalPCAPytorch(n_components_, device=self._device)\n for i in range(0, activations.shape[0], self._batch_size):\n activations_batch = torch.from_numpy(activations[i:i + self._batch_size]).to(self._device)\n pca.fit_partial(activations_batch)\n return pca\n\n layer_pcas = change_dict(activations, init_and_progress, keep_name=True,\n multithread=os.getenv('MT_MULTITHREAD', '1') == '1')\n progress.close()\n return layer_pcas\n\n\ndef _get_imagenet_val(num_images):\n _logger 
= logging.getLogger(fullname(_get_imagenet_val))\n num_classes = 1000\n num_images_per_class = (num_images - 1) // num_classes\n base_indices = np.arange(num_images_per_class).astype(int)\n indices = []\n for i in range(num_classes):\n indices.extend(50 * i + base_indices)\n for i in range((num_images - 1) % num_classes + 1):\n indices.extend(50 * i + np.array([num_images_per_class]).astype(int))\n\n framework_home = os.path.expanduser(os.getenv('MT_HOME', '~/.model-tools'))\n imagenet_filepath = os.getenv('MT_IMAGENET_PATH', os.path.join(framework_home, 'imagenet2012.hdf5'))\n imagenet_dir = f\"{imagenet_filepath}-files\"\n os.makedirs(imagenet_dir, exist_ok=True)\n\n if not os.path.isfile(imagenet_filepath):\n os.makedirs(os.path.dirname(imagenet_filepath), exist_ok=True)\n _logger.debug(f\"Downloading ImageNet validation to {imagenet_filepath}\")\n s3.download_file(\"imagenet2012-val.hdf5\", imagenet_filepath)\n\n filepaths = []\n with h5py.File(imagenet_filepath, 'r') as f:\n for index in indices:\n imagepath = os.path.join(imagenet_dir, f\"{index}.png\")\n if not os.path.isfile(imagepath):\n image = np.array(f['val/images'][index])\n Image.fromarray(image).save(imagepath)\n filepaths.append(imagepath)\n\n return filepaths\n"
] | [
[
"numpy.sqrt",
"numpy.arange",
"numpy.max",
"torch.from_numpy",
"numpy.random.normal",
"numpy.array",
"numpy.unique"
]
] |
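Illustrative note: stripped of the hook plumbing, the two cheap per-layer transforms in `hooks.py` above are a spatial global max-pool (`LayerGlobalMaxPool2d`) and a scaled Gaussian random projection (`LayerRandomProjection`). A sketch with assumed activation shapes:

```python
import numpy as np

acts = np.random.rand(16, 256, 7, 7)       # assumed B x C x H x W activations
pooled = np.max(acts, axis=(2, 3))         # global max-pool -> (16, 256)

flat = acts.reshape(acts.shape[0], -1)     # flatten each example
n_components = 100
w = np.random.normal(size=(flat.shape[-1], n_components)) / np.sqrt(n_components)
projected = flat @ w                       # random projection -> (16, 100)
print(pooled.shape, projected.shape)
```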
hrrsjeong/METEORE | [
"ba8e517c51dbfd3fea5130f297c480c4626c2ff0"
] | [
"combination_model_prediction.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 9 18:03:39 2020\r\n\r\n@author: akanksha\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport joblib\r\nfrom itertools import combinations\r\nimport sklearn\r\nfrom functools import reduce\r\nimport argparse\r\nimport os\r\n\r\n\r\nparser = argparse.ArgumentParser(description = 'Prediction from combined models for the reads.')\r\n\r\nparser.add_argument('--methodsfile','-i', type = str, required = True,\r\n help = 'TSV file containing name and path of the method output tsv file. The output tsv file from the method should be in the format [ID,Pos,Strand,Score]. Can be compressed in gz.')\r\n\r\nparser.add_argument('--model','-m', choices = [\"default\",\"optimized\"], required = True, type = str,\r\n help = 'which model to select from default RF or optimized RF with max_depth 3 and n_estimator 10')\r\n\r\nparser.add_argument('--output', '-o',type = str, required = True,\r\n\t\thelp = 'Where to store the outputs')\r\noptions = parser.parse_args()\r\n\r\ndef mod_file(data_file_path):\r\n data_file=pd.read_csv(data_file_path, header=0, sep=\"\\t\")\r\n name=data_file_path.split(\"\\\\\")[-1].split(\".\")[0]\r\n data_file.drop_duplicates(subset=['Chr',\"ID\",\"Pos\",\"Strand\"],inplace=True) # add chr\r\n data_file.reset_index(inplace=True,drop=True)\r\n mask=data_file.index[data_file.Strand==\"-\"].tolist()\r\n data_file[\"Pos\"][mask]=data_file[\"Pos\"][mask]-1\r\n data_file.drop([\"Strand\"], axis=1, inplace=True)\r\n data_file.rename(columns={\"Score\":name}, inplace=True)\r\n data_file.reset_index(inplace=True, drop=True)\r\n return(data_file)\r\n\r\ndef main(mp,combine_file):\r\n\r\n loaded_model = joblib.load(open(mp, 'rb'))\r\n X=combine_file[combine_file.columns[3:]] #2:\r\n X=sklearn.preprocessing.MinMaxScaler().fit_transform(X)\r\n prediction=pd.DataFrame(loaded_model.predict(X)) ##\r\n prediction_prob=pd.DataFrame(loaded_model.predict_proba(X))\r\n prediction.rename(columns={0:\"Prediction\"}, inplace=True)\r\n prediction_prob=prediction_prob[[1]]\r\n prediction_prob.rename(columns={1:\"Prob_methylation\"}, inplace=True)\r\n final_output=pd.concat([combine_file[combine_file.columns[:3]],prediction,prediction_prob], axis=1) #:2\r\n #os.makedirs(options.output)\r\n #final_output.to_csv(options.output+'/predictions_combination_method.tsv', header=True, index=None, sep='\\t')\r\n dir = (\"combined_model_results\")\r\n if not os.path.isdir(dir):\r\n os.makedirs(dir)\r\n final_output.to_csv(dir+'/'+options.output, header=True, index=None, sep='\\t')\r\n else:\r\n final_output.to_csv(dir+'/'+options.output, header=True, index=None, sep='\\t')\r\n\r\nif __name__ == '__main__':\r\n\r\n df_file=pd.read_csv(options.methodsfile, header=None, sep='\\t')\r\n if options.model==\"default\":\r\n fillval=\"default\"\r\n else:\r\n fillval=\"max_depth_3_n_estimator_10\"\r\n modelname='_'.join(df_file[0])\r\n mp='saved_models/rf_model_'+fillval+'_'+modelname+'.model'\r\n dfs=[]\r\n for i in df_file[1]:\r\n dfs.append(mod_file(i))\r\n combine_file=reduce(lambda left,right: pd.merge(left, right, how='inner',on=[\"ID\",\"Chr\",\"Pos\"]), dfs) # add chr\r\n combine_file.drop_duplicates(subset=[\"ID\",\"Chr\",\"Pos\"],inplace=True) # add chr\r\n combine_file.reset_index(inplace=True, drop=True)\r\n main(mp,combine_file) ##\r\n"
] | [
[
"pandas.read_csv",
"sklearn.preprocessing.MinMaxScaler",
"pandas.merge",
"pandas.concat"
]
] |
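Illustrative note: `combination_model_prediction.py` above inner-joins the per-method score tables on `["ID", "Chr", "Pos"]`, min-max scales the score columns, and runs a saved random forest over them. A self-contained sketch of that flow; the method column names are placeholders, and a freshly fitted classifier stands in for the joblib model on disk:

```python
from functools import reduce
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import MinMaxScaler

a = pd.DataFrame({"ID": ["r1", "r2"], "Chr": ["chr1", "chr1"],
                  "Pos": [10, 20], "method1": [0.1, 0.9]})
b = pd.DataFrame({"ID": ["r1", "r2"], "Chr": ["chr1", "chr1"],
                  "Pos": [10, 20], "method2": [0.2, 0.8]})
combined = reduce(lambda left, right: pd.merge(left, right, how="inner",
                                               on=["ID", "Chr", "Pos"]), [a, b])

X = MinMaxScaler().fit_transform(combined[combined.columns[3:]])
clf = RandomForestClassifier(n_estimators=10).fit(X, [0, 1])  # stand-in model
combined["Prob_methylation"] = clf.predict_proba(X)[:, 1]
print(combined)
```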
simone-codeluppi/storm-analysis | [
"fa50fb7d670e9e4d712fa6fafb398963b39e209b"
] | [
"storm_analysis/diagnostics/sCMOS/configure.py"
] | [
"#!/usr/bin/env python\n\"\"\"\nConfigure folder for sCMOS testing.\n\nHazen 09/17\n\"\"\"\nimport numpy\nimport os\n\nimport storm_analysis\nimport storm_analysis.sa_library.parameters as parameters\n\nimport storm_analysis.simulator.emitters_on_grid as emittersOnGrid\nimport storm_analysis.simulator.emitters_uniform_random as emittersUniformRandom\n\nimport storm_analysis.diagnostics.sCMOS.settings as settings\n\n\ndef testingParameters(cal_file):\n \"\"\"\n Create a sCMOS parameters object.\n \"\"\"\n params = parameters.ParametersSCMOS()\n\n params.setAttr(\"max_frame\", \"int\", -1) \n params.setAttr(\"start_frame\", \"int\", -1) \n\n params.setAttr(\"background_sigma\", \"float\", 8.0)\n params.setAttr(\"camera_calibration\", \"filename\", cal_file)\n params.setAttr(\"find_max_radius\", \"int\", 5)\n params.setAttr(\"fit_error_model\", \"string\", settings.fit_error_model)\n params.setAttr(\"foreground_sigma\", \"float\", 1.5)\n params.setAttr(\"iterations\", \"int\", settings.iterations)\n params.setAttr(\"model\", \"string\", settings.model)\n params.setAttr(\"pixel_size\", \"float\", settings.pixel_size)\n params.setAttr(\"roi_size\", \"int\", settings.roi_size)\n params.setAttr(\"sigma\", \"float\", 1.5)\n params.setAttr(\"threshold\", \"float\", settings.threshold)\n\n # Don't do tracking.\n params.setAttr(\"descriptor\", \"string\", \"1\")\n params.setAttr(\"radius\", \"float\", \"0.0\")\n\n # Don't do drift-correction.\n params.setAttr(\"d_scale\", \"int\", 2)\n params.setAttr(\"drift_correction\", \"int\", 0)\n params.setAttr(\"frame_step\", \"int\", 500)\n params.setAttr(\"z_correction\", \"int\", 0)\n\n # Z fitting.\n #\n # These are nonsense values. We test either '2D' of '3D' mode\n # and check how well we do at fitting the localization widths.\n #\n params.setAttr(\"do_zfit\", \"int\", 0)\n\n params.setAttr(\"cutoff\", \"float\", 0.0) \n params.setAttr(\"max_z\", \"float\", 0.5)\n params.setAttr(\"min_z\", \"float\", -0.5)\n params.setAttr(\"z_value\", \"float\", 0.0)\n params.setAttr(\"z_step\", \"float\", 1.0)\n\n params.setAttr(\"wx_wo\", \"float\", 1.0)\n params.setAttr(\"wx_c\", \"float\", 1.0)\n params.setAttr(\"wx_d\", \"float\", 1.0)\n params.setAttr(\"wxA\", \"float\", 0.0)\n params.setAttr(\"wxB\", \"float\", 0.0)\n params.setAttr(\"wxC\", \"float\", 0.0)\n params.setAttr(\"wxD\", \"float\", 0.0)\n\n params.setAttr(\"wy_wo\", \"float\", 1.0)\n params.setAttr(\"wy_c\", \"float\", 1.0)\n params.setAttr(\"wy_d\", \"float\", 1.0)\n params.setAttr(\"wyA\", \"float\", 0.0)\n params.setAttr(\"wyB\", \"float\", 0.0)\n params.setAttr(\"wyC\", \"float\", 0.0)\n params.setAttr(\"wyD\", \"float\", 0.0)\n\n # 'peak_locations' testing.\n if hasattr(settings, \"peak_locations\") and (settings.peak_locations is not None):\n params.setAttr(\"peak_locations\", \"filename\", settings.peak_locations)\n\n return params\n\n \ndef configure(cal_file = None):\n\n # Create sCMOS calibration file if not specified.\n #\n if cal_file is None:\n cal_file = \"calib.npy\"\n offset = numpy.zeros((settings.y_size, settings.x_size)) + settings.camera_offset\n variance = numpy.ones((settings.y_size, settings.x_size)) * settings.camera_variance\n gain = numpy.ones((settings.y_size, settings.x_size)) * settings.camera_gain\n rqe = numpy.ones((settings.y_size, settings.x_size))\n numpy.save(cal_file, [offset, variance, gain, rqe, 2])\n\n # Create parameters file for analysis.\n #\n print(\"Creating XML file.\")\n params = testingParameters(cal_file)\n params.toXMLFile(\"scmos.xml\", pretty = 
True)\n\n # Create localization on a grid file.\n #\n print(\"Creating gridded localization.\")\n emittersOnGrid.emittersOnGrid(\"grid_list.hdf5\",\n settings.nx,\n settings.ny,\n 1.5,\n 20,\n 0.0,\n 0.0)\n\n # Create randomly located localizations file.\n #\n print(\"Creating random localization.\")\n emittersUniformRandom.emittersUniformRandom(\"random_list.hdf5\",\n 1.0,\n 10,\n settings.x_size,\n settings.y_size,\n 0.0)\n\nif (__name__ == \"__main__\"):\n configure()\n \n"
] | [
[
"numpy.ones",
"numpy.save",
"numpy.zeros"
]
] |
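Illustrative note: `configure()` above persists the sCMOS calibration as a single `.npy` holding `[offset, variance, gain, rqe, 2]`, where the trailing `2` appears to be a format/version tag. Because that list mixes 2-D maps with a scalar, recent NumPy releases need an explicit object array; a sketch with assumed camera constants:

```python
import numpy as np

y_size, x_size = 256, 256                        # assumed sensor size
offset = np.zeros((y_size, x_size)) + 100.0      # per-pixel offset map
variance = np.ones((y_size, x_size)) * 1.0       # per-pixel read-noise variance
gain = np.ones((y_size, x_size)) * 2.0           # per-pixel gain map
rqe = np.ones((y_size, x_size))                  # relative quantum efficiency

calib = np.array([offset, variance, gain, rqe, 2], dtype=object)
np.save("calib.npy", calib)
offset2, variance2, gain2, rqe2, version = np.load("calib.npy", allow_pickle=True)
```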
Mic-Tsai/Power-Consumption-Current-Sense-System-V22 | [
"7fe8348171efe53a2985a591ef7cf657bacc5fbd"
] | [
"example/Python_Plot/Battery example/ee_0120_Y_consist.py"
] | [
"import argparse, re, sys, os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\npath = ''\nflname = sys.argv[1]\ntry:\n\tchartType = sys.argv[2]\nexcept:\n\tchartType = 'ch1_vload'\nprint('chartType:'+chartType)\t\n\nfl = flname.split('/')\nfor i in fl[:-1]:\n\tpath = path+i+'/'\n\nfw = open(flname, 'r')\nrawdata = fw.read().strip()\n\nch1_list = []\nch2_list = []\n\nch1_vload = []\nch1_volt = []\nch1_iload = []\nch1_pload = []\n\nch2_vload = []\nch2_volt = []\nch2_iload = []\nch2_pload = []\n\nunit = ''\n\nline = rawdata.split('\\n')\nfor aline in line:\n\t\n\ttmp = aline.split('||')\n\tch1_list.append(tmp[0].lstrip())\n\tch2_list.append(tmp[2].lstrip())\n\nfor item in ch1_list:\n\ttmp = item.split(' | ')\n\tfor sub in tmp:\n\t\tif sub.count(\"V-load\"):\n\t\t\tch1_vload.append(float(re.search('\\d+\\.\\d+', sub).group()))\n\t\telif sub.count(\"Voltage\"):\n\t\t\tch1_volt.append(float(re.search('\\d+\\.\\d+', sub).group()))\n\t\telif sub.count(\"I-load\"):\n\t\t\tch1_iload.append(float(re.search('\\d+\\.\\d+', sub).group()))\n\t\telif sub.count(\"P-load\"):\n\t\t\tch1_pload.append(float(re.search('\\d+\\.\\d+', sub).group()))\n\nfor item in ch2_list:\n\ttmp = item.split(' | ')\n\tfor sub in tmp:\n\t\tif sub.count(\"V-load\"):\n\t\t\tch2_vload.append(float(re.search('\\d+\\.\\d+', sub).group()))\n\t\telif sub.count(\"Voltage\"):\n\t\t\tch2_volt.append(float(re.search('\\d+\\.\\d+', sub).group()))\n\t\telif sub.count(\"I-load\"):\n\t\t\tch2_iload.append(float(re.search('\\d+\\.\\d+', sub).group()))\n\t\telif sub.count(\"P-load\"):\n\t\t\tch2_pload.append(float(re.search('\\d+\\.\\d+', sub).group()))\n\nif chartType.lower().count('vload') or chartType.lower().count('v-load'):\n\tprint('**vload')\n\tunit = 'V'\n\tif chartType.lower().count('ch1'):\n\t\ty = ch1_vload\n\telse:\n\t\ty = ch2_vload\nelif chartType.lower().count('volt'):\n\tprint('**volt')\n\tunit = 'mV'\n\tif chartType.lower().count('ch1'):\n\t\ty = ch1_volt\n\telse:\n\t\ty = ch2_volt\nelif chartType.lower().count('iload') or chartType.lower().count('i-load'):\n\tprint('**iload')\n\tunit = 'mA'\n\tif chartType.lower().count('ch1'):\n\t\ty = ch1_iload\n\telse:\n\t\ty = ch2_iload\nelif chartType.lower().count('pload') or chartType.lower().count('p-load'):\n\tprint('**pload')\n\tunit = 'mW'\n\tif chartType.lower().count('ch1'):\n\t\ty = ch1_pload\n\telse:\n\t\ty = ch2_pload\n\nx = np.linspace(1,len(y),len(y))\nfig = plt.figure(1)\nax = plt.axes()\nplt.xlim([0, len(y)])\nplt.ylim([0,160])\nplt.plot(x,y,ls='-',c='b')\nplt.grid('on')\nplt.title(chartType)\nplt.ylabel('['+unit+']')\nplt.savefig(path+chartType+'.png')\nprint(\"File Path:\"+path+chartType+'.png')\n\n\n\n\n\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot"
]
] |
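Illustrative note: the plotting script above pulls each reading out of `' | '`-separated log fields with `re.search('\d+\.\d+', ...)`. A toy round-trip on one assumed log line (only the delimiters and the `V-load`/`Voltage`/`I-load`/`P-load` keywords are taken from the script; the exact field format is an assumption):

```python
import re

line = "V-load: 3.70 V | Voltage: 120.5 mV | I-load: 35.2 mA | P-load: 130.1 mW"
readings = {}
for field in line.split(" | "):
    m = re.search(r"\d+\.\d+", field)
    if m:
        readings[field.split(":")[0].strip()] = float(m.group())
print(readings)  # {'V-load': 3.7, 'Voltage': 120.5, 'I-load': 35.2, 'P-load': 130.1}
```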
andytorrestb/rarefiedPlume | [
"c09234c701c395d16519d8a361eae17540711530"
] | [
"cases/1d/graphCaseValidation.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\n\n# Find path for cases\ncurr_dir_path = os.path.dirname(os.path.realpath(__file__))\n# print(curr_dir_path)\n# cases = os.listdir(curr_dir_path + '/Cases')\n# pop = cases.index('baseCase')\n# cases.pop(pop)\n\n# Label graph with bold characters\nfont_axis_publish = {\n 'color': 'black',\n 'weight': 'bold',\n 'size': 22,\n }\n\n# Read in digitized data\ndigi_n = pd.read_csv(\n curr_dir_path + '/n_nstar_radius.dat',\n header = 0,\n sep = '\\t',\n names = ['r', 'n_nstar']\n )\n\ndigi_T = pd.read_csv(\n curr_dir_path + '/T_Tstar_radius_DAC.dat',\n header = 0,\n sep = '\\t',\n names = ['r', 'T_Tstar']\n )\n\n# Read in simulated data. \nsim = pd.read_csv(\n curr_dir_path + '/postProcessing/sampleDict/0.3/horizontalLine_Ttra_Ar_rhoN_Ar.csv'\n )\n\n# Used to see what the values trend to. \nprint(sim['Ttra_Ar'])\n\nsim = sim[['x', 'rhoN_Ar', 'Ttra_Ar']].dropna()\nsim['rhoN_Ar'] = sim['rhoN_Ar'] / 8.377e20\nsim['Ttra_Ar'] = sim['Ttra_Ar'] / 1000.0\n \n\n# Producde Analytical Data\ndef TTt_Ma(Ma, ga = 1.4):\n return (ga + 1) / (2 + (ga - 1) * Ma ** 2)\n\ndef rrt_Ma(Ma, ga = 1.4):\n rrt = (1 / TTt_Ma(Ma, ga)) ** ((ga + 1) / (ga - 1))\n rrt = np.sqrt(np.sqrt(rrt) / Ma)\n return rrt\n\ndef nnt_Ma(Ma, ga = 1.4):\n return TTt_Ma(Ma, ga) ** (1 / (ga - 1))\n\ndef a(T, ga = 1.4, R = 287):\n return np.sqrt(ga * R * T)\n\nMa_domain = np.linspace(1, 25, 100) \nga = 1.67\nTTt = TTt_Ma(Ma_domain, ga = ga)\nrrt = rrt_Ma(Ma_domain, ga = ga)\nnnt = nnt_Ma(Ma_domain, ga = ga)\n\nprint(\"Printing rrt\")\nprint(rrt)\n\n# Graph Results\nplt.title('OpenFOAM vs DAC', fontdict = font_axis_publish)\nplt.ylabel('n/n*', fontdict = font_axis_publish)\nplt.xlabel('Radial distance, r (m)', fontdict = font_axis_publish)\n\nplt.plot(sim['x'], sim['rhoN_Ar'], label = 'OpenFOAM (Torres, Pitt, Kinzel)')\nplt.plot(digi_n['r'], digi_n['n_nstar'], label = 'DAC (Lumpkin, Stewart)')\nplt.plot(rrt, nnt, label = 'Analytical Solution')\nplt.legend()\nplt.yscale('log')\nplt.ylim(bottom = 1e-4, top = 1)\nplt.savefig(curr_dir_path + '/digitized_vs_analytical_n.png')\nplt.close()\n\nplt.title('OpenFOAM vs DAC', fontdict = font_axis_publish)\nplt.ylabel('T/T*', fontdict = font_axis_publish)\nplt.xlabel('Radial distance, r (m)', fontdict = font_axis_publish)\n\nplt.plot(sim['x'], sim['Ttra_Ar'], label = 'OpenFOAM (Torres, Pitt, Kinzel)')\nplt.plot(digi_T['r'], digi_T['T_Tstar'], label = 'DAC (Lumpkin, Stewart)')\nplt.plot(rrt, TTt, label = 'Analytical Solution')\nplt.legend()\nplt.yscale('log')\nplt.ylim(bottom = 1e-3, top = 1)\nplt.savefig(curr_dir_path + '/digitized_vs_analytical_T.png')\nplt.close()\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.close",
"numpy.sqrt",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.xlabel"
]
] |
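Illustrative note: `graphCaseValidation.py` above compares the OpenFOAM and DAC profiles against the isentropic source-flow relations for T/T*, r/r*, and n/n* as functions of Mach number. A quick sanity check of those formulas as written: every ratio must reduce to 1 at Ma = 1 (gamma = 1.67 for argon, as in the script):

```python
import numpy as np

def TTt_Ma(Ma, ga=1.4):
    return (ga + 1) / (2 + (ga - 1) * Ma ** 2)

def rrt_Ma(Ma, ga=1.4):
    rrt = (1 / TTt_Ma(Ma, ga)) ** ((ga + 1) / (ga - 1))
    return np.sqrt(np.sqrt(rrt) / Ma)

def nnt_Ma(Ma, ga=1.4):
    return TTt_Ma(Ma, ga) ** (1 / (ga - 1))

for ratio in (TTt_Ma, rrt_Ma, nnt_Ma):
    assert np.isclose(ratio(1.0, ga=1.67), 1.0)
```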
nakashima-kodai/FractalDB_Pretrained_ViT_PyTorch | [
"5d1df4023f05f5a8ff7e8a8810bf95119a0eeb96"
] | [
"pretrain.py"
] | [
"import os, sys\nimport math\n\nimport hydra\nimport torch\nimport timm\nfrom hydra.utils import instantiate\nfrom timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy\nfrom timm.utils import NativeScaler\n\nimport models\nfrom data import create_dataloader\nfrom utils import MetricLogger, SmoothedValue\nfrom utils import fix_random_seed\n\n\[email protected](config_path='./configs', config_name='pretrain')\ndef main(cfg):\n if cfg.seed is not None:\n fix_random_seed(cfg.seed)\n torch.backends.cudnn.benchmark = True\n\n # dataloader\n trainloader, num_classes = create_dataloader(cfg.data)\n\n # additional data augmentation (mixup/cutmix)\n mixup_fn = None\n mixup_enable = (cfg.data.mixup.mixup_alpha > 0.) or (cfg.data.mixup.cutmix_alpha > 0.)\n if mixup_enable:\n mixup_fn = instantiate(cfg.data.mixup, num_classes=num_classes)\n print(f'MixUp/Cutmix was enabled\\n')\n\n # create model\n model = instantiate(cfg.model, num_classes=num_classes)\n print(f'Model[{cfg.model.model_name}] was created')\n\n # wrap model with DP\n model = torch.nn.parallel.DataParallel(model)\n model.cuda()\n model_without_dp = model.module\n\n # optimizer\n scaled_lr = cfg.optim.args.lr * cfg.data.loader.batch_size / 512.0\n cfg.optim.args.lr = scaled_lr\n optimizer = instantiate(cfg.optim, model=model)\n print(f'Optimizer: \\n{optimizer}\\n')\n\n # scheduler\n lr_scheduler, _ = instantiate(cfg.scheduler, optimizer=optimizer)\n print(f'Scheduler: \\n{lr_scheduler}\\n')\n \n # criterion\n if cfg.data.mixup.mixup_alpha > 0.:\n criterion = SoftTargetCrossEntropy().cuda()\n print('SoftTargetCrossEntropy is used for criterion\\n')\n elif cfg.data.mixup.label_smoothing > 0.:\n criterion = LabelSmoothingCrossEntropy(cfg.data.mixup.label_smoothing).cuda()\n print('LabelSmoothingCrossEntropy is used for criterion\\n')\n else:\n criterion = torch.nn.CrossEntropyLoss().cuda()\n print('CrossEntropyLoss is used for criterion\\n')\n loss_scaler = NativeScaler()\n\n # load resume\n start_epoch = 1\n if cfg.resume is not None:\n checkpoint = torch.load(cfg.resume, map_location='cpu')\n model_without_dp.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n loss_scaler.load_state_dict(checkpoint['scaler'])\n start_epoch = checkpoint['epoch'] + 1\n print(f'Resume was loaded from {cfg.resume}\\n')\n\n print(f'Start training for {cfg.epochs} epochs')\n for epoch in range(start_epoch, cfg.epochs + 1):\n # train one epoch\n model.train()\n metric_logger = MetricLogger(delimiter=' ')\n metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))\n header = f'Epoch: [{epoch:03}/{cfg.epochs:03}]'\n for data in metric_logger.log_every(trainloader, cfg.print_iter_freq, header):\n images = data[0].cuda(non_blocking=True)\n labels = data[1].cuda(non_blocking=True)\n\n if mixup_fn is not None:\n images, labels = mixup_fn(images, labels)\n \n with torch.cuda.amp.autocast():\n outputs = model(images)\n loss = criterion(outputs, labels)\n\n loss_value = loss.item()\n if not math.isfinite(loss_value):\n print(f'Loss is {loss_value}, stopping training')\n sys.exit(1)\n\n optimizer.zero_grad()\n is_second_order = (hasattr(optimizer, 'is_second_order')) and (optimizer.is_second_order)\n loss_scaler(\n loss=loss,\n optimizer=optimizer,\n parameters=model.parameters(),\n create_graph=is_second_order\n )\n\n torch.cuda.synchronize()\n \n metric_logger.update(loss=loss_value)\n 
metric_logger.update(lr=optimizer.param_groups[0]['lr'])\n \n # gather the stats from all process\n metric_logger.synchronize_between_processes()\n print(f'Averaged stats: {metric_logger}')\n\n lr_scheduler.step(epoch)\n\n if epoch % cfg.save_epoch_freq == 0:\n save_path = f'{os.getcwd()}/{cfg.model.model_name}_{cfg.data.name}_{epoch:03}ep.pth'\n torch.save({\n 'model': model_without_dp.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'scaler': loss_scaler.state_dict(),\n 'epoch': epoch\n }, save_path)\n\n save_path = f'{os.getcwd()}/{cfg.model.model_name}_{cfg.data.name}_{epoch:03}ep.pth'\n torch.save({\n 'model': model_without_dp.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'scaler': loss_scaler.state_dict(),\n 'epoch': epoch\n }, save_path)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.load",
"torch.cuda.synchronize",
"torch.nn.CrossEntropyLoss",
"torch.cuda.amp.autocast",
"torch.nn.parallel.DataParallel"
]
] |
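Illustrative note: two reusable patterns in `pretrain.py` above are the 512-based linear learning-rate scaling and the AMP step sequence (scale, backward, step, update). A device-agnostic sketch with a toy model; `timm`'s `NativeScaler` is replaced here by the `torch.cuda.amp.GradScaler` it wraps:

```python
import torch

base_lr, batch_size = 1e-3, 256
scaled_lr = base_lr * batch_size / 512.0          # linear LR scaling rule

device = "cuda" if torch.cuda.is_available() else "cpu"
model = torch.nn.Linear(8, 2).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=scaled_lr)
scaler = torch.cuda.amp.GradScaler(enabled=torch.cuda.is_available())

x = torch.randn(4, 8, device=device)
y = torch.randint(0, 2, (4,), device=device)
with torch.cuda.amp.autocast(enabled=torch.cuda.is_available()):
    loss = torch.nn.functional.cross_entropy(model(x), y)

optimizer.zero_grad()
scaler.scale(loss).backward()   # scaling is a no-op on CPU (scaler disabled)
scaler.step(optimizer)
scaler.update()
```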
chekoduadarsh/deep_autoviml | [
"157fbdc2611dc0fbaee5fc4ebebe3e7c1eeb9b52"
] | [
"deep_autoviml/preprocessing/preprocessing_images.py"
] | [
"#Copyright 2021 Google LLC\r\n\r\n#Licensed under the Apache License, Version 2.0 (the \"License\");\r\n#you may not use this file except in compliance with the License.\r\n#You may obtain a copy of the License at\r\n#\r\n# https://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n#Unless required by applicable law or agreed to in writing, software\r\n#distributed under the License is distributed on an \"AS IS\" BASIS,\r\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n#See the License for the specific language governing permissions and\r\n#limitations under the License.\r\n############################################################################################\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport tempfile\r\nimport pdb\r\nimport copy\r\nimport warnings\r\nwarnings.filterwarnings(action='ignore')\r\nimport functools\r\nfrom itertools import combinations\r\nfrom collections import defaultdict\r\n\r\n# Make numpy values easier to read.\r\nnp.set_printoptions(precision=3, suppress=True)\r\n############################################################################################\r\n# data pipelines and feature engg here\r\n\r\n# pre-defined TF2 Keras models and your own models here\r\nfrom deep_autoviml.data_load.classify_features import check_model_options\r\n\r\n# Utils\r\n\r\n############################################################################################\r\n# TensorFlow ≥2.4 is required\r\nimport tensorflow as tf\r\nnp.random.seed(42)\r\ntf.random.set_seed(42)\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.layers.experimental.preprocessing import Normalization, StringLookup, Hashing\r\nfrom tensorflow.keras.layers.experimental.preprocessing import IntegerLookup, CategoryEncoding, CategoryCrossing\r\nfrom tensorflow.keras.layers.experimental.preprocessing import TextVectorization, Discretization\r\nfrom tensorflow.keras.layers import Embedding, Flatten\r\n\r\nfrom tensorflow.keras.optimizers import SGD, Adam, RMSprop\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow.keras import optimizers\r\nfrom tensorflow.keras.models import Model, load_model\r\nfrom tensorflow.keras import callbacks\r\nfrom tensorflow.keras import backend as K\r\nfrom tensorflow.keras import utils\r\nfrom tensorflow.keras.layers import BatchNormalization\r\nfrom tensorflow.keras.optimizers import SGD\r\nfrom tensorflow.keras import regularizers\r\nimport tensorflow_hub as hub\r\nimport tensorflow_text as text\r\n\r\nfrom sklearn.metrics import roc_auc_score, mean_squared_error, mean_absolute_error\r\nfrom IPython.core.display import Image, display\r\nimport pickle\r\n#############################################################################################\r\n##### Suppress all TF2 and TF1.x warnings ###################\r\ntry:\r\n tf.logging.set_verbosity(tf.logging.ERROR)\r\nexcept:\r\n tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\r\n############################################################################################\r\nfrom tensorflow.keras.layers import Reshape, MaxPooling1D, MaxPooling2D, AveragePooling2D, AveragePooling1D\r\nfrom tensorflow.keras import Model, Sequential\r\nfrom tensorflow.keras.layers import Activation, Dense, Embedding, GlobalAveragePooling1D, GlobalMaxPooling1D, Dropout, Conv1D\r\nfrom tensorflow.keras.layers.experimental.preprocessing import 
TextVectorization\r\n############################################################################################\r\ndef preprocessing_images(train_ds, model_options):\r\n \"\"\"\r\n This produces a preprocessing layer for an incoming tf.data.Dataset. It can be images only.\r\n You need to just send in a tf.data.DataSet from the training folder and a model_options dictionary.\r\n It will return a full-model-ready layer that you can add to your Keras Functional model as image layer!\r\n ########### Motivation and suggestions for coding for Image processing came from this blog #########\r\n Greatly indebted to Srivatsan for his Github and notebooks: https://github.com/srivatsan88/YouTubeLI\r\n ####################################################################################################\r\n \"\"\"\r\n try:\r\n ####### L O A D F E A T U R E E X T R A C T O R ################\r\n url = \"https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4\"\r\n feature_extractor = check_model_options(model_options, \"tf_hub_model\", url)\r\n img_height = model_options[\"image_height\"]\r\n img_width = model_options[\"image_width\"]\r\n image_channels = model_options[\"image_channels\"]\r\n num_predicts = model_options[\"num_predicts\"]\r\n try:\r\n feature_extractor_layer = hub.KerasLayer(feature_extractor, input_shape=(\r\n img_height,img_width,image_channels))\r\n except:\r\n print('Loading model from Tensorflow Hub failed. Check the URL and try again...')\r\n return\r\n feature_extractor_layer.trainable = False\r\n normalization_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1./255)\r\n tf.random.set_seed(111)\r\n model = tf.keras.Sequential([\r\n normalization_layer,\r\n feature_extractor_layer,\r\n tf.keras.layers.Dropout(0.3),\r\n tf.keras.layers.Dense(num_predicts,activation='softmax')\r\n ])\r\n model.compile(\r\n optimizer='adam',\r\n loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),\r\n metrics=['accuracy'])\r\n except:\r\n print(' Error: Failed image preprocessing layer. Returning...')\r\n return\r\n return model\r\n"
] | [
[
"tensorflow.keras.layers.Dropout",
"tensorflow.logging.set_verbosity",
"tensorflow.losses.SparseCategoricalCrossentropy",
"numpy.random.seed",
"numpy.set_printoptions",
"tensorflow.keras.layers.experimental.preprocessing.Rescaling",
"tensorflow.keras.layers.Dense",
"tensorflow.random.set_seed",
"tensorflow.compat.v1.logging.set_verbosity"
]
] |
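Illustrative note: the image branch above stacks `Rescaling(1/255)`, a frozen TF-Hub feature extractor, dropout, and a softmax head. One detail worth flagging: the snippet compiles with `SparseCategoricalCrossentropy(from_logits=True)` even though the head already applies softmax, so the sketch below keeps the default `from_logits=False`. Fetching the hub module needs network access; the image size and class count are assumptions:

```python
import tensorflow as tf
import tensorflow_hub as hub

url = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"
extractor = hub.KerasLayer(url, trainable=False)   # frozen feature extractor

model = tf.keras.Sequential([
    tf.keras.layers.experimental.preprocessing.Rescaling(
        1.0 / 255, input_shape=(224, 224, 3)),     # assumed input size
    extractor,
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(5, activation="softmax"),  # assumed class count
])
model.compile(optimizer="adam",
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=["accuracy"])
```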
gdmcbain/quadpy | [
"c083d500027d7c1b2187ae06ff2b7fbdd360ccc7"
] | [
"quadpy/e3r/tools.py"
] | [
"# -*- coding: utf-8 -*-\n#\nimport numpy\n\nfrom .. import helpers\n\n\ndef integrate(f, rule, dot=numpy.dot):\n flt = numpy.vectorize(float)\n return dot(f(flt(rule.points).T), flt(rule.weights))\n\n\ndef show(scheme, backend=\"mpl\"):\n \"\"\"Displays scheme for E_3^r quadrature.\n \"\"\"\n helpers.backend_to_function[backend](\n scheme.points, scheme.weights, volume=8 * numpy.pi, edges=[]\n )\n return\n"
] | [
[
"numpy.vectorize"
]
] |
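Illustrative note: `integrate()` above coerces the scheme's (possibly symbolic) points and weights to floats, evaluates `f` on the transposed point array, and contracts with the weights. A tiny numeric sketch with stand-in points and weights (a real E3r rule would come from `quadpy`):

```python
import numpy as np

points = np.random.rand(5, 3)      # stand-in for rule.points, shape (n, 3)
weights = np.random.rand(5)        # stand-in for rule.weights

def f(x):                          # x arrives transposed, shape (3, n)
    return x[0] ** 2 + x[1] ** 2 + x[2] ** 2

flt = np.vectorize(float)          # same float coercion as integrate()
approx = np.dot(f(flt(points).T), flt(weights))
print(approx)
```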
Stomach-ache/GLaS | [
"253092cce1922711e7d9c9df601f117f3ec56e0c"
] | [
"MIPS.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport hnswlib\nimport numpy as np\n\ndef buildIndex(X):\n dim = X.shape[1]\n num_elements = X.shape[0]\n data_labels = np.arange(num_elements)\n p = hnswlib.Index(space = 'cosine', dim = dim)\n p.init_index(max_elements = num_elements, ef_construction = 200, M = 16)\n p.add_items(X, data_labels)\n p.set_ef(5)\n return p\n\ndef searchIndex(p, X, k=5):\n labels, distances = p.knn_query(X, k = k)\n return labels\n\n"
] | [
[
"numpy.arange"
]
] |
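Illustrative note: end-to-end usage of the two helpers above, with random vectors standing in for real embeddings. `buildIndex` calls `set_ef(5)`, which exactly matches the default `k=5` in `searchIndex`; `ef` must be at least `k`, and larger values trade query speed for recall, so this sketch raises it:

```python
import hnswlib
import numpy as np

X = np.random.rand(1000, 64).astype(np.float32)

p = hnswlib.Index(space="cosine", dim=64)
p.init_index(max_elements=X.shape[0], ef_construction=200, M=16)
p.add_items(X, np.arange(X.shape[0]))
p.set_ef(50)                         # ef >= k; higher ef -> better recall

labels, distances = p.knn_query(X[:3], k=5)
print(labels.shape)                  # (3, 5); row i lists neighbours of X[i]
```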
ayman3000/keras-preprocessing | [
"845c423e01acfe251d4276e52cf2b86e73f1646a"
] | [
"tests/image/utils_test.py"
] | [
"import io\nimport resource\nfrom pathlib import Path\n\nimport numpy as np\nimport PIL\nimport pytest\n\nfrom keras_preprocessing.image import utils\n\n\ndef test_validate_filename(tmpdir):\n valid_extensions = ('png', 'jpg')\n filename = tmpdir.ensure('test.png')\n assert utils.validate_filename(str(filename), valid_extensions)\n\n filename = tmpdir.ensure('test.PnG')\n assert utils.validate_filename(str(filename), valid_extensions)\n\n filename = tmpdir.ensure('test.some_extension')\n assert not utils.validate_filename(str(filename), valid_extensions)\n assert not utils.validate_filename('some_test_file.png', valid_extensions)\n\n\ndef test_load_img(tmpdir):\n filename_rgb = str(tmpdir / 'rgb_utils.png')\n filename_rgba = str(tmpdir / 'rgba_utils.png')\n filename_grayscale_8bit = str(tmpdir / 'grayscale_8bit_utils.png')\n filename_grayscale_16bit = str(tmpdir / 'grayscale_16bit_utils.tiff')\n filename_grayscale_32bit = str(tmpdir / 'grayscale_32bit_utils.tiff')\n\n original_rgb_array = np.array(255 * np.random.rand(100, 100, 3),\n dtype=np.uint8)\n original_rgb = utils.array_to_img(original_rgb_array, scale=False)\n original_rgb.save(filename_rgb)\n\n original_rgba_array = np.array(255 * np.random.rand(100, 100, 4),\n dtype=np.uint8)\n original_rgba = utils.array_to_img(original_rgba_array, scale=False)\n original_rgba.save(filename_rgba)\n\n original_grayscale_8bit_array = np.array(255 * np.random.rand(100, 100, 1),\n dtype=np.uint8)\n original_grayscale_8bit = utils.array_to_img(original_grayscale_8bit_array,\n scale=False)\n original_grayscale_8bit.save(filename_grayscale_8bit)\n\n original_grayscale_16bit_array = np.array(\n np.random.randint(-2147483648, 2147483647, (100, 100, 1)), dtype=np.int16\n )\n original_grayscale_16bit = utils.array_to_img(original_grayscale_16bit_array,\n scale=False, dtype='int16')\n original_grayscale_16bit.save(filename_grayscale_16bit)\n\n original_grayscale_32bit_array = np.array(\n np.random.randint(-2147483648, 2147483647, (100, 100, 1)), dtype=np.int32\n )\n original_grayscale_32bit = utils.array_to_img(original_grayscale_32bit_array,\n scale=False, dtype='int32')\n original_grayscale_32bit.save(filename_grayscale_32bit)\n\n # Test that loaded image is exactly equal to original.\n\n loaded_im = utils.load_img(filename_rgb)\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == original_rgb_array.shape\n assert np.all(loaded_im_array == original_rgb_array)\n\n loaded_im = utils.load_img(filename_rgba, color_mode='rgba')\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == original_rgba_array.shape\n assert np.all(loaded_im_array == original_rgba_array)\n\n loaded_im = utils.load_img(filename_rgb, color_mode='grayscale')\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == (original_rgb_array.shape[0],\n original_rgb_array.shape[1], 1)\n\n loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale')\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == original_grayscale_8bit_array.shape\n assert np.all(loaded_im_array == original_grayscale_8bit_array)\n\n loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale')\n loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')\n assert loaded_im_array.shape == original_grayscale_16bit_array.shape\n assert np.all(loaded_im_array == original_grayscale_16bit_array)\n # test casting int16 image to float32\n loaded_im_array = 
utils.img_to_array(loaded_im)\n assert np.allclose(loaded_im_array, original_grayscale_16bit_array)\n\n loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale')\n loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')\n assert loaded_im_array.shape == original_grayscale_32bit_array.shape\n assert np.all(loaded_im_array == original_grayscale_32bit_array)\n # test casting int32 image to float32\n loaded_im_array = utils.img_to_array(loaded_im)\n assert np.allclose(loaded_im_array, original_grayscale_32bit_array)\n\n # Test that nothing is changed when target size is equal to original.\n\n loaded_im = utils.load_img(filename_rgb, target_size=(100, 100))\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == original_rgb_array.shape\n assert np.all(loaded_im_array == original_rgb_array)\n\n loaded_im = utils.load_img(filename_rgba, color_mode='rgba',\n target_size=(100, 100))\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == original_rgba_array.shape\n assert np.all(loaded_im_array == original_rgba_array)\n\n loaded_im = utils.load_img(filename_rgb, color_mode='grayscale',\n target_size=(100, 100))\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == (original_rgba_array.shape[0],\n original_rgba_array.shape[1], 1)\n\n loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale',\n target_size=(100, 100))\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == original_grayscale_8bit_array.shape\n assert np.all(loaded_im_array == original_grayscale_8bit_array)\n\n loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale',\n target_size=(100, 100))\n loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')\n assert loaded_im_array.shape == original_grayscale_16bit_array.shape\n assert np.all(loaded_im_array == original_grayscale_16bit_array)\n\n loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale',\n target_size=(100, 100))\n loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')\n assert loaded_im_array.shape == original_grayscale_32bit_array.shape\n assert np.all(loaded_im_array == original_grayscale_32bit_array)\n\n # Test down-sampling with bilinear interpolation.\n\n loaded_im = utils.load_img(filename_rgb, target_size=(25, 25))\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == (25, 25, 3)\n\n loaded_im = utils.load_img(filename_rgba, color_mode='rgba',\n target_size=(25, 25))\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == (25, 25, 4)\n\n loaded_im = utils.load_img(filename_rgb, color_mode='grayscale',\n target_size=(25, 25))\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == (25, 25, 1)\n\n loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale',\n target_size=(25, 25))\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == (25, 25, 1)\n\n loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale',\n target_size=(25, 25))\n loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')\n assert loaded_im_array.shape == (25, 25, 1)\n\n loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale',\n target_size=(25, 25))\n loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')\n assert loaded_im_array.shape == (25, 25, 1)\n\n # Test down-sampling with nearest neighbor interpolation.\n\n 
loaded_im_nearest = utils.load_img(filename_rgb, target_size=(25, 25),\n interpolation=\"nearest\")\n loaded_im_array_nearest = utils.img_to_array(loaded_im_nearest)\n assert loaded_im_array_nearest.shape == (25, 25, 3)\n assert np.any(loaded_im_array_nearest != loaded_im_array)\n\n loaded_im_nearest = utils.load_img(filename_rgba, color_mode='rgba',\n target_size=(25, 25),\n interpolation=\"nearest\")\n loaded_im_array_nearest = utils.img_to_array(loaded_im_nearest)\n assert loaded_im_array_nearest.shape == (25, 25, 4)\n assert np.any(loaded_im_array_nearest != loaded_im_array)\n\n loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale',\n target_size=(25, 25), interpolation=\"nearest\")\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == (25, 25, 1)\n\n loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale',\n target_size=(25, 25), interpolation=\"nearest\")\n loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')\n assert loaded_im_array.shape == (25, 25, 1)\n\n loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale',\n target_size=(25, 25), interpolation=\"nearest\")\n loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')\n assert loaded_im_array.shape == (25, 25, 1)\n\n # Test different path type\n with open(filename_grayscale_32bit, 'rb') as f:\n _path = io.BytesIO(f.read()) # io.Bytesio\n loaded_im = utils.load_img(_path, color_mode='grayscale')\n loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)\n assert np.all(loaded_im_array == original_grayscale_32bit_array)\n\n _path = filename_grayscale_32bit # str\n loaded_im = utils.load_img(_path, color_mode='grayscale')\n loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)\n assert np.all(loaded_im_array == original_grayscale_32bit_array)\n\n _path = filename_grayscale_32bit.encode() # bytes\n loaded_im = utils.load_img(_path, color_mode='grayscale')\n loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)\n assert np.all(loaded_im_array == original_grayscale_32bit_array)\n\n _path = Path(tmpdir / 'grayscale_32bit_utils.tiff') # Path\n loaded_im = utils.load_img(_path, color_mode='grayscale')\n loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)\n assert np.all(loaded_im_array == original_grayscale_32bit_array)\n\n # Check that exception is raised if interpolation not supported.\n\n loaded_im = utils.load_img(filename_rgb, interpolation=\"unsupported\")\n with pytest.raises(ValueError):\n loaded_im = utils.load_img(filename_rgb, target_size=(25, 25),\n interpolation=\"unsupported\")\n\n # Check that the aspect ratio of a square is the same\n\n filename_red_square = str(tmpdir / 'red_square_utils.png')\n A = np.zeros((50, 100, 3), dtype=np.uint8) # rectangle image 100x50\n A[20:30, 45:55, 0] = 255 # red square 10x10\n red_square_array = np.array(A)\n red_square = utils.array_to_img(red_square_array, scale=False)\n red_square.save(filename_red_square)\n\n loaded_im = utils.load_img(filename_red_square, target_size=(25, 25),\n keep_aspect_ratio=True)\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == (25, 25, 3)\n\n red_channel_arr = loaded_im_array[:, :, 0].astype(np.bool)\n square_width = np.sum(np.sum(red_channel_arr, axis=0))\n square_height = np.sum(np.sum(red_channel_arr, axis=1))\n aspect_ratio_result = square_width / square_height\n\n # original square had 1:1 ratio\n assert aspect_ratio_result == pytest.approx(1.0)\n\n\ndef 
test_list_pictures(tmpdir):\n filenames = ['test.png', 'test0.jpg', 'test-1.jpeg', '2test.bmp',\n '2-test.ppm', '3.png', '1.jpeg', 'test.bmp', 'test0.ppm',\n 'test4.tiff', '5-test.tif', 'test.txt', 'foo.csv',\n 'face.gif', 'bar.txt']\n subdirs = ['', 'subdir1', 'subdir2']\n filenames = [tmpdir.ensure(subdir, f) for subdir in subdirs\n for f in filenames]\n\n found_images = utils.list_pictures(str(tmpdir))\n assert len(found_images) == 33\n\n found_images = utils.list_pictures(str(tmpdir), ext='png')\n assert len(found_images) == 6\n\n\ndef test_array_to_img_and_img_to_array():\n height, width = 10, 8\n\n # Test the data format\n # Test RGB 3D\n x = np.random.random((3, height, width))\n img = utils.array_to_img(x, data_format='channels_first')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_first')\n assert x.shape == (3, height, width)\n\n # Test RGBA 3D\n x = np.random.random((4, height, width))\n img = utils.array_to_img(x, data_format='channels_first')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_first')\n assert x.shape == (4, height, width)\n\n # Test 2D\n x = np.random.random((1, height, width))\n img = utils.array_to_img(x, data_format='channels_first')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_first')\n assert x.shape == (1, height, width)\n\n # grayscale 32-bit signed integer\n x = np.array(\n np.random.randint(-2147483648, 2147483647, (1, height, width)),\n dtype=np.int32\n )\n img = utils.array_to_img(x, data_format='channels_first')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_first')\n assert x.shape == (1, height, width)\n\n # Test tf data format\n # Test RGB 3D\n x = np.random.random((height, width, 3))\n img = utils.array_to_img(x, data_format='channels_last')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_last')\n assert x.shape == (height, width, 3)\n\n # Test RGBA 3D\n x = np.random.random((height, width, 4))\n img = utils.array_to_img(x, data_format='channels_last')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_last')\n assert x.shape == (height, width, 4)\n\n # Test 2D\n x = np.random.random((height, width, 1))\n img = utils.array_to_img(x, data_format='channels_last')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_last')\n assert x.shape == (height, width, 1)\n\n # grayscale 16-bit signed integer\n x = np.array(\n np.random.randint(-2147483648, 2147483647, (height, width, 1)),\n dtype=np.int16\n )\n img = utils.array_to_img(x, data_format='channels_last')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_last')\n assert x.shape == (height, width, 1)\n\n # grayscale 32-bit signed integer\n x = np.array(\n np.random.randint(-2147483648, 2147483647, (height, width, 1)),\n dtype=np.int32\n )\n img = utils.array_to_img(x, data_format='channels_last')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_last')\n assert x.shape == (height, width, 1)\n\n # Test invalid use case\n with pytest.raises(ValueError):\n x = np.random.random((height, width)) # not 3D\n img = utils.array_to_img(x, data_format='channels_first')\n\n with pytest.raises(ValueError):\n x = np.random.random((height, width, 3))\n # unknown data_format\n img = utils.array_to_img(x, data_format='channels')\n\n with 
pytest.raises(ValueError):\n # neither RGB, RGBA, or gray-scale\n x = np.random.random((height, width, 5))\n img = utils.array_to_img(x, data_format='channels_last')\n\n with pytest.raises(ValueError):\n x = np.random.random((height, width, 3))\n # unknown data_format\n img = utils.img_to_array(x, data_format='channels')\n\n with pytest.raises(ValueError):\n # neither RGB, RGBA, or gray-scale\n x = np.random.random((height, width, 5, 3))\n img = utils.img_to_array(x, data_format='channels_last')\n\n\ndef write_sample_image(tmpdir):\n im = utils.array_to_img(np.random.rand(1, 1, 3))\n path = str(tmpdir / 'sample_image.png')\n utils.save_img(path, im)\n return path\n\n\ndef test_image_file_handlers_close(tmpdir):\n path = write_sample_image(tmpdir)\n max_open_files, _ = resource.getrlimit(resource.RLIMIT_NOFILE)\n for i in range(max_open_files+1):\n utils.load_img(path)\n\n\ndef test_load_img_returns_image(tmpdir):\n path = write_sample_image(tmpdir)\n im = utils.load_img(path)\n assert isinstance(im, PIL.Image.Image)\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n"
] | [
[
"numpy.allclose",
"numpy.sum",
"numpy.zeros",
"numpy.any",
"numpy.random.random",
"numpy.all",
"numpy.random.rand",
"numpy.array",
"numpy.random.randint"
]
] |
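A minimal sketch of the array/image round-trip that the equality assertions in tests/image/utils_test.py exercise (assuming keras_preprocessing and Pillow are installed; the 100x100 shape matches the test):

import numpy as np
from keras_preprocessing.image import utils

# Round-trip a random 8-bit RGB array through a PIL image and back.
arr = np.array(255 * np.random.rand(100, 100, 3), dtype=np.uint8)
img = utils.array_to_img(arr, scale=False)  # PIL.Image, size reported as (width, height)
back = utils.img_to_array(img)              # float32 array, shape (100, 100, 3)
assert back.shape == arr.shape
assert np.all(back == arr)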
chence17/fcaf3d | [
"636aaa0410430deedd7bd4979e8c1bc307424a84"
] | [
"mmdet3d/datasets/s3dis_dataset.py"
] | [
"import numpy as np\nfrom os import path as osp\n\nfrom mmdet3d.core import show_result, show_seg_result\nfrom mmdet3d.core.bbox import DepthInstance3DBoxes\nfrom mmdet.datasets import DATASETS\nfrom mmseg.datasets import DATASETS as SEG_DATASETS\nfrom .custom_3d import Custom3DDataset\nfrom .custom_3d_seg import Custom3DSegDataset\nfrom .pipelines import Compose\n\n\[email protected]_module()\nclass S3DISDataset(Custom3DDataset):\n \"\"\"S3DIS Dataset for Detection Task.\n\n This class is the inner dataset for S3DIS. Since S3DIS has 6 areas, we\n often train on 5 of them and test on the remaining one. The one for\n test is Area_5 as suggested in `GSDN <https://arxiv.org/abs/2006.12356>`_.\n To concatenate 5 areas during training\n `mmdet.datasets.dataset_wrappers.ConcatDataset` should be used.\n\n Args:\n data_root (str): Path of dataset root.\n ann_file (str): Path of annotation file.\n pipeline (list[dict], optional): Pipeline used for data processing.\n Defaults to None.\n classes (tuple[str], optional): Classes used in the dataset.\n Defaults to None.\n modality (dict, optional): Modality to specify the sensor data used\n as input. Defaults to None.\n box_type_3d (str, optional): Type of 3D box of this dataset.\n Based on the `box_type_3d`, the dataset will encapsulate the box\n to its original format then converted them to `box_type_3d`.\n Defaults to 'Depth' in this dataset. Available options includes\n\n - 'LiDAR': Box in LiDAR coordinates.\n - 'Depth': Box in depth coordinates, usually for indoor dataset.\n - 'Camera': Box in camera coordinates.\n filter_empty_gt (bool, optional): Whether to filter empty GT.\n Defaults to True.\n test_mode (bool, optional): Whether the dataset is in test mode.\n Defaults to False.\n \"\"\"\n CLASSES = ('table', 'chair', 'sofa', 'bookcase', 'board')\n\n def __init__(self,\n data_root,\n ann_file,\n pipeline=None,\n classes=None,\n modality=None,\n box_type_3d='Depth',\n filter_empty_gt=True,\n test_mode=False):\n super().__init__(\n data_root=data_root,\n ann_file=ann_file,\n pipeline=pipeline,\n classes=classes,\n modality=modality,\n box_type_3d=box_type_3d,\n filter_empty_gt=filter_empty_gt,\n test_mode=test_mode)\n\n def get_ann_info(self, index):\n \"\"\"Get annotation info according to the given index.\n\n Args:\n index (int): Index of the annotation data to get.\n\n Returns:\n dict: annotation information consists of the following keys:\n\n - gt_bboxes_3d (:obj:`DepthInstance3DBoxes`): \\\n 3D ground truth bboxes\n - gt_labels_3d (np.ndarray): Labels of ground truths.\n - pts_instance_mask_path (str): Path of instance masks.\n - pts_semantic_mask_path (str): Path of semantic masks.\n \"\"\"\n # Use index to get the annos, thus the evalhook could also use this api\n info = self.data_infos[index]\n if info['annos']['gt_num'] != 0:\n gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(\n np.float32) # k, 6\n gt_labels_3d = info['annos']['class'].astype(np.long)\n else:\n gt_bboxes_3d = np.zeros((0, 6), dtype=np.float32)\n gt_labels_3d = np.zeros((0, ), dtype=np.long)\n\n # to target box structure\n gt_bboxes_3d = DepthInstance3DBoxes(\n gt_bboxes_3d,\n box_dim=gt_bboxes_3d.shape[-1],\n with_yaw=False,\n origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)\n\n pts_instance_mask_path = osp.join(self.data_root,\n info['pts_instance_mask_path'])\n pts_semantic_mask_path = osp.join(self.data_root,\n info['pts_semantic_mask_path'])\n\n anns_results = dict(\n gt_bboxes_3d=gt_bboxes_3d,\n gt_labels_3d=gt_labels_3d,\n 
pts_instance_mask_path=pts_instance_mask_path,\n pts_semantic_mask_path=pts_semantic_mask_path)\n return anns_results\n\n def get_data_info(self, index):\n \"\"\"Get data info according to the given index.\n\n Args:\n index (int): Index of the sample data to get.\n\n Returns:\n dict: Data information that will be passed to the data \\\n preprocessing pipelines. It includes the following keys:\n\n - pts_filename (str): Filename of point clouds.\n - file_name (str): Filename of point clouds.\n - ann_info (dict): Annotation info.\n \"\"\"\n info = self.data_infos[index]\n pts_filename = osp.join(self.data_root, info['pts_path'])\n input_dict = dict(pts_filename=pts_filename)\n\n if not self.test_mode:\n annos = self.get_ann_info(index)\n input_dict['ann_info'] = annos\n if self.filter_empty_gt and ~(annos['gt_labels_3d'] != -1).any():\n return None\n return input_dict\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n load_dim=6,\n use_dim=[0, 1, 2, 3, 4, 5]),\n dict(\n type='DefaultFormatBundle3D',\n class_names=self.CLASSES,\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n ]\n return Compose(pipeline)\n\n def show(self, results, out_dir, show=True, pipeline=None):\n \"\"\"Results visualization.\n\n Args:\n results (list[dict]): List of bounding boxes results.\n out_dir (str): Output directory of visualization result.\n show (bool): Visualize the results online.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n \"\"\"\n assert out_dir is not None, 'Expect out_dir, got none.'\n pipeline = self._get_pipeline(pipeline)\n for i, result in enumerate(results):\n data_info = self.data_infos[i]\n pts_path = data_info['pts_path']\n file_name = osp.split(pts_path)[-1].split('.')[0]\n points = self._extract_data(i, pipeline, 'points').numpy()\n gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d']\n gt_bboxes = gt_bboxes.corners.numpy() if len(gt_bboxes) else None\n gt_labels = self.get_ann_info(i)['gt_labels_3d']\n pred_bboxes = result['boxes_3d']\n pred_bboxes = pred_bboxes.corners.numpy() if len(pred_bboxes) else None\n pred_labels = result['labels_3d']\n show_result(points, gt_bboxes, gt_labels,\n pred_bboxes, pred_labels, out_dir, file_name, False)\n\n\nclass _S3DISSegDataset(Custom3DSegDataset):\n r\"\"\"S3DIS Dataset for Semantic Segmentation Task.\n\n This class is the inner dataset for S3DIS. Since S3DIS has 6 areas, we\n often train on 5 of them and test on the remaining one.\n However, there is not a fixed train-test split of S3DIS. People often test\n on Area_5 as suggested by `SEGCloud <https://arxiv.org/abs/1710.07563>`_.\n But many papers also report the average results of 6-fold cross validation\n over the 6 areas (e.g. `DGCNN <https://arxiv.org/abs/1801.07829>`_).\n Therefore, we use an inner dataset for one area, and further use a dataset\n wrapper to concat all the provided data in different areas.\n\n Args:\n data_root (str): Path of dataset root.\n ann_file (str): Path of annotation file.\n pipeline (list[dict], optional): Pipeline used for data processing.\n Defaults to None.\n classes (tuple[str], optional): Classes used in the dataset.\n Defaults to None.\n palette (list[list[int]], optional): The palette of segmentation map.\n Defaults to None.\n modality (dict, optional): Modality to specify the sensor data used\n as input. 
Defaults to None.\n test_mode (bool, optional): Whether the dataset is in test mode.\n Defaults to False.\n ignore_index (int, optional): The label index to be ignored, e.g. \\\n unannotated points. If None is given, set to len(self.CLASSES).\n Defaults to None.\n scene_idxs (np.ndarray | str, optional): Precomputed index to load\n data. For scenes with many points, we may sample it several times.\n Defaults to None.\n \"\"\"\n CLASSES = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door',\n 'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter')\n\n VALID_CLASS_IDS = tuple(range(13))\n\n ALL_CLASS_IDS = tuple(range(14)) # possibly with 'stair' class\n\n PALETTE = [[0, 255, 0], [0, 0, 255], [0, 255, 255], [255, 255, 0],\n [255, 0, 255], [100, 100, 255], [200, 200, 100],\n [170, 120, 200], [255, 0, 0], [200, 100, 100], [10, 200, 100],\n [200, 200, 200], [50, 50, 50]]\n\n def __init__(self,\n data_root,\n ann_file,\n pipeline=None,\n classes=None,\n palette=None,\n modality=None,\n test_mode=False,\n ignore_index=None,\n scene_idxs=None):\n\n super().__init__(\n data_root=data_root,\n ann_file=ann_file,\n pipeline=pipeline,\n classes=classes,\n palette=palette,\n modality=modality,\n test_mode=test_mode,\n ignore_index=ignore_index,\n scene_idxs=scene_idxs)\n\n def get_ann_info(self, index):\n \"\"\"Get annotation info according to the given index.\n\n Args:\n index (int): Index of the annotation data to get.\n\n Returns:\n dict: annotation information consists of the following keys:\n\n - pts_semantic_mask_path (str): Path of semantic masks.\n \"\"\"\n # Use index to get the annos, thus the evalhook could also use this api\n info = self.data_infos[index]\n\n pts_semantic_mask_path = osp.join(self.data_root,\n info['pts_semantic_mask_path'])\n\n anns_results = dict(pts_semantic_mask_path=pts_semantic_mask_path)\n return anns_results\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n use_color=True,\n load_dim=6,\n use_dim=[0, 1, 2, 3, 4, 5]),\n dict(\n type='LoadAnnotations3D',\n with_bbox_3d=False,\n with_label_3d=False,\n with_mask_3d=False,\n with_seg_3d=True),\n dict(\n type='PointSegClassMapping',\n valid_cat_ids=self.VALID_CLASS_IDS,\n max_cat_id=np.max(self.ALL_CLASS_IDS)),\n dict(\n type='DefaultFormatBundle3D',\n with_label=False,\n class_names=self.CLASSES),\n dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])\n ]\n return Compose(pipeline)\n\n def show(self, results, out_dir, show=True, pipeline=None):\n \"\"\"Results visualization.\n\n Args:\n results (list[dict]): List of bounding boxes results.\n out_dir (str): Output directory of visualization result.\n show (bool): Visualize the results online.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n \"\"\"\n assert out_dir is not None, 'Expect out_dir, got none.'\n pipeline = self._get_pipeline(pipeline)\n for i, result in enumerate(results):\n data_info = self.data_infos[i]\n pts_path = data_info['pts_path']\n file_name = osp.split(pts_path)[-1].split('.')[0]\n points, gt_sem_mask = self._extract_data(\n i, pipeline, ['points', 'pts_semantic_mask'], load_annos=True)\n points = points.numpy()\n pred_sem_mask = result['semantic_mask'].numpy()\n show_seg_result(points, gt_sem_mask,\n pred_sem_mask, out_dir, file_name,\n np.array(self.PALETTE), self.ignore_index, show)\n\n def get_scene_idxs(self, scene_idxs):\n \"\"\"Compute 
scene_idxs for data sampling.\n\n We sample more times for scenes with more points.\n \"\"\"\n # when testing, we load one whole scene every time\n if not self.test_mode and scene_idxs is None:\n raise NotImplementedError(\n 'please provide re-sampled scene indexes for training')\n\n return super().get_scene_idxs(scene_idxs)\n\n\[email protected]_module()\n@SEG_DATASETS.register_module()\nclass S3DISSegDataset(_S3DISSegDataset):\n r\"\"\"S3DIS Dataset for Semantic Segmentation Task.\n\n This class serves as the API for experiments on the S3DIS Dataset.\n It wraps the provided datasets of different areas.\n We don't use `mmdet.datasets.dataset_wrappers.ConcatDataset` because we\n need to concat the `scene_idxs` of different areas.\n\n Please refer to the `google form <https://docs.google.com/forms/d/e/1FAIpQL\n ScDimvNMCGhy_rmBA2gHfDu3naktRm6A8BPwAWWDv-Uhm6Shw/viewform?c=0&w=1>`_ for\n data downloading.\n\n Args:\n data_root (str): Path of dataset root.\n ann_files (list[str]): Path of several annotation files.\n pipeline (list[dict], optional): Pipeline used for data processing.\n Defaults to None.\n classes (tuple[str], optional): Classes used in the dataset.\n Defaults to None.\n palette (list[list[int]], optional): The palette of segmentation map.\n Defaults to None.\n modality (dict, optional): Modality to specify the sensor data used\n as input. Defaults to None.\n test_mode (bool, optional): Whether the dataset is in test mode.\n Defaults to False.\n ignore_index (int, optional): The label index to be ignored, e.g. \\\n unannotated points. If None is given, set to len(self.CLASSES).\n Defaults to None.\n scene_idxs (list[np.ndarray] | list[str], optional): Precomputed index\n to load data. For scenes with many points, we may sample it several\n times. 
Defaults to None.\n \"\"\"\n\n def __init__(self,\n data_root,\n ann_files,\n pipeline=None,\n classes=None,\n palette=None,\n modality=None,\n test_mode=False,\n ignore_index=None,\n scene_idxs=None):\n\n # make sure that ann_files and scene_idxs have same length\n ann_files = self._check_ann_files(ann_files)\n scene_idxs = self._check_scene_idxs(scene_idxs, len(ann_files))\n\n # initialize some attributes as datasets[0]\n super().__init__(\n data_root=data_root,\n ann_file=ann_files[0],\n pipeline=pipeline,\n classes=classes,\n palette=palette,\n modality=modality,\n test_mode=test_mode,\n ignore_index=ignore_index,\n scene_idxs=scene_idxs[0])\n\n datasets = [\n _S3DISSegDataset(\n data_root=data_root,\n ann_file=ann_files[i],\n pipeline=pipeline,\n classes=classes,\n palette=palette,\n modality=modality,\n test_mode=test_mode,\n ignore_index=ignore_index,\n scene_idxs=scene_idxs[i]) for i in range(len(ann_files))\n ]\n\n # data_infos and scene_idxs need to be concat\n self.concat_data_infos([dst.data_infos for dst in datasets])\n self.concat_scene_idxs([dst.scene_idxs for dst in datasets])\n\n # set group flag for the sampler\n if not self.test_mode:\n self._set_group_flag()\n\n def concat_data_infos(self, data_infos):\n \"\"\"Concat data_infos from several datasets to form self.data_infos.\n\n Args:\n data_infos (list[list[dict]])\n \"\"\"\n self.data_infos = [\n info for one_data_infos in data_infos for info in one_data_infos\n ]\n\n def concat_scene_idxs(self, scene_idxs):\n \"\"\"Concat scene_idxs from several datasets to form self.scene_idxs.\n\n Needs to manually add offset to scene_idxs[1, 2, ...].\n\n Args:\n scene_idxs (list[np.ndarray])\n \"\"\"\n self.scene_idxs = np.array([], dtype=np.int32)\n offset = 0\n for one_scene_idxs in scene_idxs:\n self.scene_idxs = np.concatenate(\n [self.scene_idxs, one_scene_idxs + offset]).astype(np.int32)\n offset = np.unique(self.scene_idxs).max() + 1\n\n @staticmethod\n def _duplicate_to_list(x, num):\n \"\"\"Repeat x `num` times to form a list.\"\"\"\n return [x for _ in range(num)]\n\n def _check_ann_files(self, ann_file):\n \"\"\"Make ann_files as list/tuple.\"\"\"\n # ann_file could be str\n if not isinstance(ann_file, (list, tuple)):\n ann_file = self._duplicate_to_list(ann_file, 1)\n return ann_file\n\n def _check_scene_idxs(self, scene_idx, num):\n \"\"\"Make scene_idxs as list/tuple.\"\"\"\n if scene_idx is None:\n return self._duplicate_to_list(scene_idx, num)\n # scene_idx could be str, np.ndarray, list or tuple\n if isinstance(scene_idx, str): # str\n return self._duplicate_to_list(scene_idx, num)\n if isinstance(scene_idx[0], str): # list of str\n return scene_idx\n if isinstance(scene_idx[0], (list, tuple, np.ndarray)): # list of idx\n return scene_idx\n # single idx\n return self._duplicate_to_list(scene_idx, num)\n"
] | [
[
"numpy.zeros",
"numpy.max",
"numpy.array",
"numpy.concatenate",
"numpy.unique"
]
] |
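The scene-index concatenation in S3DISSegDataset.concat_scene_idxs above keeps per-area indices disjoint by shifting each area past the largest index seen so far; a self-contained sketch of that logic (the toy inputs are hypothetical):

import numpy as np

def concat_scene_idxs(scene_idxs_per_area):
    # Shift each area's indices by the running offset, then concatenate,
    # mirroring S3DISSegDataset.concat_scene_idxs.
    out = np.array([], dtype=np.int32)
    offset = 0
    for one_scene_idxs in scene_idxs_per_area:
        out = np.concatenate([out, np.asarray(one_scene_idxs) + offset]).astype(np.int32)
        offset = np.unique(out).max() + 1
    return out

print(concat_scene_idxs([[0, 0, 1], [0, 1, 1]]))  # [0 0 1 2 3 3]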
PaulTHong/STDA-inf | [
"3d87a7843f879d17a343ba4838caa1f58f1e8e65"
] | [
"data/cal_mean_std.py"
] | [
"import numpy as np\nimport cv2\nimport os\n\nmean = []\nstd = []\nimg_list = []\n\ndir_path = './STL10-data/train'\nclass_paths = os.listdir(dir_path)\nprint(class_paths)\nfor cls in class_paths:\n img_paths = os.listdir(dir_path + os.sep + cls)\n print(len(img_paths))\n for img_path in img_paths:\n print(img_path)\n img_path = dir_path + os.sep + cls + os.sep + img_path\n img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)\n img = img[::, np.newaxis]\n img_list.append(img)\n\n# dir_path = './STL10-data/test'\n# class_paths = os.listdir(dir_path)\n# print(class_paths)\n# for cls in class_paths:\n # img_paths = os.listdir(dir_path + os.sep + cls)\n # print(len(img_paths))\n # for img_path in img_paths:\n # print(img_path)\n # img_path = dir_path + os.sep + cls + os.sep + img_path\n # img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)\n # img = img[::, np.newaxis]\n # img_list.append(img)\n\nimgs = np.concatenate(img_list, axis=3)\nimgs = imgs.astype(np.float32) / 255.0\n\nfor i in range(3):\n channel = imgs[:, :, i, :].ravel()\n mean.append(np.mean(channel))\n std.append(np.std(channel))\n\nmean.reverse()\nstd.reverse()\n\nprint(mean)\nprint(std)\n\n\n\n\n\n"
] | [
[
"numpy.std",
"numpy.concatenate",
"numpy.mean"
]
] |
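A toy check of the per-channel statistics cal_mean_std.py computes once the images are stacked as (H, W, 3, N); the constant 4x4 images are illustrative stand-ins, not data from the repository:

import numpy as np

# Two toy (H, W, 3) images standing in for cv2.imread results.
imgs_hw3 = [np.full((4, 4, 3), v, dtype=np.uint8) for v in (10, 30)]

# Stack along a new trailing axis, as the script does: (H, W, 3, N).
stack = np.concatenate([im[:, :, :, np.newaxis] for im in imgs_hw3], axis=3)
stack = stack.astype(np.float32) / 255.0

mean = [float(np.mean(stack[:, :, i, :])) for i in range(3)]
std = [float(np.std(stack[:, :, i, :])) for i in range(3)]
mean.reverse()  # BGR -> RGB ordering, matching the script
std.reverse()
print(mean)  # each channel averages (10 + 30) / 2 / 255 ~= 0.0784
print(std)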
vrushank-agrawal/opencv-x64-cmake | [
"3f9486510d706c8ac579ac82f5d58f667f948124",
"3f9486510d706c8ac579ac82f5d58f667f948124"
] | [
"opencv/sources/modules/dnn/test/cityscapes_semsegm_test_enet.py",
"opencv/sources/samples/python/tutorial_code/imgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.py"
] | [
"import numpy as np\r\nimport sys\r\nimport os\r\nimport fnmatch\r\nimport argparse\r\n\r\ntry:\r\n import cv2 as cv\r\nexcept ImportError:\r\n raise ImportError('Can\\'t find OpenCV Python module. If you\\'ve built it from sources without installation, '\r\n 'configure environment variable PYTHONPATH to \"opencv_build_dir/lib\" directory (with \"python3\" subdirectory if required)')\r\ntry:\r\n import torch\r\nexcept ImportError:\r\n raise ImportError('Can\\'t find pytorch. Please install it by following instructions on the official site')\r\n\r\nfrom torch.utils.serialization import load_lua\r\nfrom pascal_semsegm_test_fcn import eval_segm_result, get_conf_mat, get_metrics, DatasetImageFetch, SemSegmEvaluation\r\nfrom imagenet_cls_test_alexnet import Framework, DnnCaffeModel\r\n\r\n\r\nclass NormalizePreproc:\r\n def __init__(self):\r\n pass\r\n\r\n @staticmethod\r\n def process(img):\r\n image_data = np.array(img).transpose(2, 0, 1).astype(np.float32)\r\n image_data = np.expand_dims(image_data, 0)\r\n image_data /= 255.0\r\n return image_data\r\n\r\n\r\nclass CityscapesDataFetch(DatasetImageFetch):\r\n img_dir = ''\r\n segm_dir = ''\r\n segm_files = []\r\n colors = []\r\n i = 0\r\n\r\n def __init__(self, img_dir, segm_dir, preproc):\r\n self.img_dir = img_dir\r\n self.segm_dir = segm_dir\r\n self.segm_files = sorted([img for img in self.locate('*_color.png', segm_dir)])\r\n self.colors = self.get_colors()\r\n self.data_prepoc = preproc\r\n self.i = 0\r\n\r\n @staticmethod\r\n def get_colors():\r\n result = []\r\n colors_list = (\r\n (0, 0, 0), (128, 64, 128), (244, 35, 232), (70, 70, 70), (102, 102, 156), (190, 153, 153), (153, 153, 153),\r\n (250, 170, 30), (220, 220, 0), (107, 142, 35), (152, 251, 152), (70, 130, 180), (220, 20, 60), (255, 0, 0),\r\n (0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32))\r\n\r\n for c in colors_list:\r\n result.append(DatasetImageFetch.pix_to_c(c))\r\n return result\r\n\r\n def __iter__(self):\r\n return self\r\n\r\n def next(self):\r\n if self.i < len(self.segm_files):\r\n segm_file = self.segm_files[self.i]\r\n segm = cv.imread(segm_file, cv.IMREAD_COLOR)[:, :, ::-1]\r\n segm = cv.resize(segm, (1024, 512), interpolation=cv.INTER_NEAREST)\r\n\r\n img_file = self.rreplace(self.img_dir + segm_file[len(self.segm_dir):], 'gtFine_color', 'leftImg8bit')\r\n assert os.path.exists(img_file)\r\n img = cv.imread(img_file, cv.IMREAD_COLOR)[:, :, ::-1]\r\n img = cv.resize(img, (1024, 512))\r\n\r\n self.i += 1\r\n gt = self.color_to_gt(segm, self.colors)\r\n img = self.data_prepoc.process(img)\r\n return img, gt\r\n else:\r\n self.i = 0\r\n raise StopIteration\r\n\r\n def get_num_classes(self):\r\n return len(self.colors)\r\n\r\n @staticmethod\r\n def locate(pattern, root_path):\r\n for path, dirs, files in os.walk(os.path.abspath(root_path)):\r\n for filename in fnmatch.filter(files, pattern):\r\n yield os.path.join(path, filename)\r\n\r\n @staticmethod\r\n def rreplace(s, old, new, occurrence=1):\r\n li = s.rsplit(old, occurrence)\r\n return new.join(li)\r\n\r\n\r\nclass TorchModel(Framework):\r\n net = object\r\n\r\n def __init__(self, model_file):\r\n self.net = load_lua(model_file)\r\n\r\n def get_name(self):\r\n return 'Torch'\r\n\r\n def get_output(self, input_blob):\r\n tensor = torch.FloatTensor(input_blob)\r\n out = self.net.forward(tensor).numpy()\r\n return out\r\n\r\n\r\nclass DnnTorchModel(DnnCaffeModel):\r\n net = cv.dnn.Net()\r\n\r\n def __init__(self, model_file):\r\n self.net = 
cv.dnn.readNetFromTorch(model_file)\r\n\r\n def get_output(self, input_blob):\r\n self.net.setBlob(\"\", input_blob)\r\n self.net.forward()\r\n return self.net.getBlob(self.net.getLayerNames()[-1])\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--imgs_dir\", help=\"path to Cityscapes validation images dir, imgsfine/leftImg8bit/val\")\r\n parser.add_argument(\"--segm_dir\", help=\"path to Cityscapes dir with segmentation, gtfine/gtFine/val\")\r\n parser.add_argument(\"--model\", help=\"path to torch model, download it here: \"\r\n \"https://www.dropbox.com/sh/dywzk3gyb12hpe5/AAD5YkUa8XgMpHs2gCRgmCVCa\")\r\n parser.add_argument(\"--log\", help=\"path to logging file\")\r\n args = parser.parse_args()\r\n\r\n prep = NormalizePreproc()\r\n df = CityscapesDataFetch(args.imgs_dir, args.segm_dir, prep)\r\n\r\n fw = [TorchModel(args.model),\r\n DnnTorchModel(args.model)]\r\n\r\n segm_eval = SemSegmEvaluation(args.log)\r\n segm_eval.process(fw, df)\r\n",
"\r\nimport cv2 as cv\r\nimport numpy as np\r\nimport argparse\r\n\r\nW = 52 # window size is WxW\r\nC_Thr = 0.43 # threshold for coherency\r\nLowThr = 35 # threshold1 for orientation, it ranges from 0 to 180\r\nHighThr = 57 # threshold2 for orientation, it ranges from 0 to 180\r\n\r\n## [calcGST]\r\n## [calcJ_header]\r\n## [calcGST_proto]\r\ndef calcGST(inputIMG, w):\r\n## [calcGST_proto]\r\n img = inputIMG.astype(np.float32)\r\n\r\n # GST components calculation (start)\r\n # J = (J11 J12; J12 J22) - GST\r\n imgDiffX = cv.Sobel(img, cv.CV_32F, 1, 0, 3)\r\n imgDiffY = cv.Sobel(img, cv.CV_32F, 0, 1, 3)\r\n imgDiffXY = cv.multiply(imgDiffX, imgDiffY)\r\n ## [calcJ_header]\r\n\r\n imgDiffXX = cv.multiply(imgDiffX, imgDiffX)\r\n imgDiffYY = cv.multiply(imgDiffY, imgDiffY)\r\n\r\n J11 = cv.boxFilter(imgDiffXX, cv.CV_32F, (w,w))\r\n J22 = cv.boxFilter(imgDiffYY, cv.CV_32F, (w,w))\r\n J12 = cv.boxFilter(imgDiffXY, cv.CV_32F, (w,w))\r\n # GST components calculations (stop)\r\n\r\n # eigenvalue calculation (start)\r\n # lambda1 = 0.5*(J11 + J22 + sqrt((J11-J22)^2 + 4*J12^2))\r\n # lambda2 = 0.5*(J11 + J22 - sqrt((J11-J22)^2 + 4*J12^2))\r\n tmp1 = J11 + J22\r\n tmp2 = J11 - J22\r\n tmp2 = cv.multiply(tmp2, tmp2)\r\n tmp3 = cv.multiply(J12, J12)\r\n tmp4 = np.sqrt(tmp2 + 4.0 * tmp3)\r\n\r\n lambda1 = 0.5*(tmp1 + tmp4) # biggest eigenvalue\r\n lambda2 = 0.5*(tmp1 - tmp4) # smallest eigenvalue\r\n # eigenvalue calculation (stop)\r\n\r\n # Coherency calculation (start)\r\n # Coherency = (lambda1 - lambda2)/(lambda1 + lambda2)) - measure of anisotropism\r\n # Coherency is anisotropy degree (consistency of local orientation)\r\n imgCoherencyOut = cv.divide(lambda1 - lambda2, lambda1 + lambda2)\r\n # Coherency calculation (stop)\r\n\r\n # orientation angle calculation (start)\r\n # tan(2*Alpha) = 2*J12/(J22 - J11)\r\n # Alpha = 0.5 atan2(2*J12/(J22 - J11))\r\n imgOrientationOut = cv.phase(J22 - J11, 2.0 * J12, angleInDegrees = True)\r\n imgOrientationOut = 0.5 * imgOrientationOut\r\n # orientation angle calculation (stop)\r\n\r\n return imgCoherencyOut, imgOrientationOut\r\n## [calcGST]\r\n\r\nparser = argparse.ArgumentParser(description='Code for Anisotropic image segmentation tutorial.')\r\nparser.add_argument('-i', '--input', help='Path to input image.', required=True)\r\nargs = parser.parse_args()\r\n\r\nimgIn = cv.imread(args.input, cv.IMREAD_GRAYSCALE)\r\nif imgIn is None:\r\n print('Could not open or find the image: {}'.format(args.input))\r\n exit(0)\r\n\r\n## [main_extra]\r\n## [main]\r\nimgCoherency, imgOrientation = calcGST(imgIn, W)\r\n\r\n## [thresholding]\r\n_, imgCoherencyBin = cv.threshold(imgCoherency, C_Thr, 255, cv.THRESH_BINARY)\r\n_, imgOrientationBin = cv.threshold(imgOrientation, LowThr, HighThr, cv.THRESH_BINARY)\r\n## [thresholding]\r\n\r\n## [combining]\r\nimgBin = cv.bitwise_and(imgCoherencyBin, imgOrientationBin)\r\n## [combining]\r\n## [main]\r\n\r\nimgCoherency = cv.normalize(imgCoherency, None, alpha=0, beta=1, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)\r\nimgOrientation = cv.normalize(imgOrientation, None, alpha=0, beta=1, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)\r\n\r\ncv.imshow('result.jpg', np.uint8(0.5*(imgIn + imgBin)))\r\ncv.imshow('Coherency.jpg', imgCoherency)\r\ncv.imshow('Orientation.jpg', imgOrientation)\r\ncv.waitKey(0)\r\n## [main_extra]\r\n"
] | [
[
"numpy.array",
"torch.utils.serialization.load_lua",
"numpy.expand_dims",
"torch.FloatTensor"
],
[
"numpy.sqrt",
"numpy.uint8"
]
] |
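The coherency and orientation that calcGST returns follow from the closed-form eigenvalues of the 2x2 structure tensor J = (J11 J12; J12 J22); a scalar numpy check of those formulas (the component values here are made up for illustration):

import numpy as np

J11, J22, J12 = 4.0, 1.0, 0.5  # toy structure-tensor components for one pixel

tmp = np.sqrt((J11 - J22) ** 2 + 4.0 * J12 ** 2)
lambda1 = 0.5 * ((J11 + J22) + tmp)  # largest eigenvalue
lambda2 = 0.5 * ((J11 + J22) - tmp)  # smallest eigenvalue

coherency = (lambda1 - lambda2) / (lambda1 + lambda2)             # anisotropy degree in [0, 1]
orientation = 0.5 * np.degrees(np.arctan2(2.0 * J12, J22 - J11))  # same angle cv.phase computes
print(lambda1, lambda2, coherency, orientation)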
jmetz/momanalysis | [
"8d71490c99127568b184784890258e9a6ef876ef"
] | [
"mmhelper/output.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 09 09:59:13 2017\n\n@author: as624\n\"\"\"\nimport csv\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef output_detection_figures(\n image, wells, bacteria, timeindex, output_dir):\n \"\"\"\n Produces and saves figures showing the output from the detection\n\n Parameters\n ------\n image : ndarray (2D)\n The initial image that detection was run on\n wells : ndarray (2D) of dtype int\n A labelled image showing the detected wells\n bacteria : ndarray (2D) of dtype int\n A labelled image showing the detected bacteria\n timeindex : int\n The timepoint that has been analysed\n output_dir : str (path)\n Where to save the images\n \"\"\"\n # For detection figures, labels not needed (I think)?\n plt.figure(figsize=(16, 12))\n plt.imshow(image, cmap='gray')\n plt.contour(wells > 0, levels=[0.5], colors=['y'])\n #plt.contour(channel>0, levels=[0.5], colors=['r'])\n for lab_bac in range(1, bacteria.max() + 1):\n col = plt.cm.gist_rainbow((lab_bac / 9.1) % 1)\n plt.contour(bacteria == lab_bac, levels=[0.5], colors=[col])\n plt.savefig(os.path.join(\n output_dir, \"detection_frame_{:06d}\".format(timeindex)))\n plt.close()\n\n\ndef output_tracking_figures(\n data,\n fullwellimages,\n wellcoords,\n allbacteria,\n output_dir,\n bacteria_lineage):\n \"\"\"\n Produces and saves figures showing the output after tracking\n\n Parameters\n ------\n data : list of ndarrays\n List of initial image that detection was run on\n fullwellimages : list of ndarrays\n List of labelled images showing the detected wells\n wellcoords : list of arrays\n Each entry contains a further list where each entry contains well coordinates\n allbacteria : list of arrays\n List of labelled images showing the detected bacteria\n output_dir : str (path)\n Where to save the images\n bacteria_lineage : dictionary\n A dictionary that links the physical unique label of a bacteria\n to one which shows information on its lineage\n \"\"\"\n for tpoint, (image, fullwells, bacteria, coords) in enumerate(\n zip(data, fullwellimages, allbacteria, wellcoords)):\n # For detection figures, labels not needed (I think)?\n plt.figure(figsize=(16, 12))\n plt.imshow(image, cmap='gray')\n if len(np.unique(fullwells)) == 1:\n plt.savefig(os.path.join(\n output_dir, \"tracking_frame_{:06d}\".format(tpoint)))\n plt.close()\n continue\n plt.contour(fullwells > 0, levels=[0.5], colors=['y'])\n bacteriaim = np.zeros_like(fullwells)\n for welllabel in coords:\n bacteriaim[coords[welllabel]] = bacteria[welllabel]\n # Add in well labels top left(?) 
of well contour\n #bw = fullwells == welllabel\n # if not np.any(bw):\n # continue\n #pos0 = bw.nonzero()\n pos = (np.min(coords[welllabel][0]), np.max(coords[welllabel][1]))\n plt.text(pos[1], pos[0], \"%d\" % welllabel, color=\"y\")\n\n for lab_bac in range(1, bacteriaim.max() + 1):\n col = plt.cm.gist_rainbow((lab_bac / 9.1) % 1)\n bw0 = bacteriaim == lab_bac\n if not np.any(bw0):\n continue\n plt.contour(bw0, levels=[0.5], colors=[col])\n pos0 = bw0.nonzero()\n if len(pos0[0]) == 0 or len(pos0[1]) == 0:\n continue\n #lab_string = label_dict_string[lab_bac]\n pos = (np.min(pos0[0]), np.max(pos0[1]))\n plt.text(pos[1], pos[0], str(bacteria_lineage[lab_bac]), color=col)\n plt.savefig(os.path.join(\n output_dir, \"tracking_frame_{:06d}\".format(tpoint)))\n plt.close()\n\n\ndef final_output(measurements, output_dir):\n \"\"\"outputs a final csv with information on the bacteria detected\n\n Parameters\n ------\n measurements : Custom class instance\n Its attribute \"bacteria\" is a dictionary containing information on\n each individual bacteria\n output_dir : str (path)\n Where to write the csv\n \"\"\"\n output_csv_file = os.path.join(output_dir, 'Results.csv')\n with open(output_csv_file, \"w\", newline='') as file0:\n writer = csv.writer(file0)\n for numbac, (bac) in enumerate(measurements.bacteria.values()):\n if numbac == 0:\n writer.writerow(bac.headings_line)\n writer.writerow(bac.measurements_output)\n"
] | [
[
"numpy.zeros_like",
"matplotlib.pyplot.figure",
"numpy.any",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.imshow",
"numpy.max",
"matplotlib.pyplot.text",
"matplotlib.pyplot.cm.gist_rainbow",
"matplotlib.pyplot.close",
"numpy.min",
"numpy.unique"
]
] |
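A self-contained sketch of the label-to-colour contour pattern both figure functions in mmhelper/output.py use (the toy labelled image is hypothetical; the (lab / 9.1) % 1 colour cycling matches the source):

import matplotlib
matplotlib.use('Agg')  # headless backend, as when figures are only saved
import matplotlib.pyplot as plt
import numpy as np

# Toy labelled image: two detected objects with labels 1 and 2.
labelled = np.zeros((50, 50), dtype=int)
labelled[10:20, 10:20] = 1
labelled[30:40, 30:40] = 2

plt.figure()
plt.imshow(labelled > 0, cmap='gray')
for lab in range(1, labelled.max() + 1):
    col = plt.cm.gist_rainbow((lab / 9.1) % 1)  # one distinct colour per label
    plt.contour(labelled == lab, levels=[0.5], colors=[col])
plt.savefig('toy_detection_frame.png')
plt.close()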
iurteaga/menstrual_cycle_analysis | [
"799c7cb59d759e0c3929164bccdc5c7ce80324d0"
] | [
"src/characterization/cycle_period_length_analysis.py"
] | [
"#!/usr/bin/python\n\n# Imports\nimport sys, os, re, time\nimport argparse\nimport pdb\nimport pickle\nfrom itertools import *\n# Science\nimport numpy as np\nimport scipy.stats as stats\nimport pandas as pd\n# Plotting\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nfrom mpl_toolkits.mplot3d import Axes3D\n\n################################## FUNCTIONS ############################\n# Population time-series\ndef population_time_series_embedding_lengths(cycle_stats_df, attribute, cutoff_criteria, cutoff, sample_style, save_dir):\n '''\n Function that plots a population level time series embedding of cycle and period lengths\n In plot:\n x axis is length_attribute for cycle 1,\n y axis is length attribute for cycle 2,\n z is for cycle 3\n Input:\n cycle_stats_df: pandas dataframe, with information about user's cycle statistics\n attribute: whether to consider 'cycle_lengths' or 'period_lengths'\n cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)\n cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)\n sample_style: whether to pick 3 consecutive 'random' or 'first' cycles per-user\n save_dir: path where to save plot\n Output:\n None\n '''\n #get users with color by attribute > cutoff, and <= cutoff\n cycle_stats_df_greater_than = cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]\n cycle_stats_df_less_than = cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]\n cycle_lengths_greater_than = cycle_stats_df_greater_than[attribute]\n cycle_lengths_less_than = cycle_stats_df_less_than[attribute]\n \n # Filename\n if sample_style == 'first':\n filename = '{}/population_time_series_embedding_for_{}_split_by_{}_{}_first_3.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)\n if sample_style == 'random':\n filename = '{}/population_time_series_embedding_for_{}_split_by_{}_{}_sample_3.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)\n \n # Plot\n colors = ['orange', 'c']\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n for index, cycle_lengths in enumerate([cycle_lengths_greater_than, cycle_lengths_less_than]):\n print('Start selecting cycles for one group')\n if sample_style=='first':\n sample_cycle_lengths = [cycle_length[:3] for cycle_length in cycle_lengths if len(cycle_length) >= 3]\n if sample_style=='random':\n sample_cycle_lengths = []\n for cycle_length in cycle_lengths:\n if len(cycle_length) >= 3:\n num_cycles_array = np.linspace(0, len(cycle_length)-3, len(cycle_length)-2)\n start_index = np.random.choice(num_cycles_array, size=1).astype(int)[0]\n sample_cycle_lengths.append(cycle_length[start_index:start_index+3])\n print('Finished selecting cycles for one group')\n \n print('Start plotting one group')\n for i in range(len(sample_cycle_lengths)):\n xs = sample_cycle_lengths[i][0]\n ys = sample_cycle_lengths[i][1]\n zs = sample_cycle_lengths[i][2]\n # Plot this point\n ax.scatter(xs, ys, zs, color = colors[index], s=1, alpha=0.3)\n print('Finished plotting one group')\n\n ax.set_xlabel(attribute+ '[i]')\n ax.set_ylabel(attribute+ '[i+1]')\n ax.set_zlabel(attribute+ '[i+2]')\n if attribute == 'cycle_lengths':\n #ref_line_points = np.linspace(10, 90, 10)\n #ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)\n ax.set_xlim3d(10,90)\n ax.set_ylim3d(10,90)\n ax.set_zlim3d(10,90)\n elif attribute == 'period_lengths':\n 
max_period_days=28\n #ref_line_points = np.linspace(1, max_period_days, 4)\n #ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)\n ax.set_xlim3d(1,max_period_days)\n ax.set_ylim3d(1,max_period_days)\n ax.set_zlim3d(1,max_period_days)\n ax.set_xticks(np.append([1],np.arange(4,max_period_days+1, 4)))\n ax.set_yticks(np.append([1],np.arange(4,max_period_days+1, 4)))\n ax.set_zticks(np.append([1],np.arange(4,max_period_days+1, 4)))\n \n plt.savefig(filename.format(save_dir), format='pdf', bbox_inches='tight')\n print('Finished one view')\n # With angles\n for angle in [30, 60, 90, 180]:\n print('Start one view')\n filename_angle = filename[:-4]+'_'+str(angle)+'.pdf'\n ax.view_init(elev=None, azim=angle)\n # Add (a)/(b) labels for paper\n ax.text2D(12, 7,'(a)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)\n plt.savefig(filename_angle.format(save_dir), format='pdf', bbox_inches='tight')\n print('Finished one view')\n\n plt.close()\n\n# Time series embedding for a randomly chosen user\ndef random_time_series_embedding_lengths(cycle_stats_df, attribute, cutoff_criteria, cutoff, save_dir):\n '''\n Function that plots a time series embedding of cycle and period lengths for a randomly chosen user per group\n In plot:\n x axis is length_attribute for cycle i,\n y axis is length attribute for cycle i+1,\n z is for cycle i+2\n Input:\n cycle_stats_df: pandas dataframe, with information about user's cycle statistics\n attribute: whether to consider 'cycle_lengths' or 'period_lengths'\n cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)\n cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)\n save_dir: path where to save plot\n Output:\n None\n '''\n # Select users with median number of cycles tracked\n cycle_stats_df_median = cycle_stats_df[cycle_stats_df['num_cycles_tracked'] == 11]\n filename = '{}/random_time_series_embedding_for_{}_split_by_{}_{}.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)\n\n #get users with color by attribute > cutoff, and <= cutoff\n cycle_stats_df_greater_than = cycle_stats_df_median[cycle_stats_df_median[cutoff_criteria] > cutoff]\n cycle_stats_df_less_than = cycle_stats_df_median[cycle_stats_df_median[cutoff_criteria] <= cutoff]\n cycle_lengths_greater_than = cycle_stats_df_greater_than[attribute]\n cycle_lengths_less_than = cycle_stats_df_less_than[attribute]\n \n # Randomly pick a user from each group\n cycle_lengths_greater_than_user = np.random.choice(cycle_lengths_greater_than, size=1, replace=False)\n cycle_lengths_less_than_user = np.random.choice(cycle_lengths_less_than, size=1, replace=False)\n\n # Plot\n colors = ['orange', 'c']\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n #plot each user, color by median intercycle length\n xs = list(cycle_lengths_greater_than_user[0][0:-2])\n ys = list(cycle_lengths_greater_than_user[0][1:-1])\n zs = list(cycle_lengths_greater_than_user[0][2:])\n ax.scatter(xs, ys, zs, color = 'orange')\n ax.plot(xs, ys, zs, color='orange', linestyle='dashed', alpha=0.8)\n\n xs = list(cycle_lengths_less_than_user[0][0:-2])\n ys = list(cycle_lengths_less_than_user[0][1:-1])\n zs = list(cycle_lengths_less_than_user[0][2:])\n ax.scatter(xs, ys, zs, color = 'c')\n ax.plot(xs, ys, zs, color='c', linestyle='dashed', alpha=0.8)\n \n ax.set_xlabel(attribute+ '[i]')\n ax.set_ylabel(attribute+ 
'[i+1]')\n ax.set_zlabel(attribute+ '[i+2]')\n if attribute == 'cycle_lengths':\n #ref_line_points = np.linspace(10, 90, 10)\n #ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)\n ax.set_xlim3d(10,90)\n ax.set_ylim3d(10,90)\n ax.set_zlim3d(10,90)\n elif attribute == 'period_lengths':\n max_period_days=28\n #ref_line_points = np.linspace(1, max_period_days, 4)\n #ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)\n ax.set_xlim3d(1,max_period_days)\n ax.set_ylim3d(1,max_period_days)\n ax.set_zlim3d(1,max_period_days)\n ax.set_xticks(np.append([1],np.arange(4,max_period_days+1, 4)))\n ax.set_yticks(np.append([1],np.arange(4,max_period_days+1, 4)))\n ax.set_zticks(np.append([1],np.arange(4,max_period_days+1, 4)))\n \n plt.savefig(filename.format(save_dir), format='pdf', bbox_inches='tight')\n print('Finished one view')\n # With angles\n for angle in [30, 60, 90, 180]:\n print('Start one view')\n filename_angle = filename[:-4]+'_'+str(angle)+'.pdf'\n ax.view_init(elev=None, azim=angle)\n plt.savefig(filename_angle.format(save_dir), format='pdf', bbox_inches='tight')\n print('Finished one view')\n\n plt.close()\n\n# Plot period and cycle length distributions per group\ndef plot_lengths_hist_by_attribute_cutoff(cycle_stats_df, cycle_df, attribute, cutoff_criteria, cutoff, pdf_or_cdf, save_dir):\n '''\n Function that plots cycle and period length distributions across groups\n Input:\n cycle_stats_df: pandas dataframe, with information about user's cycle statistics\n cycle_df: pandas dataframe, with information about each user's cycle\n attribute: whether to consider 'cycle_lengths' or 'period_lengths'\n cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)\n cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)\n pdf_or_cdf: whether to plot 'pdf's or 'cdf's\n save_dir: path where to save plot\n Output:\n None\n '''\n # Identify groups per cutoff criteria\n users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])\n users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])\n cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_greater_than_cutoff)]\n cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_less_than_cutoff)]\n\n colors = ['orange', 'c']\n labels=['Highly variable', 'NOT highly variable']\n\n if attribute == 'cycle_length':\n # Compute histogram\n # Bins based on integer range of values\n my_bins=np.arange(\n np.min([cycles_users_greater_than_cutoff[attribute].dropna().min(), cycles_users_less_than_cutoff[attribute].dropna().min()]),\n np.max([cycles_users_greater_than_cutoff[attribute].dropna().max(), cycles_users_less_than_cutoff[attribute].dropna().max()])+1)\n all_counts, all_bins = np.histogram(cycle_df[attribute].dropna(), bins=my_bins, density=True)\n counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(cycles_users_greater_than_cutoff[attribute].dropna(), bins=my_bins, density=True)\n counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(cycles_users_less_than_cutoff[attribute].dropna(), bins=my_bins, density=True)\n\n # Separate PDF/CDF plots\n if pdf_or_cdf=='pdf':\n # PDF\n hist_type='stepfilled'\n cumulative=False\n y_label='P(Cycle length = n)'\n cohort_filename = 
'{}/{}_pdf_cohort.pdf'.format(save_dir, attribute)\n per_group_filename = '{}/{}_pdf_per_group.pdf'.format(save_dir, attribute)\n elif pdf_or_cdf=='cdf':\n # CDF\n hist_type='step'\n cumulative=True\n y_label='P(Cycle length $\\leq$ n)'\n cohort_filename = '{}/{}_cdf_cohort.pdf'.format(save_dir, attribute)\n per_group_filename = '{}/{}_cdf_per_group.pdf'.format(save_dir, attribute)\n else:\n raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))\n \n # Population\n plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)\n plt.autoscale(enable=True, tight=True)\n plt.xticks(np.arange(my_bins.min(), my_bins.max()+1, 10))\n plt.xlabel('Cycle length in days')\n plt.ylabel(y_label)\n plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')\n plt.close()\n \n # Per-group\n plt.hist(bins_greater_than_cutoff[:-1], bins_greater_than_cutoff, weights=counts_greater_than_cutoff, density=True, cumulative=cumulative, color=colors[0], alpha=0.5, label=labels[0], histtype=hist_type)\n plt.hist(bins_less_than_cutoff[:-1], bins_less_than_cutoff, weights=counts_less_than_cutoff, density=True, cumulative=cumulative, color=colors[1], alpha=0.5, label=labels[1], histtype=hist_type)\n plt.autoscale(enable=True, tight=True)\n plt.xticks(np.arange(my_bins.min(), my_bins.max()+1, 10))\n plt.xlabel('Cycle length in days')\n plt.ylabel(y_label)\n # Add (a)/(b) labels for paper\n plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)\n plt.savefig(per_group_filename, format='pdf', bbox_inches='tight')\n plt.close()\n \n elif attribute == 'period_length':\n # Compute histogram\n # Bins based on integer range of values\n my_bins=np.arange(\n np.min([cycles_users_greater_than_cutoff[attribute].dropna().min(), cycles_users_less_than_cutoff[attribute].dropna().min()]),\n np.max([cycles_users_greater_than_cutoff[attribute].dropna().max(), cycles_users_less_than_cutoff[attribute].dropna().max()])+1)\n all_counts, all_bins = np.histogram(cycle_df[attribute].dropna(), bins=my_bins, density=True)\n counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(cycles_users_greater_than_cutoff[attribute].dropna(), bins=my_bins, density=True)\n counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(cycles_users_less_than_cutoff[attribute].dropna(), bins=my_bins, density=True)\n \n # Separate PDF/CDF plots\n max_period_days=28\n if pdf_or_cdf=='pdf':\n # PDF\n hist_type='stepfilled'\n cumulative=False\n y_label='P(Period length = n)'\n cohort_filename = '{}/{}_pdf_cohort.pdf'.format(save_dir, attribute)\n per_group_filename = '{}/{}_pdf_per_group.pdf'.format(save_dir, attribute)\n elif pdf_or_cdf=='cdf':\n # CDF\n hist_type='step'\n cumulative=True\n y_label='P(Period length $\\leq$ n)'\n cohort_filename = '{}/{}_cdf_cohort.pdf'.format(save_dir, attribute)\n per_group_filename = '{}/{}_cdf_per_group.pdf'.format(save_dir, attribute)\n else:\n raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))\n \n # Population\n plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)\n plt.autoscale(enable=True, tight=True)\n plt.xticks(np.append([1],np.arange(4,max_period_days+1, 4)))\n plt.xlim(1,max_period_days)\n plt.xlabel('Period length in days')\n plt.ylabel(y_label)\n plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')\n 
plt.close()\n \n # Per-group\n plt.hist(bins_greater_than_cutoff[:-1], bins_greater_than_cutoff, weights=counts_greater_than_cutoff, density=True, cumulative=cumulative, color=colors[0], alpha=0.5, label=labels[0], histtype=hist_type)\n plt.hist(bins_less_than_cutoff[:-1], bins_less_than_cutoff, weights=counts_less_than_cutoff, density=True, cumulative=cumulative, color=colors[1], alpha=0.5, label=labels[1], histtype=hist_type)\n plt.autoscale(enable=True, tight=True)\n plt.xticks(np.append([1],np.arange(4,max_period_days+1, 4)))\n plt.xlim(1,max_period_days)\n plt.xlabel('Period length in days')\n plt.ylabel(y_label)\n # Add (a)/(b) labels for paper\n plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)\n plt.savefig(per_group_filename, format='pdf', bbox_inches='tight')\n plt.close()\n \n else:\n raise ValueError('Unknown attribute {}'.format(attribute))\n\n# Bootstrapped-KS for cycle and period length\ndef bootstrapped_cycle_period_lengths_KS(cycle_stats_df, cycle_df, cutoff_criteria, cutoff, n_bootstrapping, results_dir):\n '''\n Function that computes cycle and period length Kolmogorov-Smirnov tests between group distributions, based on bootstrapping\n Input:\n cycle_stats_df: pandas dataframe, with information about user's cycle statistics\n cycle_df: pandas dataframe, with information about user's cycle\n cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)\n cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)\n n_bootstrapping: Number of bootstrapped samples to use for the analysis\n save_dir: path where to save plot\n Output:\n None\n '''\n # True separation of users into groups\n true_users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])\n true_users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])\n n_users_greater_than_cutoff=true_users_greater_than_cutoff.size\n n_users_less_than_cutoff=true_users_less_than_cutoff.size\n \n ########### TRUE OBSERVERD STATISTICS ##########\n # Cycles per-group\n true_cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(true_users_greater_than_cutoff)]\n true_cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(true_users_less_than_cutoff)]\n # KS cycle_length\n true_KS_cycle_length, true_p_val_cycle_length = stats.ks_2samp(true_cycles_users_greater_than_cutoff['cycle_length'].dropna(), true_cycles_users_less_than_cutoff['cycle_length'].dropna())\n # KS period_length\n true_KS_period_length, true_p_val_period_length = stats.ks_2samp(true_cycles_users_greater_than_cutoff['period_length'].dropna(), true_cycles_users_less_than_cutoff['period_length'].dropna())\n \n ########### BOOTSTRAP BASED STATISTICS ##########\n # Computed suff statistics\n bootstrapped_KS_cycle_length=np.zeros(n_bootstrapping)\n bootstrapped_p_val_cycle_length=np.zeros(n_bootstrapping)\n bootstrapped_KS_period_length=np.zeros(n_bootstrapping)\n bootstrapped_p_val_period_length=np.zeros(n_bootstrapping)\n\n for n_bootstrap in np.arange(n_bootstrapping):\n #print('Sample={}/{}'.format(n_bootstrap,n_bootstrapping))\n # Bootstrapped sample indicators\n bootstrapped_users_greater_than_cutoff=np.random.choice(true_users_greater_than_cutoff,n_bootstrapping)\n bootstrapped_users_less_than_cutoff=np.random.choice(true_users_less_than_cutoff,n_bootstrapping)\n # Cycles per-group\n 
bootstrapped_cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(bootstrapped_users_greater_than_cutoff)]\n bootstrapped_cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(bootstrapped_users_less_than_cutoff)]\n # KS cycle_length\n bootstrapped_KS_cycle_length[n_bootstrap], bootstrapped_p_val_cycle_length[n_bootstrap] = stats.ks_2samp(bootstrapped_cycles_users_greater_than_cutoff['cycle_length'].dropna(), bootstrapped_cycles_users_less_than_cutoff['cycle_length'].dropna())\n # KS period_length\n bootstrapped_KS_period_length[n_bootstrap], bootstrapped_p_val_period_length[n_bootstrap] = stats.ks_2samp(bootstrapped_cycles_users_greater_than_cutoff['period_length'].dropna(), bootstrapped_cycles_users_less_than_cutoff['period_length'].dropna())\n\n # Print bootstrap results\n print('*************************************************************************')\n print('******** Cycle-length KS={} (p={}) ***********'.format(true_KS_cycle_length, true_p_val_cycle_length))\n print('******** Cycle-length Bootstrapped KS={}+/-{} (p={} (+/-{}))***********'.format(\n bootstrapped_KS_cycle_length.mean(), bootstrapped_KS_cycle_length.std(), bootstrapped_p_val_cycle_length.mean(), bootstrapped_p_val_cycle_length.std()\n ))\n print('******** Cycle-length Bootstrapped KS={}({},{}) p={} ({},{}))***********'.format(\n bootstrapped_KS_cycle_length.mean(), np.percentile(bootstrapped_KS_cycle_length, 2.5, axis=0), np.percentile(bootstrapped_KS_cycle_length, 97.5, axis=0),\n bootstrapped_p_val_cycle_length.mean(), np.percentile(bootstrapped_p_val_cycle_length, 2.5, axis=0), np.percentile(bootstrapped_p_val_cycle_length, 97.5, axis=0)\n ))\n print('*************************************************************************')\n print('******** Period-length KS={} (p={}) ***********'.format(true_KS_period_length, true_p_val_period_length))\n print('******** Period-length Bootstrapped KS={}+/-{} (p={} (+/-{}))***********'.format(\n bootstrapped_KS_period_length.mean(), bootstrapped_KS_period_length.std(), bootstrapped_p_val_period_length.mean(), bootstrapped_p_val_period_length.std()\n ))\n print('******** Period-length Bootstrapped KS={}({},{}) p={} ({},{}))***********'.format(\n bootstrapped_KS_period_length.mean(), np.percentile(bootstrapped_KS_period_length, 2.5, axis=0), np.percentile(bootstrapped_KS_period_length, 97.5, axis=0),\n bootstrapped_p_val_period_length.mean(), np.percentile(bootstrapped_p_val_period_length, 2.5, axis=0), np.percentile(bootstrapped_p_val_period_length, 97.5, axis=0)\n ))\n print('*************************************************************************')\n\n# Average statistics over cycle-id\ndef plot_avg_lengths_by_attribute_cutoff(cycle_stats_df, cycle_df, attribute, cutoff_criteria, cutoff, save_dir):\n '''\n Function that plots cycle and period length average and standard deviation across user's timeline (i.e., by cycle-id) across groups\n Input:\n cycle_stats_df: pandas dataframe, with information about user's cycle statistics\n cycle_df: pandas dataframe, with information about each user's cycle\n attribute: whether to consider 'cycle_lengths' or 'period_lengths'\n cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)\n cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)\n save_dir: path where to save plot\n Output:\n None\n '''\n # Identify groups per cutoff criteria\n users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > 
cutoff]['user_id'])\n users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])\n cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_greater_than_cutoff)]\n cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_less_than_cutoff)]\n \n # Plotting\n colors = ['slateblue', 'c', 'orange']\n max_cycle_id=20\n \n if attribute == 'cycle_length':\n fig, axes = plt.subplots(3, 1, sharex='all', sharey='all', figsize = (15,15))\n \n for index, dataset in enumerate([cycle_df, cycles_users_less_than_cutoff, cycles_users_greater_than_cutoff]):\n means = dataset.groupby(['cycle_id'])[attribute].mean()[:max_cycle_id]\n std = dataset.groupby(['cycle_id'])[attribute].std()[:max_cycle_id]\n # Plot\n axes[index].plot(np.unique(dataset['cycle_id'])[:20], means, color = colors[index])\n axes[index].autoscale(enable=True, tight=True, axis='x')\n axes[index].fill_between(np.unique(dataset['cycle_id'])[:max_cycle_id], means - std, means + std, alpha=0.4, color=colors[index])\n axes[index].set_xticks(np.append([1],np.arange(2,max_cycle_id+1,2)))\n axes[index].set_xlabel('Cycle ID')\n axes[index].set_ylabel('Cycle length')\n axes[index].set_ylim(20,55)\n \n # Add (a)/(b) labels for paper\n plt.text(12, 7, '(a)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)\n # Save and close\n plt.savefig('{}/avg_{}_per_cycle_id.pdf'.format(save_dir,attribute), format='pdf', bbox_inches='tight')\n plt.close()\n \n elif attribute == 'period_length':\n fig, axes = plt.subplots(3, 1, sharex='all', sharey='all', figsize = (15,15))\n \n for index, dataset in enumerate([cycle_df, cycles_users_less_than_cutoff, cycles_users_greater_than_cutoff]):\n means = dataset.groupby(['cycle_id'])[attribute].mean()[:max_cycle_id]\n std = dataset.groupby(['cycle_id'])[attribute].std()[:max_cycle_id]\n # Plot\n axes[index].plot(np.unique(dataset['cycle_id'])[:20], means, color = colors[index])\n axes[index].autoscale(enable=True, tight=True, axis='x')\n axes[index].fill_between(np.unique(dataset['cycle_id'])[:max_cycle_id], means - std, means + std, alpha=0.4, color=colors[index])\n axes[index].set_xticks(np.append([1],np.arange(2,max_cycle_id+1,2)))\n axes[index].set_xlabel('Cycle ID')\n axes[index].set_ylabel('Period length')\n axes[index].set_ylim(1,9)\n \n # Add (a)/(b) labels for paper\n plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)\n # Save and close\n plt.savefig('{}/avg_{}_per_cycle_id.pdf'.format(save_dir,attribute), format='pdf', bbox_inches='tight')\n plt.close()\n\n else:\n raise ValueError('Unknown attribute {}'.format(attribute))\n\n# Plot for max intercycle length (i.e., CLD) histogram \ndef plot_max_intercycle_length_hists(cycle_stats, cycle_stats_exclude_flagged, save_dir):\n '''\n Function that plots max inter cycle length (max CLD) histograms with and without excluded cycles\n Input:\n cycle_stats: pandas dataframe, with information about user's cycle statistics\n cycle_stats_exclude_flagged: pandas dataframe for users after removing excluded flags, with information about user's cycle statistics \n save_dir: path where to save plot\n Output:\n None\n '''\n my_bins=np.arange(min(cycle_stats['max_inter_cycle_length']), max(cycle_stats['max_inter_cycle_length']) + 1)\n plt.hist(cycle_stats['max_inter_cycle_length'], bins=my_bins, label='With behaviorally-tainted cycles', color='blue', histtype='step')\n 
plt.hist(cycle_stats_exclude_flagged['max_inter_cycle_length'], bins=my_bins, label='Excluding behaviorally-tainted cycles', color='red', histtype='step')\n    plt.autoscale(enable=True, tight=True, axis='x')\n    plt.ylim(0,38000)\n    plt.xlabel('Maximum CLD in days')\n    plt.ylabel('User count with maximum CLD')\n    plt.savefig('{}/hist_max_inter_cycle_length_with_and_without_excluded_flags.pdf'.format(save_dir), format='pdf', bbox_inches='tight')\n    plt.close()\n\n# Plot for median vs max intercycle length (i.e., CLD) histogram\ndef plot_median_vs_max_intercycle_length(cycle_stats, save_dir):\n    '''\n    Function that plots median vs max inter cycle length (CLD) 2D scatter histogram\n    Input:\n        cycle_stats: pandas dataframe, with information about user's cycle statistics\n        save_dir: path where to save plot\n    Output:\n        None\n    '''\n    plt.hist2d(cycle_stats['median_inter_cycle_length'], cycle_stats['max_inter_cycle_length'], bins=(75, 75), cmap='jet', norm=colors.LogNorm())\n    plt.autoscale(enable=True, tight=True)\n    range_vals_median = np.linspace(min(cycle_stats['median_inter_cycle_length']), max(cycle_stats['median_inter_cycle_length']), 100)\n    plt.plot(range_vals_median, range_vals_median+10, label='Median CLD + 10', color='red')\n    plt.xlabel('Median CLD')\n    plt.ylabel('Maximum CLD')\n    plt.xlim((0,75))\n    plt.ylim((0, 75))\n    plt.colorbar()\n    plt.savefig('{}/median_vs_max_scatter_2d_hist.pdf'.format(save_dir), format='pdf', bbox_inches='tight')\n    plt.close()\n\n# Plot for median intercycle length (i.e., CLD) histogram\ndef plot_median_CLD_hist(cycle_stats, pdf_or_cdf, save_dir):\n    '''\n    Function that plots median CLD histograms\n    Input:\n        cycle_stats: pandas dataframe, with information about user's cycle statistics\n        pdf_or_cdf: whether to plot 'pdf's or 'cdf's\n        save_dir: path where to save plot\n    Output:\n        None\n    '''\n    \n    # Median CLD histogram\n    my_bins=np.arange(cycle_stats['median_inter_cycle_length'].dropna().min(),cycle_stats['median_inter_cycle_length'].dropna().max()+1)\n    all_counts, all_bins = np.histogram(cycle_stats['median_inter_cycle_length'].dropna(), bins=my_bins, density=True)\n    \n    # Separate PDF/CDF plots\n    if pdf_or_cdf=='pdf':\n        # PDF\n        hist_type='stepfilled'\n        cumulative=False\n        y_label='P(Median CLD = n)'\n        cohort_filename = '{}/median_CLD_pdf_cohort.pdf'.format(save_dir)\n    elif pdf_or_cdf=='cdf':\n        # CDF\n        hist_type='step'\n        cumulative=True\n        y_label='P(Median CLD $\leq$ n)'\n        cohort_filename = '{}/median_CLD_cdf_cohort.pdf'.format(save_dir)\n    else:\n        raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))\n    \n    # Actual plot: use the label and filename selected above, so the pdf option is not mislabeled or saved to the cdf file\n    plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)\n    plt.autoscale(enable=True, tight=True)\n    plt.xlabel('Median CLD in days')\n    plt.ylabel(y_label)\n    plt.grid(True)\n    plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')\n    plt.close()\n\n################################## MAIN ############################\ndef main():\n    '''\n    Main function of the script that runs the cycle and period length related analysis\n\n    Input:\n        None\n    Output:\n        None\n    '''\n    \n    ### Directories\n    data_dir='../data'\n    preprocessed_data_dir='../preprocessed_data'\n    results_dir = '../results/characterizing_cycle_and_symptoms/cycle_period_length_analysis'\n    os.makedirs(results_dir, exist_ok = True)\n    \n    ################# SYMPTOMS TRACKED #################\n    # Tracking\n    with open('{}/tracking_enriched.pickle'.format(data_dir), 
'rb') as f:\n        tracking = pickle.load(f)\n\n    print('Tracking-data loaded')\n\n    ################# CYCLES #################\n    with open('{}/cohort_cycle_stats.pickle'.format(preprocessed_data_dir), 'rb') as f:\n        cohort_cycle_stats = pickle.load(f)\n\n    # Cycles flagged\n    with open('{}/cohort_cycles_flagged.pickle'.format(preprocessed_data_dir), 'rb') as f:\n        cohort_cycles_flagged = pickle.load(f)\n\n    # Exclude cycles flagged as badly tracked\n    cohort_cycles = cohort_cycles_flagged[cohort_cycles_flagged['badly_tracked_cycle'] == 'f']\n    \n    # Cycles stats\n    with open('{}/cohort_clean_cycle_stats.pickle'.format(preprocessed_data_dir), 'rb') as f:\n        cohort_clean_cycle_stats = pickle.load(f)\n\n    print('Cycles-data loaded')\n    \n    ################# PLOTTING #################\n    #### PLOT histogram of max intercycle length, with and without excluding flagged cycles\n    plot_max_intercycle_length_hists(cohort_cycle_stats, cohort_clean_cycle_stats, results_dir)\n    #### PLOT median vs max CLD 2D histogram\n    plot_median_vs_max_intercycle_length(cohort_clean_cycle_stats, results_dir)\n    #### PLOT median CLD histogram\n    plot_median_CLD_hist(cohort_clean_cycle_stats, 'cdf', results_dir)\n    \n    #### PLOT cycle and period length histograms: pdf\n    plot_lengths_hist_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'cycle_length', 'median_inter_cycle_length', 9, 'pdf', results_dir)\n    plot_lengths_hist_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'period_length', 'median_inter_cycle_length', 9, 'pdf', results_dir)\n    \n    #### Bootstrapped-KS cycle and period length\n    bootstrapped_cycle_period_lengths_KS(cohort_clean_cycle_stats, cohort_cycles, 'median_inter_cycle_length', 9, 100000, results_dir)\n    \n    #### PLOT average cycle and period length over cycle-id\n    plot_avg_lengths_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'cycle_length', 'median_inter_cycle_length', 9, results_dir)\n    plot_avg_lengths_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'period_length', 'median_inter_cycle_length', 9, results_dir)\n\n    #### PLOT random cycle length time-series\n    random_time_series_embedding_lengths(cohort_clean_cycle_stats, 'cycle_lengths', 'median_inter_cycle_length', 9, results_dir)\n    \n    #### PLOT population level cycle and period length time-series\n    population_time_series_embedding_lengths(cohort_clean_cycle_stats, 'cycle_lengths', 'median_inter_cycle_length', 9, 'random', results_dir)\n    population_time_series_embedding_lengths(cohort_clean_cycle_stats, 'period_lengths', 'median_inter_cycle_length', 9, 'random', results_dir)\n    \n# Making sure the main program is not executed when the module is imported\nif __name__ == '__main__':\n    # Just run the main\n    main()\n"
] | [
[
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.autoscale",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.random.choice",
"matplotlib.pyplot.xlim",
"matplotlib.colors.LogNorm",
"matplotlib.pyplot.text",
"matplotlib.pyplot.hist",
"matplotlib.use",
"numpy.unique",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.close",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.colorbar",
"numpy.percentile",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel"
]
] |
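The `bootstrapped_cycle_period_lengths_KS` routine above resamples each user group with replacement and recomputes a two-sample KS statistic per draw. Here is a minimal, self-contained sketch of that procedure on synthetic data; the group parameters and bootstrap count are illustrative assumptions, not values from the analysis above.

```python
# Bootstrapped two-sample KS test, sketched on synthetic "cycle length" data.
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
group_a = rng.normal(29, 3, size=500)   # hypothetical: low-variability users
group_b = rng.normal(31, 5, size=500)   # hypothetical: high-variability users

true_ks, true_p = stats.ks_2samp(group_a, group_b)

n_boot = 1000
ks_boot = np.empty(n_boot)
for i in range(n_boot):
    # Resample each group with replacement at its own size
    a = rng.choice(group_a, size=group_a.size, replace=True)
    b = rng.choice(group_b, size=group_b.size, replace=True)
    ks_boot[i] = stats.ks_2samp(a, b).statistic

lo, hi = np.percentile(ks_boot, [2.5, 97.5])
print(f"KS={true_ks:.3f} (p={true_p:.2g}), bootstrap 95% CI=[{lo:.3f}, {hi:.3f}]")
```

Reporting the 2.5/97.5 percentiles of the bootstrapped statistic mirrors the confidence intervals printed by the function above.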
dylanljones/cmpy | [
"21adcf4dd9f873ae29d47aeaef4fbcd914bfce2c"
] | [
"cmpy/disorder.py"
] | [
"# coding: utf-8\n#\n# This code is part of cmpy.\n#\n# Copyright (c) 2022, Dylan Jones\n\n\"\"\"This module contains methods for modeling disorder.\"\"\"\n\nimport numpy as np\nfrom typing import Union, Sequence\n\n\ndef create_subst_array(\n size: int, values: Sequence[float], conc: Union[float, Sequence[float]]\n) -> np.ndarray:\n \"\"\"Creates an (ordered) array of values.\n\n Parameters\n ----------\n size : int\n The size of the output array.\n values : Sequence of float\n The values for filling the array. The size must match the size of the\n concentrations. If one concentration is given the value-array must be of size 2.\n conc : float or Sequence of float\n The concentrations of the values. If a single concentration is given\n it is interpreted as the concentration of the first of two values.\n\n Returns\n -------\n array : np.ndarray\n The (ordered) array filled with the given values.\n \"\"\"\n # Get sizes of sub-arrays\n if isinstance(conc, float):\n conc = [conc, 1 - conc]\n if sum(conc) != 1:\n raise ValueError(\"Fractions have to add up to 1!\")\n sizes = (size * np.array(conc)).astype(np.int64)\n sizes[-1] += size - sum(sizes)\n\n # create sub-arrays\n arrays = [np.full(size, val) for size, val in zip(sizes, values)]\n return np.concatenate(arrays)\n\n\ndef random_permutations(\n arr: Sequence[float], size: int, replace: bool = False, seed: int = None\n):\n \"\"\"Creates (optionally unique) permutations of a given array.\n\n Parameters\n ----------\n arr : (N) np.ndarray\n The input array to permute.\n size : int\n The number of permutations to generate.\n replace : bool, optional\n If `True`, only unique permutations are returned. The default is `True`.\n seed : int, optional\n A optional seed to initialize the random number generator.\n\n Yields\n ------\n perm : (N) np.ndarray\n The permuted array.\n\n Examples\n --------\n >>> a = [0, 0, 1, 1, 1]\n >>> perm = random_permutations(a, size=2, seed=0)\n >>> next(perm)\n array([1, 1, 1, 0, 0])\n >>> next(perm)\n array([0, 1, 1, 1, 0])\n \"\"\"\n rng = np.random.default_rng(seed)\n\n p = np.array(arr)\n seen = set()\n count = 0\n while True:\n if count >= size:\n break\n rng.shuffle(p)\n if not replace:\n phash = hash(p.data.tobytes())\n if phash not in seen:\n seen.add(phash)\n yield p\n count += 1\n else:\n yield p\n count += 1\n\n\ndef disorder_generator(\n size: int,\n values: Sequence[float],\n conc: Union[float, Sequence[float]],\n samples: int,\n replace: bool = False,\n seed=None,\n):\n \"\"\"Generates (optionally unique) random samples from a given 1-D array.\n\n See Also\n --------\n random_permutations\n\n Parameters\n ----------\n size : int\n The size of the output array.\n values : Sequence of float\n The values for filling the array. The size must match the size of the\n concentrations. If one concentration is given the value-array must be of size 2.\n conc : float or Sequence of float\n The concentrations of the values. If a single concentration is given\n it is interpreted as the concentration of the first of two values.\n samples : int\n The number of random arrays to generate.\n replace : bool, optional\n If `True`, only unique permutations are returned. 
The default is `True`.\n seed : int, optional\n A optional seed to initialize the random number generator.\n\n Yields\n ------\n perm : (N) np.ndarray\n The randomly sampled arrays.\n\n Examples\n --------\n >>> eps = disorder_generator(5, values=[0, +1], conc=[0.4, 0.6], samples=2, seed=0)\n >>> next(eps)\n array([1, 1, 1, 0, 0])\n >>> next(eps)\n array([0, 1, 1, 1, 0])\n \"\"\"\n ordered = create_subst_array(size, values, conc)\n return random_permutations(ordered, samples, replace, seed)\n"
] | [
[
"numpy.array",
"numpy.concatenate",
"numpy.full",
"numpy.random.default_rng"
]
] |
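A short usage sketch for the `cmpy/disorder.py` module above, assuming the package is importable as `cmpy`: build the ordered binary array, then draw unique random realizations from it.

```python
from cmpy.disorder import create_subst_array, disorder_generator

# Ordered template: three sites at energy 0.0, three at 1.0
# (a float conc=0.5 expands to [0.5, 0.5] internally).
print(create_subst_array(6, values=[0.0, 1.0], conc=0.5))

# Two unique disorder realizations of the same composition
# (replace=False is the default, so repeated permutations are skipped).
for eps in disorder_generator(6, values=[0.0, 1.0], conc=0.5, samples=2, seed=0):
    print(eps)
```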
awesome-archive/minigo | [
"188fb197fdafbe9664a32142373b1cbd1459bc67"
] | [
"tests/test_coords.py"
] | [
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy\n\nimport coords\nimport go\nfrom tests import test_utils\n\nclass TestCoords(test_utils.MiniGoUnitTest):\n def test_upperleft(self):\n self.assertEqual(coords.parse_sgf_coords('aa'), (0, 0))\n self.assertEqual(coords.unflatten_coords(0), (0, 0))\n self.assertEqual(coords.parse_kgs_coords('A9'), (0, 0))\n self.assertEqual(coords.parse_pygtp_coords((1,9)), (0, 0))\n\n self.assertEqual(coords.unparse_sgf_coords((0, 0)), 'aa')\n self.assertEqual(coords.flatten_coords((0, 0)), 0)\n self.assertEqual(coords.to_human_coord((0, 0)), 'A9')\n self.assertEqual(coords.unparse_pygtp_coords((0, 0)), (1, 9))\n\n def test_topleft(self):\n self.assertEqual(coords.parse_sgf_coords('ia'), (0, 8))\n self.assertEqual(coords.unflatten_coords(8), (0, 8))\n self.assertEqual(coords.parse_kgs_coords('J9'), (0, 8))\n self.assertEqual(coords.parse_pygtp_coords((9,9)), (0, 8))\n\n self.assertEqual(coords.unparse_sgf_coords((0, 8)), 'ia')\n self.assertEqual(coords.flatten_coords((0, 8)), 8)\n self.assertEqual(coords.to_human_coord((0, 8)), 'J9')\n self.assertEqual(coords.unparse_pygtp_coords((0, 8)), (9, 9))\n\n def test_pass(self):\n self.assertEqual(coords.parse_sgf_coords(''), None)\n self.assertEqual(coords.unflatten_coords(81), None)\n self.assertEqual(coords.parse_kgs_coords('pass'), None)\n self.assertEqual(coords.parse_pygtp_coords((0,0)), None)\n\n self.assertEqual(coords.unparse_sgf_coords(None), '')\n self.assertEqual(coords.flatten_coords(None), 81)\n self.assertEqual(coords.to_human_coord(None), 'pass')\n self.assertEqual(coords.unparse_pygtp_coords(None), (0, 0))\n\n def test_parsing_9x9(self):\n self.assertEqual(coords.parse_sgf_coords('aa'), (0, 0))\n self.assertEqual(coords.parse_sgf_coords('ac'), (2, 0))\n self.assertEqual(coords.parse_sgf_coords('ca'), (0, 2))\n self.assertEqual(coords.parse_sgf_coords(''), None)\n self.assertEqual(coords.unparse_sgf_coords(None), '')\n self.assertEqual(\n 'aa',\n coords.unparse_sgf_coords(coords.parse_sgf_coords('aa')))\n self.assertEqual(\n 'sa',\n coords.unparse_sgf_coords(coords.parse_sgf_coords('sa')))\n self.assertEqual(\n (1, 17),\n coords.parse_sgf_coords(coords.unparse_sgf_coords((1, 17))))\n self.assertEqual(coords.parse_kgs_coords('A1'), (8, 0))\n self.assertEqual(coords.parse_kgs_coords('A9'), (0, 0))\n self.assertEqual(coords.parse_kgs_coords('C2'), (7, 2))\n self.assertEqual(coords.parse_kgs_coords('J2'), (7, 8))\n self.assertEqual(coords.parse_pygtp_coords((1, 1)), (8, 0))\n self.assertEqual(coords.parse_pygtp_coords((1, 9)), (0, 0))\n self.assertEqual(coords.parse_pygtp_coords((3, 2)), (7, 2))\n self.assertEqual(coords.unparse_pygtp_coords((8, 0)), (1, 1))\n self.assertEqual(coords.unparse_pygtp_coords((0, 0)), (1, 9))\n self.assertEqual(coords.unparse_pygtp_coords((7, 2)), (3, 2))\n\n self.assertEqual(coords.to_human_coord((0,8)), 'J9')\n self.assertEqual(coords.to_human_coord((8,0)), 'A1')\n\n def test_flatten(self):\n 
self.assertEqual(coords.flatten_coords((0, 0)), 0)\n self.assertEqual(coords.flatten_coords((0, 3)), 3)\n self.assertEqual(coords.flatten_coords((3, 0)), 27)\n self.assertEqual(coords.unflatten_coords(27), (3, 0))\n self.assertEqual(coords.unflatten_coords(10), (1, 1))\n self.assertEqual(coords.unflatten_coords(80), (8, 8))\n self.assertEqual(coords.flatten_coords(coords.unflatten_coords(10)), 10)\n self.assertEqual(coords.unflatten_coords(coords.flatten_coords((5, 4))), (5, 4))\n\n def test_unflatten_coords_ndindex_equivalence(self):\n ndindices = list(numpy.ndindex(go.N, go.N))\n flat_coords = list(range(go.N * go.N))\n self.assertEqual(list(map(coords.unflatten_coords, flat_coords)), ndindices)\n\n"
] | [
[
"numpy.ndindex"
]
] |
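The assertions in `test_coords.py` above pin down the flattening convention: row-major indexing over an N x N board, with index N*N reserved for a pass move. Below is a minimal reimplementation consistent with those tests; the real `coords` module additionally handles the SGF, KGS, and pygtp formats exercised above.

```python
N = 9  # board size used by the tests

def flatten(coord):
    if coord is None:          # pass move
        return N * N
    row, col = coord
    return N * row + col

def unflatten(flat):
    if flat == N * N:
        return None
    return divmod(flat, N)     # row-major: (flat // N, flat % N)

assert flatten((0, 3)) == 3 and flatten((3, 0)) == 27 and flatten(None) == 81
assert unflatten(10) == (1, 1) and unflatten(80) == (8, 8)
assert all(unflatten(flatten((r, c))) == (r, c) for r in range(N) for c in range(N))
```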
ShizhuZhang/ontask_b | [
"ca4526871f26e7153b724b1e97b922a0b52f75d6"
] | [
"src/plugins/test_plugin_2/__init__.py"
] | [
"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function\n\nimport pandas as pd\n\n# The field class_name contains the name of the class to load to execute the\n# plugin.\nclass_name = 'OntaskTestPlugin'\n\n\nclass OntaskTestPlugin(object):\n \"\"\"\n Example of a class that implements the OnTask plugin interface. The\n objects of this class have to provide the following elements:\n\n 1. name: Plugin name show to the users.\n\n 2. description_txt: A string with the detailed description of what the\n plugin does\n\n 3. input_column_names: A potentially empty list of column names (strings).\n If the list is empty, the columns are selected by the userat execution\n time.\n\n 4. output_column_names: Non empty list of names (strings) of the columns\n to be used for the output of the transformation.\n\n 5. parameters: an optionally empty list with tuples with the following\n structure:\n\n ('name', type, [list of allowed values], initial value, help_text)\n\n These elements will be requested from the user before executing the\n plugin through a form. The conditions on these values are:\n\n - name must be a string\n - type must be a string equal to \"integer\", \"double\", \"string\", \n \"datetime\" or \"boolean\". \n - The list of values is to restrict the\n possible values\n - The initial value must be of the type specified by the second \n element.\n - Help_text a string to show as help text\n\n 6. method \"run\" that receives:\n - a pandas data frame with the data to process\n - a string with the name of the key column that will be used to merge\n the result.\n - A dictionary of pairs (name, value) with the parameters described in\n the previous element.\n\n and returns a result Pandas data frame. This frame **must** have one\n column with the key column name provided so that it can be properly\n merged with the existing data.\n \"\"\"\n\n def __init__(self):\n self.name = 'Test Plungin 2 Name'\n self.description_txt = 'Test Plugin 2 Description Text'\n self.input_column_names = ['A1', 'A2']\n self.output_column_names = ['RESULT 3', 'RESULT 4']\n self.parameters = [\n ('param string', 'string', ['v1', 'v2'], 'v1', 'help param string'),\n ('param integer', 'integer', [], None, 'help param integer'),\n ('param double', 'double', [1.2, 2.2, 3.2], None,\n 'help param double'),\n ('param boolean', 'boolean', [], True, 'help param boolean'),\n ('param datetime', 'datetime', [], '2018-05-25 18:03:00+09:30',\n 'help param datetime'),\n ('param datetime2', 'datetime', \n [],\n '2018-05-25 18:03:00+09:30',\n 'help param datetime'),\n ]\n\n def run(self, data_frame, merge_key, parameters=dict):\n \"\"\"\n Method to overwrite. 
Receives a data frame wih a number of columns\n stipulated by the num_column_input pair, the name of a key column and a\n dictionary with parameters of the form name, value.\n\n Runs the algorithm and returns a pandas data frame structure that is\n merged with the existing data frame in the workflow using the merge_key.\n\n :param data_frame: Input data for the plugin\n :param merge_key: Name of the column key that will be used for merging\n :param parameters: Dictionary with (name, value) pairs.\n\n :return: a Pandas data_frame to merge with the existing one (must\n contain a column with name merge_key)\n \"\"\"\n\n # Extract the key column from the given data frame\n result = pd.DataFrame(data_frame[merge_key])\n\n # Process the given data and create the result\n result[self.output_column_names[0]] = \\\n data_frame[self.input_column_names[0]] + \\\n \t data_frame[self.input_column_names[1]]\n result[self.output_column_names[1]] = \\\n data_frame[self.input_column_names[0]] - \\\n \t data_frame[self.input_column_names[1]]\n\n return result\n"
] | [
[
"pandas.DataFrame"
]
] |
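A usage sketch for the plugin above: OnTask calls `run` with a frame containing the declared input columns plus a key column, then merges the returned frame back on that key. The import path and the toy data below are assumptions for illustration.

```python
import pandas as pd
from plugins.test_plugin_2 import OntaskTestPlugin  # assumed path, with src/ on sys.path

df = pd.DataFrame({"key": [1, 2], "A1": [10, 20], "A2": [3, 4]})
plugin = OntaskTestPlugin()
result = plugin.run(df, merge_key="key", parameters={})

# RESULT 3 = A1 + A2, RESULT 4 = A1 - A2, keyed so it can be merged back
print(df.merge(result, on="key"))
```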
bzamecnik/tensorpack | [
"e9a3c2b3cd441e5b288607b44f2fe44fbf3ad4bb"
] | [
"examples/FasterRCNN/train.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: train.py\n\nimport argparse\nimport itertools\nimport numpy as np\nimport os\nimport shutil\nimport cv2\nimport six\nassert six.PY3, \"FasterRCNN requires Python 3!\"\nimport tensorflow as tf\nimport tqdm\n\nimport tensorpack.utils.viz as tpviz\nfrom tensorpack import *\nfrom tensorpack.tfutils import optimizer, collect_env_info\nfrom tensorpack.tfutils.common import get_tf_version_tuple\nfrom tensorpack.tfutils.summary import add_moving_summary\n\nimport model_frcnn\nimport model_mrcnn\nfrom basemodel import image_preprocess, resnet_c4_backbone, resnet_conv5, resnet_fpn_backbone\nfrom dataset import DetectionDataset\nfrom config import finalize_configs, config as cfg\nfrom data import get_all_anchors, get_all_anchors_fpn, get_eval_dataflow, get_train_dataflow\nfrom eval import DetectionResult, predict_image, multithread_predict_dataflow, EvalCallback\nfrom model_box import RPNAnchors, clip_boxes, crop_and_resize, roi_align\nfrom model_cascade import CascadeRCNNHead\nfrom model_fpn import fpn_model, generate_fpn_proposals, multilevel_roi_align, multilevel_rpn_losses\nfrom model_frcnn import BoxProposals, FastRCNNHead, fastrcnn_outputs, fastrcnn_predictions, sample_fast_rcnn_targets\nfrom model_mrcnn import maskrcnn_loss, maskrcnn_upXconv_head\nfrom model_rpn import generate_rpn_proposals, rpn_head, rpn_losses\nfrom viz import draw_annotation, draw_final_outputs, draw_predictions, draw_proposal_recall\n\ntry:\n import horovod.tensorflow as hvd\nexcept ImportError:\n pass\n\n\nclass DetectionModel(ModelDesc):\n def preprocess(self, image):\n image = tf.expand_dims(image, 0)\n image = image_preprocess(image, bgr=True)\n return tf.transpose(image, [0, 3, 1, 2])\n\n @property\n def training(self):\n return get_current_tower_context().is_training\n\n def optimizer(self):\n lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)\n tf.summary.scalar('learning_rate-summary', lr)\n\n # The learning rate in the config is set for 8 GPUs, and we use trainers with average=False.\n lr = lr / 8.\n opt = tf.train.MomentumOptimizer(lr, 0.9)\n if cfg.TRAIN.NUM_GPUS < 8:\n opt = optimizer.AccumGradOptimizer(opt, 8 // cfg.TRAIN.NUM_GPUS)\n return opt\n\n def get_inference_tensor_names(self):\n \"\"\"\n Returns two lists of tensor names to be used to create an inference callable.\n\n Returns:\n [str]: input names\n [str]: output names\n \"\"\"\n out = ['output/boxes', 'output/scores', 'output/labels']\n if cfg.MODE_MASK:\n out.append('output/masks')\n return ['image'], out\n\n def build_graph(self, *inputs):\n inputs = dict(zip(self.input_names, inputs))\n\n image = self.preprocess(inputs['image']) # 1CHW\n\n features = self.backbone(image)\n anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}\n proposals, rpn_losses = self.rpn(image, features, anchor_inputs) # inputs?\n\n targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]\n head_losses = self.roi_heads(image, features, proposals, targets)\n\n if self.training:\n wd_cost = regularize_cost(\n '.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')\n total_cost = tf.add_n(\n rpn_losses + head_losses + [wd_cost], 'total_cost')\n add_moving_summary(total_cost, wd_cost)\n return total_cost\n\n\nclass ResNetC4Model(DetectionModel):\n def inputs(self):\n ret = [\n tf.TensorSpec((None, None, 3), tf.float32, 'image'),\n tf.TensorSpec((None, None, cfg.RPN.NUM_ANCHOR), tf.int32, 'anchor_labels'),\n tf.TensorSpec((None, None, 
cfg.RPN.NUM_ANCHOR, 4), tf.float32, 'anchor_boxes'),\n tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),\n tf.TensorSpec((None,), tf.int64, 'gt_labels')] # all > 0\n if cfg.MODE_MASK:\n ret.append(\n tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks')\n ) # NR_GT x height x width\n return ret\n\n def backbone(self, image):\n return [resnet_c4_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS[:3])]\n\n def rpn(self, image, features, inputs):\n featuremap = features[0]\n rpn_label_logits, rpn_box_logits = rpn_head('rpn', featuremap, cfg.RPN.HEAD_DIM, cfg.RPN.NUM_ANCHOR)\n anchors = RPNAnchors(get_all_anchors(), inputs['anchor_labels'], inputs['anchor_boxes'])\n anchors = anchors.narrow_to(featuremap)\n\n image_shape2d = tf.shape(image)[2:] # h,w\n pred_boxes_decoded = anchors.decode_logits(rpn_box_logits) # fHxfWxNAx4, floatbox\n proposal_boxes, proposal_scores = generate_rpn_proposals(\n tf.reshape(pred_boxes_decoded, [-1, 4]),\n tf.reshape(rpn_label_logits, [-1]),\n image_shape2d,\n cfg.RPN.TRAIN_PRE_NMS_TOPK if self.training else cfg.RPN.TEST_PRE_NMS_TOPK,\n cfg.RPN.TRAIN_POST_NMS_TOPK if self.training else cfg.RPN.TEST_POST_NMS_TOPK)\n\n if self.training:\n losses = rpn_losses(\n anchors.gt_labels, anchors.encoded_gt_boxes(), rpn_label_logits, rpn_box_logits)\n else:\n losses = []\n\n return BoxProposals(proposal_boxes), losses\n\n def roi_heads(self, image, features, proposals, targets):\n image_shape2d = tf.shape(image)[2:] # h,w\n featuremap = features[0]\n\n gt_boxes, gt_labels, *_ = targets\n\n if self.training:\n # sample proposal boxes in training\n proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)\n # The boxes to be used to crop RoIs.\n # Use all proposal boxes in inference\n\n boxes_on_featuremap = proposals.boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE)\n roi_resized = roi_align(featuremap, boxes_on_featuremap, 14)\n\n feature_fastrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1]) # nxcx7x7\n # Keep C5 feature to be shared with mask branch\n feature_gap = GlobalAvgPooling('gap', feature_fastrcnn, data_format='channels_first')\n fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs('fastrcnn', feature_gap, cfg.DATA.NUM_CLASS)\n\n fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits, gt_boxes,\n tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))\n\n if self.training:\n all_losses = fastrcnn_head.losses()\n\n if cfg.MODE_MASK:\n gt_masks = targets[2]\n # maskrcnn loss\n # In training, mask branch shares the same C5 feature.\n fg_feature = tf.gather(feature_fastrcnn, proposals.fg_inds())\n mask_logits = maskrcnn_upXconv_head(\n 'maskrcnn', fg_feature, cfg.DATA.NUM_CATEGORY, num_convs=0) # #fg x #cat x 14x14\n\n target_masks_for_fg = crop_and_resize(\n tf.expand_dims(gt_masks, 1),\n proposals.fg_boxes(),\n proposals.fg_inds_wrt_gt, 14,\n pad_border=False) # nfg x 1x14x14\n target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')\n all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))\n return all_losses\n else:\n decoded_boxes = fastrcnn_head.decoded_output_boxes()\n decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')\n label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')\n final_boxes, final_scores, final_labels = fastrcnn_predictions(\n decoded_boxes, label_scores, name_scope='output')\n\n if cfg.MODE_MASK:\n roi_resized = roi_align(featuremap, final_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE), 
14)\n feature_maskrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])\n mask_logits = maskrcnn_upXconv_head(\n 'maskrcnn', feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 0) # #result x #cat x 14x14\n indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)\n final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx14x14\n tf.sigmoid(final_mask_logits, name='output/masks')\n return []\n\n\nclass ResNetFPNModel(DetectionModel):\n\n def inputs(self):\n ret = [\n tf.TensorSpec((None, None, 3), tf.float32, 'image')]\n num_anchors = len(cfg.RPN.ANCHOR_RATIOS)\n for k in range(len(cfg.FPN.ANCHOR_STRIDES)):\n ret.extend([\n tf.TensorSpec((None, None, num_anchors), tf.int32,\n 'anchor_labels_lvl{}'.format(k + 2)),\n tf.TensorSpec((None, None, num_anchors, 4), tf.float32,\n 'anchor_boxes_lvl{}'.format(k + 2))])\n ret.extend([\n tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),\n tf.TensorSpec((None,), tf.int64, 'gt_labels')]) # all > 0\n if cfg.MODE_MASK:\n ret.append(\n tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks')\n ) # NR_GT x height x width\n return ret\n\n def slice_feature_and_anchors(self, p23456, anchors):\n for i, stride in enumerate(cfg.FPN.ANCHOR_STRIDES):\n with tf.name_scope('FPN_slice_lvl{}'.format(i)):\n anchors[i] = anchors[i].narrow_to(p23456[i])\n\n def backbone(self, image):\n c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)\n p23456 = fpn_model('fpn', c2345)\n return p23456\n\n def rpn(self, image, features, inputs):\n assert len(cfg.RPN.ANCHOR_SIZES) == len(cfg.FPN.ANCHOR_STRIDES)\n\n image_shape2d = tf.shape(image)[2:] # h,w\n all_anchors_fpn = get_all_anchors_fpn()\n multilevel_anchors = [RPNAnchors(\n all_anchors_fpn[i],\n inputs['anchor_labels_lvl{}'.format(i + 2)],\n inputs['anchor_boxes_lvl{}'.format(i + 2)]) for i in range(len(all_anchors_fpn))]\n self.slice_feature_and_anchors(features, multilevel_anchors)\n\n # Multi-Level RPN Proposals\n rpn_outputs = [rpn_head('rpn', pi, cfg.FPN.NUM_CHANNEL, len(cfg.RPN.ANCHOR_RATIOS))\n for pi in features]\n multilevel_label_logits = [k[0] for k in rpn_outputs]\n multilevel_box_logits = [k[1] for k in rpn_outputs]\n multilevel_pred_boxes = [anchor.decode_logits(logits)\n for anchor, logits in zip(multilevel_anchors, multilevel_box_logits)]\n\n proposal_boxes, proposal_scores = generate_fpn_proposals(\n multilevel_pred_boxes, multilevel_label_logits, image_shape2d)\n\n if self.training:\n losses = multilevel_rpn_losses(\n multilevel_anchors, multilevel_label_logits, multilevel_box_logits)\n else:\n losses = []\n\n return BoxProposals(proposal_boxes), losses\n\n def roi_heads(self, image, features, proposals, targets):\n image_shape2d = tf.shape(image)[2:] # h,w\n assert len(features) == 5, \"Features have to be P23456!\"\n gt_boxes, gt_labels, *_ = targets\n\n if self.training:\n proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)\n\n fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)\n if not cfg.FPN.CASCADE:\n roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)\n\n head_feature = fastrcnn_head_func('fastrcnn', roi_feature_fastrcnn)\n fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(\n 'fastrcnn/outputs', head_feature, cfg.DATA.NUM_CLASS)\n fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,\n gt_boxes, tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))\n else:\n def roi_func(boxes):\n return multilevel_roi_align(features[:4], 
boxes, 7)\n\n fastrcnn_head = CascadeRCNNHead(\n proposals, roi_func, fastrcnn_head_func,\n (gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS)\n\n if self.training:\n all_losses = fastrcnn_head.losses()\n\n if cfg.MODE_MASK:\n gt_masks = targets[2]\n # maskrcnn loss\n roi_feature_maskrcnn = multilevel_roi_align(\n features[:4], proposals.fg_boxes(), 14,\n name_scope='multilevel_roi_align_mask')\n maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)\n mask_logits = maskrcnn_head_func(\n 'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28\n\n target_masks_for_fg = crop_and_resize(\n tf.expand_dims(gt_masks, 1),\n proposals.fg_boxes(),\n proposals.fg_inds_wrt_gt, 28,\n pad_border=False) # fg x 1x28x28\n target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')\n all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))\n return all_losses\n else:\n decoded_boxes = fastrcnn_head.decoded_output_boxes()\n decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')\n label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')\n final_boxes, final_scores, final_labels = fastrcnn_predictions(\n decoded_boxes, label_scores, name_scope='output')\n if cfg.MODE_MASK:\n # Cascade inference needs roi transform with refined boxes.\n roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)\n maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)\n mask_logits = maskrcnn_head_func(\n 'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28\n indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)\n final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx28x28\n tf.sigmoid(final_mask_logits, name='output/masks')\n return []\n\n\ndef do_visualize(model, model_path, nr_visualize=100, output_dir='output'):\n \"\"\"\n Visualize some intermediate results (proposals, raw predictions) inside the pipeline.\n \"\"\"\n df = get_train_dataflow() # we don't visualize mask stuff\n df.reset_state()\n\n pred = OfflinePredictor(PredictConfig(\n model=model,\n session_init=get_model_loader(model_path),\n input_names=['image', 'gt_boxes', 'gt_labels'],\n output_names=[\n 'generate_{}_proposals/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),\n 'generate_{}_proposals/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),\n 'fastrcnn_all_scores',\n 'output/boxes',\n 'output/scores',\n 'output/labels',\n ]))\n\n if os.path.isdir(output_dir):\n shutil.rmtree(output_dir)\n utils.fs.mkdir_p(output_dir)\n with tqdm.tqdm(total=nr_visualize) as pbar:\n for idx, dp in itertools.islice(enumerate(df), nr_visualize):\n img, gt_boxes, gt_labels = dp['image'], dp['gt_boxes'], dp['gt_labels']\n\n rpn_boxes, rpn_scores, all_scores, \\\n final_boxes, final_scores, final_labels = pred(img, gt_boxes, gt_labels)\n\n # draw groundtruth boxes\n gt_viz = draw_annotation(img, gt_boxes, gt_labels)\n # draw best proposals for each groundtruth, to show recall\n proposal_viz, good_proposals_ind = draw_proposal_recall(img, rpn_boxes, rpn_scores, gt_boxes)\n # draw the scores for the above proposals\n score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind], all_scores[good_proposals_ind])\n\n results = [DetectionResult(*args) for args in\n zip(final_boxes, final_scores, final_labels,\n [None] * len(final_labels))]\n final_viz = draw_final_outputs(img, results)\n\n viz = tpviz.stack_patches([\n gt_viz, 
proposal_viz,\n score_viz, final_viz], 2, 2)\n\n if os.environ.get('DISPLAY', None):\n tpviz.interactive_imshow(viz)\n cv2.imwrite(\"{}/{:03d}.png\".format(output_dir, idx), viz)\n pbar.update()\n\n\ndef do_evaluate(pred_config, output_file):\n num_gpu = cfg.TRAIN.NUM_GPUS\n graph_funcs = MultiTowerOfflinePredictor(\n pred_config, list(range(num_gpu))).get_predictors()\n\n for dataset in cfg.DATA.VAL:\n logger.info(\"Evaluating {} ...\".format(dataset))\n dataflows = [\n get_eval_dataflow(dataset, shard=k, num_shards=num_gpu)\n for k in range(num_gpu)]\n all_results = multithread_predict_dataflow(dataflows, graph_funcs)\n output = output_file + '-' + dataset\n DetectionDataset().eval_or_save_inference_results(all_results, dataset, output)\n\n\ndef do_predict(pred_func, input_file):\n img = cv2.imread(input_file, cv2.IMREAD_COLOR)\n results = predict_image(img, pred_func)\n final = draw_final_outputs(img, results)\n viz = np.concatenate((img, final), axis=1)\n cv2.imwrite(\"output.png\", viz)\n logger.info(\"Inference output for {} written to output.png\".format(input_file))\n tpviz.interactive_imshow(viz)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--load', help='load a model for evaluation or training. Can overwrite BACKBONE.WEIGHTS')\n parser.add_argument('--logdir', help='log directory', default='train_log/maskrcnn')\n parser.add_argument('--visualize', action='store_true', help='visualize intermediate results')\n parser.add_argument('--evaluate', help=\"Run evaluation. \"\n \"This argument is the path to the output json evaluation file\")\n parser.add_argument('--predict', help=\"Run prediction on a given image. \"\n \"This argument is the path to the input image file\", nargs='+')\n parser.add_argument('--config', help=\"A list of KEY=VALUE to overwrite those defined in config.py\",\n nargs='+')\n\n if get_tf_version_tuple() < (1, 6):\n # https://github.com/tensorflow/tensorflow/issues/14657\n logger.warn(\"TF<1.6 has a bug which may lead to crash in FasterRCNN if you're unlucky.\")\n\n args = parser.parse_args()\n if args.config:\n cfg.update_args(args.config)\n\n MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()\n DetectionDataset() # initialize the config with information from our dataset\n\n if args.visualize or args.evaluate or args.predict:\n if not tf.test.is_gpu_available():\n from tensorflow.python.framework import test_util\n assert get_tf_version_tuple() >= (1, 7) and test_util.IsMklEnabled(), \\\n \"Inference requires either GPU support or MKL support!\"\n assert args.load\n finalize_configs(is_training=False)\n\n if args.predict or args.visualize:\n cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS\n\n if args.visualize:\n do_visualize(MODEL, args.load)\n else:\n predcfg = PredictConfig(\n model=MODEL,\n session_init=get_model_loader(args.load),\n input_names=MODEL.get_inference_tensor_names()[0],\n output_names=MODEL.get_inference_tensor_names()[1])\n if args.predict:\n predictor = OfflinePredictor(predcfg)\n for image_file in args.predict:\n do_predict(predictor, image_file)\n elif args.evaluate:\n assert args.evaluate.endswith('.json'), args.evaluate\n do_evaluate(predcfg, args.evaluate)\n else:\n is_horovod = cfg.TRAINER == 'horovod'\n if is_horovod:\n hvd.init()\n logger.info(\"Horovod Rank={}, Size={}\".format(hvd.rank(), hvd.size()))\n\n if not is_horovod or hvd.rank() == 0:\n logger.set_logger_dir(args.logdir, 'd')\n logger.info(\"Environment Information:\\n\" + collect_env_info())\n\n 
finalize_configs(is_training=True)\n stepnum = cfg.TRAIN.STEPS_PER_EPOCH\n\n # warmup is step based, lr is epoch based\n init_lr = cfg.TRAIN.WARMUP_INIT_LR * min(8. / cfg.TRAIN.NUM_GPUS, 1.)\n warmup_schedule = [(0, init_lr), (cfg.TRAIN.WARMUP, cfg.TRAIN.BASE_LR)]\n warmup_end_epoch = cfg.TRAIN.WARMUP * 1. / stepnum\n lr_schedule = [(int(warmup_end_epoch + 0.5), cfg.TRAIN.BASE_LR)]\n\n factor = 8. / cfg.TRAIN.NUM_GPUS\n for idx, steps in enumerate(cfg.TRAIN.LR_SCHEDULE[:-1]):\n mult = 0.1 ** (idx + 1)\n lr_schedule.append(\n (steps * factor // stepnum, cfg.TRAIN.BASE_LR * mult))\n logger.info(\"Warm Up Schedule (steps, value): \" + str(warmup_schedule))\n logger.info(\"LR Schedule (epochs, value): \" + str(lr_schedule))\n train_dataflow = get_train_dataflow()\n # This is what's commonly referred to as \"epochs\"\n total_passes = cfg.TRAIN.LR_SCHEDULE[-1] * 8 / train_dataflow.size()\n logger.info(\"Total passes of the training set is: {:.5g}\".format(total_passes))\n\n callbacks = [\n PeriodicCallback(\n ModelSaver(max_to_keep=10, keep_checkpoint_every_n_hours=1),\n every_k_epochs=20),\n # linear warmup\n ScheduledHyperParamSetter(\n 'learning_rate', warmup_schedule, interp='linear', step_based=True),\n ScheduledHyperParamSetter('learning_rate', lr_schedule),\n PeakMemoryTracker(),\n EstimatedTimeLeft(median=True),\n SessionRunTimeout(60000).set_chief_only(True), # 1 minute timeout\n ]\n if cfg.TRAIN.EVAL_PERIOD > 0:\n callbacks.extend([\n EvalCallback(dataset, *MODEL.get_inference_tensor_names(), args.logdir)\n for dataset in cfg.DATA.VAL\n ])\n if not is_horovod:\n callbacks.append(GPUUtilizationTracker())\n\n if is_horovod and hvd.rank() > 0:\n session_init = None\n else:\n if args.load:\n session_init = get_model_loader(args.load)\n else:\n session_init = get_model_loader(cfg.BACKBONE.WEIGHTS) if cfg.BACKBONE.WEIGHTS else None\n\n traincfg = TrainConfig(\n model=MODEL,\n data=QueueInput(train_dataflow),\n callbacks=callbacks,\n steps_per_epoch=stepnum,\n max_epoch=cfg.TRAIN.LR_SCHEDULE[-1] * factor // stepnum,\n session_init=session_init,\n starting_epoch=cfg.TRAIN.STARTING_EPOCH\n )\n if is_horovod:\n trainer = HorovodTrainer(average=False)\n else:\n # nccl mode appears faster than cpu mode\n trainer = SyncMultiGPUTrainerReplicated(cfg.TRAIN.NUM_GPUS, average=False, mode='nccl')\n launch_train_with_config(traincfg, trainer)\n"
] | [
[
"tensorflow.size",
"tensorflow.summary.scalar",
"tensorflow.add_n",
"tensorflow.shape",
"tensorflow.train.MomentumOptimizer",
"tensorflow.reshape",
"tensorflow.gather_nd",
"tensorflow.sigmoid",
"tensorflow.expand_dims",
"tensorflow.squeeze",
"tensorflow.cast",
"tensorflow.python.framework.test_util.IsMklEnabled",
"tensorflow.TensorSpec",
"tensorflow.constant",
"numpy.concatenate",
"tensorflow.transpose",
"tensorflow.get_variable",
"tensorflow.test.is_gpu_available"
]
] |
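The learning-rate arithmetic in `train.py` above scales an 8-GPU step schedule to the actual GPU count. A standalone sketch of just that computation follows; the config values are illustrative stand-ins for `cfg.TRAIN.*`, not the repo defaults.

```python
# Reproduce the schedule computation from train.py with plain numbers.
STEPS_PER_EPOCH = 500      # stand-in for cfg.TRAIN.STEPS_PER_EPOCH
NUM_GPUS = 1               # stand-in for cfg.TRAIN.NUM_GPUS
WARMUP = 1000              # warmup steps, defined at 8-GPU scale
BASE_LR = 0.01
LR_SCHEDULE = [240000, 320000, 360000]   # decay points, 8-GPU scale

factor = 8.0 / NUM_GPUS
warmup_end_epoch = WARMUP / STEPS_PER_EPOCH
lr_schedule = [(int(warmup_end_epoch + 0.5), BASE_LR)]
for idx, steps in enumerate(LR_SCHEDULE[:-1]):
    mult = 0.1 ** (idx + 1)
    lr_schedule.append((steps * factor // STEPS_PER_EPOCH, BASE_LR * mult))

# Fewer GPUs -> same total examples -> proportionally later epoch boundaries.
print(lr_schedule)
```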
hmartelb/meme-search | [
"2042678b3a7252ba00699e7a0618aafdf2059465"
] | [
"data/scraper.py"
] | [
"\n\nimport json\nimport time\nimport os\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\n\ndef process_9gag(args):\n fetched_memes = []\n errors = 0\n # for i in tqdm(range(args.))\n pass\n\ndef process_me_dot_me(args):\n pass\n\ndef templates_imgflip(args):\n args.source_url = \"https://imgflip.com/memetemplates\"\n fetched_templates = []\n errors = 0\n for i in tqdm(range(args.from_page, args.pages + 1)):\n print(f\"Requesting: {args.source_url}?page={i}\")\n response = requests.get(f\"{args.source_url}?page={i}\")\n\n print(response)\n\n if response.status_code != 200:\n print(\"Bad response\")\n break\n \n body = BeautifulSoup(response.text, 'html.parser')\n templates = body.findAll(\"div\", {\"class\": \"mt-box\"})\n \n print(len(templates))\n\n for template in templates:\n try:\n template_url = \"https://\"+template.find('img', {\"class\": \"shadow\"})['src'][2:]\n template_id, template_format = os.path.splitext(template_url.split(\"/\")[-1])\n\n template_title = template.find(\"h3\", {\"class\": \"mt-title\"}).find(\"a\")\n template_title = \"\" if template_title is None else template_title.text\n\n template_data = {\n \"id\": template_id,\n \"format\": template_format,\n \"website\": \"imgflip\",\n \"url\": template_url,\n \"title\": template_title\n }\n fetched_templates.append(template_data)\n except:\n errors += 1\n # time.sleep(args.delay)\n\n print(f\"Fetched: {len(fetched_templates)} templates. Found {errors} error(s).\")\n return fetched_templates\n\n\ndef process_imgflip(args):\n '''\n https://gist.github.com/WalterSimoncini/defca6de456bb168ada303085358bf0a\n '''\n fetched_memes = []\n errors = 0\n for i in tqdm(range(args.from_page, args.pages + 1)):\n # print(f\"Processing page {i}\")\n response = requests.get(f\"{args.source_url}?page={i}\")\n body = BeautifulSoup(response.text, 'html.parser')\n\n if response.status_code != 200:\n # print(\"Something went wrong!\")\n break # Something went wrong (e.g. page limit)\n\n memes = body.findAll(\"div\", {\"class\": \"base-unit clearfix\"})\n for meme in memes:\n if \"not-safe-for-work images\" in str(meme):\n continue # NSFW memes are available only to logged in users\n \n try:\n meme_url = 'https://'+meme.find(\"img\", {\"class\": \"base-img\"})[\"src\"][2:]\n meme_id, meme_format = os.path.splitext(meme_url.split(\"/\")[-1])\n\n # Handle anonymous authors\n meme_author = meme.find(\"a\", {\"class\": \"u-username\"})\n meme_author = \"anonymous\" if meme_author is None else meme_author.text\n \n # Handle empty titles\n meme_title = meme.find(\"h2\", {\"class\": \"base-unit-title\"}).find(\"a\")\n meme_title = \"\" if meme_title is None else meme_title.text\n \n meme_text = meme.find(\"img\", {\"class\": \"base-img\"})[\"alt\"]\n meme_text = meme_text.split(\"|\")[1].strip()\n\n meme_data = {\n \"id\": meme_id,\n \"format\": meme_format,\n \"website\": \"imgflip\",\n \"url\": meme_url,\n \"author\": meme_author,\n \"title\": meme_title,\n \"text\": meme_text.lower()\n }\n fetched_memes.append(meme_data)\n except:\n errors += 1\n\n time.sleep(args.delay)\n\n print(f\"Fetched: {len(fetched_memes)} memes. Found {errors} error(s).\")\n return fetched_memes\n\nif __name__ == '__main__':\n import argparse\n ap = argparse.ArgumentParser()\n # ap.add_argument(\"--source_url\", default=\"https://imgflip.com/tag/programming\", help=\"Memes list url (e.g. 
https://imgflip.com/meme/Bird-Box)\", type=str)\n ap.add_argument(\"--tag\", required=True, type=str)#default=['programming', 'artificial intelligence', 'computer'], type=list)\n ap.add_argument(\"--from_page\", default=1, help=\"Initial page\", type=int)\n ap.add_argument(\"--pages\", default=44, help=\"Maximum page number to be scraped\", type=int)\n ap.add_argument(\"--delay\", default=2, help=\"Delay between page loads (seconds)\", type=int)\n ap.add_argument(\"-o\", \"--output\", default=\"templates.tsv\")\n args = ap.parse_args()\n\n # category = args.source_url.split(\"/\")[-1].replace(\"-\", \" \")\n\n # Get the data\n data = {}\n # for tag in args.tags:\n print(f\"Processing tag: {args.tag}\")\n \n # Get the data\n # args.source_url = f\"https://imgflip.com/tag/{args.tag.replace(' ', '+')}\"\n # data = process_imgflip(args)\n \n # args.source_url = f\"https://ww.9gag.com/search/?query={args.tag.replace(' ', '+')}\"\n # data = process_9gag(args)\n \n data = templates_imgflip(args)\n\n # Create a pd.DataFrame and save (append to existing .tsv)\n df = pd.DataFrame(data)\n print(df.head(20))\n df.to_csv(args.output, sep='\\t', index=False, mode='a')"
] | [
[
"pandas.DataFrame"
]
] |
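A usage sketch for `scraper.py` above, bypassing argparse by passing an equivalent `Namespace`; note this performs live HTTP requests against imgflip.com.

```python
from argparse import Namespace
from scraper import templates_imgflip  # assumes data/scraper.py is on the path

args = Namespace(tag="programming", from_page=1, pages=2, delay=2,
                 output="templates.tsv")
templates = templates_imgflip(args)    # fetches https://imgflip.com/memetemplates
print(templates[:3])                   # dicts with id, format, website, url, title
```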
AnielliRosane/lista-ser347 | [
"61a8ac8f675dc0ec05f45408c54e9d3a0e515ff4"
] | [
"exercicio-4.py"
] | [
"# -*- coding: utf-8 -*-\n\n# Lista de exercicio 06\n\n# Exercicio 4\n\n# importando as bibliotecas\n\nimport matplotlib as plt\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# informacoes da tabela relativas aos dados masculino e feminino (IBGE)\n\nidade = np.array(\n [\"0 a 4 anos\", \"5 a 9 anos\", \"10 a 14 anos\", \"15 a 19 anos\", \"20 a 24 anos\", \"25 a 29 anos\",\n \"30 a 34 anos\", \"35 a 39 anos\", \"40 a 44 anos\", \"45 a 49 anos\", \"50 a 54 anos\", \"55 a 59 anos\",\n \"60 a 64 anos\", \"65 a 69 anos\", \"70 a 74 anos\", \"75 a 79 anos\", \"80 a 84 anos\", \"85 a 89 anos\",\n \"90 a 94 anos\", \"95 a 99 anos\", \"100 anos e mais\"])\n\nfeminino = np.array([6779171, 7345231, 8441348, 8432004, 8614963, 8643419, 8026854, 7121915, 6688796, 6141338, 5305407,\n 4373877, 3468085, 2616745, 2074264, 1472930, 998349, 508724, 211594, 66806, 16989])\n\nmasculino = np.array([7016987, 7624144, 8725413, 8558868, 8630229, 8460995, 7717658, 6766664, 6320568, 5692014, 4834995,\n 3902344, 3041035, 2224065, 1667372, 1090517, 668623, 310759, 114964, 31529, 7247])\n\npop = [x for x in range( len(idade) ) ]\n\n# Configuracao do grafico\n\nplt.figure(figsize=(10, 8))\n\nplt.suptitle('Distribuição da População por sexo segundo os grupos de idade – Brasil – 2010', fontsize=18)\n\nplt.rc('axes.spines', **{'bottom': True, 'left': False, 'right': False, 'top': False}) # remove as linhas da figura\n\n# Subplot masculino\nplt.subplot(221)\nplt.barh(idade, masculino, align='center', color='blue', linewidth=0.5, label='Masculino')\nplt.xticks([0, 2000000, 4000000, 6000000, 8000000], [\"\", \"\", \"4000000\"])\n\nplt.legend(loc='upper left') # legenda\n\nplt.subplots_adjust(left=0.15, wspace=0.4) # coloca espaco entre os graficos\n\nplt.gca().invert_xaxis() # inverte\n\nplt.yticks([]) # remove o eixo y\n\n# colocando linhas\nplt.axvline(8000000, color='grey', alpha=0.15)\nplt.axvline(6000000, color='grey', alpha=0.15)\nplt.axvline(4000000, color='grey', alpha=0.15)\nplt.axvline(6000000, color='grey', alpha=0.15)\nplt.axvline(2000000, color='grey', alpha=0.15)\nplt.axvline(0, color='black', alpha=0.20)\n\n# subplot feminino\nplt.subplot(222)\nplt.barh(idade, feminino, align='center', color='orange', linewidth=0.5, label='Feminino')\nplt.xticks([0, 2000000, 4000000, 6000000, 8000000], [\"0\", \"\", \"4000000\"], )\n\nplt.legend(loc='upper right') # legenda\n\n# colocando linhas\nplt.axvline(8000000, color='grey', alpha=0.15)\nplt.axvline(6000000, color='grey', alpha=0.15)\nplt.axvline(4000000, color='grey', alpha=0.15)\nplt.axvline(6000000, color='grey', alpha=0.15)\nplt.axvline(2000000, color='grey', alpha=0.15)\nplt.axvline(0, color='black', alpha=0.30)\n\nplt.show();\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.barh",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"matplotlib.pyplot.suptitle",
"numpy.array",
"matplotlib.pyplot.yticks"
]
] |
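The script above builds the population pyramid from two `barh` subplots and inverts one x-axis. A more compact variant of the same idea, with illustrative numbers, negates one group's counts so a single axes suffices:

```python
import numpy as np
import matplotlib.pyplot as plt

ages = ["0-4", "5-9", "10-14", "15-19"]           # truncated, for brevity
male = np.array([7.0, 7.6, 8.7, 8.6])             # millions (illustrative)
female = np.array([6.8, 7.3, 8.4, 8.4])

fig, ax = plt.subplots()
ax.barh(ages, -male, color="blue", label="Male")   # mirror by negating
ax.barh(ages, female, color="orange", label="Female")
ax.set_xticks([-8, -4, 0, 4, 8])
ax.set_xticklabels(["8", "4", "0", "4", "8"])      # relabel the mirrored side
ax.set_xlabel("Population (millions)")
ax.legend()
plt.show()
```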
dukebw/nerfies | [
"b30fe19edb6435e770b35dc07aab44ae62c96278"
] | [
"third_party/pycolmap/pycolmap/scene_manager.py"
] | [
"# Author: True Price <jtprice at cs.unc.edu>\n\nfrom collections import OrderedDict, defaultdict\nfrom io import StringIO\nfrom itertools import combinations\nimport os\nimport struct\n\nfrom .camera import Camera\nfrom .image import Image\nimport numpy as np\nfrom .rotation import Quaternion\n\n# -------------------------------------------------------------------------------\n#\n# SceneManager\n#\n# -------------------------------------------------------------------------------\n\n\nclass SceneManager:\n INVALID_POINT3D = np.uint64(-1)\n\n def __init__(self, colmap_results_folder, image_path=None):\n self.folder = colmap_results_folder\n if not self.folder.endswith(\"/\"):\n self.folder += \"/\"\n\n self.image_path = None\n self.load_colmap_project_file(image_path=image_path)\n\n self.cameras = OrderedDict()\n self.images = OrderedDict()\n self.name_to_image_id = dict()\n\n self.last_camera_id = 0\n self.last_image_id = 0\n\n # Nx3 array of point3D xyz's\n self.points3D = np.zeros((0, 3))\n\n # for each element in points3D, stores the id of the point\n self.point3D_ids = np.empty(0)\n\n # point3D_id => index in self.points3D\n self.point3D_id_to_point3D_idx = dict()\n\n # point3D_id => [(image_id, point2D idx in image)]\n self.point3D_id_to_images = dict()\n\n self.point3D_colors = np.zeros((0, 3), dtype=np.uint8)\n self.point3D_errors = np.zeros(0)\n\n # ---------------------------------------------------------------------------\n\n def load_colmap_project_file(self, project_file=None, image_path=None):\n if project_file is None:\n project_file = self.folder + \"project.ini\"\n\n self.image_path = image_path\n\n if self.image_path is None:\n try:\n with open(project_file, \"r\") as f:\n for line in iter(f.readline, \"\"):\n if line.startswith(\"image_path\"):\n self.image_path = line[11:].strip()\n break\n except:\n pass\n\n if self.image_path is None:\n print(\"Warning: image_path not found for reconstruction\")\n elif not self.image_path.endswith(\"/\"):\n self.image_path += \"/\"\n\n # ---------------------------------------------------------------------------\n\n def load(self):\n self.load_cameras()\n self.load_images()\n self.load_points3D()\n\n # ---------------------------------------------------------------------------\n\n def load_cameras(self, input_file=None):\n if input_file is None:\n input_file = self.folder + \"cameras.bin\"\n if os.path.exists(input_file):\n self._load_cameras_bin(input_file)\n else:\n input_file = self.folder + \"cameras.txt\"\n if os.path.exists(input_file):\n self._load_cameras_txt(input_file)\n else:\n raise IOError(\"no cameras file found\")\n\n def _load_cameras_bin(self, input_file):\n self.cameras = OrderedDict()\n\n with open(input_file, \"rb\") as f:\n num_cameras = struct.unpack(\"L\", f.read(8))[0]\n for _ in range(num_cameras):\n camera_id, camera_type, w, h = struct.unpack(\"IiLL\", f.read(24))\n num_params = Camera.GetNumParams(camera_type)\n params = struct.unpack(\"d\" * num_params, f.read(8 * num_params))\n self.cameras[camera_id] = Camera(camera_type, w, h, params)\n self.last_camera_id = max(self.last_camera_id, camera_id)\n\n def _load_cameras_txt(self, input_file):\n self.cameras = OrderedDict()\n\n with open(input_file, \"r\") as f:\n for line in iter(lambda: f.readline().strip(), \"\"):\n if not line or line.startswith(\"#\"):\n continue\n\n data = line.split()\n camera_id = int(data[0])\n self.cameras[camera_id] = Camera(\n data[1], int(data[2]), int(data[3]), list(map(float, data[4:]))\n )\n self.last_camera_id = 
max(self.last_camera_id, camera_id)\n\n # ---------------------------------------------------------------------------\n\n def load_images(self, input_file=None):\n if input_file is None:\n input_file = self.folder + \"images.bin\"\n if os.path.exists(input_file):\n self._load_images_bin(input_file)\n else:\n input_file = self.folder + \"images.txt\"\n if os.path.exists(input_file):\n self._load_images_txt(input_file)\n else:\n raise IOError(\"no images file found\")\n\n def _load_images_bin(self, input_file):\n self.images = OrderedDict()\n\n with open(input_file, \"rb\") as f:\n num_images = struct.unpack(\"L\", f.read(8))[0]\n for _ in range(num_images):\n image_id = struct.unpack(\"I\", f.read(4))[0]\n q = Quaternion(np.array(struct.unpack(\"dddd\", f.read(32))))\n t = np.array(struct.unpack(\"ddd\", f.read(24)))\n camera_id = struct.unpack(\"I\", f.read(4))[0]\n name = b\"\".join(c for c in iter(lambda: f.read(1), b\"\\x00\")).decode()\n\n image = Image(name, camera_id, q, t)\n\n num_points2D = struct.unpack(\"L\", f.read(8))[0]\n\n image.points2D = np.empty((num_points2D, 2))\n image.point3D_ids = np.empty(num_points2D, dtype=np.uint64)\n for j in range(num_points2D):\n image.points2D[j] = np.array(struct.unpack(\"dd\", f.read(16)))\n image.point3D_ids[j] = np.array(struct.unpack(\"Q\", f.read(8)))\n\n self.images[image_id] = image\n self.name_to_image_id[image.name] = image_id\n\n self.last_image_id = max(self.last_image_id, image_id)\n\n def _load_images_txt(self, input_file):\n self.images = OrderedDict()\n\n with open(input_file, \"r\") as f:\n is_camera_description_line = False\n\n for line in iter(lambda: f.readline().strip(), \"\"):\n if not line or line.startswith(\"#\"):\n continue\n\n is_camera_description_line = not is_camera_description_line\n\n data = line.split()\n\n if is_camera_description_line:\n image_id = int(data[0])\n image = Image(\n data[-1],\n int(data[-2]),\n Quaternion(np.array(list(map(float, data[1:5])))),\n np.array(list(map(float, data[5:8]))),\n )\n else:\n image.points2D = np.array(\n [list(map(float, data[::3])), list(map(float, data[1::3]))]\n ).T\n image.point3D_ids = np.array(list(map(np.uint64, data[2::3])))\n\n # automatically remove points without an associated 3D point\n # mask = (image.point3D_ids != SceneManager.INVALID_POINT3D)\n # image.points2D = image.points2D[mask]\n # image.point3D_ids = image.point3D_ids[mask]\n\n self.images[image_id] = image\n self.name_to_image_id[image.name] = image_id\n\n self.last_image_id = max(self.last_image_id, image_id)\n\n # ---------------------------------------------------------------------------\n\n def load_points3D(self, input_file=None):\n if input_file is None:\n input_file = self.folder + \"points3D.bin\"\n if os.path.exists(input_file):\n self._load_points3D_bin(input_file)\n else:\n input_file = self.folder + \"points3D.txt\"\n if os.path.exists(input_file):\n self._load_points3D_txt(input_file)\n else:\n raise IOError(\"no points3D file found\")\n\n def _load_points3D_bin(self, input_file):\n with open(input_file, \"rb\") as f:\n num_points3D = struct.unpack(\"L\", f.read(8))[0]\n\n self.points3D = np.empty((num_points3D, 3))\n self.point3D_ids = np.empty(num_points3D, dtype=np.uint64)\n self.point3D_colors = np.empty((num_points3D, 3), dtype=np.uint8)\n self.point3D_id_to_point3D_idx = dict()\n self.point3D_id_to_images = dict()\n self.point3D_errors = np.empty(num_points3D)\n\n for i in range(num_points3D):\n self.point3D_ids[i] = struct.unpack(\"L\", f.read(8))[0]\n self.points3D[i] = 
struct.unpack(\"ddd\", f.read(24))\n self.point3D_colors[i] = struct.unpack(\"BBB\", f.read(3))\n self.point3D_errors[i] = struct.unpack(\"d\", f.read(8))[0]\n\n self.point3D_id_to_point3D_idx[self.point3D_ids[i]] = i\n\n # load (image id, point2D idx) pairs\n track_len = struct.unpack(\"L\", f.read(8))[0]\n data = struct.unpack(\"I\" * (2 * track_len), f.read(2 * track_len * 4))\n self.point3D_id_to_images[self.point3D_ids[i]] = np.array(\n data, dtype=np.uint32\n ).reshape(track_len, 2)\n\n def _load_points3D_txt(self, input_file):\n self.points3D = []\n self.point3D_ids = []\n self.point3D_colors = []\n self.point3D_id_to_point3D_idx = dict()\n self.point3D_id_to_images = dict()\n self.point3D_errors = []\n\n with open(input_file, \"r\") as f:\n for line in iter(lambda: f.readline().strip(), \"\"):\n if not line or line.startswith(\"#\"):\n continue\n\n data = line.split()\n point3D_id = np.uint64(data[0])\n\n self.point3D_ids.append(point3D_id)\n self.point3D_id_to_point3D_idx[point3D_id] = len(self.points3D)\n self.points3D.append(list(map(np.float64, data[1:4])))\n self.point3D_colors.append(list(map(np.uint8, data[4:7])))\n self.point3D_errors.append(np.float64(data[7]))\n\n # load (image id, point2D idx) pairs\n self.point3D_id_to_images[point3D_id] = np.array(\n list(map(np.uint32, data[8:]))\n ).reshape(-1, 2)\n\n self.points3D = np.array(self.points3D)\n self.point3D_ids = np.array(self.point3D_ids)\n self.point3D_colors = np.array(self.point3D_colors)\n self.point3D_errors = np.array(self.point3D_errors)\n\n # ---------------------------------------------------------------------------\n\n def save(self, output_folder, binary=True):\n self.save_cameras(output_folder, binary=binary)\n self.save_images(output_folder, binary=binary)\n self.save_points3D(output_folder, binary=binary)\n\n # ---------------------------------------------------------------------------\n\n def save_cameras(self, output_folder, output_file=None, binary=True):\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n if output_file is None:\n output_file = \"cameras.bin\" if binary else \"cameras.txt\"\n\n output_file = os.path.join(output_folder, output_file)\n\n if binary:\n self._save_cameras_bin(output_file)\n else:\n self._save_cameras_txt(output_file)\n\n def _save_cameras_bin(self, output_file):\n with open(output_file, \"wb\") as fid:\n fid.write(struct.pack(\"L\", len(self.cameras)))\n\n camera_struct = struct.Struct(\"IiLL\")\n\n for camera_id, camera in sorted(self.cameras.items()):\n fid.write(\n camera_struct.pack(\n camera_id, camera.camera_type, camera.width, camera.height\n )\n )\n # TODO (True): should move this into the Camera class\n fid.write(camera.get_params().tobytes())\n\n def _save_cameras_txt(self, output_file):\n with open(output_file, \"w\") as fid:\n print(\"# Camera list with one line of data per camera:\", file=fid)\n print(\"# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\", file=fid)\n print(\"# Number of cameras:\", len(self.cameras), file=fid)\n\n for camera_id, camera in sorted(self.cameras.items()):\n print(camera_id, camera, file=fid)\n\n # ---------------------------------------------------------------------------\n\n def save_images(self, output_folder, output_file=None, binary=True):\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n if output_file is None:\n output_file = \"images.bin\" if binary else \"images.txt\"\n\n output_file = os.path.join(output_folder, output_file)\n\n if binary:\n self._save_images_bin(output_file)\n 
else:\n self._save_images_txt(output_file)\n\n def _save_images_bin(self, output_file):\n with open(output_file, \"wb\") as fid:\n fid.write(struct.pack(\"L\", len(self.images)))\n\n for image_id, image in self.images.items():\n fid.write(struct.pack(\"I\", image_id))\n fid.write(image.q.q.tobytes())\n fid.write(image.tvec.tobytes())\n fid.write(struct.pack(\"I\", image.camera_id))\n fid.write(image.name + \"\\0\")\n fid.write(struct.pack(\"L\", len(image.points2D)))\n data = np.rec.fromarrays(\n (image.points2D[:, 0], image.points2D[:, 1], image.point3D_ids)\n )\n fid.write(data.tobytes())\n\n def _save_images_txt(self, output_file):\n with open(output_file, \"w\") as fid:\n print(\"# Image list with two lines of data per image:\", file=fid)\n print(\"# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\", file=fid)\n print(\"# POINTS2D[] as (X, Y, POINT3D_ID)\", file=fid)\n print(\"# Number of images: {},\".format(len(self.images)), file=fid)\n print(\"# mean observations per image: unknown\", file=fid)\n\n for image_id, image in self.images.items():\n print(image_id, file=fid)\n print(\" \".join(str(qi) for qi in image.q.q), file=fid)\n print(\" \".join(str(ti) for ti in image.tvec), file=fid)\n print(image.camera_id, image.name, file=fid)\n\n data = np.rec.fromarrays(\n (\n image.points2D[:, 0],\n image.points2D[:, 1],\n image.point3D_ids.astype(np.int64),\n )\n )\n if len(data) > 0:\n np.savetxt(fid, data, \"%.2f %.2f %d\", newline=\" \")\n fid.seek(-1, os.SEEK_CUR)\n fid.write(\"\\n\")\n\n # ---------------------------------------------------------------------------\n\n def save_points3D(self, output_folder, output_file=None, binary=True):\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n if output_file is None:\n output_file = \"points3D.bin\" if binary else \"points3D.txt\"\n\n output_file = os.path.join(output_folder, output_file)\n\n if binary:\n self._save_points3D_bin(output_file)\n else:\n self._save_points3D_txt(output_file)\n\n def _save_points3D_bin(self, output_file):\n num_valid_points3D = sum(\n 1\n for point3D_idx in self.point3D_id_to_point3D_idx.values()\n if point3D_idx != SceneManager.INVALID_POINT3D\n )\n\n iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()\n\n with open(output_file, \"wb\") as fid:\n fid.write(struct.pack(\"L\", num_valid_points3D))\n\n for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:\n if point3D_idx == SceneManager.INVALID_POINT3D:\n continue\n\n fid.write(struct.pack(\"L\", point3D_id))\n fid.write(self.points3D[point3D_idx].tobytes())\n fid.write(self.point3D_colors[point3D_idx].tobytes())\n fid.write(self.point3D_errors[point3D_idx].tobytes())\n fid.write(struct.pack(\"L\", len(self.point3D_id_to_images[point3D_id])))\n fid.write(self.point3D_id_to_images[point3D_id].tobytes())\n\n def _save_points3D_txt(self, output_file):\n num_valid_points3D = sum(\n 1\n for point3D_idx in self.point3D_id_to_point3D_idx.values()\n if point3D_idx != SceneManager.INVALID_POINT3D\n )\n\n array_to_string = lambda arr: \" \".join(str(x) for x in arr)\n\n iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()\n\n with open(output_file, \"w\") as fid:\n print(\"# 3D point list with one line of data per point:\", file=fid)\n print(\"# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as \", file=fid)\n print(\"# (IMAGE_ID, POINT2D_IDX)\", file=fid)\n print(\"# Number of points: {},\".format(num_valid_points3D), file=fid)\n print(\"# mean track length: unknown\", file=fid)\n\n for point3D_id, 
point3D_idx in iter_point3D_id_to_point3D_idx:\n if point3D_idx == SceneManager.INVALID_POINT3D:\n continue\n\n print(point3D_id, file=fid)\n print(array_to_string(self.points3D[point3D_idx]), file=fid)\n print(array_to_string(self.point3D_colors[point3D_idx]), file=fid)\n print(self.point3D_errors[point3D_idx], file=fid)\n print(\n array_to_string(self.point3D_id_to_images[point3D_id].flat),\n file=fid,\n )\n\n # ---------------------------------------------------------------------------\n\n # return the image id associated with a given image file\n def get_image_from_name(self, image_name):\n image_id = self.name_to_image_id[image_name]\n return image_id, self.images[image_id]\n\n # ---------------------------------------------------------------------------\n\n def get_camera(self, camera_id):\n return self.cameras[camera_id]\n\n # ---------------------------------------------------------------------------\n\n def get_points3D(self, image_id, return_points2D=True, return_colors=False):\n image = self.images[image_id]\n\n mask = image.point3D_ids != SceneManager.INVALID_POINT3D\n\n point3D_idxs = np.array(\n [\n self.point3D_id_to_point3D_idx[point3D_id]\n for point3D_id in image.point3D_ids[mask]\n ]\n )\n # detect filtered points\n filter_mask = point3D_idxs != SceneManager.INVALID_POINT3D\n point3D_idxs = point3D_idxs[filter_mask]\n result = [self.points3D[point3D_idxs, :]]\n\n if return_points2D:\n mask[mask] &= filter_mask\n result += [image.points2D[mask]]\n if return_colors:\n result += [self.point3D_colors[point3D_idxs, :]]\n\n return result if len(result) > 1 else result[0]\n\n # ---------------------------------------------------------------------------\n\n def point3D_valid(self, point3D_id):\n return (\n self.point3D_id_to_point3D_idx[point3D_id] != SceneManager.INVALID_POINT3D\n )\n\n # ---------------------------------------------------------------------------\n\n def get_filtered_points3D(self, return_colors=False):\n point3D_idxs = [\n idx\n for idx in self.point3D_id_to_point3D_idx.values()\n if idx != SceneManager.INVALID_POINT3D\n ]\n result = [self.points3D[point3D_idxs, :]]\n\n if return_colors:\n result += [self.point3D_colors[point3D_idxs, :]]\n\n return result if len(result) > 1 else result[0]\n\n # ---------------------------------------------------------------------------\n\n # return 3D points shared by two images\n def get_shared_points3D(self, image_id1, image_id2):\n point3D_ids = set(self.images[image_id1].point3D_ids) & set(\n self.images[image_id2].point3D_ids\n )\n point3D_ids.discard(SceneManager.INVALID_POINT3D)\n\n point3D_idxs = np.array(\n [self.point3D_id_to_point3D_idx[point3D_id] for point3D_id in point3D_ids]\n )\n\n return self.points3D[point3D_idxs, :]\n\n # ---------------------------------------------------------------------------\n\n # project *all* 3D points into image, return their projection coordinates,\n # as well as their 3D positions\n def get_viewed_points(self, image_id):\n image = self.images[image_id]\n\n # get unfiltered points\n point3D_idxs = set(self.point3D_id_to_point3D_idx.values())\n point3D_idxs.discard(SceneManager.INVALID_POINT3D)\n point3D_idxs = list(point3D_idxs)\n points3D = self.points3D[point3D_idxs, :]\n\n # orient points relative to camera\n R = image.q.ToR()\n points3D = points3D.dot(R.T) + image.tvec[np.newaxis, :]\n points3D = points3D[points3D[:, 2] > 0, :] # keep points with positive z\n\n # put points into image coordinates\n camera = self.cameras[image.camera_id]\n points2D = 
points3D.dot(camera.get_camera_matrix().T)\n points2D = points2D[:, :2] / points2D[:, 2][:, np.newaxis]\n\n # keep points that are within the image\n mask = (\n (points2D[:, 0] >= 0)\n & (points2D[:, 1] >= 0)\n & (points2D[:, 0] < camera.width - 1)\n & (points2D[:, 1] < camera.height - 1)\n )\n\n return points2D[mask, :], points3D[mask, :]\n\n # ---------------------------------------------------------------------------\n\n def add_camera(self, camera):\n self.last_camera_id += 1\n self.cameras[self.last_camera_id] = camera\n return self.last_camera_id\n\n # ---------------------------------------------------------------------------\n\n def add_image(self, image):\n self.last_image_id += 1\n self.images[self.last_image_id] = image\n return self.last_image_id\n\n # ---------------------------------------------------------------------------\n\n def delete_images(self, image_list):\n # delete specified images\n for image_id in image_list:\n if image_id in self.images:\n del self.images[image_id]\n\n keep_set = set(self.images.iterkeys())\n\n # delete references to specified images, and ignore any points that are\n # invalidated\n iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()\n\n for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:\n if point3D_idx == SceneManager.INVALID_POINT3D:\n continue\n\n mask = np.array(\n [\n image_id in keep_set\n for image_id in self.point3D_id_to_images[point3D_id][:, 0]\n ]\n )\n if np.any(mask):\n self.point3D_id_to_images[point3D_id] = self.point3D_id_to_images[\n point3D_id\n ][mask]\n else:\n self.point3D_id_to_point3D_idx[\n point3D_id\n ] = SceneManager.INVALID_POINT3D\n\n # ---------------------------------------------------------------------------\n\n # camera_list: set of cameras whose points we'd like to keep\n # min/max triangulation angle: in degrees\n def filter_points3D(\n self,\n min_track_len=0,\n max_error=np.inf,\n min_tri_angle=0,\n max_tri_angle=180,\n image_set=set(),\n ):\n\n image_set = set(image_set)\n\n check_triangulation_angles = min_tri_angle > 0 or max_tri_angle < 180\n if check_triangulation_angles:\n max_tri_prod = np.cos(np.radians(min_tri_angle))\n min_tri_prod = np.cos(np.radians(max_tri_angle))\n\n iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()\n\n image_ids = []\n\n for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:\n if point3D_idx == SceneManager.INVALID_POINT3D:\n continue\n\n if image_set or min_track_len > 0:\n image_ids = set(self.point3D_id_to_images[point3D_id][:, 0])\n\n # check if error and min track length are sufficient, or if none of\n # the selected cameras see the point\n if (\n len(image_ids) < min_track_len\n or self.point3D_errors[point3D_idx] > max_error\n or image_set\n and image_set.isdisjoint(image_ids)\n ):\n self.point3D_id_to_point3D_idx[\n point3D_id\n ] = SceneManager.INVALID_POINT3D\n\n # find dot product between all camera viewing rays\n elif check_triangulation_angles:\n xyz = self.points3D[point3D_idx, :]\n tvecs = np.array(\n [(self.images[image_id].tvec - xyz) for image_id in image_ids]\n )\n tvecs /= np.linalg.norm(tvecs, axis=-1)[:, np.newaxis]\n\n cos_theta = np.array([u.dot(v) for u, v in combinations(tvecs, 2)])\n\n # min_prod = cos(maximum viewing angle), and vice versa\n # if maximum viewing angle is too small or too large,\n # don't add this point\n if np.min(cos_theta) > max_tri_prod or np.max(cos_theta) < min_tri_prod:\n self.point3D_id_to_point3D_idx[\n point3D_id\n ] = SceneManager.INVALID_POINT3D\n\n # apply the 
filters to the image point3D_ids\n for image in self.images.values():\n mask = np.array(\n [\n self.point3D_id_to_point3D_idx.get(point3D_id, 0)\n == SceneManager.INVALID_POINT3D\n for point3D_id in image.point3D_ids\n ]\n )\n image.point3D_ids[mask] = SceneManager.INVALID_POINT3D\n\n # ---------------------------------------------------------------------------\n\n # scene graph: {image_id: [image_id: #shared points]}\n def build_scene_graph(self):\n self.scene_graph = defaultdict(lambda: defaultdict(int))\n point3D_iter = self.point3D_id_to_images.items()\n\n for i, (point3D_id, images) in enumerate(point3D_iter):\n if not self.point3D_valid(point3D_id):\n continue\n\n for image_id1, image_id2 in combinations(images[:, 0], 2):\n self.scene_graph[image_id1][image_id2] += 1\n self.scene_graph[image_id2][image_id1] += 1\n"
] | [
[
"numpy.empty",
"numpy.zeros",
"numpy.savetxt",
"numpy.any",
"numpy.uint64",
"numpy.rec.fromarrays",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.float64",
"numpy.linalg.norm",
"numpy.radians"
]
] |
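A note on `_load_cameras_bin` in the scene_manager.py source above: it reads the `cameras.bin` header with `struct.unpack("L", f.read(8))`, which only works where the native `unsigned long` is 8 bytes (LP64 Linux/macOS); on Windows `"L"` packs to 4 bytes and the reads would misalign. A minimal, more portable sketch of the same record layout, using explicit little-endian format codes, might look like the following (`read_cameras_bin` and the partial `NUM_PARAMS` table are illustrative stand-ins, not part of the original file):

import struct

# Hypothetical subset of COLMAP's camera-model table (model_id -> #params).
NUM_PARAMS = {0: 3, 1: 4, 2: 4, 3: 5}  # SIMPLE_PINHOLE, PINHOLE, SIMPLE_RADIAL, RADIAL

def read_cameras_bin(path):
    cameras = {}
    with open(path, "rb") as f:
        (num_cameras,) = struct.unpack("<Q", f.read(8))        # uint64 camera count
        for _ in range(num_cameras):
            camera_id, model_id = struct.unpack("<Ii", f.read(8))
            width, height = struct.unpack("<QQ", f.read(16))
            n = NUM_PARAMS[model_id]
            params = struct.unpack("<" + "d" * n, f.read(8 * n))
            cameras[camera_id] = (model_id, width, height, params)
    return cameras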
bjnortier/ai-experiments-1 | [
"aff4496d84b059af6096f8f6b51d0ebcf6ed5c37"
] | [
"conveyor_2.py"
] | [
"import os\nimport glob\nfrom pathlib import Path\nimport numpy as np\nimport random\nimport carb\nfrom PIL import Image\nfrom tensorflow import keras\nfrom pxr import Usd, UsdGeom, Gf, UsdPhysics\nimport omni.kit\nfrom omni.isaac.examples.base_sample import BaseSample\nfrom omni.isaac.core.objects import DynamicCuboid\nfrom omni.isaac.core.utils.prims import create_prim, delete_prim\nfrom omni.usd import get_context\nfrom omni.kit.viewport import get_viewport_interface\nfrom omni.isaac.core.prims.xform_prim import XFormPrim\nfrom omni.isaac.core.materials import PreviewSurface\nfrom omni.isaac.core.utils.rotations import euler_angles_to_quat\nfrom omni.syntheticdata import sensors\nimport omni.syntheticdata._syntheticdata as sd\n\n\ndef setColliderSubtree(prim, approximationShape=\"none\", execute_command_fn=None):\n pit = iter(Usd.PrimRange(prim))\n for p in pit:\n if p.GetMetadata(\"hide_in_stage_window\"):\n pit.PruneChildren()\n continue\n if p.IsA(UsdGeom.Gprim) or p.IsInstanceable():\n if len(p.GetAttribute(\"faceVertexIndices\").Get()) > 0:\n omni.physx.scripts.utils.setCollider(p, approximationShape, execute_command_fn)\n\n\ndef setRigidBody(prim, approximationShape, kinematic, custom_execute_fn=None):\n omni.physx.scripts.utils.setPhysics(prim, kinematic, custom_execute_fn)\n\n if prim.IsA(UsdGeom.Xformable):\n setColliderSubtree(prim, approximationShape, custom_execute_fn)\n else:\n omni.physx.scripts.utils.setCollider(prim, approximationShape, custom_execute_fn)\n\n\ndef create_light():\n create_prim(\n \"/World/SphereLight\",\n \"SphereLight\",\n position=np.array([0, 500, 500]),\n attributes={\n \"radius\": 150,\n \"intensity\": 5e4\n }\n )\n\n\ndef create_classification_camera():\n create_prim(\n \"/World/ClassificationCamera\",\n \"Camera\",\n orientation=np.array([0.33, 0.197, 0.464, 0.794]),\n position=np.array([151, 250, 135])\n )\n\n\ndef find_usd_assets(shapenet_dir, categories, max_asset_size=50):\n \"\"\"Look for USD files under root/category for each category specified.\n For each category, generate a list of all USD files found and select\n assets up to split * len(num_assets) if `train=True`, otherwise select the\n remainder.\n \"\"\"\n from omni.isaac.shapenet.utils import LABEL_TO_SYNSET\n\n references = {}\n for category in categories:\n category_id = LABEL_TO_SYNSET[category]\n all_assets = glob.glob(\n os.path.join(shapenet_dir, category_id, \"*/*.usd\"),\n recursive=True)\n if max_asset_size is None:\n assets_filtered = all_assets\n else:\n assets_filtered = []\n for a in all_assets:\n if os.stat(a).st_size > max_asset_size * 1e6:\n carb.log_warn(\n f\"{a} skipped as it exceeded the max \\\n size {max_asset_size} MB.\")\n else:\n assets_filtered.append(a)\n num_assets = len(assets_filtered)\n if num_assets == 0:\n raise ValueError(\n f\"No USDs found for category {category} \\\n under max size {max_asset_size} MB.\")\n\n references[category] = assets_filtered\n\n return references\n\n\ndef create_conveyor_anchor(plate_size):\n size = 5\n conveyor_anchor = create_prim(\n \"/World/Conveyor/Anchor\",\n \"Cube\",\n position=np.array([0.0, -plate_size/2 - size, 0.0]),\n scale=np.array([plate_size / 2, size, size]))\n conveyor_anchor.GetAttribute(\"visibility\").Set(\"invisible\")\n return conveyor_anchor\n\n\ndef create_conveyor_plate(stage, size, index):\n plate_path = f\"/World/Conveyor/Plates/Plate{index + 1}\"\n plate = DynamicCuboid(\n prim_path=plate_path,\n position=np.array([0, index * 100, 0.0]),\n size=np.array([size - 5, size - 5, 10.0]),\n 
color=np.array([0.28, 0.65, 1.0])\n )\n\n # prismatic joint\n joint_path = f\"/World/Conveyor/Joints/PrismaticJoint{index + 1}\"\n prismatic_joint = UsdPhysics.PrismaticJoint.Define(stage, joint_path)\n prismatic_joint.CreateAxisAttr(\"Y\")\n prismatic_joint.CreateBody0Rel().SetTargets([\"/World/Conveyor/Anchor\"])\n prismatic_joint.CreateBody1Rel().SetTargets([plate_path])\n prismatic_joint.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 1.0, 0.0))\n prismatic_joint.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, -0.5, 0.0))\n\n # add linear drive\n driver = UsdPhysics.DriveAPI.Apply(\n prismatic_joint.GetPrim(),\n \"linear\")\n driver.CreateTypeAttr(\"force\")\n driver.CreateMaxForceAttr(1000)\n driver.CreateTargetVelocityAttr(200.0)\n driver.CreateDampingAttr(1e10)\n driver.CreateStiffnessAttr(0)\n return plate\n\n\ndef create_pusher(stage, plate_size, index):\n actuator_path = f\"/World/Pushers/Actuators/Actuator{index + 1}\"\n anchor_path = f\"/World/Pushers/Anchors/Anchor{index + 1}\"\n depth = 10\n \n anchor = create_prim(\n anchor_path,\n \"Cube\",\n position=np.array([\n -plate_size/2 - depth - 5,\n (index + 2) * plate_size * 2,\n 20.0]),\n scale=np.array([5, 5, 5]))\n anchor.GetAttribute(\"visibility\").Set(\"invisible\")\n\n pusher = DynamicCuboid(\n prim_path=actuator_path,\n position=np.array([\n -plate_size/2 - 5,\n (index + 2) * plate_size * 2,\n 20.0]),\n size=np.array([depth, plate_size * 2, 30]),\n color=np.array([0.1, 0.1, 0.5])\n )\n\n mass_api = UsdPhysics.MassAPI.Apply(pusher.prim)\n mass_api.CreateMassAttr(1)\n\n # Prismatic joint \n joint_path = f\"/World/Pushers/Joints/Joint{index + 1}\"\n joint = UsdPhysics.PrismaticJoint.Define(stage, joint_path)\n joint.CreateAxisAttr(\"X\")\n joint.CreateBody0Rel().SetTargets([anchor_path])\n joint.CreateBody1Rel().SetTargets([actuator_path])\n joint.CreateLocalPos0Attr().Set(Gf.Vec3f(1.0, 0.0, 0.0))\n joint.CreateLocalPos1Attr().Set(Gf.Vec3f(-0.5, 0.0, 0.0))\n\n # Linear drive. No position target is set, only activated when needed. 
\n driver = UsdPhysics.DriveAPI.Apply(joint.GetPrim(), \"linear\")\n driver.CreateTypeAttr(\"force\")\n driver.CreateMaxForceAttr(1000)\n driver.CreateDampingAttr(2e4)\n driver.CreateStiffnessAttr(1e5)\n\n return driver\n\n\ndef create_bucket(stage, plate_size, index):\n bucket_path = f\"/World/Buckets/Bucket{index + 1}\"\n\n width = plate_size * 2\n depth = width\n height = 20\n a = create_prim(\n f\"{bucket_path}/a\",\n \"Cube\",\n position=np.array([\n plate_size/2 + depth/2 - 10,\n (index + 2) * 2 * plate_size - width / 2,\n -height - 5\n ]),\n scale=np.array([depth/2, 5, height]),\n attributes={\n \"primvars:displayColor\": [(1.0, 1.0, 1.0)]\n }\n )\n b = create_prim(\n f\"{bucket_path}/b\",\n \"Cube\",\n position=np.array([\n plate_size/2 + depth/2 - 10,\n (index + 2) * 2 * plate_size + width / 2,\n -height - 5\n ]),\n scale=np.array([depth/2, 5, height]),\n attributes={\n \"primvars:displayColor\": [(1.0, 1.0, 1.0)]\n }\n )\n c = create_prim(\n f\"{bucket_path}/c\",\n \"Cube\",\n position=np.array([\n plate_size/2 + 5 - 10,\n (index + 2) * 2 * plate_size,\n -height - 5\n ]),\n scale=np.array([5, width/2 - 5, height]),\n attributes={\n \"primvars:displayColor\": [(1.0, 1.0, 1.0)]\n }\n )\n d = create_prim(\n f\"{bucket_path}/d\",\n \"Cube\",\n position=np.array([\n plate_size/2 + depth - 5 - 10,\n (index + 2) * 2 * plate_size,\n -height - 5\n ]),\n scale=np.array([5, width/2 - 5, height]),\n attributes={\n \"primvars:displayColor\": [(1.0, 1.0, 1.0)]\n }\n )\n UsdPhysics.CollisionAPI.Apply(a)\n UsdPhysics.CollisionAPI.Apply(b)\n UsdPhysics.CollisionAPI.Apply(c)\n UsdPhysics.CollisionAPI.Apply(d)\n \n\nclass Conveyor2(BaseSample):\n def __init__(self) -> None:\n super().__init__()\n return\n\n def setup_scene(self):\n world = self.get_world()\n\n self.model = keras.models.load_model(\"/home/bjnortier/isaac/sorting/save_at_30-augmented-3.h5\")\n self.categories = [\n \"bus\", \"car\", \"plane\", \"rocket\", \"watercraft\"\n ]\n shapenet_dir = Path(os.environ[\"SHAPENET_LOCAL_DIR\"])\n self.asset_references = find_usd_assets(\n f\"{shapenet_dir}_nomat\",\n self.categories)\n\n self.num_classes = len(self.categories) \n self.num_plates = self.num_classes * 2 + 4\n \n plate_size = 100.0\n self.max_plate_position = plate_size * self.num_plates\n self.widget_index = 0\n self.plate_reset_count = 0\n\n stage = get_context().get_stage()\n world.scene.add_ground_plane(z_position=-45.0)\n create_light()\n create_classification_camera()\n create_conveyor_anchor(plate_size)\n \n self.plates = []\n for i in range(self.num_plates):\n self.plates.append(create_conveyor_plate(stage, plate_size, i))\n\n self.pushers = []\n for i in range(self.num_classes):\n self.pushers.append(create_pusher(stage, plate_size, i))\n\n for i in range(self.num_classes):\n create_bucket(stage, plate_size, i)\n\n viewport_interface = get_viewport_interface()\n viewport_handle = viewport_interface.create_instance()\n vp = viewport_interface.get_viewport_window(viewport_handle)\n vp.set_active_camera(\"/World/ClassificationCamera\")\n vp.set_texture_resolution(299, 299)\n self.classification_viewport = vp\n\n self.sd_interface = sd.acquire_syntheticdata_interface()\n self.is_sensor_initialized = False\n\n # # Create the first widget\n self.drop_widget(y_position=100.0)\n\n return\n\n def drop_widget(self, y_position=0.0):\n category = random.choice(self.categories)\n asset_reference = random.choice(self.asset_references[category])\n widget_path = f\"/World/widget_{self.widget_index}\"\n widget_prim = create_prim(\n 
widget_path,\n \"Xform\",\n scale=np.array([50.0, 50.0, 50.0]),\n orientation=euler_angles_to_quat(\n np.array([90.0, 0.0, 0.0]),\n degrees=True),\n position=np.array([0.0, y_position, 50.0]),\n usd_path=asset_reference,\n semantic_label=category)\n self.current_widget_category = category\n\n widget = XFormPrim(widget_path)\n material = PreviewSurface(\n prim_path=\"/World/Looks/ShapeMaterial\",\n color=np.array([0.1, 0.6, 0.1]))\n widget.apply_visual_material(material)\n\n # Determine bounds and translate to sit on the Z=0 plane\n orientation_on_plane = euler_angles_to_quat(\n np.array([90.0, 0.0, 0.0]),\n degrees=True)\n widget.set_local_pose(\n np.array([0.0, 0.0, 0.0]),\n orientation_on_plane)\n bounds = UsdGeom.Mesh(widget_prim).ComputeWorldBound(0.0, \"default\")\n new_position = np.array([0.0, 0.0, -bounds.GetBox().GetMin()[2] + 5.0])\n widget.set_local_pose(new_position)\n\n mass_api = UsdPhysics.MassAPI.Apply(widget_prim)\n mass_api.CreateMassAttr(1)\n\n setRigidBody(widget_prim, \"convexHull\", False) \n\n self.widget = widget\n self.widget_index += 1\n self.widget_class = None\n self.classification_requested = False\n self.classification_complete = False\n self.arm_activated = False\n for pusher in self.pushers:\n pusher.CreateTargetPositionAttr(0.0)\n\n async def setup_post_load(self):\n self._world = self.get_world()\n self._world.add_physics_callback(\"sim_step\", callback_fn=self.sim_step_callback) \n return\n\n def sim_step_callback(self, step_size):\n if not self.is_sensor_initialized:\n print(\"Waiting for sensor to initialize\")\n sensor = sensors.create_or_retrieve_sensor(\n self.classification_viewport, sd.SensorType.Rgb)\n self.is_sensor_initialized = \\\n self.sd_interface.is_sensor_initialized(sensor)\n if self.is_sensor_initialized:\n print(\"Sensor initialized!\")\n\n for plate in self.plates:\n # When a plate reaches the end ov the conveyour belt,\n # reset it's position to the start. Drop a widget if it's\n # the first plate\n plate_position, _ = plate.get_world_pose()\n if plate_position[1] > self.max_plate_position:\n plate_position[1] -= self.max_plate_position\n plate.set_world_pose(plate_position)\n self.plate_reset_count += 1\n if self.plate_reset_count == self.num_plates:\n self.plate_reset_count = 0\n self.drop_widget()\n\n # Classify the widget when it passes under the camera\n if not self.classification_requested:\n widget_position, _ = self.widget.get_world_pose()\n if widget_position[1] > 100:\n self.capture_gt()\n self.classification_requested = True\n\n if self.classification_complete and not self.arm_activated:\n widget_position, _ = self.widget.get_world_pose()\n if widget_position[1] > (self.widget_class + 1) * 200 + 100:\n self.arm_activated = True \n self.pushers[self.widget_class].CreateTargetPositionAttr(120.0)\n\n def capture_gt(self):\n rgb = sensors.get_rgb(self.classification_viewport)\n # Discard alpha channel\n rgb = rgb[:, :, :3]\n input = np.expand_dims(rgb, axis=0)\n prediction = self.model.predict(input)\n self.widget_class = np.argmax(prediction)\n \n print(f\"actual:predicted {self.current_widget_category}:{self.categories[self.widget_class]}\")\n image = Image.fromarray(rgb)\n image.save(\"/tmp/rgb.png\")\n self.classification_complete = True\n \n async def setup_pre_reset(self):\n return\n\n async def setup_post_reset(self):\n return\n\n def world_cleanup(self):\n return\n\n"
] | [
[
"numpy.array",
"numpy.expand_dims",
"numpy.argmax",
"tensorflow.keras.models.load_model"
]
] |
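The classification step in conveyor_2.py above (`capture_gt`) reduces to three array operations: drop the alpha channel from the RGBA render, add a batch axis with `np.expand_dims`, and map the network output to a category index with `np.argmax`. A self-contained sketch of just that post-processing, with `fake_predict` and the zero image standing in for the Keras model and the Isaac Sim sensor (neither is reproduced here):

import numpy as np

CATEGORIES = ["bus", "car", "plane", "rocket", "watercraft"]

def fake_predict(batch):
    # Placeholder for self.model.predict(batch): one score per category.
    rng = np.random.default_rng(0)
    return rng.random((batch.shape[0], len(CATEGORIES)))

rgba = np.zeros((299, 299, 4), dtype=np.uint8)   # stand-in for sensors.get_rgb(...)
rgb = rgba[:, :, :3]                             # discard alpha channel
batch = np.expand_dims(rgb, axis=0)              # shape [1, 299, 299, 3]
prediction = fake_predict(batch)
widget_class = int(np.argmax(prediction))
print(CATEGORIES[widget_class])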
vixadd/tensorflow | [
"8c624204eb686a91779149dc500e6c8c60096074",
"8c624204eb686a91779149dc500e6c8c60096074"
] | [
"tensorflow/python/ops/parallel_for/gradients.py",
"tensorflow/python/ops/ragged/ragged_where_op.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Jacobian ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import gradients_impl as gradient_ops\nfrom tensorflow.python.ops.parallel_for import control_flow_ops\nfrom tensorflow.python.util import nest\n\n\ndef jacobian(output, inputs, use_pfor=True, parallel_iterations=None):\n \"\"\"Computes jacobian of `output` w.r.t. `inputs`.\n\n Args:\n output: A tensor.\n inputs: A tensor or a nested structure of tensor objects.\n use_pfor: If true, uses pfor for computing the jacobian. Else uses\n tf.while_loop.\n parallel_iterations: A knob to control how many iterations and dispatched in\n parallel. This knob can be used to control the total memory usage.\n\n Returns:\n A tensor or a nested structure of tensors with the same structure as\n `inputs`. Each entry is the jacobian of `output` w.r.t. to the corresponding\n value in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has\n shape [x_1, ..., x_m], the corresponding jacobian has shape\n [y_1, ..., y_n, x_1, ..., x_m]. Note that in cases where the gradient is\n sparse (IndexedSlices), jacobian function currently makes it dense and\n returns a Tensor instead. This may change in the future.\n \"\"\"\n flat_inputs = nest.flatten(inputs)\n output_tensor_shape = output.shape\n output_shape = array_ops.shape(output)\n output = array_ops.reshape(output, [-1])\n\n def loop_fn(i):\n y = array_ops.gather(output, i)\n return gradient_ops.gradients(y, flat_inputs)\n\n try:\n output_size = int(output.shape[0])\n except TypeError:\n output_size = array_ops.shape(output)[0]\n\n if use_pfor:\n pfor_outputs = control_flow_ops.pfor(\n loop_fn, output_size, parallel_iterations=parallel_iterations)\n else:\n pfor_outputs = control_flow_ops.for_loop(\n loop_fn,\n [output.dtype] * len(flat_inputs),\n output_size,\n parallel_iterations=parallel_iterations)\n\n for i, out in enumerate(pfor_outputs):\n if isinstance(out, ops.Tensor):\n new_shape = array_ops.concat(\n [output_shape, array_ops.shape(out)[1:]], axis=0)\n out = array_ops.reshape(out, new_shape)\n out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape))\n pfor_outputs[i] = out\n\n return nest.pack_sequence_as(inputs, pfor_outputs)\n\n\ndef batch_jacobian(output, inp, use_pfor=True, parallel_iterations=None):\n \"\"\"Computes and stacks jacobians of `output[i,...]` w.r.t. `input[i,...]`.\n\n e.g.\n x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)\n y = x * x\n jacobian = batch_jacobian(y, x)\n # => [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]\n\n Args:\n output: A tensor with shape [b, y1, ..., y_n]. 
`output[i,...]` should\n only depend on `inp[i,...]`.\n inp: A tensor with shape [b, x1, ..., x_m]\n use_pfor: If true, uses pfor for computing the Jacobian. Else uses a\n tf.while_loop.\n parallel_iterations: A knob to control how many iterations are vectorized\n and dispatched in parallel. The default value of None, when use_pfor is\n true, corresponds to vectorizing all the iterations. When use_pfor is\n false, the default value of None corresponds to parallel_iterations=10.\n This knob can be used to control the total memory usage.\n\n Returns:\n A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]`\n is the jacobian of `output[i, ...]` w.r.t. `inp[i, ...]`, i.e. stacked\n per-example jacobians.\n\n Raises:\n ValueError: if first dimension of `output` and `inp` do not match.\n \"\"\"\n output_shape = output.shape\n if not output_shape[0].is_compatible_with(inp.shape[0]):\n raise ValueError(f\"Need first dimension of `output` shape ({output.shape}) \"\n f\"and `inp` shape ({inp.shape}) to match.\")\n if output_shape.is_fully_defined():\n batch_size = int(output_shape[0])\n output_row_size = output_shape.num_elements() // batch_size\n else:\n output_shape = array_ops.shape(output)\n batch_size = output_shape[0]\n output_row_size = array_ops.size(output) // batch_size\n inp_shape = array_ops.shape(inp)\n # Flatten output to 2-D.\n with ops.control_dependencies(\n [check_ops.assert_equal(batch_size, inp_shape[0])]):\n output = array_ops.reshape(output, [batch_size, output_row_size])\n\n def loop_fn(i):\n y = array_ops.gather(output, i, axis=1)\n return gradient_ops.gradients(y, inp)[0]\n\n if use_pfor:\n pfor_output = control_flow_ops.pfor(loop_fn, output_row_size,\n parallel_iterations=parallel_iterations)\n else:\n pfor_output = control_flow_ops.for_loop(\n loop_fn, output.dtype,\n output_row_size,\n parallel_iterations=parallel_iterations)\n if pfor_output is None:\n return None\n pfor_output = array_ops.reshape(pfor_output,\n [output_row_size, batch_size, -1])\n output = array_ops.transpose(pfor_output, [1, 0, 2])\n new_shape = array_ops.concat([output_shape, inp_shape[1:]], axis=0)\n return array_ops.reshape(output, new_shape)\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"where operation for RaggedTensors.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport typing\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.ragged import ragged_concat_ops\nfrom tensorflow.python.ops.ragged import ragged_functional_ops\nfrom tensorflow.python.ops.ragged import ragged_gather_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.ops.ragged import ragged_tensor_shape\nfrom tensorflow.python.util import dispatch\n\n\[email protected]_for_api(array_ops.where_v2)\ndef where_v2(condition: ragged_tensor.RaggedOrDense,\n x: typing.Optional[ragged_tensor.RaggedOrDense] = None,\n y: typing.Optional[ragged_tensor.RaggedOrDense] = None,\n name=None):\n \"\"\"Return the elements where `condition` is `True`.\n\n : If both `x` and `y` are None: Retrieve indices of true elements.\n\n Returns the coordinates of true elements of `condition`. The coordinates\n are returned in a 2-D tensor with shape\n `[num_true_values, dim_size(condition)]`, where `result[i]` is the\n coordinates of the `i`th true value (in row-major order).\n\n : If both `x` and `y` are non-`None`: Multiplex between `x` and `y`.\n\n Choose an output shape from the shapes of `condition`, `x`, and `y` that\n all three shapes are broadcastable to; and then use the broadcasted\n `condition` tensor as a mask that chooses whether the corredsponding element\n in the output should be taken from `x` (if `condition` is true) or `y` (if\n `condition` is false).\n\n >>> # Example: retrieve indices of true elements\n >>> tf.where(tf.ragged.constant([[True, False], [True]]))\n <tf.Tensor: shape=(2, 2), dtype=int64, numpy= array([[0, 0], [1, 0]])>\n\n >>> # Example: multiplex between `x` and `y`\n >>> tf.where(tf.ragged.constant([[True, False], [True, False, True]]),\n ... tf.ragged.constant([['A', 'B'], ['C', 'D', 'E']]),\n ... tf.ragged.constant([['a', 'b'], ['c', 'd', 'e']]))\n <tf.RaggedTensor [[b'A', b'b'], [b'C', b'd', b'E']]>\n\n Args:\n condition: A potentially ragged tensor of type `bool`\n x: A potentially ragged tensor (optional).\n y: A potentially ragged tensor (optional). Must be specified if `x` is\n specified. 
Must have the same rank and type as `x`.\n name: A name of the operation (optional).\n\n Returns:\n : If both `x` and `y` are `None`:\n A `Tensor` with shape `(num_true, rank(condition))`.\n : Otherwise:\n A potentially ragged tensor with the same type as `x` and `y`, and whose\n shape is broadcast-compatible with `x`, `y`, and `condition`.\n\n Raises:\n ValueError: When exactly one of `x` or `y` is non-`None`; or when\n `condition`, `x`, and `y` have incompatible shapes.\n \"\"\"\n if (x is None) != (y is None):\n raise ValueError('x and y must be either both None or both non-None')\n\n with ops.name_scope('RaggedWhere', name, [condition, x, y]):\n condition = ragged_tensor.convert_to_tensor_or_ragged_tensor(\n condition, name='condition')\n if x is None:\n return _coordinate_where(condition)\n else:\n x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x')\n y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, name='y')\n condition, x, y = ragged_tensor.match_row_splits_dtypes(condition, x, y)\n return _elementwise_where_v2(condition, x, y)\n\n\[email protected]_for_api(array_ops.where)\ndef where(condition: ragged_tensor.RaggedOrDense,\n x: typing.Optional[ragged_tensor.RaggedOrDense] = None,\n y: typing.Optional[ragged_tensor.RaggedOrDense] = None,\n name=None):\n \"\"\"Return the elements, either from `x` or `y`, depending on the `condition`.\n\n : If both `x` and `y` are `None`:\n Returns the coordinates of true elements of `condition`. The coordinates\n are returned in a 2-D tensor with shape\n `[num_true_values, dim_size(condition)]`, where `result[i]` is the\n coordinates of the `i`th true value (in row-major order).\n\n : If both `x` and `y` are non-`None`:\n Returns a tensor formed by selecting values from `x` where condition is\n true, and from `y` when condition is false. In particular:\n\n : If `condition`, `x`, and `y` all have the same shape:\n\n * `result[i1...iN] = x[i1...iN]` if `condition[i1...iN]` is true.\n * `result[i1...iN] = y[i1...iN]` if `condition[i1...iN]` is false.\n\n : Otherwise:\n\n * `condition` must be a vector.\n * `x` and `y` must have the same number of dimensions.\n * The outermost dimensions of `condition`, `x`, and `y` must all have the\n same size.\n * `result[i] = x[i]` if `condition[i]` is true.\n * `result[i] = y[i]` if `condition[i]` is false.\n\n Args:\n condition: A potentially ragged tensor of type `bool`\n x: A potentially ragged tensor (optional).\n y: A potentially ragged tensor (optional). Must be specified if `x` is\n specified. 
Must have the same rank and type as `x`.\n name: A name of the operation (optional)\n\n Returns:\n : If both `x` and `y` are `None`:\n A `Tensor` with shape `(num_true, dim_size(condition))`.\n : Otherwise:\n A potentially ragged tensor with the same type, rank, and outermost\n dimension size as `x` and `y`.\n `result.ragged_rank = max(x.ragged_rank, y.ragged_rank)`.\n\n Raises:\n ValueError: When exactly one of `x` or `y` is non-`None`; or when\n `condition`, `x`, and `y` have incompatible shapes.\n\n #### Examples:\n\n >>> # Coordinates where condition is true.\n >>> condition = tf.ragged.constant([[True, False, True], [False, True]])\n >>> print(where(condition))\n tf.Tensor( [[0 0] [0 2] [1 1]], shape=(3, 2), dtype=int64)\n\n >>> # Elementwise selection between x and y, based on condition.\n >>> condition = tf.ragged.constant([[True, False, True], [False, True]])\n >>> x = tf.ragged.constant([['A', 'B', 'C'], ['D', 'E']])\n >>> y = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e']])\n >>> print(where(condition, x, y))\n <tf.RaggedTensor [[b'A', b'b', b'C'], [b'd', b'E']]>\n\n >>> # Row selection between x and y, based on condition.\n >>> condition = [True, False]\n >>> x = tf.ragged.constant([['A', 'B', 'C'], ['D', 'E']])\n >>> y = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e']])\n >>> print(where(condition, x, y))\n <tf.RaggedTensor [[b'A', b'B', b'C'], [b'd', b'e']]>\n \"\"\"\n if (x is None) != (y is None):\n raise ValueError('x and y must be either both None or both non-None')\n with ops.name_scope('RaggedWhere', name, [condition, x, y]):\n condition = ragged_tensor.convert_to_tensor_or_ragged_tensor(\n condition, name='condition')\n if x is None:\n return _coordinate_where(condition)\n else:\n x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x')\n y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, name='y')\n condition, x, y = ragged_tensor.match_row_splits_dtypes(condition, x, y)\n return _elementwise_where(condition, x, y)\n\n\ndef _elementwise_where(condition, x, y):\n \"\"\"Ragged version of tf.where(condition, x, y).\"\"\"\n condition_is_ragged = isinstance(condition, ragged_tensor.RaggedTensor)\n x_is_ragged = isinstance(x, ragged_tensor.RaggedTensor)\n y_is_ragged = isinstance(y, ragged_tensor.RaggedTensor)\n\n if not (condition_is_ragged or x_is_ragged or y_is_ragged):\n return array_ops.where(condition, x, y)\n\n elif condition_is_ragged and x_is_ragged and y_is_ragged:\n return ragged_functional_ops.map_flat_values(array_ops.where, condition, x,\n y)\n elif not condition_is_ragged:\n # Concatenate x and y, and then use `gather` to assemble the selected rows.\n condition.shape.assert_has_rank(1)\n x_and_y = ragged_concat_ops.concat([x, y], axis=0)\n x_nrows = _nrows(x, out_type=x_and_y.row_splits.dtype)\n y_nrows = _nrows(y, out_type=x_and_y.row_splits.dtype)\n indices = array_ops.where(condition, math_ops.range(x_nrows),\n x_nrows + math_ops.range(y_nrows))\n return ragged_gather_ops.gather(x_and_y, indices)\n\n else:\n raise ValueError('Input shapes do not match.')\n\n\ndef _elementwise_where_v2(condition, x, y):\n \"\"\"Ragged version of tf.where_v2(condition, x, y).\"\"\"\n # Broadcast x, y, and condition to have the same shape.\n if not (condition.shape.is_fully_defined() and x.shape.is_fully_defined() and\n y.shape.is_fully_defined() and x.shape == y.shape and\n condition.shape == x.shape):\n shape_c = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(\n condition)\n shape_x = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(x)\n 
shape_y = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(y)\n shape = ragged_tensor_shape.broadcast_dynamic_shape(\n shape_c, ragged_tensor_shape.broadcast_dynamic_shape(shape_x, shape_y))\n condition = ragged_tensor_shape.broadcast_to(condition, shape)\n x = ragged_tensor_shape.broadcast_to(x, shape)\n y = ragged_tensor_shape.broadcast_to(y, shape)\n\n condition_is_ragged = isinstance(condition, ragged_tensor.RaggedTensor)\n x_is_ragged = isinstance(x, ragged_tensor.RaggedTensor)\n y_is_ragged = isinstance(y, ragged_tensor.RaggedTensor)\n if not (condition_is_ragged or x_is_ragged or y_is_ragged):\n return array_ops.where_v2(condition, x, y)\n\n return ragged_functional_ops.map_flat_values(array_ops.where_v2, condition, x,\n y)\n\n\ndef _coordinate_where(condition):\n \"\"\"Ragged version of tf.where(condition).\"\"\"\n if not isinstance(condition, ragged_tensor.RaggedTensor):\n return array_ops.where(condition)\n\n # The coordinate for each `true` value in condition.values.\n selected_coords = _coordinate_where(condition.values)\n\n # Convert the first index in each coordinate to a row index and column index.\n condition = condition.with_row_splits_dtype(selected_coords.dtype)\n first_index = selected_coords[:, 0]\n selected_rows = array_ops.gather(condition.value_rowids(), first_index)\n selected_row_starts = array_ops.gather(condition.row_splits, selected_rows)\n selected_cols = first_index - selected_row_starts\n\n # Assemble the row & column index with the indices for inner dimensions.\n return array_ops.concat([\n array_ops.expand_dims(selected_rows, 1),\n array_ops.expand_dims(selected_cols, 1), selected_coords[:, 1:]\n ],\n axis=1)\n\n\ndef _nrows(rt_input, out_type):\n if isinstance(rt_input, ragged_tensor.RaggedTensor):\n return rt_input.nrows(out_type=out_type)\n else:\n return array_ops.shape(rt_input, out_type=out_type)[0]\n"
] | [
[
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.ops.gradients_impl.gradients",
"tensorflow.python.ops.parallel_for.control_flow_ops.pfor",
"tensorflow.python.ops.check_ops.assert_equal",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.parallel_for.control_flow_ops.for_loop",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.ops.array_ops.transpose"
],
[
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.ragged.ragged_tensor.convert_to_tensor_or_ragged_tensor",
"tensorflow.python.ops.ragged.ragged_tensor_shape.broadcast_to",
"tensorflow.python.ops.array_ops.where_v2",
"tensorflow.python.ops.ragged.ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor",
"tensorflow.python.ops.ragged.ragged_tensor_shape.broadcast_dynamic_shape",
"tensorflow.python.ops.ragged.ragged_gather_ops.gather",
"tensorflow.python.ops.ragged.ragged_concat_ops.concat",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.util.dispatch.dispatch_for_api",
"tensorflow.python.ops.array_ops.where",
"tensorflow.python.ops.ragged.ragged_tensor.match_row_splits_dtypes",
"tensorflow.python.ops.ragged.ragged_functional_ops.map_flat_values"
]
] |
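Two small usage sketches for the TensorFlow files above, taken from their own docstrings. `tf.GradientTape.batch_jacobian` is the public TF2 entry point that uses the pfor-based implementation in gradients.py, and `tf.where` reaches the ragged implementation via the `@dispatch.dispatch_for_api` registrations shown in ragged_where_op.py:

import tensorflow as tf

# batch_jacobian: the example from its docstring.
x = tf.constant([[1., 2.], [3., 4.]])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = x * x
print(tape.batch_jacobian(y, x))  # [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]

# Ragged where: the elementwise-selection example from the where_v2 docstring.
cond = tf.ragged.constant([[True, False], [True, False, True]])
a = tf.ragged.constant([['A', 'B'], ['C', 'D', 'E']])
b = tf.ragged.constant([['a', 'b'], ['c', 'd', 'e']])
print(tf.where(cond, a, b))  # <tf.RaggedTensor [[b'A', b'b'], [b'C', b'd', b'E']]>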
anonymous2submit/Pointsformer | [
"0eaa141b3d79d45cd925976bde6097b51e0d3819"
] | [
"classification/models/model23.py"
] | [
"\"\"\"\nExactly equals to Model21 (the best results so far), but differnt configurations.\nExactly based on Model10, but ReLU to GeLU\nBased on Model8, add dropout and max, avg combine.\nBased on Local model, add residual connections.\nThe extraction is doubled for depth.\n\nLearning Point Cloud with Progressively Local representation.\n[B,3,N] - {[B,G,K,d]-[B,G,d]} - {[B,G',K,d]-[B,G',d]} -cls\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import einsum\nfrom einops import rearrange, repeat\nfrom pointnet2_ops import pointnet2_utils\n\ndef square_distance(src, dst):\n \"\"\"\n Calculate Euclid distance between each two points.\n src^T * dst = xn * xm + yn * ym + zn * zm;\n sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;\n sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;\n dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2\n = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst\n Input:\n src: source points, [B, N, C]\n dst: target points, [B, M, C]\n Output:\n dist: per-point square distance, [B, N, M]\n \"\"\"\n B, N, _ = src.shape\n _, M, _ = dst.shape\n dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))\n dist += torch.sum(src ** 2, -1).view(B, N, 1)\n dist += torch.sum(dst ** 2, -1).view(B, 1, M)\n return dist\n\n\ndef index_points(points, idx):\n \"\"\"\n Input:\n points: input points data, [B, N, C]\n idx: sample index data, [B, S]\n Return:\n new_points:, indexed points data, [B, S, C]\n \"\"\"\n device = points.device\n B = points.shape[0]\n view_shape = list(idx.shape)\n view_shape[1:] = [1] * (len(view_shape) - 1)\n repeat_shape = list(idx.shape)\n repeat_shape[0] = 1\n batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)\n new_points = points[batch_indices, idx, :]\n return new_points\n\n\ndef farthest_point_sample(xyz, npoint):\n \"\"\"\n Input:\n xyz: pointcloud data, [B, N, 3]\n npoint: number of samples\n Return:\n centroids: sampled pointcloud index, [B, npoint]\n \"\"\"\n device = xyz.device\n B, N, C = xyz.shape\n centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)\n distance = torch.ones(B, N).to(device) * 1e10\n farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)\n batch_indices = torch.arange(B, dtype=torch.long).to(device)\n for i in range(npoint):\n centroids[:, i] = farthest\n centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)\n dist = torch.sum((xyz - centroid) ** 2, -1)\n distance = torch.min(distance, dist)\n farthest = torch.max(distance, -1)[1]\n return centroids\n\n\ndef query_ball_point(radius, nsample, xyz, new_xyz):\n \"\"\"\n Input:\n radius: local region radius\n nsample: max sample number in local region\n xyz: all points, [B, N, 3]\n new_xyz: query points, [B, S, 3]\n Return:\n group_idx: grouped points index, [B, S, nsample]\n \"\"\"\n device = xyz.device\n B, N, C = xyz.shape\n _, S, _ = new_xyz.shape\n group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])\n sqrdists = square_distance(new_xyz, xyz)\n group_idx[sqrdists > radius ** 2] = N\n group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]\n group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])\n mask = group_idx == N\n group_idx[mask] = group_first[mask]\n return group_idx\n\n\ndef knn_point(nsample, xyz, new_xyz):\n \"\"\"\n Input:\n nsample: max sample number in local region\n xyz: all points, [B, N, C]\n new_xyz: query points, [B, S, C]\n Return:\n group_idx: grouped points index, [B, S, nsample]\n \"\"\"\n sqrdists = 
square_distance(new_xyz, xyz)\n _, group_idx = torch.topk(sqrdists, nsample, dim=-1, largest=False, sorted=False)\n return group_idx\n\n\nclass LocalGrouper(nn.Module):\n def __init__(self, groups, kneighbors, **kwargs):\n \"\"\"\n Give xyz[b,p,3] and fea[b,p,d], return new_xyz[b,g,3] and new_fea[b,g,k,2d]\n :param groups: groups number\n :param kneighbors: k-nerighbors\n :param kwargs: others\n \"\"\"\n super(LocalGrouper, self).__init__()\n self.groups = groups\n self.kneighbors = kneighbors\n\n def forward(self, xyz, points):\n B, N, C = xyz.shape\n S = self.groups\n xyz = xyz.contiguous() # xyz [btach, points, xyz]\n\n # fps_idx = farthest_point_sample(xyz, self.groups).long()\n fps_idx = pointnet2_utils.furthest_point_sample(xyz, self.groups).long() # [B, npoint]\n new_xyz = index_points(xyz, fps_idx)\n new_points = index_points(points, fps_idx)\n\n idx = knn_point(self.kneighbors, xyz, new_xyz)\n # idx = query_ball_point(radius, nsample, xyz, new_xyz)\n # grouped_xyz = index_points(xyz, idx) # [B, npoint, nsample, C]\n grouped_points = index_points(points, idx)\n grouped_points_norm = grouped_points - new_points.view(B, S, 1, -1)\n new_points = torch.cat([grouped_points_norm,\n new_points.view(B, S, 1, -1).repeat(1, 1, self.kneighbors, 1)]\n , dim=-1)\n return new_xyz, new_points\n\n\nclass FCBNReLU1D(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=1, bias=False):\n super(FCBNReLU1D, self).__init__()\n self.net = nn.Sequential(\n nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, bias=bias),\n nn.BatchNorm1d(out_channels),\n nn.GELU()\n )\n\n def forward(self, x):\n return self.net(x)\n\nclass FCBNReLU1DRes(nn.Module):\n def __init__(self, channel, kernel_size=1, bias=False):\n super(FCBNReLU1DRes, self).__init__()\n self.net = nn.Sequential(\n nn.Conv1d(in_channels=channel, out_channels=channel, kernel_size=kernel_size, bias=bias),\n nn.BatchNorm1d(channel),\n nn.GELU(),\n nn.Conv1d(in_channels=channel, out_channels=channel, kernel_size=kernel_size, bias=bias),\n nn.BatchNorm1d(channel)\n )\n\n def forward(self, x):\n return F.gelu(self.net(x)+x)\n\nclass Attention(nn.Module):\n def __init__(self, dim, heads = 8, dim_head = 32, dropout = 0.):\n super().__init__()\n inner_dim = dim_head * heads\n # project_out = not (heads == 1 and dim_head == dim)\n self.heads = heads\n self.scale = dim_head ** -0.5\n\n self.attend = nn.Softmax(dim = -1)\n self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)\n\n self.to_out = nn.Sequential(\n nn.Conv1d(inner_dim, dim,1),\n nn.BatchNorm1d(dim)\n )\n\n def forward(self, x):\n x = x.permute(0,2,1)\n b, n, _, h = *x.shape, self.heads\n qkv = self.to_qkv(x).chunk(3, dim = -1)\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)\n\n dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale\n\n attn = self.attend(dots)\n\n out = einsum('b h i j, b h j d -> b h i d', attn, v)\n out = rearrange(out, 'b h n d -> b (h d) n')\n\n return self.to_out(out)\n\n\n\nclass TransformerBlock(nn.Module):\n def __init__(self, dim, heads=8, dim_head=32, **kwargs):\n \"\"\"\n [b batch, d dimension, k points]\n :param dim: input data dimension\n :param heads: heads number\n :param dim_head: dimension in each head\n :param kwargs:\n \"\"\"\n super(TransformerBlock, self).__init__()\n self.attention = Attention(dim=dim, heads=heads, dim_head=dim_head)\n self.ffn = nn.Sequential(\n nn.Conv1d(dim, dim, 1, bias=False),\n nn.BatchNorm1d(dim)\n )\n\n\n def forward(self, x):\n \"\"\"\n 
:input x: [b batch, d dimension, p points,]\n :return: [b batch, d dimension, p points,]\n \"\"\"\n att = self.attention(x)\n att = F.gelu(att+x)\n out = self.ffn(att)\n out = F.gelu(att+out)\n return out\n\n\n\n\n\n\n\nclass PreExtraction(nn.Module):\n def __init__(self, channels, blocks=1):\n \"\"\"\n input: [b,g,k,d]: output:[b,d,g]\n :param channels:\n :param blocks:\n \"\"\"\n super(PreExtraction, self).__init__()\n operation = []\n for _ in range(blocks):\n operation.append(\n FCBNReLU1DRes(channels)\n )\n self.operation = nn.Sequential(*operation)\n self.transformer = TransformerBlock(channels, heads=4)\n def forward(self, x):\n b, n, s, d = x.size() # torch.Size([32, 512, 32, 6])\n x = x.permute(0, 1, 3, 2)\n x = x.reshape(-1, d, s)\n batch_size, _, N = x.size()\n x = self.operation(x) # [b, d, k]\n x = self.transformer(x)\n x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)\n x = x.reshape(b, n, -1).permute(0, 2, 1)\n return x\n\nclass PosExtraction(nn.Module):\n def __init__(self, channels, blocks=1):\n \"\"\"\n input[b,d,g]; output[b,d,g]\n :param channels:\n :param blocks:\n \"\"\"\n super(PosExtraction, self).__init__()\n operation = []\n for _ in range(blocks):\n operation.append(\n FCBNReLU1DRes(channels)\n )\n self.operation = nn.Sequential(*operation)\n self.transformer = TransformerBlock(channels, heads=4)\n\n def forward(self, x): # [b, d, k]\n return self.transformer(self.operation(x))\n\n\nclass Model23(nn.Module):\n def __init__(self, points=1024, class_num=40, embed_dim=64,\n pre_blocks=[2,2,2,2], pos_blocks=[2,2,2,2], k_neighbors=[32,32,32,32],\n reducers=[2,2,2,2], **kwargs):\n super(Model23, self).__init__()\n self.stages = len(pre_blocks)\n self.class_num = class_num\n self.points=points\n self.embedding = nn.Sequential(\n FCBNReLU1D(3, embed_dim),\n FCBNReLU1D(embed_dim, embed_dim)\n )\n assert len(pre_blocks)==len(k_neighbors)==len(reducers)==len(pos_blocks), \\\n \"Please check stage number consistent for pre_blocks, pos_blocks k_neighbors, reducers.\"\n self.local_grouper_list = nn.ModuleList()\n self.pre_blocks_list = nn.ModuleList()\n self.pos_blocks_list = nn.ModuleList()\n last_channel = embed_dim\n anchor_points = self.points\n for i in range(len(pre_blocks)):\n out_channel = last_channel*2\n pre_block_num=pre_blocks[i]\n pos_block_num = pos_blocks[i]\n kneighbor = k_neighbors[i]\n reduce = reducers[i]\n anchor_points = anchor_points//reduce\n\n # append local_grouper_list\n local_grouper = LocalGrouper(anchor_points, kneighbor) #[b,g,k,d]\n self.local_grouper_list.append(local_grouper)\n # append pre_block_list\n pre_block_module = PreExtraction(out_channel, pre_block_num)\n self.pre_blocks_list.append(pre_block_module)\n # append pos_block_list\n pos_block_module = PosExtraction(out_channel, pos_block_num)\n self.pos_blocks_list.append(pos_block_module)\n\n last_channel = out_channel\n\n self.classifier = nn.Sequential(\n nn.Linear(last_channel*2, 512),\n nn.BatchNorm1d(512),\n nn.GELU(),\n nn.Dropout(0.5),\n nn.Linear(512, 256),\n nn.BatchNorm1d(256),\n nn.GELU(),\n nn.Dropout(0.5),\n nn.Linear(256, self.class_num)\n )\n\n def forward(self, x):\n xyz = x.permute(0, 2, 1)\n batch_size, _, _ = x.size()\n x = self.embedding(x) # B,D,N\n for i in range(self.stages):\n xyz, x = self.local_grouper_list[i](xyz, x.permute(0, 2, 1)) # [b,g,3] [b,g,k,d]\n x = self.pre_blocks_list[i](x) # [b,d,g]\n x = self.pos_blocks_list[i](x) # [b,d,g]\n\n x_max = F.adaptive_max_pool1d(x,1).squeeze(dim=-1)\n x_mean = x.mean(dim=-1,keepdim=False)\n x = torch.cat([x_max, 
x_mean], dim=-1)\n x = self.classifier(x)\n return x\n\n\n\ndef model23A(num_classes=40, **kwargs) -> Model23: # 19201MiB\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[32,32],\n reducers=[4,4], **kwargs)\n\ndef model23B(num_classes=40, **kwargs) -> Model23: # 19185MiB\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[1,1], pos_blocks=[1,1], k_neighbors=[32,32],\n reducers=[4,4], **kwargs)\n\ndef model23C(num_classes=40, **kwargs) -> Model23: # 19537MiB\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[2,2,2], pos_blocks=[2,2,2], k_neighbors=[32,32,32],\n reducers=[4,2,2], **kwargs)\n\ndef model23D(num_classes=40, **kwargs) -> Model23: # 31927MiB\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[2,2,2], pos_blocks=[2,2,2], k_neighbors=[16,32,32],\n reducers=[2,2,2], **kwargs)\n\ndef model23E(num_classes=40, **kwargs) -> Model23: # 19215MiB # 93.476% on vis server\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[3,3], pos_blocks=[3,3], k_neighbors=[32,32],\n reducers=[4,4], **kwargs)\n\ndef model23F(num_classes=40, **kwargs) -> Model23: # 6437MiB\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[16,16],\n reducers=[4,4], **kwargs)\n\ndef model23G(num_classes=40, **kwargs) -> Model23: # 19201MiB\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[24,24],\n reducers=[4,4], **kwargs)\n\n# don't train H, it is the same as model21H\ndef model23H(num_classes=40, **kwargs) -> Model23:\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[4,4], pos_blocks=[4,4], k_neighbors=[32,32],\n reducers=[4,4], **kwargs)\n\ndef model23I(num_classes=40, **kwargs) -> Model23: # 20283MiB\n return Model23(points=1024, class_num=num_classes, embed_dim=256,\n pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[32,32],\n reducers=[4,4], **kwargs)\n\n# Extremely large model, 101 layers in total.\ndef model23J(num_classes=40, **kwargs) -> Model23: # 24999MiB\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[4,4,4,4], pos_blocks=[4,4,4,4], k_neighbors=[16,16,16,16],\n reducers=[4,2,2,2], **kwargs)\n\n\n# Also an extremely large model, 101 layers in total.\ndef model23K(num_classes=40, **kwargs) -> Model23:\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[10,10], pos_blocks=[10,10], k_neighbors=[32,32],\n reducers=[4,4], **kwargs)\n\n\nif __name__ == '__main__':\n data = torch.rand(2,128,10)\n att = Attention(128)\n out = att(data)\n print(out.shape)\n\n\n\n batch, groups,neighbors,dim=2,512,32,16\n x = torch.rand(batch,groups,neighbors,dim)\n pre_extractor = PreExtraction(dim,3)\n out = pre_extractor(x)\n print(out.shape)\n\n x = torch.rand(batch, dim, groups)\n pos_extractor = PosExtraction(dim, 3)\n out = pos_extractor(x)\n print(out.shape)\n\n\n data = torch.rand(2, 3, 1024)\n print(\"===> testing model ...\")\n model = Model23()\n out = model(data)\n print(out.shape)\n\n print(\"===> testing modelE ...\")\n model = model23E()\n out = model(data)\n print(out.shape)\n"
] | [
[
"torch.min",
"torch.randint",
"torch.rand",
"torch.nn.ModuleList",
"torch.max",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.BatchNorm1d",
"torch.nn.Softmax",
"torch.arange",
"torch.nn.functional.gelu",
"torch.ones",
"torch.nn.Conv1d",
"torch.einsum",
"torch.sum",
"torch.nn.Linear",
"torch.nn.functional.adaptive_max_pool1d",
"torch.nn.GELU",
"torch.topk",
"torch.nn.Sequential",
"torch.zeros"
]
] |
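The `square_distance` helper in the file above avoids materializing pairwise difference vectors by using the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2<x, y>. A minimal sketch, assuming only plain PyTorch (no `pointnet2_ops` needed), that checks this expansion against a brute-force computation:

```python
import torch

def square_distance(src, dst):
    # ||x - y||^2 expanded as ||x||^2 + ||y||^2 - 2<x, y>, as in the file above.
    B, N, _ = src.shape
    _, M, _ = dst.shape
    dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))   # -2 <x, y>
    dist += torch.sum(src ** 2, -1).view(B, N, 1)         # + ||x||^2
    dist += torch.sum(dst ** 2, -1).view(B, 1, M)         # + ||y||^2
    return dist

src = torch.rand(2, 8, 3)  # [B, N, C]
dst = torch.rand(2, 5, 3)  # [B, M, C]
# Brute force: explicit pairwise differences, [B, N, M, C] -> [B, N, M].
brute = ((src[:, :, None, :] - dst[:, None, :, :]) ** 2).sum(-1)
assert torch.allclose(square_distance(src, dst), brute, atol=1e-5)
```

The expansion trades an O(N*M*C) intermediate tensor for a single batched matmul, which is why `knn_point` and `query_ball_point` above rely on it.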
DigitalBiomarkerDiscoveryPipeline/devicely | [
"9773fead4d3969a32ca2760b8db4ae728c4d5d50"
] | [
"devicely/empatica.py"
] | [
"\"\"\"\nEmpatica E4 is a wearable device that offers real-time physiological data\nacquisition such as blood volume pulse, electrodermal activity (EDA), heart\nrate, interbeat intervals, 3-axis acceleration and skin temperature.\n\"\"\"\n\nimport os\nimport random\n\nimport numpy as np\nimport pandas as pd\n\n\nclass EmpaticaReader:\n \"\"\"\n Read, timeshift and write data generated by Empatica E4.\n\n Attributes\n ----------\n start_times : dict\n Contain the timestamp of the first measurement for all\n measured signals (BVP, ACC, etc.).\n\n sample_freqs : dict ]\n Contain the sampling frequencies of all measured signals\n in Hz.\n\n IBI : pandas.DataFrame\n Contain inter-beat interval data. The column\n \"seconds_since_start\" is the time in seconds between the start of\n measurements and the column \"IBI\" is the duration in seconds between\n consecutive beats.\n\n ACC : pandas.DataFrame\n Contain the data measured with the onboard MEMS type\n 3-axis accelerometer, indexed by time of measurement.\n\n BVP : pandas.DataFrame\n Contain blood volume pulse data, indexed by time of\n measurement.\n\n EDA : pandas.DataFrame\n Contain data captured from the electrodermal activity\n sensor, indexed by time of measurement.\n\n HR : pandas.DataFrame\n Contain heart rate data, indexed by time of\n measurement.\n\n TEMP : pandas.DataFrame\n Contain temperature data, indexed by time of\n measurement.\n\n data : pandas.DataFrame\n Joined dataframe of the ACC, BVP, EDA, HR and TEMP\n dataframes (see above). May contain NaN values because sampling\n frequencies differ across signals.\n \"\"\"\n\n def __init__(self, path):\n \"\"\"\n Parse the csv files located in the specified directory into dataframes.\n\n Parameters\n ----------\n path : str\n Path of the directory that contains the individual signal csv\n files. The files must be named ACC.csv, BVP.csv, EDA.csv, HR.csv,\n IBI.csv and TEMP.csv. If present, the file tags.csv is also read.\n \"\"\"\n\n self.start_times = {}\n self.sample_freqs = {}\n\n files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]\n\n if files is None:\n print('Empty directory. 
Nothing to read.')\n return None\n\n self.ACC = self._read_signal(os.path.join(path, 'ACC.csv'), 'ACC', col_names=['X', 'Y', 'Z'])\n self.BVP = self._read_signal(os.path.join(path, 'BVP.csv'), 'BVP')\n self.EDA = self._read_signal(os.path.join(path, 'EDA.csv'), 'EDA')\n self.HR = self._read_signal(os.path.join(path, 'HR.csv'), 'HR')\n self.TEMP = self._read_signal(os.path.join(path, 'TEMP.csv'), 'TEMP')\n self.IBI = self._read_ibi(os.path.join(path, 'IBI.csv'))\n\n self.tags = self._read_tags(os.path.join(path, 'tags.csv'))\n\n self.data = self._get_joined_dataframe()\n\n def write(self, dir_path):\n \"\"\"\n Write the signal dataframes back to individual csv files formatted the\n same way as they were read.\n\n Parameters\n ----------\n dir_path : str\n Path of the directory in which the csv files are created.\n\n If the directory exists, the csv files are written using writing mode 'w'\n ignoring other files in the directory.\n\n If the directory does not exist, it will be created.\n \"\"\"\n\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n if self.ACC is not None:\n self._write_signal(os.path.join(dir_path, 'ACC.csv'), self.ACC, 'ACC')\n if self.BVP is not None:\n self._write_signal(os.path.join(dir_path, 'BVP.csv'), self.BVP, 'BVP')\n if self.EDA is not None:\n self._write_signal(os.path.join(dir_path, 'EDA.csv'), self.EDA, 'EDA')\n if self.HR is not None:\n self._write_signal(os.path.join(dir_path, 'HR.csv'), self.HR, 'HR')\n if self.TEMP is not None:\n self._write_signal(os.path.join(dir_path, 'TEMP.csv'), self.TEMP, 'TEMP')\n if self.IBI is not None:\n self._write_ibi(os.path.join(dir_path, 'IBI.csv'))\n if self.tags is not None:\n self._write_tags(os.path.join(dir_path, 'tags.csv'))\n\n def _read_signal(self, path, signal_name, col_names=None):\n try:\n if os.stat(path).st_size > 0:\n with open(path, 'r') as file:\n start_time_str = file.readline().split(', ')[0]\n self.start_times[signal_name] = pd.Timestamp(float(start_time_str), unit='s')\n sample_freq_str = file.readline().split(', ')[0]\n self.sample_freqs[signal_name] = float(sample_freq_str)\n col_names = [signal_name] if col_names is None else col_names\n dataframe = pd.read_csv(file, header=None, names=col_names)\n dataframe.index = pd.date_range(\n start=self.start_times[signal_name],\n freq=f\"{1 / self.sample_freqs[signal_name]}S\",\n periods=len(dataframe))\n if col_names is not None:\n dataframe.rename(dict(enumerate(col_names)), inplace=True)\n else:\n dataframe.rename({0: signal_name}, inplace=True)\n\n return dataframe.squeeze()\n else:\n print(f\"Not reading signal because the file {path} is empty.\")\n except OSError:\n print(f\"Not reading signal because the file {path} does not exist.\")\n\n return None\n\n def _write_signal(self, path, dataframe, signal_name):\n n_cols = len(dataframe.columns) if isinstance(dataframe, pd.DataFrame) else 1\n meta = np.array([[self.start_times[signal_name].value / 1e9] * n_cols,\n [self.sample_freqs[signal_name]] * n_cols])\n with open(path, 'w') as file:\n np.savetxt(file, meta, fmt='%s', delimiter=', ', newline='\\n')\n dataframe.to_csv(file, index=None, header=None, line_terminator='\\n')\n\n def _read_ibi(self, path):\n try:\n if os.stat(path).st_size > 0:\n with open(path, 'r') as file:\n start_time = pd.Timestamp(float(file.readline().split(',')[0]), unit='s')\n self.start_times['IBI'] = start_time\n df = pd.read_csv(file, names=['time', 'IBI'], header=None)\n df['time'] = pd.to_timedelta(df['time'], unit='s')\n df['time'] = start_time + df['time']\n return 
df.set_index('time') \n else:\n print(f\"Not reading signal because the file {path} is empty.\")\n except OSError:\n print(f\"Not reading signal because the file {path} does not exist.\")\n\n return None\n\n def _write_ibi(self, path):\n with open(path, 'w') as file:\n file.write(f\"{self.start_times['IBI'].value // 1e9}, IBI\\n\")\n write_df = self.IBI.copy()\n write_df.index = (write_df.index - self.start_times['IBI']).values.astype(int) / 1e9\n write_df.to_csv(file, header=None, line_terminator='\\n')\n\n def _read_tags(self, path):\n try:\n if os.stat(path).st_size > 0:\n return pd.read_csv(path, header=None,\n parse_dates=[0],\n date_parser=lambda x : pd.to_datetime(x, unit='s'),\n names=['tags'],\n squeeze=True)\n\n else:\n print(f\"Not reading tags because the file {path} is empty.\")\n except OSError:\n print(f\"Not reading tags because the file {path} does not exist.\")\n\n return None\n\n def _write_tags(self, path):\n if self.tags is not None:\n tags_write_series = self.tags.map(lambda x: x.value / 1e9)\n tags_write_series.to_csv(path, header=None, index=None, line_terminator='\\n')\n\n def timeshift(self, shift='random'):\n \"\"\"\n Timeshift all time-related columns as well as the start_times dict.\n\n Parameters\n ----------\n shift : None/'random', pd.Timestamp or pd.Timedelta\n If shift is not specified, shifts the data by a random time interval\n between one month and two years to the past.\n\n If shift is a timedelta, adds that timedelta to all time-related attributes.\n\n If shift is a timestamp, shifts the data such that the earliest entry\n has that timestamp. The remaining values will maintain the same\n time difference to the first entry.\n \"\"\"\n\n if shift == 'random':\n one_month = pd.Timedelta('- 30 days').value\n two_years = pd.Timedelta('- 730 days').value\n random_timedelta = pd.Timedelta(random.uniform(one_month, two_years))\n self.timeshift(random_timedelta)\n return # the recursive call has already applied the shift\n\n dataframes = []\n variables = [self.ACC, self.BVP, self.EDA,\n self.HR, self.TEMP, self.data]\n for variable in variables:\n if variable is not None:\n dataframes.append(variable)\n\n if isinstance(shift, pd.Timestamp):\n min_start_time = min(self.start_times.values())\n new_start_times = dict()\n for signal_name, start_time in self.start_times.items():\n new_start_times[signal_name] = shift + (start_time - min_start_time)\n self.start_times = new_start_times\n if self.tags is not None:\n timedeltas = self.tags - self.tags.min()\n self.tags = shift + timedeltas\n for dataframe in dataframes:\n timedeltas = dataframe.index - dataframe.index.min()\n dataframe.index = shift + timedeltas\n\n if isinstance(shift, pd.Timedelta):\n for signal_name in self.start_times:\n self.start_times[signal_name] += shift\n if self.tags is not None:\n self.tags += shift\n for dataframe in dataframes:\n dataframe.index += shift\n\n def _get_joined_dataframe(self):\n dataframes = []\n variables = [self.ACC, self.BVP, self.EDA,\n self.HR, self.TEMP]\n for variable in variables:\n if variable is not None:\n dataframes.append(variable)\n\n if not dataframes:\n print('No joined dataframe possible due to lack of data.')\n return None\n\n joined_idx = pd.concat([pd.Series(dataframe.index) for dataframe in dataframes])\n joined_idx = pd.Index(joined_idx.drop_duplicates().sort_values())\n\n joined_dataframe = pd.DataFrame(index=joined_idx)\n if self.ACC is not None:\n joined_dataframe.loc[self.ACC.index, 'ACC_X'] = self.ACC['X']\n joined_dataframe.loc[self.ACC.index, 'ACC_Y'] = self.ACC['Y']\n 
joined_dataframe.loc[self.ACC.index, 'ACC_Z'] = self.ACC['Z']\n if self.BVP is not None:\n joined_dataframe.loc[self.BVP.index, 'BVP'] = self.BVP\n if self.EDA is not None:\n joined_dataframe.loc[self.EDA.index, 'EDA'] = self.EDA\n if self.HR is not None:\n joined_dataframe.loc[self.HR.index, 'HR'] = self.HR\n if self.TEMP is not None:\n joined_dataframe.loc[self.TEMP.index, 'TEMP'] = self.TEMP\n\n return joined_dataframe\n"
] | [
[
"pandas.Series",
"pandas.read_csv",
"numpy.savetxt",
"pandas.DataFrame",
"pandas.Timedelta",
"pandas.to_datetime",
"numpy.array",
"pandas.to_timedelta"
]
] |
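A hypothetical usage sketch for the `EmpaticaReader` above: read a recording directory, shift all timestamps into the past to de-identify them, and write the files back in the same Empatica format. The paths `./e4_recording` and `./e4_shifted` are placeholders, and the import path assumes the `devicely` package is installed.

```python
import pandas as pd
from devicely import EmpaticaReader  # assumption: devicely exposes the reader at top level

# Directory containing ACC.csv, BVP.csv, EDA.csv, HR.csv, IBI.csv, TEMP.csv (and optionally tags.csv).
reader = EmpaticaReader('./e4_recording')
print(reader.sample_freqs)   # sampling frequency in Hz per signal, e.g. {'ACC': 32.0, 'BVP': 64.0, ...}
print(reader.data.head())    # joined frame; NaNs appear where sampling rates differ

# Shift every time-related attribute two weeks into the past, then write back to disk.
reader.timeshift(pd.Timedelta('-14 days'))
reader.write('./e4_shifted')
```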
Near32/Archi | [
"0005713fa4e37c7cd9b34cd257c481d08928db8a"
] | [
"Archi/tests/test_esbn_model.py"
] | [
"import Archi\nimport yaml \n\n\ndef test_model_loading():\n try:\n config = yaml.safe_load(\n open(\"./esbn_model_test_config.yaml\", 'r'),\n )\n except yaml.YANNLError as e:\n print(e)\n\n from Archi import load_model\n\n model = load_model(config)\n \n assert 'KeyValueMemory' in model.modules.keys()\n assert 'key_memory' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()\n assert 'value_memory' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()\n assert 'read_key_plus_conf' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()\n assert 'CoreLSTM' in model.modules.keys()\n assert 'CoreLSTM' in model.stream_handler.placeholders['inputs'].keys()\n assert 'hidden' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()\n assert 'cell' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()\n assert 'iteration' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()\n \n\ndef test_model_forward():\n try:\n config = yaml.safe_load(\n open(\"./esbn_model_test_config.yaml\", 'r'),\n )\n except yaml.YANNLError as e:\n print(e)\n\n from Archi import load_model\n\n model = load_model(config)\n \n import torch \n\n inputs_dict = {\n 'x':torch.rand(4,3,64,64),\n }\n\n output = model(**inputs_dict)\n assert output['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() == 0.0 \n output1 = model(**inputs_dict)\n\n assert 'lstm_output' in output['modules']['CoreLSTM']\n assert 'processed_input' in output['modules']['Encoder']\n assert 'processed_input' in output['modules']['ToGateFCN']\n assert output['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() == 0.0 \n assert output1['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() != 0.0 \n assert len(dict(model.named_parameters())) != 0\n for np, p in model.named_parameters():\n print(np)\n\nif __name__ == '__main__':\n test_model_loading()\n test_model_forward()\n\n"
] | [
[
"torch.rand"
]
] |
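One detail worth noting in the tests above: the original file caught `yaml.YANNLError`, which does not exist in PyYAML (the correct class, used in the corrected listing, is `yaml.YAMLError`). Even with that fixed, the `except` block only prints the error and leaves `config` unbound, so `load_model(config)` would then fail with a `NameError`. A minimal sketch of a stricter loading pattern, reusing the config path the tests hard-code; `load_config` is a hypothetical helper, not part of Archi:

```python
import yaml

def load_config(path="./esbn_model_test_config.yaml"):
    # yaml.YAMLError is the base class PyYAML raises for malformed input.
    try:
        with open(path, "r") as f:
            return yaml.safe_load(f)
    except yaml.YAMLError as e:
        # Re-raise instead of printing, so load_model() never receives an unbound config.
        raise RuntimeError(f"Malformed YAML in {path}") from e
```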
RubensZimbres/pytorch-metric-learning | [
"41e06ef5af398c05d238e0a74ee6c42fa7bd574c"
] | [
"tests/utils/test_calculate_accuracies.py"
] | [
"import unittest\r\nfrom pytorch_metric_learning.utils import accuracy_calculator\r\nimport numpy as np\r\n\r\n\r\nclass TestCalculateAccuracies(unittest.TestCase):\r\n def test_accuracy_calculator(self):\r\n query_labels = np.array([1, 1, 2, 3, 4])\r\n\r\n knn_labels1 = np.array(\r\n [\r\n [0, 1, 1, 2, 2],\r\n [1, 0, 1, 1, 3],\r\n [4, 4, 4, 4, 2],\r\n [3, 1, 3, 1, 3],\r\n [0, 0, 4, 2, 2],\r\n ]\r\n )\r\n label_counts1 = {1: 3, 2: 5, 3: 4, 4: 5}\r\n\r\n knn_labels2 = knn_labels1 + 5\r\n label_counts2 = {k + 5: v for k, v in label_counts1.items()}\r\n\r\n for avg_of_avgs in [False, True]:\r\n for i, (knn_labels, label_counts) in enumerate(\r\n [(knn_labels1, label_counts1), (knn_labels2, label_counts2)]\r\n ):\r\n\r\n AC = accuracy_calculator.AccuracyCalculator(\r\n exclude=(\"NMI\", \"AMI\"), avg_of_avgs=avg_of_avgs\r\n )\r\n kwargs = {\r\n \"query_labels\": query_labels,\r\n \"label_counts\": label_counts,\r\n \"knn_labels\": knn_labels,\r\n \"not_lone_query_mask\": np.ones(5).astype(np.bool)\r\n if i == 0\r\n else np.zeros(5).astype(np.bool),\r\n }\r\n\r\n function_dict = AC.get_function_dict()\r\n\r\n for ecfss in [False, True]:\r\n if ecfss:\r\n kwargs[\"knn_labels\"] = kwargs[\"knn_labels\"][:, 1:]\r\n kwargs[\"embeddings_come_from_same_source\"] = ecfss\r\n acc = AC._get_accuracy(function_dict, **kwargs)\r\n if i == 1:\r\n self.assertTrue(acc[\"precision_at_1\"] == 0)\r\n self.assertTrue(acc[\"r_precision\"] == 0)\r\n self.assertTrue(acc[\"mean_average_precision_at_r\"] == 0)\r\n self.assertTrue(acc[\"mean_average_precision\"] == 0)\r\n else:\r\n self.assertTrue(\r\n acc[\"precision_at_1\"]\r\n == self.correct_precision_at_1(ecfss, avg_of_avgs)\r\n )\r\n self.assertTrue(\r\n acc[\"r_precision\"]\r\n == self.correct_r_precision(ecfss, avg_of_avgs)\r\n )\r\n self.assertTrue(\r\n acc[\"mean_average_precision_at_r\"]\r\n == self.correct_mean_average_precision_at_r(\r\n ecfss, avg_of_avgs\r\n )\r\n )\r\n self.assertTrue(\r\n acc[\"mean_average_precision\"]\r\n == self.correct_mean_average_precision(ecfss, avg_of_avgs)\r\n )\r\n\r\n def correct_precision_at_1(self, embeddings_come_from_same_source, avg_of_avgs):\r\n if not embeddings_come_from_same_source:\r\n if not avg_of_avgs:\r\n return 0.4\r\n else:\r\n return (0.5 + 0 + 1 + 0) / 4\r\n else:\r\n if not avg_of_avgs:\r\n return 1.0 / 5\r\n else:\r\n return (0.5 + 0 + 0 + 0) / 4\r\n\r\n def correct_r_precision(self, embeddings_come_from_same_source, avg_of_avgs):\r\n if not embeddings_come_from_same_source:\r\n acc0 = 2.0 / 3\r\n acc1 = 2.0 / 3\r\n acc2 = 1.0 / 5\r\n acc3 = 2.0 / 4\r\n acc4 = 1.0 / 5\r\n else:\r\n acc0 = 1.0 / 1\r\n acc1 = 1.0 / 2\r\n acc2 = 1.0 / 4\r\n acc3 = 1.0 / 3\r\n acc4 = 1.0 / 4\r\n if not avg_of_avgs:\r\n return np.mean([acc0, acc1, acc2, acc3, acc4])\r\n else:\r\n return np.mean([(acc0 + acc1) / 2, acc2, acc3, acc4])\r\n\r\n def correct_mean_average_precision_at_r(\r\n self, embeddings_come_from_same_source, avg_of_avgs\r\n ):\r\n if not embeddings_come_from_same_source:\r\n acc0 = (1.0 / 2 + 2.0 / 3) / 3\r\n acc1 = (1 + 2.0 / 3) / 3\r\n acc2 = (1.0 / 5) / 5\r\n acc3 = (1 + 2.0 / 3) / 4\r\n acc4 = (1.0 / 3) / 5\r\n else:\r\n acc0 = 1\r\n acc1 = (1.0 / 2) / 2\r\n acc2 = (1.0 / 4) / 4\r\n acc3 = (1.0 / 2) / 3\r\n acc4 = (1.0 / 2) / 4\r\n if not avg_of_avgs:\r\n return np.mean([acc0, acc1, acc2, acc3, acc4])\r\n else:\r\n return np.mean([(acc0 + acc1) / 2, acc2, acc3, acc4])\r\n\r\n def correct_mean_average_precision(\r\n self, embeddings_come_from_same_source, avg_of_avgs\r\n ):\r\n if not 
embeddings_come_from_same_source:\r\n acc0 = (1.0 / 2 + 2.0 / 3) / 2\r\n acc1 = (1 + 2.0 / 3 + 3.0 / 4) / 3\r\n acc2 = (1.0 / 5) / 1\r\n acc3 = (1 + 2.0 / 3 + 3.0 / 5) / 3\r\n acc4 = (1.0 / 3) / 1\r\n else:\r\n acc0 = 1\r\n acc1 = (1.0 / 2 + 2.0 / 3) / 2\r\n acc2 = 1.0 / 4\r\n acc3 = (1.0 / 2 + 2.0 / 4) / 2\r\n acc4 = 1.0 / 2\r\n if not avg_of_avgs:\r\n return np.mean([acc0, acc1, acc2, acc3, acc4])\r\n else:\r\n return np.mean([(acc0 + acc1) / 2, acc2, acc3, acc4])\r\n\r\n def test_get_label_counts(self):\r\n label_counts, num_k = accuracy_calculator.get_label_counts(\r\n [0, 1, 3, 2, 3, 1, 3, 3, 4, 6, 5, 10, 4, 4, 4, 4, 6, 6, 5]\r\n )\r\n self.assertTrue(\r\n label_counts == {0: 1, 1: 2, 2: 1, 3: 4, 4: 5, 5: 2, 6: 3, 10: 1}\r\n )\r\n self.assertTrue(num_k == 5)\r\n\r\n def test_get_lone_query_labels(self):\r\n query_labels = np.array([0, 1, 2, 3, 4, 5, 6])\r\n reference_labels = np.array([0, 0, 0, 1, 2, 2, 3, 4, 5, 6])\r\n reference_label_counts, _ = accuracy_calculator.get_label_counts(\r\n reference_labels\r\n )\r\n\r\n lone_query_labels = accuracy_calculator.get_lone_query_labels(\r\n query_labels, reference_labels, reference_label_counts, True\r\n )\r\n self.assertTrue(\r\n np.all(np.unique(lone_query_labels) == np.array([1, 3, 4, 5, 6]))\r\n )\r\n\r\n query_labels = np.array([0, 1, 2, 3, 4])\r\n reference_labels = np.array([0, 0, 0, 1, 2, 2, 4, 5, 6])\r\n\r\n lone_query_labels = accuracy_calculator.get_lone_query_labels(\r\n query_labels, reference_labels, reference_label_counts, False\r\n )\r\n self.assertTrue(np.all(np.unique(lone_query_labels) == np.array([3])))\r\n\r\n\r\nclass TestCalculateAccuraciesAndFaiss(unittest.TestCase):\r\n def test_accuracy_calculator_and_faiss(self):\r\n AC = accuracy_calculator.AccuracyCalculator(exclude=(\"NMI\", \"AMI\"))\r\n\r\n query = np.arange(10)[:, None].astype(np.float32)\r\n reference = np.arange(10)[:, None].astype(np.float32)\r\n query_labels = np.arange(10).astype(np.int)\r\n reference_labels = np.arange(10).astype(np.int)\r\n acc = AC.get_accuracy(query, reference, query_labels, reference_labels, False)\r\n self.assertTrue(acc[\"precision_at_1\"] == 1)\r\n self.assertTrue(acc[\"r_precision\"] == 1)\r\n self.assertTrue(acc[\"mean_average_precision_at_r\"] == 1)\r\n\r\n reference = (np.arange(20) / 2.0)[:, None].astype(np.float32)\r\n reference_labels = np.zeros(20).astype(np.int)\r\n reference_labels[::2] = query_labels\r\n reference_labels[1::2] = np.ones(10).astype(np.int)\r\n acc = AC.get_accuracy(query, reference, query_labels, reference_labels, True)\r\n self.assertTrue(acc[\"precision_at_1\"] == 1)\r\n self.assertTrue(acc[\"r_precision\"] == 0.5)\r\n self.assertTrue(\r\n acc[\"mean_average_precision_at_r\"]\r\n == (1 + 2.0 / 2 + 3.0 / 5 + 4.0 / 7 + 5.0 / 9) / 10\r\n )\r\n\r\n def test_accuracy_calculator_and_faiss_avg_of_avgs(self):\r\n AC_global_average = accuracy_calculator.AccuracyCalculator(\r\n exclude=(\"NMI\", \"AMI\"), avg_of_avgs=False\r\n )\r\n AC_per_class_average = accuracy_calculator.AccuracyCalculator(\r\n exclude=(\"NMI\", \"AMI\"), avg_of_avgs=True\r\n )\r\n query = np.arange(10)[:, None].astype(np.float32)\r\n reference = np.arange(10)[:, None].astype(np.float32)\r\n query[-1] = 100\r\n reference[0] = -100\r\n query_labels = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1])\r\n reference_labels = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0])\r\n acc = AC_global_average.get_accuracy(\r\n query, reference, query_labels, reference_labels, False\r\n )\r\n self.assertTrue(acc[\"precision_at_1\"] == 0.9)\r\n 
self.assertTrue(acc[\"r_precision\"] == 0.9)\r\n self.assertTrue(acc[\"mean_average_precision_at_r\"] == 0.9)\r\n\r\n acc = AC_per_class_average.get_accuracy(\r\n query, reference, query_labels, reference_labels, False\r\n )\r\n self.assertTrue(acc[\"precision_at_1\"] == 0.5)\r\n self.assertTrue(acc[\"r_precision\"] == 0.5)\r\n self.assertTrue(acc[\"mean_average_precision_at_r\"] == 0.5)\r\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.arange",
"numpy.array",
"numpy.unique",
"numpy.mean"
]
] |
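For reference, the first fixture in the tests above can be checked by hand. A short sketch (plain numpy, no faiss or pytorch-metric-learning required) that reproduces precision@1 over all five queries and the R-precision of the first query, matching `correct_precision_at_1` (0.4) and `acc0` (2/3) in the test file:

```python
import numpy as np

# Same fixtures as knn_labels1 / query_labels in the test above.
query_labels = np.array([1, 1, 2, 3, 4])
knn_labels = np.array([
    [0, 1, 1, 2, 2],
    [1, 0, 1, 1, 3],
    [4, 4, 4, 4, 2],
    [3, 1, 3, 1, 3],
    [0, 0, 4, 2, 2],
])

# precision@1: fraction of queries whose nearest neighbour shares their label.
precision_at_1 = np.mean(knn_labels[:, 0] == query_labels)
print(precision_at_1)  # 0.4, as asserted for the non-avg_of_avgs case

# R-precision for query 0: label 1 occurs r=3 times in the references
# (label_counts1), so inspect the 3 nearest neighbours.
r = 3
r_precision_q0 = np.mean(knn_labels[0, :r] == query_labels[0])
print(r_precision_q0)  # 2/3, i.e. acc0 in correct_r_precision
```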
piquark6046/tensorflow | [
"57771c5d008f6d16fd147110213855d145a7e0bc"
] | [
"tensorflow/python/eager/backprop.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Code for backpropagation using the tape utilities.\"\"\"\n\n# TODO(b/159343581): Properly support CompositeTensor in all functions in this\n# file.\n\nimport functools\nimport operator\nimport sys\n\nimport six\n\nfrom tensorflow.python import pywrap_tfe\nfrom tensorflow.python.eager import backprop_util\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import execute\nfrom tensorflow.python.eager import imperative_grad\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.framework import composite_tensor_gradient\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import indexed_slices\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import default_gradient\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops.unconnected_gradients import UnconnectedGradients\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import _pywrap_utils\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.lazy_loader import LazyLoader\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# Note that we need to lazy load the following two modules to avoid creating\n# circular dependencies.\n# TODO(b/119775953): fix the circular dependencies.\npfor_ops = LazyLoader(\n \"pfor_ops\", globals(),\n \"tensorflow.python.ops.parallel_for.control_flow_ops\")\n\nfunction = LazyLoader(\"function\", globals(),\n \"tensorflow.python.eager.function\")\n\n_op_attr_type_cache = {}\n\n\ndef op_attr_type(op_type, attr_name):\n try:\n return _op_attr_type_cache[(op_type, attr_name)]\n except KeyError:\n context.ensure_initialized()\n h = context.context()._handle # pylint: disable=protected-access\n attr_type = pywrap_tfe.TFE_OpNameGetAttrType(h, op_type, attr_name)\n _op_attr_type_cache[(op_type, attr_name)] = attr_type\n return attr_type\n\n\ndef make_attr(attr_type, value):\n # pybind11 enums do not return the raw value like SWIG enums do. 
They are\n # useful when comparing amongst each other but not direct integers as we are\n # doing in most tests.\n # https://pybind11.readthedocs.io/en/stable/classes.html#enumerations-and-internal-types\n # TODO(amitpatankar): After all SWIG transitions, convert the enum comparisons\n # from integer value to class.\n if attr_type == int(pywrap_tfe.TF_ATTR_TYPE):\n return dtypes.as_dtype(value)\n if attr_type == [int(pywrap_tfe.TF_ATTR_TYPE)]:\n return [dtypes.as_dtype(v) for v in value]\n if attr_type == int(pywrap_tfe.TF_ATTR_SHAPE):\n return tensor_shape.as_shape(value).as_proto()\n if attr_type == [int(pywrap_tfe.TF_ATTR_SHAPE)]:\n return [tensor_shape.as_shape(v).as_proto() for v in value]\n if isinstance(value, str):\n return value.encode()\n return value\n\n\nclass _MockOp(object):\n \"\"\"Pretends to be a tf.Operation for the gradient functions.\"\"\"\n\n def __init__(self, attrs, inputs, outputs, typ, skip_input_indices):\n self.attrs = attrs\n self.inputs = inputs\n self.outputs = outputs\n self.type = typ\n self.skip_input_indices = skip_input_indices\n\n def get_attr(self, attr):\n typ = op_attr_type(self.type, attr)\n for i in range(0, len(self.attrs), 2):\n if self.attrs[i] == attr:\n return make_attr(typ, self.attrs[i + 1])\n raise KeyError(attr)\n\n def _get_control_flow_context(self):\n raise NotImplementedError(\n \"tf.GradientTape.gradients() does not support graph control flow \"\n \"operations like tf.cond or tf.while at this time. Use tf.gradients() \"\n \"instead. If you need this feature, please file a feature request at \"\n \"https://github.com/tensorflow/tensorflow/issues/new\"\n )\n\n\ndef _gradient_function(op_name, attr_tuple, num_inputs, inputs, outputs,\n out_grads, skip_input_indices, forward_pass_name_scope):\n \"\"\"Calls the gradient function of the op.\n\n Args:\n op_name: the name of the op to be differentiated.\n attr_tuple: the attrs, as a tuple.\n num_inputs: the number of inputs to the op.\n inputs: inputs to the original operation.\n outputs: outputs to the original operation.\n out_grads: gradients of the operation wrt its outputs.\n skip_input_indices: a tuple that is passed to the gradient function,\n indicating which inputs to skip calculating the gradient for\n forward_pass_name_scope: the namescope of the op in the forward pass.\n\n Returns:\n The gradients with respect to the inputs of the function, as a list.\n \"\"\"\n mock_op = _MockOp(attr_tuple, inputs, outputs, op_name, skip_input_indices)\n grad_fn = ops._gradient_registry.lookup(op_name) # pylint: disable=protected-access\n if grad_fn is None:\n return [None] * num_inputs\n\n # This does not work with v1 TensorArrays.\n if ops.executing_eagerly_outside_functions(\n ) or control_flow_util.EnableControlFlowV2(ops.get_default_graph()):\n gradient_name_scope = \"gradient_tape/\"\n if forward_pass_name_scope:\n gradient_name_scope += forward_pass_name_scope + \"/\"\n with ops.name_scope(gradient_name_scope):\n return grad_fn(mock_op, *out_grads)\n else:\n return grad_fn(mock_op, *out_grads)\n\n\npywrap_tfe.TFE_Py_RegisterGradientFunction(_gradient_function)\n\n\ndef _must_record_gradient():\n return not pywrap_tfe.TFE_Py_TapeSetIsEmpty()\n\n\n@tf_export(\"__internal__.record_gradient\", v1=[])\ndef record_gradient(op_name, inputs, attrs, outputs):\n \"\"\"Explicitly record the gradient for a given op.\n\n Args:\n op_name: The op name as listed in the `OpDef` for the op.\n inputs: A list of tensor inputs to the op.\n attrs: The op attributes as a flattened list of alternating attribute 
names\n and attribute values.\n outputs: A list of tensor outputs from the op.\n \"\"\"\n pywrap_tfe.TFE_Py_RecordGradient(op_name, inputs, attrs, outputs,\n ops.get_name_scope())\n\n\nexecute.must_record_gradient = _must_record_gradient\nexecute.record_gradient = record_gradient\n\n\ndef implicit_val_and_grad(f):\n \"\"\"Returns a function which differentiates f with respect to variables.\n\n The wrapped function returns the value and the gradient of f when called with\n the same arguments. The gradient is with respect to all trainable TFE\n variables accessed by `f`.\n\n This function is useful when the exact set of variables to differentiate with\n is not known ahead of time.\n\n Example:\n\n ```python\n dense_layer = tf.compat.v1.layers.Dense(1)\n def loss(x, y):\n return tf.reduce_sum(tf.square(dense_layer(x) - y))\n\n # Obtain the gradient function.\n val_grad_fn = tfe.implicit_value_and_gradients(loss)\n\n # Invoke the gradient function with concrete values of x and y.\n x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n y = tf.constant([[10.0], [20.0]])\n value, grads_and_vars = val_grad_fn(x, y)\n print('Value of loss: %s' % value)\n\n # Apply the gradients to Variables.\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)\n optimizer.apply_gradients(grads_and_vars)\n ```\n\n Args:\n f: function to be differentiated. If `f` returns a scalar, this scalar will\n be differentiated. If `f` returns a tensor or list of tensors, by default\n a scalar will be computed by adding all their values to produce a single\n scalar.\n\n Returns:\n A function which, when called, returns a tuple pair.\n Its first element is the value to which the function evaluates.\n Its second element is list of (gradient, variable) pairs.\n\n Raises:\n ValueError: if `f` returns None.\n \"\"\"\n # TODO(cais): Remove calls to tf.constant() once the gradients functions\n # accept lists and np.ndarrays.\n\n def grad_fn(*args, **kwds):\n \"\"\"Computes the gradient of the wrapped function.\"\"\"\n this_tape = tape.push_new_tape()\n try:\n end_node = f(*args, **kwds)\n if end_node is None:\n raise ValueError(\"Cannot differentiate a function that returns None; \"\n \"did you forget to return a value from {}?\".format(\n f.__name__))\n finally:\n tape.pop_tape(this_tape)\n # Note: variables are returned in construction order. This ensures unique\n # order across executions.\n variables = this_tape.watched_variables()\n if not variables:\n raise ValueError(\"No trainable variables were accessed while the \"\n \"function was being computed.\")\n\n sources = [v.handle for v in variables]\n for s in sources:\n if getattr(s, \"is_packed\", False):\n raise ValueError(\n \"GradientTape.gradient is not supported on packed EagerTensors yet.\"\n )\n grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node),\n sources)\n return end_node, list(zip(grad, variables))\n\n return grad_fn\n\n\ndef implicit_grad(f):\n \"\"\"Returns a function which differentiates f with respect to variables.\n\n The wrapped function returns the gradient of f when called with the same\n arguments. 
The gradient is with respect to all trainable TFE variables\n accessed by `f`.\n\n This function is useful when the exact set of variables to differentiate with\n is not known ahead of time.\n\n Example:\n\n ```python\n dense_layer = tf.compat.v1.layers.Dense(1)\n def loss(x, y):\n return tf.reduce_sum(tf.square(dense_layer(x) - y))\n\n # Obtain the gradient function.\n grad_fn = tfe.implicit_gradients(loss)\n\n # Invoke the gradient function with concrete values of x and y.\n x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n y = tf.constant([[10.0], [20.0]])\n grads_and_vars = grad_fn(x, y)\n\n # Apply the gradients to Variables.\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)\n optimizer.apply_gradients(grads_and_vars)\n ```\n\n Args:\n f: function to be differentiated. If `f` returns a scalar, this scalar will\n be differentiated. If `f` returns a tensor or list of tensors, by default\n a scalar will be computed by adding all their values to produce a single\n scalar.\n\n Returns:\n A function which, when called, returns a list of (gradient, variable) pairs.\n \"\"\"\n # TODO(cais): Remove calls to tf.constant() once the gradients functions\n # accept lists and np.ndarrays.\n\n def grad_fn(*args, **kwds):\n \"\"\"Computes the gradient of the wrapped function.\"\"\"\n return implicit_val_and_grad(f)(*args, **kwds)[1]\n\n return grad_fn\n\n\ndef _get_arg_spec(f, params, param_args):\n \"\"\"The positions of the parameters of f to be differentiated in param_args.\"\"\"\n try:\n args = tf_inspect.getfullargspec(f).args\n except TypeError as e:\n # TypeError can happen when f is a callable object.\n if params is None:\n return range(len(param_args))\n elif all(isinstance(x, int) for x in params):\n return params\n raise ValueError(\"Either callable provided is not a function or could not \"\n \"inspect its arguments by name: %s. 
Original error: %s\"\n % (f, e))\n if params is None:\n if not args:\n return range(len(param_args))\n if args[0] == \"self\":\n return range(len(args) - 1)\n else:\n return range(len(args))\n elif all(isinstance(x, six.string_types) for x in params):\n return [args.index(n) for n in params]\n elif all(isinstance(x, int) for x in params):\n return params\n else:\n raise ValueError(\n \"params must be all strings or all integers; got %s.\" % params)\n\n\ndef gradients_function(f, params=None):\n \"\"\"Returns a function which differentiates f with respect to params.\n\n Example:\n ```python\n # f(x, y) = (x ^ 3) * y - x * (y ^ 2)\n # Therefore, the 1st order derivatives are:\n # df / dx = 3 * (x ^ 2) * y - y ^ 2\n # df / dy = x ^ 3 - 2 * x * y\n # The 2nd order derivatives with respect to x is:\n # d^2 f / (dx)^2 = 6 * x * y\n def f(x, y):\n return x * x * x * y - x * y * y\n\n # Obtain a function that returns 1st order gradients.\n grad_fn = tfe.gradients_function(f)\n\n x = 2.0\n y = 3.0\n\n # Invoke the 1st order gradient function.\n x_grad, y_grad = grad_fn(x, y)\n assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2\n assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3\n\n # Obtain a function that returns the 2nd order gradient with respect to x.\n gradgrad_fn = tfe.gradients_function(lambda x, y: grad_fn(x, y)[0])\n\n # Invoke the 2nd order gradient function.\n x_gradgrad = gradgrad_fn(x, y)[0]\n assert x_gradgrad.numpy() == 6 * 2 * 3\n\n # To obtain a callable that returns the gradient(s) of `f` with respect to a\n # subset of its inputs, use the `params` keyword argument with\n # `gradients_function()`.\n ygrad_fn = tfe.gradients_function(f, params=[1])\n\n (y_grad,) = ygrad_fn(x, y)\n assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3\n ```\n\n Note that only tensors with real or complex dtypes are differentiable.\n\n Args:\n f: function to be differentiated. If `f` returns a scalar, this scalar will\n be differentiated. If `f` returns a tensor or list of tensors, by default\n a scalar will be computed by adding all their values to produce a single\n scalar. If desired, the tensors can be elementwise multiplied by the\n tensors passed as the `dy` keyword argument to the returned gradient\n function.\n params: list of parameter names of f or list of integers indexing the\n parameters with respect to which we'll differentiate. Passing None\n differentiates with respect to all parameters.\n\n Returns:\n function which, when called, returns the value of f and the gradient\n of `f` with respect to all of `params`. The function takes an extra optional\n keyword argument `dy`. 
Setting it allows computation of vector jacobian\n products for vectors other than the vector of ones.\n\n Raises:\n ValueError: if the params are not all strings or all integers.\n \"\"\"\n\n def decorated(*args, **kwds):\n \"\"\"Computes the gradient of the decorated function.\"\"\"\n\n _, grad = val_and_grad_function(f, params=params)(*args, **kwds)\n return grad\n\n return decorated\n\n\ndef _ensure_unique_tensor_objects(parameter_positions, args):\n \"\"\"Make each of the parameter_positions in args a unique ops.Tensor object.\n\n Ensure that each parameter is treated independently.\n For example:\n\n def f(x, y): return x * y\n g = gradients_function(f)\n one = tf.constant(1.)\n\n g(one, one) should return [1., 1.]\n (even though the two arguments are the same Tensor object).\n\n Args:\n parameter_positions: List of indices into args defining the arguments to\n differentiate against.\n args: A list of arguments to the function to be differentiated.\n\n Returns:\n args, possibly edited in-place.\n \"\"\"\n s = set()\n for (i, t) in enumerate(args):\n if i in parameter_positions:\n tid = ops.tensor_id(t)\n if tid in s:\n args[i] = gen_array_ops.identity(args[i])\n else:\n s.add(tid)\n return args\n\n\ndef val_and_grad_function(f, params=None):\n \"\"\"Returns a function that computes f and its derivative w.r.t. params.\n\n Example:\n ```python\n # f(x, y) = (x ^ 3) * y - x * (y ^ 2)\n # Therefore, the 1st order derivatives are:\n # df / dx = 3 * (x ^ 2) * y - y ^ 2\n # df / dy = x ^ 3 - 2 * x * y\n def f(x, y):\n return x * x * x * y - x * y * y\n\n # Obtain a function that returns the function value and the 1st order\n # gradients.\n val_grads_fn = tfe.value_and_gradients_function(f)\n\n x = 2.0\n y = 3.0\n\n # Invoke the value-and-gradients function.\n f_val, (x_grad, y_grad) = val_grads_fn(x, y)\n assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)\n assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2\n assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3\n\n # To obtain a callable that returns the value of `f` and the gradient(s) of\n # `f` with respect to a subset of its inputs, use the `params` keyword\n # argument with `value_and_gradients_function()`.\n val_ygrad_fn = tfe.value_and_gradients_function(f, params=[1])\n\n f_val, (y_grad,) = val_ygrad_fn(x, y)\n assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)\n assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3\n ```\n\n Args:\n f: function to be differentiated. If `f` returns a scalar, this scalar will\n be differentiated. If `f` returns a tensor or list of tensors, by default\n a scalar will be computed by adding all their values to produce a single\n scalar. If desired, the tensors can be elementwise multiplied by the\n tensors passed as the `dy` keyword argument to the returned gradient\n function.\n params: list of parameter names of f or list of integers indexing the\n parameters with respect to which we'll differentiate. Passing `None`\n differentiates with respect to all parameters.\n\n Returns:\n function which, when called, returns the value of f and the gradient\n of f with respect to all of `params`. The function takes an extra optional\n keyword argument \"dy\". 
Setting it allows computation of vector jacobian\n products for vectors other than the vector of ones.\n\n Raises:\n ValueError: if the params are not all strings or all integers.\n \"\"\"\n\n def decorated(*args, **kwds):\n \"\"\"Computes the value and gradient of the decorated function.\"\"\"\n dy = kwds.pop(\"dy\", None)\n if kwds:\n raise ValueError(\"Functions to be differentiated cannot \"\n \"receive keyword arguments.\")\n val, vjp = make_vjp(f, params)(*args, **kwds)\n return val, vjp(dy=dy)\n\n return decorated\n\n\ndef make_vjp(f, params=None, persistent=True):\n \"\"\"Returns a function that computes f and its vjp w.r.t.\n\n params.\n\n The term \"vjp\" here is an abbreviation for vector-jacobian product.\n\n Args:\n f: the function to be differentiated.\n params: the parameters (numbers or names) to differentiate with respect to.\n A value of None will differentiate with respect to all parameters.\n persistent: Boolean controlling whether the VJP function can be re-used.\n Must be True or False.\n\n Returns:\n A function, which when called, returns a tuple (value, vjp), where:\n - value is the result of calling f.\n - vjp is a function, which takes a vector as an argument and\n returns the product of that vector with the Jacobian of f.\n Providing no argument to vjp is equivalent to providing a\n vector of ones.\n\n For example,\n ```python\n def f(x):\n return x * x\n\n wrapped_fn = tfe.make_vjp(f)\n result, vjp = wrapped_fn(tf.constant(3.0))\n # result is 9.0\n vjp() # the vjp function returns 6.0\n\n Raises:\n ValueError: if `f` returns None.\n \"\"\"\n\n def decorated(*args, **kwds):\n \"\"\"Computes the value and gradient of the decorated function.\"\"\"\n parameter_positions = _get_arg_spec(f, params, args)\n assert not kwds, \"The gradient function can't take keyword arguments.\"\n this_tape = tape.push_new_tape(persistent=persistent)\n try:\n sources = []\n args = [\n ops.convert_to_tensor(arg) if i in parameter_positions else arg\n for i, arg in enumerate(args)\n ]\n args = _ensure_unique_tensor_objects(parameter_positions, args)\n for i in parameter_positions:\n if getattr(args[i], \"is_packed\", False):\n raise ValueError(\n \"GradientTape.gradient is not supported on packed EagerTensors\"\n \"yet.\")\n sources.append(args[i])\n tape.watch(this_tape, args[i])\n result = f(*args)\n if result is None:\n raise ValueError(\"Cannot differentiate a function that returns None; \"\n \"did you forget to return a value from {}?\".format(\n f.__name__))\n flat_result = nest.flatten(result)\n flat_result = [gen_array_ops.identity(x) for x in flat_result]\n result = nest.pack_sequence_as(result, flat_result)\n finally:\n tape.pop_tape(this_tape)\n def vjp(dy=None):\n if dy is not None:\n dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]\n return imperative_grad.imperative_grad(\n this_tape, nest.flatten(result), sources, output_gradients=dy)\n\n return result, vjp\n\n return decorated\n\n\ndef flatten_nested_indexed_slices(grad):\n assert isinstance(grad, indexed_slices.IndexedSlices)\n if isinstance(grad.values, ops.Tensor):\n return grad\n else:\n assert isinstance(grad.values, indexed_slices.IndexedSlices)\n g = flatten_nested_indexed_slices(grad.values)\n return indexed_slices.IndexedSlices(\n g.values, array_ops.gather(grad.indices, g.indices), g.dense_shape)\n\n\ndef aggregate_indexed_slices_gradients(grads):\n \"\"\"Aggregates gradients containing `IndexedSlices`s.\"\"\"\n if len(grads) < 1:\n return None\n if len(grads) == 1:\n return grads[0]\n grads = [g for g 
in grads if g is not None]\n # If any gradient is a `Tensor`, sum them up and return a dense tensor\n # object.\n if any(isinstance(g, ops.Tensor) for g in grads):\n return math_ops.add_n(grads)\n\n # The following `_as_indexed_slices_list` casts ids of IndexedSlices into\n # int64. It is to make sure the inputs of `concat` all have same the data\n # type.\n grads = math_ops._as_indexed_slices_list(grads) # pylint: disable=protected-access\n\n grads = [flatten_nested_indexed_slices(x) for x in grads]\n # Form IndexedSlices out of the concatenated values and indices.\n concat_grad = indexed_slices.IndexedSlices(\n array_ops.concat([x.values for x in grads], axis=0),\n array_ops.concat([x.indices for x in grads], axis=0),\n grads[0].dense_shape)\n\n return concat_grad\n\n\ndef _aggregate_grads(gradients):\n \"\"\"Aggregate gradients from multiple sources.\n\n Args:\n gradients: A list of 'Tensor' or 'IndexedSlices' gradients.\n\n Returns:\n If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'.\n Otherwise returns an aggregated 'IndexedSlices'.\n \"\"\"\n assert gradients, \"No gradients to aggregate\"\n\n if len(gradients) == 1:\n return gradients[0]\n if all(isinstance(g, ops.Tensor) for g in gradients):\n return gen_math_ops.add_n(gradients)\n else:\n assert all(\n isinstance(g, (ops.Tensor, indexed_slices.IndexedSlices))\n for g in gradients)\n return aggregate_indexed_slices_gradients(gradients)\n\n\ndef _num_elements(grad):\n \"\"\"The number of elements in the `grad` tensor.\"\"\"\n if isinstance(grad, ops.Tensor):\n shape_tuple = grad._shape_tuple() # pylint: disable=protected-access\n elif isinstance(grad, indexed_slices.IndexedSlices):\n shape_tuple = grad.values._shape_tuple() # pylint: disable=protected-access\n else:\n raise ValueError(\"`grad` not a Tensor or IndexedSlices.\")\n if shape_tuple is None or None in shape_tuple:\n return 0\n return functools.reduce(operator.mul, shape_tuple, 1)\n\n\ndef _fast_fill(value, shape, dtype):\n return array_ops.fill(\n constant_op.constant(shape, dtype=dtypes.int32),\n constant_op.constant(value, dtype=dtype))\n\n\ndef _zeros(shape, dtype):\n \"\"\"Helper to return (possibly cached) zero tensors in eager mode.\"\"\"\n # Note: variants will use _zeros_like\n if dtype == dtypes.string or dtype == dtypes.resource:\n return None\n\n ctx = context.context()\n if not ctx.executing_eagerly():\n return array_ops.zeros(shape, dtype)\n\n device = ctx.device_name\n\n if tensor_util.is_tf_type(shape):\n shape_key = shape.ref()\n else:\n shape_key = shape\n cache_key = shape_key, dtype, device\n cached = ctx.zeros_cache().get(cache_key)\n if cached is None:\n if dtypes.as_dtype(dtype).is_bool:\n value = False\n else:\n value = 0\n cached = _fast_fill(value, shape, dtype)\n ctx.zeros_cache().put(cache_key, cached)\n return cached\n\n\ndef _ones(shape, dtype):\n as_dtype = dtypes.as_dtype(dtype)\n if as_dtype == dtypes.string:\n return None\n\n if not context.executing_eagerly():\n return array_ops.ones(shape, dtype)\n\n if as_dtype.is_bool:\n value = True\n else:\n value = 1\n\n if shape == (): # pylint: disable=g-explicit-bool-comparison\n return constant_op.constant(value, dtype=dtype)\n return _fast_fill(value, shape, dtype)\n\n\n_default_vspace = imperative_grad.VSpace(\n num_elements_fn=_num_elements,\n aggregate_fn=_aggregate_grads,\n zeros_fn=_zeros,\n ones_fn=_ones,\n zeros_like_fn=default_gradient.zeros_like,\n ones_like_fn=default_gradient.ones_like,\n 
graph_shape_fn=gen_array_ops.shape)\npywrap_tfe.TFE_Py_RegisterVSpace(_default_vspace)\n\n\ndef _handle_or_self(x):\n \"\"\"Unwrap resource variable/ndarray to return tensors.\"\"\"\n if resource_variable_ops.is_resource_variable(x):\n return x.handle\n return x\n\n\n@tf_export(\"GradientTape\", \"autodiff.GradientTape\", v1=[\"GradientTape\"])\nclass GradientTape(object):\n \"\"\"Record operations for automatic differentiation.\n\n Operations are recorded if they are executed within this context manager and\n at least one of their inputs is being \"watched\".\n\n Trainable variables (created by `tf.Variable` or `tf.compat.v1.get_variable`,\n where `trainable=True` is default in both cases) are automatically watched.\n Tensors can be manually watched by invoking the `watch` method on this context\n manager.\n\n For example, consider the function `y = x * x`. The gradient at `x = 3.0` can\n be computed as:\n\n >>> x = tf.constant(3.0)\n >>> with tf.GradientTape() as g:\n ... g.watch(x)\n ... y = x * x\n >>> dy_dx = g.gradient(y, x)\n >>> print(dy_dx)\n tf.Tensor(6.0, shape=(), dtype=float32)\n\n GradientTapes can be nested to compute higher-order derivatives. For example,\n\n >>> x = tf.constant(5.0)\n >>> with tf.GradientTape() as g:\n ... g.watch(x)\n ... with tf.GradientTape() as gg:\n ... gg.watch(x)\n ... y = x * x\n ... dy_dx = gg.gradient(y, x) # dy_dx = 2 * x\n >>> d2y_dx2 = g.gradient(dy_dx, x) # d2y_dx2 = 2\n >>> print(dy_dx)\n tf.Tensor(10.0, shape=(), dtype=float32)\n >>> print(d2y_dx2)\n tf.Tensor(2.0, shape=(), dtype=float32)\n\n By default, the resources held by a GradientTape are released as soon as\n GradientTape.gradient() method is called. To compute multiple gradients over\n the same computation, create a persistent gradient tape. This allows multiple\n calls to the gradient() method as resources are released when the tape object\n is garbage collected. For example:\n\n >>> x = tf.constant(3.0)\n >>> with tf.GradientTape(persistent=True) as g:\n ... g.watch(x)\n ... y = x * x\n ... z = y * y\n >>> dz_dx = g.gradient(z, x) # (4*x^3 at x = 3)\n >>> print(dz_dx)\n tf.Tensor(108.0, shape=(), dtype=float32)\n >>> dy_dx = g.gradient(y, x)\n >>> print(dy_dx)\n tf.Tensor(6.0, shape=(), dtype=float32)\n\n By default GradientTape will automatically watch any trainable variables that\n are accessed inside the context. If you want fine grained control over which\n variables are watched you can disable automatic tracking by passing\n `watch_accessed_variables=False` to the tape constructor:\n\n >>> x = tf.Variable(2.0)\n >>> w = tf.Variable(5.0)\n >>> with tf.GradientTape(\n ... watch_accessed_variables=False, persistent=True) as tape:\n ... tape.watch(x)\n ... y = x ** 2 # Gradients will be available for `x`.\n ... z = w ** 3 # No gradients will be available as `w` isn't being watched.\n >>> dy_dx = tape.gradient(y, x)\n >>> print(dy_dx)\n tf.Tensor(4.0, shape=(), dtype=float32)\n >>> # No gradients will be available as `w` isn't being watched.\n >>> dz_dw = tape.gradient(z, w)\n >>> print(dz_dw)\n None\n\n Note that when using models you should ensure that your variables exist when\n using `watch_accessed_variables=False`. 
Otherwise it's quite easy to make your\n first iteration not have any gradients:\n\n ```python\n a = tf.keras.layers.Dense(32)\n b = tf.keras.layers.Dense(32)\n\n with tf.GradientTape(watch_accessed_variables=False) as tape:\n tape.watch(a.variables) # Since `a.build` has not been called at this point\n # `a.variables` will return an empty list and the\n # tape will not be watching anything.\n result = b(a(inputs))\n tape.gradient(result, a.variables) # The result of this computation will be\n # a list of `None`s since a's variables\n # are not being watched.\n ```\n\n Note that only tensors with real or complex dtypes are differentiable.\n \"\"\"\n\n def __init__(self, persistent=False, watch_accessed_variables=True):\n \"\"\"Creates a new GradientTape.\n\n Args:\n persistent: Boolean controlling whether a persistent gradient tape\n is created. False by default, which means at most one call can\n be made to the gradient() method on this object.\n watch_accessed_variables: Boolean controlling whether the tape will\n automatically `watch` any (trainable) variables accessed while the tape\n is active. Defaults to True meaning gradients can be requested from any\n result computed in the tape derived from reading a trainable `Variable`.\n If False users must explicitly `watch` any `Variable`s they want to\n request gradients from.\n \"\"\"\n self._tape = None\n self._persistent = persistent\n self._watch_accessed_variables = watch_accessed_variables\n self._watched_variables = ()\n self._recording = False\n\n def __enter__(self):\n \"\"\"Enters a context inside which operations are recorded on this tape.\"\"\"\n self._push_tape()\n return self\n\n def __exit__(self, typ, value, traceback):\n \"\"\"Exits the recording context, no further operations are traced.\"\"\"\n if self._recording:\n self._pop_tape()\n\n def _push_tape(self):\n \"\"\"Pushes a new tape onto the tape stack.\"\"\"\n if self._recording:\n raise ValueError(\"Tape is still recording, This can happen if you try to \"\n \"re-enter an already-active tape.\")\n if self._tape is None:\n self._tape = tape.push_new_tape(\n persistent=self._persistent,\n watch_accessed_variables=self._watch_accessed_variables)\n else:\n tape.push_tape(self._tape)\n self._recording = True\n\n def _pop_tape(self):\n if not self._recording:\n raise ValueError(\"Tape is not recording.\")\n tape.pop_tape(self._tape)\n self._recording = False\n\n @tf_contextlib.contextmanager\n def _ensure_recording(self):\n \"\"\"Ensures that this tape is recording.\"\"\"\n if not self._recording:\n try:\n self._push_tape()\n yield\n finally:\n self._pop_tape()\n else:\n yield\n\n def watch(self, tensor):\n \"\"\"Ensures that `tensor` is being traced by this tape.\n\n Args:\n tensor: a Tensor or list of Tensors.\n\n Raises:\n ValueError: if it encounters something that is not a tensor.\n \"\"\"\n for t in nest.flatten(tensor, expand_composites=True):\n if not (_pywrap_utils.IsTensor(t) or _pywrap_utils.IsVariable(t)):\n raise ValueError(\"Passed in object of type {}, not tf.Tensor\".format(\n type(t)))\n if not backprop_util.IsTrainable(t):\n logging.log_first_n(\n logging.WARN, \"The dtype of the watched tensor must be \"\n \"floating (e.g. tf.float32), got %r\", 5, t.dtype)\n if hasattr(t, \"handle\"):\n # There are many variable-like objects, all of them currently have\n # `handle` attribute that points to a tensor. 
If this changes, internals\n # of watch_variable need to change as well.\n tape.watch_variable(self._tape, t)\n else:\n tape.watch(self._tape, t)\n\n @tf_contextlib.contextmanager\n def stop_recording(self):\n \"\"\"Temporarily stops recording operations on this tape.\n\n Operations executed while this context manager is active will not be\n recorded on the tape. This is useful for reducing the memory used by tracing\n all computations.\n\n For example:\n\n >>> x = tf.constant(4.0)\n >>> with tf.GradientTape() as tape:\n ... with tape.stop_recording():\n ... y = x ** 2\n >>> dy_dx = tape.gradient(y, x)\n >>> print(dy_dx)\n None\n\n Yields:\n None\n Raises:\n RuntimeError: if the tape is not currently recording.\n \"\"\"\n if self._tape is None:\n raise RuntimeError(\n \"Trying to stop recording a tape which is not recording.\")\n self._pop_tape()\n try:\n yield\n finally:\n self._push_tape()\n\n def reset(self):\n \"\"\"Clears all information stored in this tape.\n\n Equivalent to exiting and reentering the tape context manager with a new\n tape. For example, the two following code blocks are equivalent:\n\n ```\n with tf.GradientTape() as t:\n loss = loss_fn()\n with tf.GradientTape() as t:\n loss += other_loss_fn()\n t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn\n\n\n # The following is equivalent to the above\n with tf.GradientTape() as t:\n loss = loss_fn()\n t.reset()\n loss += other_loss_fn()\n t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn\n ```\n\n This is useful if you don't want to exit the context manager for the tape,\n or can't because the desired reset point is inside a control flow construct:\n\n ```\n with tf.GradientTape() as t:\n loss = ...\n if loss > k:\n t.reset()\n ```\n \"\"\"\n self._pop_tape()\n self._tape = None\n self._push_tape()\n\n def watched_variables(self):\n \"\"\"Returns variables watched by this tape in order of construction.\"\"\"\n if self._tape is not None:\n self._watched_variables = self._tape.watched_variables()\n return self._watched_variables\n\n def gradient(self,\n target,\n sources,\n output_gradients=None,\n unconnected_gradients=UnconnectedGradients.NONE):\n \"\"\"Computes the gradient using operations recorded in context of this tape.\n\n Note: Unless you set `persistent=True` a GradientTape can only be used to\n compute one set of gradients (or jacobians).\n\n In addition to Tensors, gradient also supports RaggedTensors. For example,\n\n >>> x = tf.ragged.constant([[1.0, 2.0], [3.0]])\n >>> with tf.GradientTape() as g:\n ... g.watch(x)\n ... y = x * x\n >>> g.gradient(y, x)\n <tf.RaggedTensor [[2.0, 4.0], [6.0]]>\n\n Args:\n target: a list or nested structure of Tensors or Variables or\n CompositeTensors to be differentiated.\n sources: a list or nested structure of Tensors or Variables or\n CompositeTensors. `target` will be differentiated against elements in\n `sources`.\n output_gradients: a list of gradients, one for each differentiable\n element of target. Defaults to None.\n unconnected_gradients: a value which can either hold 'none' or 'zero' and\n alters the value which will be returned if the target and sources are\n unconnected. The possible values and effects are detailed in\n 'UnconnectedGradients' and it defaults to 'none'.\n\n Returns:\n a list or nested structure of Tensors (or IndexedSlices, or None, or\n CompositeTensor), one for each element in `sources`. 
Returned structure\n is the same as the structure of `sources`.\n\n Raises:\n RuntimeError: If called on a used, non-persistent tape.\n RuntimeError: If called inside the context of the tape.\n TypeError: If the target is a None object.\n ValueError: If the target is a variable or if unconnected gradients is\n called with an unknown value.\n \"\"\"\n if self._tape is None:\n raise RuntimeError(\"A non-persistent GradientTape can only be used to \"\n \"compute one set of gradients (or jacobians)\")\n if self._recording:\n if not self._persistent:\n self._pop_tape()\n else:\n logging.log_first_n(\n logging.WARN, \"Calling GradientTape.gradient on a persistent \"\n \"tape inside its context is significantly less \"\n \"efficient than calling it outside the context (it \"\n \"causes the gradient ops to be recorded on the \"\n \"tape, leading to increased CPU and memory usage). \"\n \"Only call GradientTape.gradient inside the \"\n \"context if you actually want to trace the \"\n \"gradient in order to compute higher order \"\n \"derivatives.\", 1)\n\n if target is None:\n raise TypeError(\"Argument `target` should be a list or nested structure\"\n \" of Tensors, Variables or CompositeTensors to be \"\n \"differentiated, but received None.\")\n\n flat_targets = []\n for t in nest.flatten(target):\n if not backprop_util.IsTrainable(t):\n logging.vlog(\n logging.WARN, \"The dtype of the target tensor must be \"\n \"floating (e.g. tf.float32) when calling GradientTape.gradient, \"\n \"got %r\", t.dtype)\n if resource_variable_ops.is_resource_variable(t):\n with self:\n t = ops.convert_to_tensor(t)\n flat_targets.append(t)\n flat_targets = composite_tensor_gradient.get_flat_tensors_for_gradients(\n flat_targets)\n\n flat_sources = nest.flatten(sources)\n for t in flat_sources:\n if not backprop_util.IsTrainable(t):\n logging.vlog(\n logging.WARN, \"The dtype of the source tensor must be \"\n \"floating (e.g. 
tf.float32) when calling GradientTape.gradient, \"\n \"got %r\", t.dtype)\n if getattr(t, \"is_packed\", False):\n raise ValueError(\n \"GradientTape.gradient is not supported on packed EagerTensors yet.\"\n )\n flat_sources_raw = flat_sources\n flat_sources = composite_tensor_gradient.get_flat_tensors_for_gradients(\n flat_sources)\n flat_sources = [_handle_or_self(x) for x in flat_sources]\n\n if output_gradients is not None:\n output_gradients = nest.flatten(output_gradients)\n output_gradients = (\n composite_tensor_gradient.get_flat_tensors_for_gradients(\n output_gradients))\n output_gradients = [None if x is None else ops.convert_to_tensor(x)\n for x in output_gradients]\n\n flat_grad = imperative_grad.imperative_grad(\n self._tape,\n flat_targets,\n flat_sources,\n output_gradients=output_gradients,\n sources_raw=flat_sources_raw,\n unconnected_gradients=unconnected_gradients)\n\n if not self._persistent:\n # Keep track of watched variables before setting tape to None\n self._watched_variables = self._tape.watched_variables()\n self._tape = None\n\n flat_grad = composite_tensor_gradient.replace_flat_tensors_for_gradients(\n flat_sources_raw, flat_grad)\n grad = nest.pack_sequence_as(sources, flat_grad)\n return grad\n\n def jacobian(self,\n target,\n sources,\n unconnected_gradients=UnconnectedGradients.NONE,\n parallel_iterations=None,\n experimental_use_pfor=True):\n \"\"\"Computes the jacobian using operations recorded in context of this tape.\n\n Note: Unless you set `persistent=True` a GradientTape can only be used to\n compute one set of gradients (or jacobians).\n\n Note: By default the jacobian implementation uses parallel for (pfor), which\n creates a tf.function under the hood for each jacobian call. For better\n performance, and to avoid recompilation and vectorization rewrites on each\n call, enclose GradientTape code in @tf.function.\n\n See [wikipedia\n article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant)\n for the definition of a Jacobian.\n\n Example usage:\n\n ```python\n with tf.GradientTape() as g:\n x = tf.constant([1.0, 2.0])\n g.watch(x)\n y = x * x\n jacobian = g.jacobian(y, x)\n # jacobian value is [[2., 0.], [0., 4.]]\n ```\n\n Args:\n target: Tensor to be differentiated.\n sources: a list or nested structure of Tensors or Variables. `target`\n will be differentiated against elements in `sources`.\n unconnected_gradients: a value which can either hold 'none' or 'zero' and\n alters the value which will be returned if the target and sources are\n unconnected. The possible values and effects are detailed in\n 'UnconnectedGradients' and it defaults to 'none'.\n parallel_iterations: A knob to control how many iterations are dispatched\n in parallel. This knob can be used to control the total memory usage.\n experimental_use_pfor: If true, vectorizes the jacobian computation. Else\n falls back to a sequential while_loop. Vectorization can sometimes fail\n or lead to excessive memory usage. This option can be used to disable\n vectorization in such cases.\n\n Returns:\n A list or nested structure of Tensors (or None), one for each element in\n `sources`. Returned structure is the same as the structure of `sources`.\n Note that if any gradient is sparse (IndexedSlices), the jacobian function\n currently makes it dense and returns a Tensor instead.
This may change in\n the future.\n\n\n Raises:\n RuntimeError: If called on a used, non-persistent tape.\n RuntimeError: If called on a non-persistent tape with eager execution\n enabled and without enabling experimental_use_pfor.\n ValueError: If vectorization of jacobian computation fails.\n \"\"\"\n if self._tape is None:\n raise RuntimeError(\"A non-persistent GradientTape can only be used to \"\n \"compute one set of gradients (or jacobians)\")\n\n flat_sources = nest.flatten(sources)\n target_static_shape = target.shape\n target_shape = array_ops.shape(target)\n # Note that we push and pop the tape here and below. This is needed since we\n # need gradients through the enclosed operations.\n with self._ensure_recording():\n target = array_ops.reshape(target, [-1])\n\n def loop_fn(i):\n with self._ensure_recording():\n y = array_ops.gather(target, i)\n return self.gradient(y, flat_sources,\n unconnected_gradients=unconnected_gradients)\n\n try:\n target_size = int(target.shape[0])\n except TypeError:\n target_size = array_ops.shape(target)[0]\n\n if experimental_use_pfor:\n try:\n output = pfor_ops.pfor(loop_fn, target_size,\n parallel_iterations=parallel_iterations)\n except ValueError as err:\n six.reraise(\n ValueError,\n ValueError(\n str(err) + \"\\nEncountered an exception while vectorizing the \"\n \"jacobian computation. Vectorization can be disabled by setting\"\n \" experimental_use_pfor to False.\"),\n sys.exc_info()[2])\n else:\n if context.executing_eagerly() and not self._persistent:\n raise RuntimeError(\n \"GradientTape must be created with persistent=True\"\n \" to compute the jacobian with eager execution enabled and with \"\n \" experimental_use_pfor set to False.\")\n output = pfor_ops.for_loop(\n loop_fn, [target.dtype] * len(flat_sources), target_size,\n parallel_iterations=parallel_iterations)\n\n for i, out in enumerate(output):\n if out is not None:\n new_shape = array_ops.concat(\n [target_shape, array_ops.shape(out)[1:]], axis=0)\n out = array_ops.reshape(out, new_shape)\n if context.executing_eagerly():\n out.set_shape(target_static_shape.concatenate(flat_sources[i].shape))\n output[i] = out\n\n return nest.pack_sequence_as(sources, output)\n\n def batch_jacobian(self,\n target,\n source,\n unconnected_gradients=UnconnectedGradients.NONE,\n parallel_iterations=None,\n experimental_use_pfor=True):\n \"\"\"Computes and stacks per-example jacobians.\n\n See [wikipedia article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant)\n for the definition of a Jacobian. This function is essentially an efficient\n implementation of the following:\n\n `tf.stack([self.jacobian(y[i], x[i]) for i in range(x.shape[0])])`.\n\n Note that compared to `GradientTape.jacobian` which computes gradient of\n each output value w.r.t each input value, this function is useful when\n `target[i,...]` is independent of `source[j,...]` for `j != i`. This\n assumption allows more efficient computation as compared to\n `GradientTape.jacobian`. 
The output, as well as intermediate activations,\n are lower dimensional and avoid a bunch of redundant zeros which would\n result in the jacobian computation given the independence assumption.\n\n Note: Unless you set `persistent=True` a GradientTape can only be used to\n compute one set of gradients (or jacobians).\n\n Note: By default the batch_jacobian implementation uses parallel for (pfor),\n which creates a tf.function under the hood for each batch_jacobian call.\n For better performance, and to avoid recompilation and vectorization\n rewrites on each call, enclose GradientTape code in @tf.function.\n\n\n Example usage:\n\n ```python\n with tf.GradientTape() as g:\n x = tf.constant([[1., 2.], [3., 4.]], dtype=tf.float32)\n g.watch(x)\n y = x * x\n batch_jacobian = g.batch_jacobian(y, x)\n # batch_jacobian is [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]\n ```\n\n Args:\n target: A tensor with rank 2 or higher and with shape [b, y_1, ..., y_n].\n `target[i,...]` should only depend on `source[i,...]`.\n source: A tensor with rank 2 or higher and with shape [b, x_1, ..., x_m].\n unconnected_gradients: a value which can either hold 'none' or 'zero' and\n alters the value which will be returned if the target and sources are\n unconnected. The possible values and effects are detailed in\n 'UnconnectedGradients' and it defaults to 'none'.\n parallel_iterations: A knob to control how many iterations are dispatched\n in parallel. This knob can be used to control the total memory usage.\n experimental_use_pfor: If true, uses pfor for computing the Jacobian. Else\n uses a tf.while_loop.\n\n Returns:\n A tensor `t` with shape [b, y_1, ..., y_n, x_1, ..., x_m] where `t[i, ...]`\n is the jacobian of `target[i, ...]` w.r.t. `source[i, ...]`, i.e. stacked\n per-example jacobians.\n\n Raises:\n RuntimeError: If called on a used, non-persistent tape.\n RuntimeError: If called on a non-persistent tape with eager execution\n enabled and without enabling experimental_use_pfor.\n ValueError: If vectorization of jacobian computation fails or if the first\n dimensions of `target` and `source` do not match.\n \"\"\"\n if self._tape is None:\n raise RuntimeError(\"A non-persistent GradientTape can only be used to \"\n \"compute one set of gradients (or jacobians)\")\n target_shape = target.shape\n if target_shape.rank is None:\n dim = tensor_shape.Dimension(None)\n else:\n dim = target_shape.dims[0]\n if not (target_shape.with_rank_at_least(2) and\n source.shape.with_rank_at_least(2) and\n dim.is_compatible_with(source.shape[0])):\n raise ValueError(\n \"Need first dimension of target shape (%s) and \"\n \"source shape (%s) to match.\" % (target.shape, source.shape))\n if target_shape.is_fully_defined():\n batch_size = int(target_shape[0])\n target_row_size = target_shape.num_elements() // batch_size\n else:\n target_shape = array_ops.shape(target)\n batch_size = target_shape[0]\n target_row_size = array_ops.size(target) // batch_size\n source_shape = array_ops.shape(source)\n # Flatten target to 2-D.\n # Note that we push and pop the tape here and below.
This is needed since we\n # need gradients through the enclosed operations.\n with self._ensure_recording():\n with ops.control_dependencies(\n [check_ops.assert_equal(batch_size, source_shape[0])]):\n target = array_ops.reshape(target, [batch_size, target_row_size])\n\n run_once = False\n\n def loop_fn(i):\n nonlocal run_once\n if run_once and not self._persistent:\n if parallel_iterations is not None:\n raise RuntimeError(\n \"GradientTape must be created with persistent=True\"\n \" to compute the batch_jacobian with parallel_iterations.\")\n else:\n raise RuntimeError(\n \"GradientTape must be created with persistent=True\"\n \" to compute the batch_jacobian.\")\n run_once = True\n\n with self._ensure_recording():\n y = array_ops.gather(target, i, axis=1)\n return self.gradient(y, source,\n unconnected_gradients=unconnected_gradients)\n\n if experimental_use_pfor:\n try:\n output = pfor_ops.pfor(loop_fn, target_row_size,\n parallel_iterations=parallel_iterations)\n except ValueError as err:\n six.reraise(\n ValueError,\n ValueError(\n str(err) + \"\\nEncountered an exception while vectorizing the \"\n \"batch_jacobian computation. Vectorization can be disabled by \"\n \"setting experimental_use_pfor to False.\"),\n sys.exc_info()[2])\n else:\n if context.executing_eagerly() and not self._persistent:\n raise RuntimeError(\n \"GradientTape must be created with persistent=True\"\n \" to compute the batch_jacobian with eager execution enabled and \"\n \" with experimental_use_pfor set to False.\")\n output = pfor_ops.for_loop(loop_fn, target.dtype, target_row_size,\n parallel_iterations=parallel_iterations)\n new_shape = array_ops.concat([target_shape, source_shape[1:]], axis=0)\n if output is None:\n # Note that this block is returning zeros when it could use `None` to\n # represent unconnected gradients. This is to maintain compatibility with\n # the previous behavior, which ignored `unconnected_gradients`.\n output = array_ops.zeros(new_shape, target.dtype)\n return output\n else:\n output = array_ops.reshape(output,\n [target_row_size, batch_size, -1])\n output = array_ops.transpose(output, [1, 0, 2])\n\n output = array_ops.reshape(output, new_shape)\n return output\n"
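Taken together, `jacobian` and `batch_jacobian` differ only in which cross-terms they materialize. Below is a minimal usage sketch of that difference; it relies only on the public API documented in the docstrings above, and the shapes in the comments follow directly from the stated contracts:

```python
import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
with tf.GradientTape(persistent=True) as g:
    g.watch(x)   # constants are not watched automatically
    y = x * x    # y[i, j] depends only on x[i, j]

# Full Jacobian d y[i, j] / d x[k, l]: shape [2, 2, 2, 2].
jac = g.jacobian(y, x)

# Per-example Jacobian d y[i, j] / d x[i, l]: shape [2, 2, 2].
# Valid here because y[i, ...] is independent of x[k, ...] for k != i.
bjac = g.batch_jacobian(y, x)

del g  # a persistent tape holds resources until it is released
```

Because the tape is persistent, both calls can be made on one recording; with the default `persistent=False`, the second call would raise the `RuntimeError` documented above.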
] | [
[
"tensorflow.python.framework.composite_tensor_gradient.replace_flat_tensors_for_gradients",
"tensorflow.python.eager.tape.push_new_tape",
"tensorflow.python.eager.imperative_grad.VSpace",
"tensorflow.python.eager.tape.pop_tape",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.eager.tape.watch",
"tensorflow.python.ops.math_ops._as_indexed_slices_list",
"tensorflow.python.platform.tf_logging.vlog",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.gen_array_ops.identity",
"tensorflow.python.platform.tf_logging.log_first_n",
"tensorflow.python.framework.tensor_shape.as_shape",
"tensorflow.python.framework.ops.get_name_scope",
"tensorflow.python.eager.tape.watch_variable",
"tensorflow.python.eager.backprop_util.IsTrainable",
"tensorflow.python.eager.tape.push_tape",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.eager.context.ensure_initialized",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.pywrap_tfe.TFE_Py_RegisterGradientFunction",
"tensorflow.python.util._pywrap_utils.IsTensor",
"tensorflow.python.framework.tensor_shape.Dimension",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.framework.composite_tensor_gradient.get_flat_tensors_for_gradients",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.pywrap_tfe.TFE_OpNameGetAttrType",
"tensorflow.python.eager.context.context",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.pywrap_tfe.TFE_Py_RegisterVSpace",
"tensorflow.python.ops.gen_math_ops.add_n",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.pywrap_tfe.TFE_Py_TapeSetIsEmpty",
"tensorflow.python.ops.resource_variable_ops.is_resource_variable",
"tensorflow.python.framework.tensor_util.is_tf_type",
"tensorflow.python.ops.check_ops.assert_equal",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.util._pywrap_utils.IsVariable",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.util.tf_inspect.getfullargspec",
"tensorflow.python.ops.math_ops.add_n",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.framework.ops._gradient_registry.lookup",
"tensorflow.python.eager.imperative_grad.imperative_grad",
"tensorflow.python.framework.ops.tensor_id",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions"
]
] |
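The API list above is dominated by the eager-tape plumbing that `gradient()` delegates to (`tape.push_new_tape`, `imperative_grad.imperative_grad`, and friends). As a small sketch of the public surface those internals back, here is the `stop_recording` behavior from the docstrings combined with a persistent tape; only standard `tf` APIs are used, and the printed values follow the doctest conventions shown earlier:

```python
import tensorflow as tf

x = tf.constant(3.0)
with tf.GradientTape(persistent=True) as t:
    t.watch(x)
    y = x * x            # recorded: dy/dx = 2 * x
    with t.stop_recording():
        z = y * y        # this squaring op is not recorded

print(t.gradient(y, x))  # tf.Tensor(6.0, shape=(), dtype=float32)
print(t.gradient(z, x))  # None: no recorded path from x to z
del t
```

With the default `unconnected_gradients='none'`, the second gradient is `None` because the op producing `z` never reached the tape.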
ammar1510/pytorch | [
"ec8d6777255821bed73b471eadddde068cd60c0b"
] | [
"test/test_fx.py"
] | [
"# Owner(s): [\"oncall: fx\"]\n\nimport builtins\nimport contextlib\nimport copy\nimport functools\nimport inspect\nimport math\nimport numbers\nimport operator\nimport os\nimport pickle\nimport sys\nimport torch\nimport traceback\nimport typing\nimport types\nimport warnings\nimport unittest\nfrom math import sqrt\nfrom torch.multiprocessing import Process\nfrom torch.testing import FileCheck\nfrom torch.testing._internal.common_methods_invocations import op_db\nfrom torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests\nimport torch.utils._pytree as pytree\nimport torch.fx._pytree as fx_pytree\nfrom torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen\nfrom torch.fx.node import Target, Argument\nfrom torch.fx.passes import shape_prop\nfrom torch.fx.immutable_collections import immutable_dict, immutable_list\nfrom torch.fx.experimental.rewriter import RewritingTracer\nfrom torch.fx.operator_schemas import get_signature_for_torch_op\nfrom copy import deepcopy\nfrom collections import namedtuple\n\nfrom torch.fx.proxy import TraceError\nfrom torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY\n\nfrom fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401\nfrom fx.test_dce_pass import TestDCE # noqa: F401\nfrom fx.test_fx_const_fold import TestConstFold # noqa: F401\nfrom fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401\n\nif sys.version_info >= (3, 7):\n from fx.test_gradual_type import AnnotationsTest # noqa: F401\nif sys.version_info >= (3, 7):\n from fx.test_gradual_type import TypeCheckerTest # noqa: F401\nfrom typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union\nfrom torch.testing._internal.common_utils import (\n IS_FBCODE,\n IS_MACOS,\n IS_WINDOWS,\n TEST_WITH_ROCM,\n find_library_location,\n run_tests,\n)\nfrom torch.testing._internal.jit_utils import JitTestCase\n\nfrom fx.named_tup import MyNamedTup\n\ntry:\n from torchvision import models as torchvision_models\n HAS_TORCHVISION = True\nexcept ImportError:\n HAS_TORCHVISION = False\nskipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, \"no torchvision\")\n\nclass SimpleTest(torch.nn.Module):\n def forward(self, x):\n return torch.relu(x + 3.0)\n\ndef a_non_torch_leaf(a, b):\n return a + b\n\n# Used for test_autowrap_function. Autowrapped functions need to be global\ndef fx_int(x: float) -> int:\n return int(x)\n\ndef fx_int_x2(x: float) -> int:\n return int(x) * 2\n\n# used in test_pytree. 
It's all the way out here because pickling a GraphModule\n# that uses Point errors out if Point is local to the function\nPoint = namedtuple('Point', ['x', 'y'])\n\n# Test wrap() passing both a function name as well as a function\n# directly\ndef a_lifted_leaf(a, b):\n return a[0] + a[1] + b\n\nwrap('a_lifted_leaf')\n# Test wrapping twice doesn't break anything\nwrap('a_lifted_leaf')\n\ndef a_lifted_leaf2(a, b):\n return a[0] + a[1] + b\n\nwrap(a_lifted_leaf2)\n\nwrap('len')\n\nwrap('getattr')\n\n@wrap\ndef wrapped_via_decorator(a):\n return a + 1\n\nwrap('wrapped_with_submodule')\n\ndef wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):\n return batchnorm1d(x)\n\n\nreal_wrapped_via_decorator = wrapped_via_decorator\nreal_a_lifed_leaf = a_lifted_leaf\nreal_a_lifed_leaf2 = a_lifted_leaf2\n_sqrt = sqrt\n\nwrap('wrapper_fn')\n\ndef wrapper_fn(x):\n return torch.foo(x)\n\nclass Pair(NamedTuple):\n x : torch.Tensor\n y : torch.Tensor\n\n# for testing pytrees\nclass Foo(object): # noqa: B209\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\nclass TestFX(JitTestCase):\n def setUp(self):\n # Checking for mutable operations while tracing is feature flagged\n # Enable it in testing but not by default\n self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations\n torch.fx.proxy.TracerBase.check_mutable_operations = True\n\n if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):\n lib_file_path = find_library_location('libtorchbind_test.so')\n torch.ops.load_library(str(lib_file_path))\n\n def tearDown(self):\n torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag\n\n def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):\n \"\"\"Check that an nn.Module's results match the GraphModule version\n for a given set of args/kwargs.\n \"\"\"\n kwargs = kwargs if kwargs else {}\n ref_outs = m(*args, **kwargs)\n gm = symbolic_trace(m)\n gm.graph.lint()\n test_outs = gm(*args, **kwargs)\n self.assertEqual(ref_outs, test_outs)\n\n def test_graph_module(self):\n class MySub(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.w = torch.nn.Parameter(torch.rand(4, 3))\n\n def forward(self, x):\n return self.w + x\n\n class MyModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.lin = torch.nn.Linear(4, 3)\n self.sub_mod = MySub()\n self.w = torch.nn.Parameter(torch.rand(3))\n\n def forward(self, A, B, c):\n t = torch.sigmoid(A) + self.lin(c)\n return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))\n\n m = MyModule()\n gm = symbolic_trace(m)\n\n ms = torch.jit.script(gm)\n\n class M2(torch.nn.Module):\n def forward(self, A):\n m, idx = torch.max(A, 0)\n return m + 1, idx + 1\n\n m2 = M2()\n gm2 = symbolic_trace(m2)\n\n class T(torch.nn.Module):\n\n def forward(self, A, b=4, *args, c=5, **kwargs):\n x = A + 1 + args[0] + kwargs['3']\n return x\n\n t = T()\n symbolic_trace(t)\n\n # test for issue described at https://github.com/pytorch/pytorch/issues/63883\n class M3(torch.nn.Module):\n def forward(self, x):\n return torch.relu(x)\n\n m3 = M3()\n gm3 = symbolic_trace(m3)\n new_instance = gm3.__new__(type(gm3))\n new_instance.__init__(gm3, gm3.graph)\n\n x = torch.randn(5, 3)\n torch.testing.assert_allclose(new_instance(x), torch.relu(x))\n\n def test_custom_import(self):\n graph = torch.fx.Graph()\n a = graph.placeholder('x')\n b = graph.placeholder('y')\n c = graph.call_function(a_non_torch_leaf, (a, b))\n d = graph.call_function(torch.sin, (c,))\n
graph.output(d)\n gm = GraphModule(torch.nn.Module(), graph)\n x, y = torch.rand(1), torch.rand(1)\n self.assertEqual(torch.sin(x + y), gm(x, y))\n\n def test_args_kwargs(self):\n class T(torch.nn.Module):\n def forward(self, *args, **kwargs):\n x = args[0] + kwargs['foo']\n return x\n\n t = T()\n self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})\n\n def test_args_kwargs_no_self(self):\n class T(torch.nn.Module):\n def forward(*args, **kwargs): # noqa: B902\n self = args[0]\n return torch.relu(args[1])\n\n t = T()\n with self.assertRaisesRegex(RuntimeError, r'cannot be part of \\*args expansion'):\n self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})\n\n def test_fx_shifts(self):\n class MyModule(torch.nn.Module):\n def forward(self, x):\n return x << 3, x >> 3\n\n input = torch.LongTensor(10).random_(0, 1024)\n\n m = MyModule()\n self.checkGraphModule(m, (input,))\n\n def test_fx_and_or(self):\n class MyModule(torch.nn.Module):\n def forward(self, x):\n return x & x, x | x\n\n input = torch.LongTensor(10).random_(0, 1024)\n\n m = MyModule()\n self.checkGraphModule(m, (input,))\n\n def test_dict(self):\n class MyDictMod(torch.nn.Module):\n def forward(self, d):\n return d['3'].relu(), {'4' : d['3'].neg()}\n\n input_dict = {'3': torch.rand(3, 4)}\n m = MyDictMod()\n\n self.checkGraphModule(m, (input_dict,))\n\n def test_matmul_tracing(self):\n const = torch.randn(3)\n\n def matmul_f(x):\n return x @ const\n\n mod = symbolic_trace(matmul_f)\n inp = torch.randn(3)\n self.assertEqual(mod(inp), matmul_f(inp))\n\n def rmatmul_f(x):\n return const @ x\n\n mod = symbolic_trace(rmatmul_f)\n inp = torch.randn(3)\n self.assertEqual(mod(inp), rmatmul_f(inp))\n\n\n def test_disallow_override(self):\n # Custom delegate to disallow in-place tensor operations\n class NoMutableCallTracer(Tracer):\n def create_node(self, kind : str, target : Union[str, Callable],\n args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,\n type_expr : Optional[Any] = None) -> Node:\n name = target if isinstance(target, str) else torch.typename(target)\n if name[-1] == '_':\n raise RuntimeError('In-place operations are not supported')\n return super().create_node(kind, target, args, kwargs, name)\n\n # Test method\n class MyInplaceMod(torch.nn.Module):\n def forward(self, x):\n x.add_(3.0)\n return x\n\n m = MyInplaceMod()\n\n with self.assertRaisesRegex(RuntimeError, 'In-place operations'):\n NoMutableCallTracer().trace(m)\n\n # Test free function\n class MyInplaceMod2(torch.nn.Module):\n def forward(self, x):\n torch.log_(x)\n return x\n m2 = MyInplaceMod2()\n with self.assertRaisesRegex(RuntimeError, 'In-place operations'):\n NoMutableCallTracer().trace(m2)\n\n # Test symbolic node as an arg\n class MyInplaceMod3(torch.nn.Module):\n def forward(self, x):\n y = torch.ones(3, 4)\n y.add_(x)\n return x\n m3 = MyInplaceMod3()\n with self.assertRaisesRegex(RuntimeError, 'In-place operations'):\n NoMutableCallTracer().trace(m3)\n\n def test_leaf_module(self):\n # Custom delegate to make it so that there are no leaf modules, everything\n # should get traced through\n class NoLeafModulesTracer(Tracer):\n def is_leaf_module(self, m, qualname):\n return False\n\n class MyReluMod(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.relu = torch.nn.ReLU()\n\n def forward(self, x):\n return self.relu(x)\n\n mrm = MyReluMod()\n sym = NoLeafModulesTracer().trace(mrm)\n for node in sym.nodes:\n self.assertNotEqual(node.op, 
'call_module')\n sym.lint()\n\n def test_wrap(self):\n self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))\n\n def to_trace(y):\n return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)\n\n m = symbolic_trace(to_trace)\n self.assertIn('a_lifted_leaf', m.code)\n self.assertEqual(27, m(2))\n self.assertIs(a_lifted_leaf, real_a_lifed_leaf)\n\n def test_wrap_fn_directly(self):\n self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))\n\n def to_trace(y):\n return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)\n\n m = symbolic_trace(to_trace)\n self.assertIn('a_lifted_leaf2', m.code)\n self.assertEqual(27, m(2))\n self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)\n\n def test_wrapped_via_decorator(self):\n self.assertEqual(wrapped_via_decorator(0), 1)\n\n def to_trace(y):\n return wrapped_via_decorator(y)\n\n m = symbolic_trace(to_trace)\n self.assertIn('wrapped_via_decorator', m.code)\n self.assertEqual(m(0), 1)\n self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)\n self.assertFalse(hasattr(wrapped_via_decorator, \"__fx_already_patched\"))\n\n def test_wrapped_via_decorator_and_transformed(self):\n self.assertEqual(wrapped_via_decorator(0), 1)\n\n def to_trace(y):\n return wrapped_via_decorator(y)\n\n m = symbolic_trace(to_trace)\n self.assertIn('wrapped_via_decorator', m.code)\n self.assertEqual(m(0), 1)\n self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)\n self.assertFalse(hasattr(wrapped_via_decorator, \"__fx_already_patched\"))\n\n transformed = torch.fx.Transformer(m).transform()\n self.assertIn('wrapped_via_decorator', transformed.code)\n self.assertEqual(transformed(0), 1)\n self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)\n self.assertFalse(hasattr(wrapped_via_decorator, \"__fx_already_patched\"))\n\n def test_wrap_with_submodule(self):\n\n class M(torch.nn.Module):\n def __init__(self):\n super(M, self).__init__()\n self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)\n\n def forward(self, x: torch.Tensor):\n return wrapped_with_submodule(x, self.batchnorm1d)\n\n m = symbolic_trace(M())\n\n self.assertIn(\"wrapped_with_submodule\", m.code)\n\n input = torch.rand(3, 2)\n ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)\n self.assertEqual(ref_batchnorm1d(input), m(input))\n\n def test_wrapped_retrace(self):\n def to_trace(y):\n return wrapped_via_decorator(y)\n\n m = symbolic_trace(to_trace)\n self.assertIn('wrapped_via_decorator', m.code)\n self.assertEqual(m(0), 1)\n\n retraced = symbolic_trace(m)\n self.assertIn('wrapped_via_decorator', retraced.code)\n self.assertEqual(retraced(0), 1)\n\n def test_graph_edit_with_proxy(self):\n class M(torch.nn.Module):\n def forward(self, a, b):\n return a + b\n m = M()\n g = symbolic_trace(m).graph\n new_g = torch.fx.Graph()\n val_map : Dict[Node, Node] = {}\n output_val = new_g.graph_copy(g, val_map)\n t = Proxy(output_val)\n # test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.\n new_g.output((t + t).node)\n gm = GraphModule(m, new_g)\n gm.graph.lint()\n self.assertEqual(gm(3, 4), 14)\n\n def test_graph_unique_names(self):\n class M(torch.nn.Module):\n def forward(self, a, b):\n return a + b\n m = M()\n g = symbolic_trace(m).graph\n new_g = torch.fx.Graph()\n val_map : Dict[Node, Node] = {}\n output_val = new_g.graph_copy(g, val_map)\n t = Proxy(output_val)\n # test that we can use proxy objects to generate more graph code later for things that do not need to 
work with modules.\n new_g.output((t + t).node)\n gm = GraphModule(m, new_g)\n seen_names : Set[str] = set()\n for node in gm.graph.nodes:\n assert node.name not in seen_names\n seen_names.add(node.name)\n\n def test_stack_traces(self):\n class M(torch.nn.Module):\n def forward(self, a, b):\n return a + b\n\n tracer = torch.fx.Tracer()\n tracer.record_stack_traces = True\n\n graph = tracer.trace(M())\n # saving the original list because we will insert new nodes as a part of a test\n orig_graph_nodes = list(graph.nodes)\n for node in orig_graph_nodes:\n if node.op == 'output':\n continue\n self.assertTrue(node.stack_trace is not None)\n assert 'test_fx.py' in node.stack_trace\n\n # verify that copying the node does not lose the stack trace\n new_node = graph.node_copy(node)\n self.assertTrue(new_node.stack_trace is not None)\n assert 'test_fx.py' in new_node.stack_trace\n\n def test_graph_unique_names_manual(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n a : torch.fx.Node = graph.create_node('placeholder', 'x')\n b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')\n c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')\n d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))\n graph.output(d)\n graph2 = torch.fx.Graph()\n val_map : Dict[Node, Node] = {}\n graph2.graph_copy(graph, val_map)\n seen_names : Set[str] = set()\n for node in graph2.nodes:\n assert node.name not in seen_names\n seen_names.add(node.name)\n\n def test_unpack(self):\n class M(torch.nn.Module):\n def forward(self, a, b):\n c, d = a\n return c + d + b\n\n a = (torch.rand(1), torch.rand(1))\n b = torch.rand(1)\n m = M()\n self.checkGraphModule(m, (a, b))\n\n def test_native_callable(self):\n if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:\n raise unittest.SkipTest(\"non-portable load_library call used in test\")\n # This test exercises the case where we use FX to translate from Python\n # code to some native callable object\n #\n # For the purposes of testing, we use ElementwiseInterpreter defined\n # in test_custom_class.cpp.\n #\n # We test that we can\n # 1) Construct a native callable from FX IR\n # 2) Construct a drop-in replacement module that delegates to the\n # native callable rather than the original code\n # 3) Run both the original code and native callable wrapper with\n # equivalent results\n # 4) TorchScript compile the native callable wrapper and confirm\n # equivalent results with the reference\n # 5) TorchScript serialize and deserialize the native callable\n # and confirm equivalent results with the reference\n\n # We use this simple Module as a reference computation\n class MySimpleMod(torch.nn.Module):\n def forward(self, x):\n return 3.0 * x + x\n\n msm = MySimpleMod()\n\n # This is what a lowering pass might look like: a function that takes\n # a valid nn.Module, symbolically traces it, lowers the Module to some\n # representation, and wraps that representation up into another\n # nn.Module instance that handles dispatch to the compiled/lowered code.\n def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:\n # ===== Stage 1: Symbolic trace the module =====\n mod = symbolic_trace(orig_mod)\n\n # ===== Stage 2: Lower GraphModule representation to the C++\n # interpreter's instruction format ======\n instructions = []\n constant_idx = 0\n constants = {}\n fn_input_names = []\n\n target_to_name = {\n operator.add : \"add\",\n operator.mul : \"mul\"\n }\n\n output_node : 
Optional[Node] = None\n # For each instruction, create a triple\n # (instruction_name : str, inputs : List[str], output : str)\n # to feed into the C++ interpreter\n for n in mod.graph.nodes:\n target, args, out_name = n.target, n.args, n.name\n assert len(n.kwargs) == 0, \"kwargs currently not supported\"\n\n if n.op == 'placeholder':\n # Placeholders specify function argument names. Save these\n # for later when we generate the wrapper GraphModule\n fn_input_names.append(target)\n elif n.op == 'call_function':\n assert target in target_to_name, \"Unsupported call target \" + target\n arg_names = []\n for arg in args:\n if not isinstance(arg, Node):\n # Pull out constants. These constants will later be\n # fed to the interpreter C++ object via add_constant()\n arg_name = f'constant_{constant_idx}'\n constants[arg_name] = torch.tensor(\n [arg] if isinstance(arg, numbers.Number) else arg)\n arg_names.append(arg_name)\n constant_idx += 1\n else:\n arg_names.append(arg.name)\n instructions.append((target_to_name[target], arg_names, out_name))\n elif n.op == 'output':\n if output_node is not None:\n raise RuntimeError('Multiple output nodes!')\n output_node = n\n else:\n raise RuntimeError('Unsupported opcode ' + n.op)\n\n interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()\n # Load constants\n for k, v in constants.items():\n interpreter.add_constant(k, v)\n # Specify names for positional input arguments\n interpreter.set_input_names(fn_input_names)\n # Load instructions\n interpreter.set_instructions(instructions)\n # Specify name for single output\n assert isinstance(output_node.args[0], torch.fx.Node)\n interpreter.set_output_name(output_node.args[0].name)\n\n # ===== Stage 3: Create a wrapper GraphModule around the interpreter =====\n class WrapperModule(torch.nn.Module):\n def __init__(self, interpreter):\n super().__init__()\n self.interpreter = interpreter\n\n wrapper = WrapperModule(interpreter)\n\n # Create a graph that: 1) Takes function arguments 2) Invokes the interpreter\n # 3) Returns the specified return value\n\n # FIXME: The following code could be greatly simplified by symbolic_trace'ing\n # the wrapper with a Tracer that considers the Wrapper instance a root\n # module, however, I can't get `__call__` exposed on TorchBind classes\n # without it messing up Python `hasattr` for some reason.
More digging\n # into CPython's implementation of hasattr is probably in order...\n\n graph = torch.fx.Graph()\n # Add placeholders for fn inputs\n placeholder_nodes = []\n for name in fn_input_names:\n placeholder_nodes.append(graph.create_node('placeholder', name))\n\n # Get the interpreter object\n interpreter_node = graph.create_node('get_attr', 'interpreter')\n\n # Add a node to call the interpreter instance\n output_node = graph.create_node(\n op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))\n\n # Register output\n graph.output(output_node)\n\n graph.lint()\n\n # Return final GraphModule!!!\n return GraphModule(wrapper, graph)\n\n\n # Lower GraphModule to C++ interpreter\n lowered = lower_to_elementwise_interpreter(msm)\n\n # Compare correctness with original module\n x = torch.rand(3, 4)\n ref_out = msm(x)\n test_out = lowered(x)\n torch.testing.assert_close(test_out, ref_out)\n\n # Test TorchScript compilation\n scripted_lowered = torch.jit.script(lowered)\n script_out = scripted_lowered(x)\n torch.testing.assert_close(script_out, ref_out)\n\n # Test TorchScript ser/de\n import_copy = self.getExportImportCopy(scripted_lowered)\n imported_out = import_copy(x)\n torch.testing.assert_close(imported_out, ref_out)\n\n def test_reserved_getattr(self):\n \"\"\"Ensure that we do not name any nodes with a reserved builtin like `getattr`\"\"\"\n class M(torch.nn.Module):\n def forward(self, a):\n return a.foo.bar.baz\n\n m = M()\n m_g = symbolic_trace(m)\n m_g.graph.lint()\n for node in m_g.graph.nodes:\n self.assertTrue(node.name != \"getattr\")\n\n def test_node_tagging(self):\n class TaggingTracer(Tracer):\n def create_node(self, kind : str, target : Union[str, Callable],\n args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,\n type_expr : Optional[Any] = None) -> Node:\n n = super().create_node(kind, target, args, kwargs, name)\n n.tag = 'foo'\n return n\n\n class M(torch.nn.Module):\n def forward(self, a, b):\n return a + b\n\n m = M()\n g = TaggingTracer().trace(m)\n g.lint()\n for n in g.nodes:\n self.assertTrue(hasattr(n, 'tag'))\n self.assertEqual(n.tag, 'foo')\n\n def test_tensor_attribute(self):\n class TensorAttribute(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.tensor = torch.rand(3, 4)\n\n def forward(self, x):\n return torch.nn.functional.linear(x, self.tensor)\n\n ta = TensorAttribute()\n traced = symbolic_trace(ta)\n traced(torch.rand(4, 4))\n\n class WrapperForQualname(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.ta = TensorAttribute()\n\n def forward(self, x):\n return torch.nn.functional.linear(x, self.ta.tensor)\n\n wfq = WrapperForQualname()\n traced2 = symbolic_trace(wfq)\n traced2.graph.lint()\n traced2(torch.rand(4, 4))\n\n def test_tensor_attribute_coalseced(self):\n\n def count_attrs(fx_module):\n targets = set()\n for node in fx_module.graph.nodes:\n if node.op == 'get_attr':\n targets.add(node.target)\n return len(targets)\n\n val = torch.tensor(5)\n\n def f(x):\n return x + val + val\n traced = symbolic_trace(f)\n traced.graph.lint()\n self.assertEqual(count_attrs(traced), 1)\n\n val2 = torch.tensor(5)\n\n def f(x):\n val = torch.tensor(5)\n return x + val + val2\n\n traced = symbolic_trace(f)\n traced.graph.lint()\n self.assertEqual(count_attrs(traced), 2)\n\n\n def test_symbolic_trace_sequential(self):\n class Simple(torch.nn.Module):\n def forward(self, x):\n return torch.neg(x)\n\n seq = torch.nn.Sequential(\n Simple(),\n Simple(),\n Simple()\n )\n traced
= symbolic_trace(seq)\n traced.graph.lint()\n x = torch.rand(3, 4)\n self.assertEqual(traced(x), seq(x))\n\n def test_tensor_constant(self):\n class ConstTensor(torch.nn.Module):\n def forward(self, x):\n return torch.nn.functional.linear(x, torch.zeros(3, 4))\n\n ct = ConstTensor()\n traced = symbolic_trace(ct)\n traced.graph.lint()\n traced(torch.rand(4, 4))\n\n def test_pickle_graphmodule(self):\n class Nested(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.st = torch.nn.Linear(4, 4)\n\n def forward(self, x):\n return self.st(x)\n\n n = Nested()\n traced = symbolic_trace(n)\n traced.graph.lint()\n pickled = pickle.dumps(traced)\n loaded = pickle.loads(pickled)\n loaded.graph.lint()\n x = torch.rand(3, 4)\n self.assertEqual(loaded(x), traced(x))\n\n def test_pickle_custom_import(self):\n graph = torch.fx.Graph()\n a = graph.placeholder('x')\n b = graph.placeholder('y')\n c = graph.call_function(a_non_torch_leaf, (a, b))\n d = graph.call_function(torch.sin, (c,))\n graph.output(d)\n gm = GraphModule(torch.nn.Module(), graph)\n pickled = pickle.dumps(gm)\n loaded = pickle.loads(pickled)\n loaded.graph.lint()\n x, y = torch.rand(1), torch.rand(1)\n self.assertEqual(loaded(x, y), gm(x, y))\n\n def test_all_input_nodes(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n a : torch.fx.Node = graph.placeholder('x')\n b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))\n c : torch.fx.Node = graph.get_attr('y_attr')\n d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))\n e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))\n graph.output(e)\n graph.lint()\n\n self.assertEqual(b.all_input_nodes, [a])\n self.assertEqual(c.all_input_nodes, [])\n self.assertEqual(d.all_input_nodes, [b, c])\n self.assertEqual(e.all_input_nodes, [d])\n\n def test_deepcopy_graphmodule_with_transform(self):\n st = SimpleTest()\n traced = symbolic_trace(st)\n traced.graph.lint()\n\n def transform(traced):\n new_graph = torch.fx.Graph()\n val_map : Dict[Node, Node] = {}\n output_value = new_graph.graph_copy(traced.graph, val_map)\n relu_out = new_graph.create_node(\n op='call_method', target='neg', args=(output_value,), kwargs={})\n new_graph.output(relu_out)\n return GraphModule(traced, new_graph)\n transformed = transform(traced)\n transformed.graph.lint()\n copied = copy.deepcopy(transformed)\n self.assertNotEqual(id(type(transformed)), id(type(copied)))\n x = torch.randn(3, 4)\n self.assertEqual(copied(x), transformed(x))\n\n def test_deepcopy_with_submods_params(self):\n class Bar(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.param = torch.nn.Parameter(torch.rand(3, 4))\n\n def forward(self, x):\n return torch.relu(x) + self.param\n\n class Baz(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.param = torch.nn.Parameter(torch.rand(3, 4))\n self.bar = Bar()\n\n def forward(self, x):\n return self.bar(x) - self.param\n\n baz = Baz()\n traced = symbolic_trace(baz)\n traced.graph.lint()\n copied = copy.deepcopy(traced)\n copied.graph.lint()\n\n def test_deepcopy_graph_with_tracer_cls(self):\n class TestTracer(Tracer):\n def is_leaf_module(self, module, name):\n return True\n\n g = Graph(tracer_cls=TestTracer)\n x = g.placeholder(\"x\")\n g.output(x)\n\n h = copy.deepcopy(g)\n self.assertIsNotNone(h._tracer_cls)\n self.assertTrue(g._tracer_cls == h._tracer_cls)\n\n def test_unpack_list_better_error(self):\n class SomeArgs(torch.nn.Module):\n def forward(self, a, b):\n return torch.rand(3, 4)\n\n class 
UnpacksList(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.sa = SomeArgs()\n\n def forward(self, x : list):\n return self.sa(*x)\n\n ul = UnpacksList()\n with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):\n symbolic_trace(ul)\n\n def test_unpack_dict_better_error(self):\n class SomeKwargs(torch.nn.Module):\n def forward(self, x=3, y=4):\n return torch.rand(3, 4)\n\n class UnpacksDict(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.sk = SomeKwargs()\n\n def forward(self, x : dict):\n return self.sk(**x)\n\n ud = UnpacksDict()\n with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):\n symbolic_trace(ud)\n\n def test_pretty_print_targets(self):\n # Test that Graph pretty-print prints friendly name for targets\n # in `operator` and `builtins`\n\n class SomeMod(torch.nn.Module):\n def forward(self, x):\n return torch.add(x.foo + x.bar, 3.0)\n\n traced = symbolic_trace(SomeMod())\n graph_str = str(traced.graph)\n self.assertIn('builtins.getattr', graph_str)\n self.assertIn('operator.add', graph_str)\n self.assertIn('torch.add', graph_str)\n\n def test_pretty_print_node(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.param: torch.nn.Parameter = torch.nn.Parameter(\n torch.rand(3, 4))\n self.linear = torch.nn.Linear(4, 5)\n\n def forward(self, x: torch.Tensor, y: int = 2):\n return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)\n\n traced = symbolic_trace(M())\n\n all_formatted = \"\\n\".join([n.format_node() for n in traced.graph.nodes])\n\n FileCheck().check(\"x\").check(\"placeholder\") \\\n .check(\"y\").check(\"placeholder\") \\\n .check(\"getitem\").check(\"call_function\") \\\n .check(\"param\").check(\"get_attr\") \\\n .check(\"add\").check(\"call_function\") \\\n .check(\"linear\").check(\"call_module\") \\\n .check(\"clamp\").check(\"call_method\") \\\n .run(all_formatted)\n\n def test_script_tensor_constant(self):\n # TorchScript seems to ignore attributes that start with `__`.\n # We used to call anonymous Tensor values `__tensor_constant*`, but\n # they were getting ignored by script. 
Now they're called\n # `_tensor_constant*`\n class IHaveATensorConstant(torch.nn.Module):\n def forward(self, x):\n return x + torch.rand(3, 4)\n\n traced = torch.fx.symbolic_trace(IHaveATensorConstant())\n torch.jit.script(traced)\n\n def test_autowrap_functions(self):\n class AutowrapFnTest(torch.nn.Module):\n def forward(self, x):\n return fx_int(x.shape[0] / 2)\n\n class AutowrapFnTest2(torch.nn.Module):\n def forward(self, x):\n return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)\n\n # Check function(s) are wrapped\n # `int` would normally throw a TypeError as argument can't be `Proxy`\n tracer = Tracer(autowrap_functions=(fx_int,))\n graph = tracer.trace(AutowrapFnTest())\n traced = GraphModule(tracer.root, graph, 'test')\n tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))\n tracer_2.trace(AutowrapFnTest2())\n\n # Test scriptability\n traced_scripted = torch.jit.script(traced)\n self.assertEqual(traced_scripted(torch.rand(4)), 2)\n\n def test_torch_fx_len(self):\n class FXLenTest(torch.nn.Module):\n def forward(self, x):\n return len(x)\n\n traced = symbolic_trace(FXLenTest())\n self.assertEqual(traced(torch.rand(3, 4)), 3)\n\n # Test scriptability\n scripted = torch.jit.script(FXLenTest())\n self.assertEqual(scripted(torch.rand(3)), 3)\n\n traced_scripted = torch.jit.script(traced)\n self.assertEqual(traced_scripted(torch.rand(3)), 3)\n\n # Test non-proxy len\n class FXLenTest2(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.l = [3, 4, 5]\n\n def forward(self, x):\n return x + len(self.l)\n\n traced2 = symbolic_trace(FXLenTest2())\n inp = torch.rand(3, 4)\n self.assertEqual(traced2(inp), inp + 3.0)\n self.assertIs(len, builtins.len)\n\n def test_torch_fx_getattr(self):\n class FXGetattrTest(torch.nn.Module):\n def forward(self, x):\n return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))\n\n traced = symbolic_trace(FXGetattrTest())\n self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))\n\n def test_sqrt(self):\n class Sqrt1(torch.nn.Module):\n def forward(self, x):\n return sqrt(x.size(0))\n\n class Sqrt2(torch.nn.Module):\n def forward(self, x):\n return math.sqrt(x.size(0))\n\n class Sqrt3(torch.nn.Module):\n def forward(self, x):\n return x + math.sqrt(2) + sqrt(2)\n\n self.checkGraphModule(Sqrt1(), [torch.zeros(8)])\n self.checkGraphModule(Sqrt2(), [torch.zeros(8)])\n self.checkGraphModule(Sqrt3(), [torch.zeros(8)])\n self.assertIs(sqrt, _sqrt)\n self.assertIs(math.sqrt, _sqrt)\n\n def test_torch_custom_ops(self):\n class M(torch.nn.Module):\n def forward(self, a):\n b = torch.ops.aten.sigmoid(a)\n c = torch.ops.aten.cat([a, b])\n return torch.ops.aten.cat((c, c))\n m = M()\n input = torch.randn(3)\n ref_out = m(input)\n gm = symbolic_trace(m)\n gm.graph.lint()\n out = gm(input)\n self.assertEqual(out, ref_out)\n\n def test_pickle_torch_custom_ops(self):\n class M(torch.nn.Module):\n def forward(self, a):\n b = torch.ops.aten.sigmoid(a)\n c = torch.ops.aten.cat([a, b])\n return torch.ops.aten.cat((c, c))\n m = M()\n input = torch.randn(3)\n ref_out = m(input)\n gm = symbolic_trace(m)\n gm.graph.lint()\n pickled = pickle.dumps(gm)\n loaded = pickle.loads(pickled)\n self.assertEqual(loaded(input), gm(input))\n\n def test_pretty_print(self):\n st = SimpleTest()\n traced = symbolic_trace(st)\n traced.graph.lint()\n printed = str(traced)\n assert 'SimpleTest()' in printed\n assert 'torch.relu' in printed\n\n def test_pretty_print_graph(self):\n class KwargPrintTest(torch.nn.Module):\n def forward(self, x):\n return torch.squeeze(x + 
3.0, dim=2)\n st = KwargPrintTest()\n traced = symbolic_trace(st)\n traced.graph.lint()\n stringed = str(traced.graph)\n for s in ['args', 'kwargs', '#users']:\n assert s in stringed\n\n def test_custom_proxy_type(self):\n class TensorPair:\n def __init__(self, left, right):\n self.left, self.right = left, right\n\n def add(self, other):\n l = self.left + other.left\n r = self.right + other.right\n return TensorPair(l, r)\n\n def mul(self, other):\n l = self.left * other.left\n r = self.right * other.right\n return TensorPair(l, r)\n\n def use_tensor_pair(x : TensorPair, y : TensorPair):\n s = x.add(y)\n return s.mul(x)\n\n x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))\n y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))\n\n ref_out = use_tensor_pair(x, y)\n\n traced = symbolic_trace(use_tensor_pair)\n\n traced_out = traced(x, y)\n self.assertEqual(traced_out.left, ref_out.left)\n self.assertEqual(traced_out.right, ref_out.right)\n\n def test_custom_proxy_type_literal(self):\n class TensorPair(metaclass=torch.fx.ProxyableClassMeta):\n def __init__(self, left, right):\n self.left, self.right = left, right\n\n def add(self, other):\n l = self.left + other.left\n r = self.right + other.right\n return TensorPair(l, r)\n\n def mul(self, other):\n l = self.left * other.left\n r = self.right * other.right\n return TensorPair(l, r)\n\n def use_tensor_pair_literal(x : TensorPair):\n s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))\n return s.mul(x)\n\n x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))\n\n ref_out = use_tensor_pair_literal(x)\n\n traced = symbolic_trace(use_tensor_pair_literal)\n\n traced_out = traced(x)\n self.assertEqual(traced_out.left, ref_out.left)\n self.assertEqual(traced_out.right, ref_out.right)\n\n def test_custom_proxy_dynamic_value(self):\n class TensorPair(metaclass=torch.fx.ProxyableClassMeta):\n def __init__(self, left, right):\n self.left, self.right = left, right\n\n def add(self, other):\n l = self.left + other.left\n r = self.right + other.right\n return TensorPair(l, r)\n\n def mul(self, other):\n l = self.left * other.left\n r = self.right * other.right\n return TensorPair(l, r)\n\n def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):\n s = x.add(TensorPair(y, y))\n return s.mul(x)\n\n x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))\n y = torch.randn(5, 3)\n ref_out = use_tensor_pair_ctor(x, y)\n\n traced = symbolic_trace(use_tensor_pair_ctor)\n\n traced_out = traced(x, y)\n self.assertEqual(traced_out.left, ref_out.left)\n self.assertEqual(traced_out.right, ref_out.right)\n\n def test_custom_proxy_input_dependent_control_flow(self):\n class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):\n def __init__(self, inp):\n if inp.sum() == 0:\n self.is_zero = True\n self.tensor = torch.tensor([])\n else:\n self.is_zero = False\n self.tensor = inp\n\n def add(self, other):\n if self.is_zero:\n return ZeroTensor(other.tensor)\n elif other.is_zero:\n return self\n\n def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):\n return ZeroTensor(x + y)\n\n x, y = torch.randn(5, 3), torch.randn(5, 3)\n\n ref_out = use_zero_tensor(x, y)\n\n traced = symbolic_trace(use_zero_tensor)\n\n traced_out = traced(x, y)\n\n self.assertEqual(traced_out.is_zero, ref_out.is_zero)\n self.assertEqual(traced_out.tensor, ref_out.tensor)\n\n def test_graph_fns(self):\n g = Graph()\n a = g.placeholder('a')\n b = g.call_module('linear', (a,))\n c = g.get_attr('bias')\n d = g.call_method('add', (b, c))\n e = g.call_function(torch.sin, (d,))\n g.output(e)\n mod 
= torch.nn.Module()\n mod.linear = torch.nn.Linear(3, 4)\n mod.bias = torch.rand(4)\n gm = GraphModule(mod, g)\n gm.graph.lint()\n input = torch.rand(3)\n r = gm(input)\n ref = torch.sin(mod.linear(input) + mod.bias)\n self.assertEqual(r, ref)\n\n def test_remove_uses(self):\n g : torch.fx.Graph = Graph()\n x : torch.fx.Node = g.placeholder('x')\n relu : torch.fx.Node = g.call_function(torch.relu, (x,))\n neg : torch.fx.Node = g.call_function(torch.neg, (relu,))\n g.output(neg)\n\n neg.replace_all_uses_with(relu)\n g.erase_node(neg)\n\n self.assertTrue(neg not in relu.users)\n\n def test_nonetype_annotation(self):\n eb = torch.nn.EmbeddingBag(3, 4)\n symbolic_trace(eb)\n\n def test_pickle_nonetype_annotation(self):\n eb = torch.nn.EmbeddingBag(10, 3, mode='sum')\n traced = symbolic_trace(eb)\n pickled = pickle.dumps(traced)\n loaded = pickle.loads(pickled)\n loaded.graph.lint()\n input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])\n offsets = torch.LongTensor([0, 4])\n self.assertEqual(loaded(input, offsets), traced(input, offsets))\n\n def test_return_tuple(self):\n class M(torch.nn.Module):\n def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n return (x, x + x)\n\n\n original = M()\n traced = symbolic_trace(original)\n self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))\n\n def test_construct_root_dict(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n a : torch.fx.Node = graph.create_node('placeholder', 'x')\n b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))\n c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')\n d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))\n graph.output(d)\n\n linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)\n add_param : torch.Tensor = torch.rand(3, 4)\n gm : torch.fx.GraphModule = torch.fx.GraphModule(\n {'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)\n gm.graph.lint()\n\n assert 'self.foo.bar.baz' in gm.code\n\n x : torch.Tensor = torch.rand(3, 3)\n out : torch.Tensor = gm(x)\n ref_out : torch.Tensor = linear_mod(x) + add_param\n self.assertEqual(out, ref_out)\n\n def test_symbolic_trace_assert(self):\n\n class AssertsTensorShape(torch.nn.Module):\n def forward(self, x):\n torch._assert(x.shape[1] > 4, \"assert_foobar\")\n return x\n\n m = AssertsTensorShape()\n # verify traceability\n traced = symbolic_trace(m)\n # verify assertion on traced model works correctly at runtime\n traced(torch.rand(4, 5))\n with self.assertRaisesRegex(AssertionError, \"assert_foobar\"):\n traced(torch.rand(4, 3))\n # verify the symbolically traced module is scriptable\n ms = torch.jit.script(m)\n with self.assertRaisesRegex(torch.jit.Error, \"assert_foobar\"):\n ms(torch.rand(4, 3))\n\n def test_fx_create_arg(self):\n class CustomArgObject:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __fx_create_arg__(self, tracer: torch.fx.Tracer):\n return tracer.create_node(\n \"call_function\",\n CustomArgObject,\n args=(\n tracer.create_arg(self.x),\n tracer.create_arg(self.y),\n ),\n kwargs={},\n )\n\n class HasCustomArgObjectWhenLeaf(torch.nn.Module):\n def forward(self, o: CustomArgObject):\n # Not normally traceable; good reason to make\n # this module a leaf.\n for x in o.x:\n o.y += x\n return o.y\n\n class Root(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.inner = HasCustomArgObjectWhenLeaf()\n\n def forward(self, x, y):\n o = CustomArgObject(x, y)\n return self.inner(o)\n\n class 
CreateArgTracer(torch.fx.Tracer):\n def is_leaf_module(self, m, module_qualified_name):\n return type(m) is HasCustomArgObjectWhenLeaf\n\n m = Root()\n graph = CreateArgTracer().trace(m)\n gm = torch.fx.GraphModule(m, graph)\n assert \"CustomArgObject(\" in gm.code\n\n def test_trace_fn_constant(self):\n some_constant = torch.rand(3, 4)\n\n def add_const(x):\n return some_constant + x\n\n traced = symbolic_trace(add_const)\n\n input = torch.rand(3, 4)\n self.assertEqual(traced(input), add_const(input))\n\n def test_copy_no_remap(self):\n traced = symbolic_trace(SimpleTest())\n g = traced.graph\n copied = torch.fx.Graph()\n for node in g.nodes:\n copied.node_copy(node)\n with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):\n copied.lint()\n\n def test_wrong_topo(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n a : torch.fx.Node = graph.create_node('placeholder', 'x')\n b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))\n c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')\n d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))\n graph.output(d)\n nodes = list(graph.nodes)\n nodes[3].append(nodes[2])\n with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):\n graph.lint()\n\n def test_wrong_target_type(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n with self.assertRaises(ValueError):\n n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',\n args=(), kwargs={})\n\n def test_example_shape_prop(self):\n class TestCase(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.attr = torch.randn(3, 4)\n self.submod = torch.nn.Linear(4, 4)\n\n def forward(self, x):\n return torch.neg(self.submod(x.relu() + self.attr))\n tc = TestCase()\n tc_traced = symbolic_trace(tc)\n ref_out = tc_traced(torch.rand(3, 4))\n shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))\n\n # Make sure we're testing all opcodes\n opcodes = set()\n output_shape : Optional[torch.Shape] = None\n output_stride : Optional[Tuple[int]] = None\n for node in tc_traced.graph.nodes:\n opcodes.add(node.op)\n if node.op == 'output':\n output_shape = node.args[0].meta['tensor_meta'].shape\n output_stride = node.args[0].meta['tensor_meta'].stride\n self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',\n 'call_module', 'output']))\n\n # Test shape propogation and make sure results match actual\n self.assertEqual(output_shape, ref_out.shape)\n self.assertEqual(output_stride, ref_out.stride())\n\n def test_shape_prop_layout(self):\n class ConvTest(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv_mod = torch.nn.Conv2d(5, 5, 3)\n\n def forward(self, x):\n return self.conv_mod(x)\n\n # contiguous layout\n test_mod = ConvTest()\n traced = symbolic_trace(test_mod)\n x = torch.randn(5, 5, 224, 224)\n shape_prop.ShapeProp(traced).propagate(x)\n\n assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format\n for node in traced.graph.nodes))\n\n x_channels_last = x.contiguous(memory_format=torch.channels_last)\n traced.to(memory_format=torch.channels_last)\n shape_prop.ShapeProp(traced).propagate(x_channels_last)\n for node in traced.graph.nodes:\n # NB: the implementation of conv may not preserve the memory format,\n # unfortunately. 
The best we can do is just check that the placeholder\n # node is channels-last\n if node.op in {'placeholder'}:\n self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)\n\n def test_shape_prop_aggregate(self):\n class ReturnTwo(torch.nn.Module):\n def forward(self, x):\n return (3, torch.sum(x))\n\n class UnderTest(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.rt = ReturnTwo()\n\n def forward(self, x):\n return self.rt(x)\n\n ut = UnderTest()\n\n class RTTracer(torch.fx.Tracer):\n def is_leaf_module(self, m, module_qualified_name):\n return type(m) is ReturnTwo\n\n graph = RTTracer().trace(ut)\n mod = torch.fx.GraphModule(ut, graph)\n\n shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))\n\n for node in mod.graph.nodes:\n if node.op == 'call_module':\n assert 'tensor_meta' in node.meta\n tensor_meta = node.meta['tensor_meta']\n assert tensor_meta[0] == 3\n assert tensor_meta[1].shape == torch.Size([])\n\n def test_shape_prop_layout_3d(self):\n class ConvTest3d(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv_mod = torch.nn.Conv3d(5, 5, 3)\n\n def forward(self, x):\n return self.conv_mod(x)\n\n test_mod_3d = ConvTest3d()\n traced_3d = symbolic_trace(test_mod_3d)\n x_3d = torch.randn(5, 5, 224, 224, 15)\n shape_prop.ShapeProp(traced_3d).propagate(x_3d)\n assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format\n for node in traced_3d.graph.nodes))\n\n x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)\n traced_3d.to(memory_format=torch.channels_last_3d)\n shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)\n for node in traced_3d.graph.nodes:\n # NB: the implementation of conv may not preserve the memory format,\n # unfortunately. 
The best we can do is just check that the placeholder\n            # node is channels-last\n            if node.op in {'placeholder'}:\n                self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)\n\n    def test_interpreter(self):\n        class MyModule(torch.nn.Module):\n            def __init__(self):\n                super().__init__()\n                self.param = torch.nn.Parameter(torch.rand(3, 4))\n                self.linear = torch.nn.Linear(4, 5)\n\n            def forward(self, x):\n                return self.linear(x + self.param).clamp(min=0.0, max=1.0)\n\n        m = MyModule()\n        gm = torch.fx.symbolic_trace(m)\n\n        interpreter = Interpreter(gm)\n        input = torch.randn(3, 4)\n        self.assertEqual(interpreter.run(input), gm(input))\n        self.assertEqual(interpreter.run(input), m(input))\n\n    def test_interpreter_run_node_override(self):\n        class MyModule(torch.nn.Module):\n            def __init__(self):\n                super().__init__()\n                self.param = torch.nn.Parameter(torch.rand(3, 4))\n                self.linear = torch.nn.Linear(4, 5)\n\n            def forward(self, x):\n                return self.linear(x + self.param).clamp(min=0.0, max=1.0)\n\n        m = MyModule()\n        gm = torch.fx.symbolic_trace(m)\n\n        class RunNodeInterpreter(Interpreter):\n            def __init__(self, module):\n                super().__init__(module)\n\n            def run_node(self, n : Node) -> Any:\n                result = super().run_node(n)\n                n.cached_value = result\n                return result\n\n        input = torch.randn(3, 4)\n        RunNodeInterpreter(gm).run(input)\n        for node in gm.graph.nodes:\n            assert hasattr(node, 'cached_value')\n\n    def test_interpreter_onthefly_swap(self):\n\n        def fn(x):\n            return torch.sigmoid(x).neg()\n\n        gm = torch.fx.symbolic_trace(fn)\n\n        class NegSigmSwapInterpreter(Interpreter):\n            def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:\n                if target == torch.sigmoid:\n                    return torch.neg(*args, **kwargs)\n                # Delegate unmatched calls to the default implementation.\n                return super().call_function(target, args, kwargs)\n\n            def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:\n                if target == 'neg':\n                    call_self, *args_tail = args\n                    return call_self.sigmoid(*args_tail, **kwargs)\n                return super().call_method(target, args, kwargs)\n\n        input = torch.randn(3, 4)\n        result = NegSigmSwapInterpreter(gm).run(input)\n        self.assertEqual(result, torch.neg(input).sigmoid())\n\n    def test_interpreter_partial_eval(self):\n        class MyModule(torch.nn.Module):\n            def __init__(self):\n                super().__init__()\n                self.param = torch.nn.Parameter(torch.rand(3, 4))\n                self.linear = torch.nn.Linear(4, 5)\n\n            def forward(self, x):\n                return self.linear(x + self.param).clamp(min=0.0, max=1.0)\n\n        gm = torch.fx.symbolic_trace(MyModule())\n        interp = Interpreter(gm)\n        env = {}\n        for node in gm.graph.nodes:\n            if node.op == 'call_module' and node.target == 'linear':\n                env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0\n                break\n        assert len(env) == 1\n        x = torch.randn(3, 4)\n        result = interp.run(x, initial_env=env)\n        self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))\n\n    def test_interpreter_star_args(self):\n        def with_star_args(x, *args):\n            return x + args[0]\n\n        gm = torch.fx.symbolic_trace(with_star_args)\n        interp = Interpreter(gm)\n        result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))\n        self.assertEqual(result, torch.ones(3, 4) * 2.0)
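\n\n    # Illustrative sketch (ours, not from the original suite): the swap tests in\n    # this file rely on overrides falling through to\n    # `super().call_function(target, args, kwargs)` /\n    # `super().call_method(target, args, kwargs)` for every node they leave\n    # untouched; otherwise unmatched nodes would never execute. The helper below\n    # shows that delegation pattern for `Interpreter`. The name\n    # `_sketch_interpreter_delegation` is hypothetical and nothing invokes it.\n    def _sketch_interpreter_delegation(self):\n        class TracingInterpreter(Interpreter):\n            def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:\n                # Record every function target, then run the node unchanged.\n                self.seen_targets = getattr(self, 'seen_targets', []) + [target]\n                return super().call_function(target, args, kwargs)\n\n        gm = torch.fx.symbolic_trace(lambda x: torch.relu(x).neg())\n        interp = TracingInterpreter(gm)\n        interp.run(torch.randn(3, 4))\n        # torch.relu is a call_function target; Tensor.neg is a call_method and\n        # therefore never reaches call_function.\n        assert torch.relu in interp.seen_targets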
\n\n    @skipIfNoTorchVision\n    def test_interpreter_noop_resnet18(self):\n        rn18 = torchvision_models.resnet18()\n        transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()\n        inp = torch.randn(5, 3, 224, 224)\n        self.assertEqual(transformed(inp), rn18(inp))\n\n    @skipIfNoTorchVision\n    def test_interpreter_gc_values(self):\n        rn18 = torchvision_models.resnet18()\n        interp = Interpreter(symbolic_trace(rn18))\n        inp = torch.rand(5, 3, 224, 224)\n        out = interp.run(inp)\n        env_key_names = set(n.name for n in interp.env.keys())\n        self.assertEqual(env_key_names, set(['output']))\n\n    def test_interpreter_default_args(self):\n        class Model(torch.nn.Module):\n            def forward(self, x, y=3.14159):\n                return x + y\n\n        model = Model()\n        gm = torch.fx.symbolic_trace(model)\n\n        interp = Interpreter(gm)\n        x = torch.randn(5, 3)\n        out = interp.run(x)\n        torch.testing.assert_allclose(out, x + 3.14159)\n\n    def test_interpreter_not_enough_args(self):\n        class Model(torch.nn.Module):\n            def forward(self, x, y):\n                return x + y\n\n        model = Model()\n        gm = torch.fx.symbolic_trace(model)\n\n        interp = Interpreter(gm)\n        x = torch.randn(5, 3)\n        with self.assertRaisesRegex(RuntimeError,\n                                    'Expected positional argument for parameter y, but one was not passed in'):\n            out = interp.run(x)\n\n    def test_transformer_noop(self):\n        class MyModule(torch.nn.Module):\n            def __init__(self):\n                super().__init__()\n                self.param = torch.nn.Parameter(torch.rand(3, 4))\n                self.linear = torch.nn.Linear(4, 5)\n\n            def forward(self, x):\n                return self.linear(x + self.param).clamp(min=0.0, max=1.0)\n\n        m = MyModule()\n        gm = torch.fx.symbolic_trace(m)\n\n        new_gm = Transformer(gm).transform()\n\n        input = torch.randn(3, 4)\n        self.assertEqual(new_gm(input), gm(input))\n\n    def test_transformer_op_swap(self):\n\n        def fn(x):\n            return torch.sigmoid(x).neg()\n\n        gm = torch.fx.symbolic_trace(fn)\n\n        class NegSigmSwapXformer(Transformer):\n            def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:\n                if target == torch.sigmoid:\n                    return torch.neg(*args, **kwargs)\n                # Delegate unmatched calls to the default implementation.\n                return super().call_function(target, args, kwargs)\n\n            def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:\n                if target == 'neg':\n                    call_self, *args_tail = args\n                    return call_self.sigmoid(*args_tail, **kwargs)\n                return super().call_method(target, args, kwargs)\n\n        transformed = NegSigmSwapXformer(gm).transform()\n        input = torch.randn(3, 4)\n        self.assertEqual(transformed(input), torch.neg(input).sigmoid())\n\n    def test_transformer_multi_outputs(self):\n        class MyModule(torch.nn.Module):\n            def __init__(self):\n                super().__init__()\n                self.param = torch.nn.Parameter(torch.rand(3, 4))\n                self.linear = torch.nn.Linear(4, 5)\n\n            def forward(self, x):\n                x = x + self.param\n                out = self.linear(x)\n                return x, out\n\n        m = MyModule()\n        gm = torch.fx.symbolic_trace(m)\n\n        new_gm = Transformer(gm).transform()\n\n        input = torch.randn(3, 4)\n        self.assertEqual(new_gm(input), gm(input))\n\n    def test_fn_type_annotations(self):\n        class Foo(torch.nn.Module):\n            def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:\n                return {'a': p.x + p.y + z + i}\n\n        foo_scripted = torch.jit.script(Foo())\n        foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)\n\n        fxed = symbolic_trace(Foo())\n        fxed_scripted = torch.jit.script(fxed)\n        fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)\n\n    def test_fn_type_annotation_empty(self):\n        def forward(a : List[torch.Tensor]):\n            return a[0]\n        torch.jit.script(symbolic_trace(forward))\n\n    def test_wrapped_method(self):\n        def wrap_with_relu(fn):\n            @functools.wraps(fn)\n            def wrapper(*args, **kwargs):\n                return torch.relu(fn(*args, **kwargs))\n            return wrapper\n\n        class Foo(torch.nn.Module):\n            @wrap_with_relu\n            def forward(self, x, w):\n                return torch.matmul(x, w)\n\n        f = Foo()\n        traced = symbolic_trace(f)\n        x, w = torch.rand(3, 4), torch.rand(4, 4)\n        self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))\n\n    def test_empty_graph_codegen(self):\n        
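# An empty Graph has no output value, so the GraphModule generated below\n        # should simply return None when called.\n        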
graph = torch.fx.Graph()\n gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n self.assertEqual(gm(), None)\n\n def test_sequential(self):\n m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))\n gm = torch.fx.symbolic_trace(m)\n gm_copy = copy.deepcopy(gm)\n\n def test_ctx_mgr(self):\n @contextlib.contextmanager\n def do_nothing():\n yield\n\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @do_nothing()\n def forward(self, x):\n return torch.relu(x)\n\n m = M()\n self.checkGraphModule(m, (torch.rand(3, 4),))\n\n def test_typename_print(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n x : torch.fx.Node = graph.create_node('placeholder', 'x')\n b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),\n type_expr=List[float])\n output : torch.fx.Node = graph.output(b)\n\n self.assertTrue('typing.List[float]' in str(graph))\n\n def test_layout(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)\n\n traced = symbolic_trace(M())\n x = torch.rand(5, 9, 3, 4)\n self.assertEqual(traced(x), torch.zeros_like(x))\n\n def test_ellipsis(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x, y):\n return x + y[:, 1:10, ...]\n\n traced = symbolic_trace(M())\n x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)\n self.assertEqual(traced(x, y), x + y[:, 1:10, ...])\n\n def test_inf_nan(self):\n class FooMod(torch.nn.Module):\n def forward(self, x):\n return x + float('inf'), x + float('-inf'), x + float('nan')\n\n fm = FooMod()\n self.checkGraphModule(fm, (torch.rand(3, 4),))\n\n def test_inf_nan_kwds(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n x : torch.fx.Node = graph.create_node('placeholder', 'x')\n b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')\n c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')\n graph.output((b, c))\n\n gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n x = torch.rand(3, 4)\n self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))\n\n def test_deepcopy_recursion_depth(self):\n depth = sys.getrecursionlimit() + 20\n\n g = torch.fx.Graph()\n x = g.placeholder('x')\n for i in range(depth):\n x = g.call_function(torch.relu, (x,))\n g.output(x)\n\n copied_graph = copy.deepcopy(g)\n\n val_map = {}\n for orig_node, new_node in zip(g.nodes, copied_graph.nodes):\n val_map[orig_node] = new_node\n\n for orig_node, new_node in zip(g.nodes, copied_graph.nodes):\n orig_users = set(orig_node.users.keys())\n orig_users_equiv = set(val_map[u] for u in orig_users)\n new_users = set(new_node.users.keys())\n self.assertEqual(orig_users_equiv, new_users)\n\n @skipIfNoTorchVision\n def test_replace_uses(self):\n rn18 = torchvision_models.resnet18()\n\n class LowerReluTracer(torch.fx.Tracer):\n def is_leaf_module(self, m : torch.nn.Module, qualname : str):\n if isinstance(m, torch.nn.ReLU):\n return False\n return super().is_leaf_module(m, qualname)\n\n rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))\n\n to_erase = []\n for node in rn18_traced.graph.nodes:\n if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:\n kwargs = node.kwargs.copy()\n # Neg doesn't have in-place\n kwargs.pop('inplace')\n with rn18_traced.graph.inserting_before(node):\n new_node = rn18_traced.graph.call_function(\n 
the_function=torch.neg, args=node.args, kwargs=kwargs)\n                node.replace_all_uses_with(replace_with=new_node)\n                to_erase.append(node)\n\n        for node in to_erase:\n            rn18_traced.graph.erase_node(node)\n\n\n    def test_replace_input(self):\n        graph : torch.fx.Graph = torch.fx.Graph()\n        x : torch.fx.Node = graph.create_node('placeholder', 'x')\n        y : torch.fx.Node = graph.create_node('placeholder', 'y')\n        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))\n        output : torch.fx.Node = graph.output(b)\n\n        b.replace_input_with(x, y)\n\n        gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n\n        input_x = torch.randn(33, 44)\n        input_y = torch.randn(11, 22)\n        self.assertEqual(gm(input_x, input_y), torch.relu(input_y))\n\n    def test_insertion_point(self):\n        graph : torch.fx.Graph = torch.fx.Graph()\n        x : torch.fx.Node = graph.create_node('placeholder', 'x')\n        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))\n        output : torch.fx.Node = graph.output(b)\n\n        with graph.inserting_before(b):\n            neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))\n            _, *relu_args = b.args\n            b.args = (neg, *relu_args)\n\n        gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n\n        input = torch.randn(33, 44)\n        self.assertEqual(gm(input), torch.relu(torch.neg(input)))\n\n    def test_update_args_api(self):\n        graph : torch.fx.Graph = torch.fx.Graph()\n        x : torch.fx.Node = graph.create_node('placeholder', 'x')\n        y : torch.fx.Node = graph.create_node('placeholder', 'y')\n        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))\n        output : torch.fx.Node = graph.output(b)\n\n        orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n        inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)\n        self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))\n\n\n        b.update_arg(0, y)\n        new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n        self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))\n\n    def test_update_kwargs_api(self):\n        graph : torch.fx.Graph = torch.fx.Graph()\n        x : torch.fx.Node = graph.create_node('placeholder', 'x')\n        y : torch.fx.Node = graph.create_node('placeholder', 'y')\n        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})\n        output : torch.fx.Node = graph.output(b)\n\n        orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n        inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)\n        self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))\n\n\n        b.update_kwarg('input', y)\n        new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n        self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))\n\n    def test_move_before(self):\n        graph : torch.fx.Graph = torch.fx.Graph()\n        x : torch.fx.Node = graph.create_node('placeholder', 'x')\n        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))\n        output : torch.fx.Node = graph.output(b)\n\n        neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))\n        _, *relu_args = b.args\n        b.args = (neg, *relu_args)\n        b.prepend(neg)\n\n        gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n\n        input = torch.randn(33, 44)\n        self.assertEqual(gm(input), torch.relu(torch.neg(input)))\n\n    def test_prepend_self(self):\n        graph : torch.fx.Graph = torch.fx.Graph()\n        x : torch.fx.Node = graph.create_node('placeholder', 'x')\n        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))\n        output : torch.fx.Node = graph.output(b)\n\n        b.prepend(b)\n        x.append(b)\n        self.assertEqual(len(graph.nodes), 
3)\n\n def test_erase_node_error(self):\n st = SimpleTest()\n traced = symbolic_trace(st)\n\n for node in traced.graph.nodes:\n # Test deleting with uses both in another Node and at the output\n if node.target in [operator.add, torch.relu]:\n with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):\n traced.graph.erase_node(node)\n\n def test_copy_it(self):\n d = immutable_dict([(3, 4), (5, 6)])\n l = immutable_list([(3, 4), (5, 6)])\n\n self.assertEqual(d, deepcopy(d))\n self.assertEqual(l, deepcopy(l))\n\n def test_get_torch_func_signature(self):\n for key in dir(torch):\n obj = getattr(torch, key)\n if callable(obj):\n schemas = get_signature_for_torch_op(obj)\n\n def test_find_uses(self):\n graph = torch.fx.Graph()\n x = torch.fx.Proxy(graph.placeholder('x'))\n\n y = torch.relu(x)\n z = x + x\n u = torch.neg(x)\n graph.output((y + z + u).node)\n graph.lint()\n\n users_of_x = x.node.users\n self.assertEqual(len(users_of_x), 3)\n expected_ops = set(['relu', 'add', 'neg'])\n for use in users_of_x:\n assert any(use.name.startswith(prefix) for prefix in expected_ops)\n\n def test_inline_graph(self):\n class InlineInto(torch.nn.Module):\n def forward(self, x):\n return torch.relu(x)\n\n class ToInline(torch.nn.Module):\n def forward(self, x):\n return torch.neg(x)\n\n inline_into = symbolic_trace(InlineInto())\n to_inline = symbolic_trace(ToInline())\n\n combined_graph = torch.fx.Graph()\n output_node = combined_graph.graph_copy(inline_into.graph, {})\n\n input_node = list(to_inline.graph.nodes)[0]\n assert input_node and input_node.op == 'placeholder'\n\n val_map = {input_node : output_node}\n output = combined_graph.graph_copy(to_inline.graph, val_map)\n combined_graph.output(output)\n\n combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)\n\n input = torch.rand(3, 4)\n self.assertEqual(combined_module(input), input.relu().neg())\n\n def test_multi_insert_point(self):\n graph = torch.fx.Graph()\n x = torch.fx.Proxy(graph.placeholder('x'))\n relu = torch.relu(x)\n\n with graph.inserting_before(relu.node):\n y = torch.neg(x)\n z = torch.tanh(y)\n\n graph.output((relu.node, z.node))\n graph.lint()\n\n expected_ops = ['x', 'neg', 'tanh', 'relu']\n for node, expected in zip(graph.nodes, expected_ops):\n assert expected in node.name\n\n def test_reassign_args_kwargs_uses(self):\n graph = torch.fx.Graph()\n x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))\n z = x + y\n zed = z + z + z\n graph.output(zed.node)\n graph.lint()\n\n # zed = z + z + z -> zed = z + z + x\n zed.node.args = (zed.node.args[0], x.node)\n self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])\n\n # z = x + y -> z = y + y\n z.node.args = (y.node, y.node)\n self.assertEqual(list(x.node.users.keys()), [zed.node])\n\n def test_trace_function(self):\n def foo(x, y):\n return torch.relu(x) + y\n\n x, y = torch.randn(3, 4), torch.randn(3, 4)\n self.checkGraphModule(foo, (x, y))\n\n def test_trace_dict_int_keys(self):\n class ModWithDictArg(torch.nn.Module):\n def forward(self, d : Dict[int, torch.Tensor]):\n return d[42]\n\n class CallsModWithDict(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.m = ModWithDictArg()\n\n def forward(self, x):\n return self.m({42: x})\n\n class MyTracer(torch.fx.Tracer):\n def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:\n return isinstance(m, ModWithDictArg)\n\n traced_graph = MyTracer().trace(CallsModWithDict())\n\n def test_trace_dict_proxy_keys(self):\n class 
ModWithDictArg(torch.nn.Module):\n def forward(self, d : Dict[torch.Tensor, torch.Tensor]):\n return d[42]\n\n class CallsModWithDict(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.m = ModWithDictArg()\n\n def forward(self, x):\n return self.m({x: x})\n\n class MyTracer(torch.fx.Tracer):\n def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:\n return isinstance(m, ModWithDictArg)\n\n with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):\n traced_graph = MyTracer().trace(CallsModWithDict())\n\n def test_module_deepcopy_edit_nodes(self):\n class Foo(torch.nn.Module):\n def forward(self, x):\n return torch.relu(x)\n\n traced1 = symbolic_trace(Foo())\n copied = copy.deepcopy(traced1)\n\n for node in copied.graph.nodes:\n if node.target == torch.relu:\n node.target = torch.neg\n\n copied.recompile()\n traced1.recompile()\n\n x = torch.randn(15, 15)\n torch.testing.assert_allclose(traced1(x), torch.relu(x))\n torch.testing.assert_allclose(copied(x), torch.neg(x))\n\n def test_direct_param_use(self):\n class TransposeTest(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.b = torch.nn.Parameter(torch.rand(4, 3))\n\n def forward(self, x):\n return self.b\n\n class Foo(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.a = TransposeTest()\n\n def forward(self, x):\n return self.a.b, self.a.b.t(), self.a.b.view(12)\n\n traced = torch.fx.symbolic_trace(Foo())\n assert(all('constant' not in node.target for node in traced.graph.nodes))\n\n def test_single_default_arg(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, y=1):\n return y\n\n m = M()\n self.checkGraphModule(m, ())\n self.checkGraphModule(m, (3,))\n\n def test_multiple_default_args(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, y=1, z=2):\n return y + z\n\n m = M()\n self.checkGraphModule(m, ())\n self.checkGraphModule(m, (3,))\n self.checkGraphModule(m, (3, 4))\n\n def test_regular_and_default_args(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x, y=1):\n return x + y\n\n m = M()\n self.checkGraphModule(m, (2,))\n self.checkGraphModule(m, (2, 3))\n\n def test_string_literal_return(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self):\n return \"foo\"\n\n m = M()\n self.checkGraphModule(m, ())\n\n def test_namedtuple_return_qualname(self):\n class NamedTupReturn(torch.nn.Module):\n def forward(self, x):\n return MyNamedTup(x, x)\n\n traced = symbolic_trace(NamedTupReturn())\n input = torch.rand(3, 4)\n self.assertEqual(traced(input), MyNamedTup(input, input))\n\n def test_update_args_kwargs_yells_at_you(self):\n symtraced = symbolic_trace(SimpleTest())\n node = next(iter(symtraced.graph.nodes))\n with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):\n node.__update_args_kwargs((), {})\n\n def test_torchbind_class_attribute_in_fx(self):\n if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:\n self.skipTest(\"torch.classes._TorchScriptTesting._StackString is registered, skipping\")\n\n class FooBar1234(torch.nn.Module):\n def __init__(self):\n super(FooBar1234, self).__init__()\n self.f = torch.classes._TorchScriptTesting._StackString([\"3\", \"4\"])\n\n def forward(self):\n return self.f.top()\n\n m = FooBar1234()\n self.checkGraphModule(m, ())\n\n def test_torchbind_class_attribute_in_fx_tensor_arg(self):\n if TEST_WITH_ROCM 
or IS_FBCODE or IS_WINDOWS or IS_MACOS:\n self.skipTest(\"torch.classes._TorchScriptTesting._ReLUClass is registered, skipping\")\n\n class FooBar2341(torch.nn.Module):\n def __init__(self):\n super(FooBar2341, self).__init__()\n self.f = torch.classes._TorchScriptTesting._ReLUClass()\n\n def forward(self, x):\n return self.f.run(x)\n\n m = FooBar2341()\n\n traced = symbolic_trace(m)\n input = torch.randn(3, 4)\n self.assertEqual(traced(input), m(input))\n\n self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))\n\n def test_script_method_trace(self):\n class Scripted(torch.nn.Module):\n def forward(self, x):\n return torch.relu(x)\n\n class Holder(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.s = torch.jit.script(Scripted())\n\n def forward(self, x):\n return self.s(x)\n\n h = Holder()\n traced = symbolic_trace(h)\n input = torch.randn(3, 4)\n self.assertEqual(traced(input), h(input))\n\n self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))\n\n def test_namedtuple_return_trace(self):\n class NamedTupReturn(torch.nn.Module):\n def forward(self, x):\n return Pair(x, x)\n\n traced = symbolic_trace(NamedTupReturn())\n input = torch.rand(3, 4)\n self.assertEqual(traced(input), Pair(input, input))\n\n def test_return_type_exists(self):\n class ReturnTypeModule(torch.nn.Module):\n def other(self, x: List[str]) -> List[str]:\n return x\n\n def forward(self, x: List[str]) -> List[str]:\n return self.other(x)\n\n traced = symbolic_trace(ReturnTypeModule())\n self.assertIn(\"-> typing_List[str]\", traced._code)\n scripted = torch.jit.script(traced)\n self.assertIn(\"-> List[str]\", scripted.code)\n\n def getitem_inner(self):\n class GetItemBase(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.register_buffer('pe', torch.randn(8, 8))\n\n class GetItem1(GetItemBase):\n def forward(self, x):\n return self.pe[:, :x.size(0)]\n\n class GetItem2(GetItemBase):\n def forward(self, x):\n return self.pe[x.size(0)]\n\n class GetItem3(GetItemBase):\n def forward(self, x):\n return self.pe[4] # fx creates `self._tensor_constant0` here\n\n self.checkGraphModule(GetItem1(), [torch.zeros(4)])\n self.checkGraphModule(GetItem2(), [torch.zeros(4)])\n self.checkGraphModule(GetItem3(), [torch.zeros(4)])\n\n @unittest.skipUnless(os.environ.get(\"FX_PATCH_GETITEM\") == \"1\",\n \"Will be checked in test_getitem_subproc\")\n def test_getitem(self):\n self.getitem_inner()\n\n def test_getitem_subproc(self):\n # need to run this test in a subproc to work around:\n # https://github.com/pytorch/pytorch/issues/50710\n proc = Process(target=run_getitem_target)\n proc.start()\n proc.join()\n self.assertEqual(proc.exitcode, 0)\n\n\n def test_user_friendly_call_provenance_with_function(self):\n def fn(x):\n return wrapper_fn(x)\n\n traced = torch.fx.symbolic_trace(fn)\n\n with self.assertRaisesRegex(RuntimeError, \"'wrapper_fn' is \"\n \"being compiled since it was called\"\n \" from 'fn.forward'\"):\n scripted = torch.jit.script(traced)\n\n def test_user_friendly_call_provenance_with_module(self):\n class M(torch.nn.Module):\n def forward(self, x):\n return wrapper_fn(x)\n\n traced = torch.fx.symbolic_trace(M())\n\n with self.assertRaisesRegex(RuntimeError, \"'wrapper_fn' is \"\n \"being compiled since it was called\"\n \" from 'M.forward'\"):\n scripted = torch.jit.script(traced)\n\n def test_snake_case(self):\n class M(torch.nn.Module):\n def __init__(self):\n super(M, self).__init__()\n self.activations = torch.nn.ModuleDict([\n [\"snake_case\", 
torch.nn.ReLU()],\n [\"PascalCase\", torch.nn.LeakyReLU()],\n [\"ALL_CAPS\", torch.nn.PReLU()]\n ])\n\n def forward(self, x):\n a = self.activations[\"snake_case\"](x)\n b = self.activations[\"PascalCase\"](x)\n c = self.activations[\"ALL_CAPS\"](x)\n return a, b, c\n\n traced = symbolic_trace(M())\n\n check = [\n (\"activations_snake_case\", \"activations.snake_case\"),\n (\"activations_pascal_case\", \"activations.PascalCase\"),\n (\"activations_all_caps\", \"activations.ALL_CAPS\")\n ]\n\n i = 0\n for node in traced.graph.nodes:\n if node.op == \"placeholder\" or node.op == \"output\":\n continue\n name = check[i][0]\n target = check[i][1]\n self.assertEqual(name, node.name)\n self.assertEqual(target, node.target)\n i += 1\n self.assertEqual(i, 3)\n\n def test_no_mutation(self):\n from torch.fx.immutable_collections import immutable_list\n x = immutable_list([3, 4])\n with self.assertRaisesRegex(NotImplementedError, \"new_args\"):\n x[0] = 4\n\n def test_partial_trace(self):\n class Foo(torch.nn.Module):\n def forward(self, x, y):\n if y:\n return 2 * x\n else:\n return x\n mod = Foo()\n mod_true = symbolic_trace(mod, concrete_args={'y': True})\n mod_false = symbolic_trace(mod, concrete_args={'y': False})\n self.assertEqual(mod_true(3, True), 6)\n print(mod_true.code)\n assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))\n with self.assertRaises(AssertionError):\n mod_true(3, False)\n self.assertEqual(mod_false(3, False), 3)\n with self.assertRaises(AssertionError):\n mod_false(3, True)\n\n def f_higher(a, f):\n return f(a)\n\n nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})\n self.assertEqual(nf(3, lambda x: x * 2), 6)\n\n def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):\n class M(torch.nn.Module):\n def __init__(self):\n super(M, self).__init__()\n self.W = torch.nn.Parameter(torch.randn(5))\n\n def forward(self, x):\n return torch.dot(self.W, x)\n\n traced = torch.fx.symbolic_trace(M())\n\n out = [n for n in traced.graph.nodes if n.op == \"output\"][-1]\n with traced.graph.inserting_before(out):\n relu_out = traced.graph.call_method(method_name='relu',\n args=(out.args[0],))\n out.args = (relu_out,)\n\n traced.recompile()\n\n with self.capture_stderr() as captured:\n with self.assertRaises(TypeError):\n traced(5)\n\n self.assertRegex(captured[0],\n r\"Call using an FX-traced Module, line .* of the \"\n r\"traced Module's generated forward function:\")\n\n def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = torch.nn.Linear(3, 4)\n\n def forward(self, x):\n return self.linear(x)\n\n traced = torch.fx.symbolic_trace(M())\n\n # Do not change this to `capture_stderr` or another context\n # manager without ensuring that the output is as expected\n try:\n traced(torch.rand(5, 5))\n except RuntimeError:\n captured = traceback.format_exc()\n\n self.assertNotRegex(captured,\n r\"Call using an FX-traced Module, line .* of the \"\n r\"traced Module's generated forward function:\")\n\n def test_graph_module_replicate_for_dp(self):\n class Foo(torch.nn.Module):\n def forward(self, x):\n return torch.relu(x)\n\n gm = torch.fx.symbolic_trace(Foo())\n\n x = torch.randn(5, 3)\n out = gm(x)\n\n replica = gm._replicate_for_data_parallel()\n out_replica = replica(x)\n\n torch.testing.assert_allclose(out_replica, out)\n\n def test_ast_rewriter_rewrites_assert(self):\n class M(torch.nn.Module):\n def forward(self, x: 
torch.Tensor, y: int, z: int):\n assert y == z\n return torch.add(x, x)\n\n ast_rewriter = RewritingTracer()\n graph = ast_rewriter.trace(M())\n traced = GraphModule(ast_rewriter.root, graph, \"gm\")\n\n traced.graph.lint()\n\n def test_ast_rewriter_rewrites_assert_with_message(self):\n class M(torch.nn.Module):\n def forward(self, x: torch.Tensor, y: int, z: int):\n assert y == z, \"msg\"\n return torch.add(x, x)\n\n ast_rewriter = RewritingTracer()\n graph = ast_rewriter.trace(M())\n traced = GraphModule(ast_rewriter.root, graph, \"gm\")\n\n traced.graph.lint()\n\n def test_throw_out_variant(self):\n def foo(x):\n y = torch.rand_like(x)\n torch.sigmoid(x, out=y)\n return y\n\n class MyTracer(torch.fx.Tracer):\n check_mutable_operations = True\n\n tracer = MyTracer()\n with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):\n traced_graph = tracer.trace(foo)\n\n def test_ast_rewriter_reassigns_submodules(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.bn = torch.nn.BatchNorm2d(100)\n\n def forward(self, x: torch.Tensor):\n return torch.add(x, x)\n\n ast_rewriter = RewritingTracer()\n graph = ast_rewriter.trace(M())\n traced = GraphModule(ast_rewriter.root, graph, \"gm\")\n\n traced.graph.lint()\n\n def test_ast_rewriter_wrap(self):\n self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))\n\n def to_trace(y):\n return (\n a_lifted_leaf((4, y), 3)\n + a_lifted_leaf((3, 4), 5)\n + a_lifted_leaf((y, y), y)\n )\n\n ast_rewriter = RewritingTracer()\n graph = ast_rewriter.trace(to_trace)\n traced = GraphModule(ast_rewriter.root, graph, \"gm\")\n\n self.assertIn(\"a_lifted_leaf\", traced.code)\n self.assertEqual(27, traced(2))\n self.assertIs(a_lifted_leaf, real_a_lifed_leaf)\n\n def test_ast_rewriter_wrap_fn_directly(self):\n self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))\n\n def to_trace(y):\n return (\n a_lifted_leaf2((4, y), 3)\n + a_lifted_leaf2((3, 4), 5)\n + a_lifted_leaf2((y, y), y)\n )\n\n ast_rewriter = RewritingTracer()\n graph = ast_rewriter.trace(to_trace)\n traced = GraphModule(ast_rewriter.root, graph, \"gm\")\n\n self.assertIn(\"a_lifted_leaf2\", traced.code)\n self.assertEqual(27, traced(2))\n self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)\n\n def test_profiler_ranges_side_effect(self):\n g = torch.fx.Graph()\n handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))\n g.call_function(torch.ops.profiler._record_function_exit, (handle,))\n g.output(None)\n\n found_targets = {}\n for node in g.nodes:\n if node.op == 'call_function':\n found_targets.setdefault(node.target)\n self.assertEqual(\n list(found_targets.keys()),\n [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]\n )\n\n g.eliminate_dead_code()\n found_targets = {}\n for node in g.nodes:\n if node.op == 'call_function':\n found_targets.setdefault(node.target)\n self.assertEqual(\n list(found_targets.keys()),\n [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]\n )\n\n def test_ast_rewriter_wrapped_via_decorator(self):\n class F(torch.nn.Module):\n def forward(self, x):\n return wrapped_via_decorator(x)\n\n ast_rewriter = RewritingTracer()\n graph = ast_rewriter.trace(F())\n traced = GraphModule(ast_rewriter.root, graph, \"gm\")\n\n self.assertIn(\"wrapped_via_decorator\", traced.code)\n self.assertEqual(traced(0), 1)\n self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)\n self.assertFalse(hasattr(wrapped_via_decorator, 
\"__fx_already_patched\"))\n\n def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):\n self.assertEqual(wrapped_via_decorator(0), 1)\n\n def to_trace(y):\n return wrapped_via_decorator(y)\n\n ast_rewriter = RewritingTracer()\n graph = ast_rewriter.trace(to_trace)\n traced = GraphModule(ast_rewriter.root, graph, \"gm\")\n\n self.assertIn(\"wrapped_via_decorator\", traced.code)\n self.assertEqual(traced(0), 1)\n self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)\n self.assertFalse(hasattr(wrapped_via_decorator, \"__fx_already_patched\"))\n\n transformed = torch.fx.Transformer(traced).transform()\n self.assertIn(\"wrapped_via_decorator\", transformed.code)\n self.assertEqual(transformed(0), 1)\n self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)\n self.assertFalse(hasattr(wrapped_via_decorator, \"__fx_already_patched\"))\n\n def test_ast_rewriter_wrap_with_submodule(self):\n class M(torch.nn.Module):\n def __init__(self):\n super(M, self).__init__()\n self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)\n\n def forward(self, x: torch.Tensor):\n return wrapped_with_submodule(x, self.batchnorm1d)\n\n ast_rewriter = RewritingTracer()\n graph = ast_rewriter.trace(M())\n traced = GraphModule(ast_rewriter.root, graph, \"gm\")\n\n self.assertIn(\"wrapped_with_submodule\", traced.code)\n\n input = torch.rand(3, 2)\n ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)\n self.assertEqual(ref_batchnorm1d(input), traced(input))\n\n def test_submodule_manipulation_API(self):\n class C(torch.nn.Module):\n def __init__(self):\n super(C, self).__init__()\n self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)\n self.param = torch.nn.Parameter(torch.rand(2, 3))\n\n def forward(self, x):\n return self.conv(torch.cat([self.param, x]))\n\n class B(torch.nn.Module):\n def __init__(self):\n super(B, self).__init__()\n self.linear = torch.nn.Linear(100, 200)\n self.register_buffer(\"buf\", torch.randn(2, 3))\n self.net_c = C()\n\n def forward(self, x):\n return self.linear(torch.cat([self.buf, self.net_c(x)]))\n\n class A(torch.nn.Module):\n def __init__(self):\n super(A, self).__init__()\n self.net_b = B()\n self.param = torch.nn.Parameter(torch.rand(2, 3))\n\n def forward(self, x):\n return self.net_b(x) + self.param\n\n a = symbolic_trace(A())\n\n a.add_submodule(\"net_b.net_c.dropout\", torch.nn.Dropout(p=0.2))\n\n conv = [n for n in a.graph.nodes if n.target == \"net_b.net_c.conv\"][-1]\n with a.graph.inserting_before(conv):\n with warnings.catch_warnings(record=True) as w:\n dropout = a.graph.call_module(module_name=\"net_b.net_c.dropout\",\n args=conv.args)\n self.assertEqual(len(w), 0)\n\n conv.replace_all_uses_with(dropout)\n a.graph.erase_node(conv)\n a.recompile()\n\n def module_exists(gm: GraphModule, path: str) -> bool:\n return any(path == name for name, _ in gm.named_modules())\n\n def parameter_exists(gm: GraphModule, path: str) -> bool:\n return (any(path == name for name, _ in gm.named_parameters())\n and any(path == name for name in gm.state_dict().keys()))\n\n def buffer_exists(gm: GraphModule, path: str) -> bool:\n return (any(path == name for name, _ in gm.named_buffers())\n and any(path == name for name in gm.state_dict().keys()))\n\n # Test that we added the \"dropout\" submodule\n self.assertTrue(module_exists(a, \"net_b.net_c.dropout\"))\n\n # Test `get_submodule` with an added submodule\n self.assertIsNotNone(a.get_submodule(\"net_b.net_c.dropout\"))\n\n # Test that the \"conv\" submodule is still there\n self.assertTrue(module_exists(a, 
\"net_b.net_c.conv\"))\n\n # Test `get_submodule` with an original module\n self.assertIsNotNone(a.get_submodule(\"net_b.net_c.conv\"))\n\n # Test that the \"conv\" node is NOT still there\n conv = [n for n in a.graph.nodes if n.target == \"net_b.net_c.conv\"]\n self.assertEqual(conv, [])\n\n a.delete_submodule(\"net_b.net_c.conv\")\n\n # Test that the \"conv\" submodule is now gone\n self.assertFalse(module_exists(a, \"net_b.net_c.conv\"))\n\n # Test `get_submodule` with a deleted submodule\n with self.assertRaisesRegex(AttributeError, \"has no attribute \"\n \"`conv`\"):\n self.assertIsNone(a.get_submodule(\"net_b.net_c.conv\"))\n\n # Test `get_attr` warnings\n cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]\n\n with a.graph.inserting_before(cat):\n\n with warnings.catch_warnings(record=True) as w:\n param = a.graph.get_attr(qualified_name=\"net_b.net_c.param\")\n self.assertEqual(len(w), 0)\n\n with self.assertWarnsRegex(UserWarning, \"Attempted to \"\n \"insert a get_attr Node with no \"\n \"underlying reference in the \"\n \"owning GraphModule\"):\n bad_param = a.graph.get_attr(qualified_name=\"net_b.param\")\n a.graph.erase_node(bad_param)\n\n cat.args = (*cat.args, param)\n\n a.recompile()\n\n a.graph.lint()\n\n # Test `get_parameter`\n a.get_parameter(\"net_b.net_c.param\")\n with self.assertRaisesRegex(AttributeError, \"is not an \"\n \"nn.Parameter\"):\n a.get_parameter(\"net_b.buf\")\n with self.assertRaisesRegex(AttributeError, \"has no attribute \"\n \"`param`\"):\n a.get_parameter(\"net_b.param\")\n\n # Test `get_buffer`\n a.get_buffer(\"net_b.buf\")\n with self.assertRaisesRegex(AttributeError, \"is not a \"\n \"buffer\"):\n a.get_buffer(\"net_b.net_c.param\")\n with self.assertRaisesRegex(AttributeError, \"has no attribute \"\n \"`buf`\"):\n a.get_buffer(\"net_b.net_c.buf\")\n\n # Test non-nested attributes\n a.get_submodule(\"\")\n a.get_parameter(\"param\")\n\n # Insert some unused submodules\n a.add_submodule(\"net_b.embedding\", torch.nn.Embedding(10, 3))\n a.add_submodule(\"net_b.net_c.embedding\", torch.nn.Embedding(10, 3))\n a.add_submodule(\"net_b.net_c.rnn\", torch.nn.RNN(10, 20, 2))\n a.add_submodule(\"batch_norm_2d\", torch.nn.BatchNorm2d(100))\n\n # Garbage collection\n a.delete_all_unused_submodules()\n\n # Test that all the unused submodules are gone\n self.assertFalse(module_exists(a, \"net_b.embedding\"))\n self.assertFalse(module_exists(a, \"net_b.net_c.embedding\"))\n self.assertFalse(module_exists(a, \"net_b.net_c.rnn\"))\n self.assertFalse(module_exists(a, \"batch_norm_2d\"))\n\n # Test that we didn't delete any unused Parameters or buffers\n self.assertTrue(parameter_exists(a, \"net_b.net_c.param\"))\n self.assertTrue(buffer_exists(a, \"net_b.buf\"))\n\n a.graph.lint()\n\n def test_delete_unused_submodules_leaf(self):\n class SubModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = torch.nn.Linear(10, 10)\n self.relu = torch.nn.ReLU()\n\n def forward(self, x):\n x = self.linear(x)\n x = self.relu(x)\n return x\n\n class Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.submod = SubModule()\n\n def forward(self, x):\n x = self.submod(x)\n return x\n\n model = Model()\n\n class MyCustomTracer(torch.fx.Tracer):\n def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:\n return module_qualified_name == \"submod\"\n\n inputs = torch.randn(1, 10)\n traced_graph = MyCustomTracer().trace(model)\n gm2 = torch.fx.GraphModule(model, traced_graph)\n 
gm2.delete_all_unused_submodules()\n torch.testing.assert_allclose(gm2(inputs), model(inputs))\n\n def test_tracing_graphmodules_as_leaf_submodules(self):\n class A(torch.nn.Module):\n def forward(self, t):\n return t + t\n\n class B(torch.nn.Module):\n def __init__(self):\n super(type(self), self).__init__()\n self.calling = False\n self.called = False\n\n def forward(self, t):\n if self.calling:\n return t - t\n else:\n return t + t\n\n def __call__(self, *args):\n self.called = True\n self.calling = True\n return super(type(self), self).__call__(*args)\n self.calling = False\n\n class M(torch.nn.Module):\n def __init__(self, a, b):\n super().__init__()\n self.a = a\n self.b = b\n\n def forward(self, t):\n x = self.a(t)\n y = self.b(t)\n return x + y\n\n class LeafTracer(Tracer):\n def is_leaf_module(self, module, name):\n return True\n\n class LeafTracerNotB(Tracer):\n def is_leaf_module(self, module, name):\n return False if \"b\" in name else True\n\n # Recompile calls added \"for fun\", since they\n # chain __call__ wrappers.\n\n #\n # Test: B as a regular, non-leaf module\n #\n a = symbolic_trace(A())\n a.recompile()\n m = M(a, B())\n graph = LeafTracerNotB().trace(m)\n gm = GraphModule(m, graph)\n gm.recompile()\n\n # Test graphmodule/submodule a is not inlined.\n self.assertTrue(isinstance(gm.get_submodule(\"a\"), GraphModule))\n match = [n for n in gm.graph.nodes if n.op == \"call_module\" and n.target == \"a\"]\n self.assertTrue(len(match) == 1)\n\n # Test submodule b is not treated as leaf.\n self.assertFalse(hasattr(gm, \"b\"))\n\n # Test assert custom __call__ on submodule b was honored.\n match = [\n n\n for n in gm.graph.nodes\n if n.op == \"call_function\" and n.target == operator.sub\n ]\n self.assertTrue(len(match) == 1)\n\n #\n # Test: B as a regular, leaf module\n # symbolic_trace should only patch torch.nn.Module.__call__,\n # which means B.__call__ should still execute\n #\n a = symbolic_trace(A())\n a.recompile()\n b = B()\n m = M(a, b)\n graph = LeafTracer().trace(m)\n gm = GraphModule(m, graph)\n gm.recompile()\n\n # Test graphmodule/submodule a is not inlined.\n self.assertTrue(isinstance(gm.get_submodule(\"a\"), GraphModule))\n match = [n for n in gm.graph.nodes if n.op == \"call_module\" and n.target == \"a\"]\n self.assertTrue(len(match) == 1)\n\n # Test submodule b is leaf:\n self.assertTrue(isinstance(gm.get_submodule(\"b\"), torch.nn.Module))\n match = [n for n in gm.graph.nodes if n.op == \"call_module\" and n.target == \"b\"]\n self.assertTrue(len(match) == 1)\n\n # Test b.__call__ was run\n self.assertTrue(b.called)\n self.assertTrue(gm.get_submodule(\"b\").called)\n\n #\n # Test: B as GraphModule leaf\n # __call__ not honored since symbolic_trace directly invokes forward()\n #\n a = symbolic_trace(A())\n a.recompile()\n b = symbolic_trace(B())\n b.recompile()\n m = M(a, b)\n graph = LeafTracer().trace(m)\n gm = GraphModule(m, graph)\n gm.recompile()\n\n self.assertTrue(isinstance(gm.get_submodule(\"a\"), GraphModule))\n match = [n for n in gm.graph.nodes if n.op == \"call_module\" and n.target == \"a\"]\n self.assertTrue(len(match) == 1)\n\n self.assertTrue(isinstance(gm.get_submodule(\"b\"), torch.nn.Module))\n match = [n for n in gm.graph.nodes if n.op == \"call_module\" and n.target == \"b\"]\n self.assertTrue(len(match) == 1)\n\n def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.register_buffer(\"my_buff\", torch.rand(3, 4))\n 
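# One buffer and one parameter, so the test can verify that both kinds of\n                # state are copied into a new GraphModule and appear in state_dict().\n                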
self.register_parameter(\n \"my_param\", torch.nn.Parameter(torch.rand(3, 4))\n )\n\n def forward(self, x):\n return x + self.my_buff + self.my_param\n\n mod = MyModule()\n mod_traced = symbolic_trace(mod)\n\n # Create new GraphModule based on original, either w/ dict or root module.\n orig_buff = mod_traced.get_buffer(\"my_buff\")\n orig_param = mod_traced.get_parameter(\"my_param\")\n mod_traced_new = GraphModule(\n {\"my_buff\": orig_buff, \"my_param\": orig_param} if use_dict_init else mod,\n mod_traced.graph,\n )\n\n # Check that both my_buff and my_param are found and the same.\n try:\n new_buff = mod_traced_new.get_buffer(\"my_buff\")\n except Exception:\n self.fail(\"Did not find my_buff\")\n self.assertEqual(orig_buff, new_buff)\n\n try:\n new_param = mod_traced_new.get_parameter(\"my_param\")\n except Exception:\n self.fail(\"Did not find my_param\")\n self.assertEqual(orig_param, new_param)\n\n x = torch.rand(3, 4)\n orig_out = mod_traced(x)\n submodules_out = mod_traced_new(x)\n\n self.assertEqual(orig_out, submodules_out)\n\n def test_graph_module_init_buffer_param_copied_dict_init(self):\n self._test_graph_module_init_buffer_param_copied(use_dict_init=True)\n\n def test_graph_module_init_buffer_param_copied_mod_init(self):\n self._test_graph_module_init_buffer_param_copied(use_dict_init=False)\n\n def test_annotations_with_no_forward_references(self):\n class A:\n def __call__(self, x: torch.Tensor):\n return torch.add(x, x)\n\n class M(torch.nn.Module):\n def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:\n return a(x)\n\n self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)\n\n def test_annotations_with_forward_references(self):\n class A:\n def __call__(self, x: torch.Tensor):\n return torch.add(x, x)\n\n class M(torch.nn.Module):\n def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':\n return a(x)\n\n self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)\n\n def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):\n class A:\n def __call__(self, x: torch.Tensor):\n return torch.add(x, x)\n\n class M(torch.nn.Module):\n def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:\n return a(x[0])\n\n self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)\n\n def test_annotations_with_non_torch_reference_and_internal_forward_references(self):\n class A:\n def __call__(self, x: torch.Tensor):\n return torch.add(x, x)\n\n class M(torch.nn.Module):\n def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':\n return a(x)[0]\n\n self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)\n\n @unittest.skipIf(sys.version_info < (3, 7), \"`__future__` feature \"\n \"`annotations` is not defined in Python <3.7\")\n def test_annotation_with_future(self):\n try:\n import fx.test_future # noqa: F401\n finally:\n del sys.modules[\"__future__\"]\n\n def test_annotations_empty_tuple(self):\n class Foo(torch.nn.Module):\n def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):\n return \"foo\"\n\n traced = torch.fx.symbolic_trace(Foo())\n\n x = ()\n y = (\"bar\", ())\n\n traced(x, y)\n\n FileCheck().check(\"_Tuple[()]\") \\\n .check(\"typing_Tuple[str,typing_Tuple[()]]\") \\\n .run(traced.code)\n\n scripted = torch.jit.script(traced)\n\n scripted(x, y)\n\n FileCheck().check(\"Tuple[()]\") \\\n .check(\"Tuple[str, Tuple[()]]\") \\\n .run(scripted.code)\n\n @unittest.skipIf(IS_WINDOWS, \"Python Windows bug? 
https://bugs.python.org/issue45108\")\n def test_assert(self):\n def f(x):\n assert x > 1\n return x + 1\n try:\n torch.fx.proxy.TracerBase.trace_asserts = True\n traced = symbolic_trace(f)\n finally:\n torch.fx.proxy.TracerBase.trace_asserts = False\n\n self.assertEqual(f(2), traced(2))\n with self.assertRaises(AssertionError):\n traced(0)\n\n def test_pytree(self):\n def f_sum(x):\n return sum(x)\n\n def f_sum_dict(x):\n out = 0\n for k, v in x.items():\n out += v\n return out\n\n def f_dict_list_map(x):\n new_dict = {}\n for k, v in x.items():\n new_dict[k] = [i + 1 for i in v]\n return new_dict\n\n def f_dict_add(x):\n return x['a'] + sum(x['z'])\n\n def f_namedtuple_add(x):\n return x.x + x.y\n\n pytree._register_pytree_node(\n Foo,\n lambda x: ([x.a, x.b], None),\n lambda x, _: Foo(x[0], x[1]),\n )\n fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])\n\n def f_custom(x):\n return x.a + x.b\n\n def f_custom_dict(x):\n return f_sum_dict(x.a) + x.b\n\n def f_return_custom(x):\n return Foo(x.b, x.a)\n\n tests = [\n (f_sum, [PH, PH, PH]),\n (f_sum, []),\n (f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),\n (f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),\n (f_dict_list_map, {5: (PH, PH, PH)}),\n (f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),\n (f_dict_add, {'a': PH, 'z': []}),\n (f_custom, Foo(PH, PH)),\n (f_custom, Foo(PH, 3)),\n (f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),\n # (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees\n (f_namedtuple_add, Point(PH, PH)),\n ]\n\n def verify_pytree(f, inp):\n val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)\n num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])\n orig_out = f(val)\n nf = symbolic_trace(f, concrete_args={'x': inp})\n self.assertEqual(nf(val), orig_out)\n\n bare_fx = GraphModule({}, copy.deepcopy(nf.graph))\n bare_fx.graph.set_codegen(CodeGen())\n bare_fx.recompile()\n self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)\n\n assert num_flat_args == 0 or \"tree_flatten_spec\" in nf.code\n assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)\n\n nf = symbolic_trace(nf)\n self.assertEqual(nf(val), orig_out)\n assert \"tree_flatten_spec\" not in nf.code\n assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)\n\n nf = symbolic_trace(nf, concrete_args={'x': inp})\n self.assertEqual(nf(val), orig_out)\n assert num_flat_args == 0 or \"tree_flatten_spec\" in nf.code\n assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)\n\n pickled = pickle.dumps(nf)\n nf = pickle.loads(pickled)\n self.assertEqual(nf(val), orig_out)\n\n for f, inp in tests:\n verify_pytree(f, inp)\n\n def test_pytree_concrete(self):\n def f(b, a):\n if b:\n return a['a']\n else:\n return a['z']\n\n inp = {'a': {'a': PH, 'z': PH}, 'b': True}\n nf = symbolic_trace(f, concrete_args=inp)\n val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)\n self.assertEqual(nf(**val), f(**val))\n\n nf = symbolic_trace(nf)\n self.assertEqual(nf(**val), f(**val))\n\n def test_custom_codegen(self):\n class ListCodeGen(CodeGen):\n def gen_fn_def(self, free_vars, maybe_return_annotation):\n lst_unpack = f\"\"\"\ndef forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:\n {', '.join(free_vars)} = args_list\"\"\"\n return lst_unpack\n\n def additional_globals(self):\n return [('List', typing.List)]\n\n def process_inputs(self, *inputs):\n assert(len(inputs) == 1)\n return inputs[0]\n\n def 
f(a, b):\n return a + b\n\n nf = symbolic_trace(f)\n vals = [torch.randn(3), torch.randn(3)]\n self.assertEqual(nf(*vals), f(*vals))\n\n nf.graph.set_codegen(ListCodeGen())\n nf.recompile()\n\n bare_fx = GraphModule({}, copy.deepcopy(nf.graph))\n bare_fx.graph.set_codegen(CodeGen())\n bare_fx.recompile()\n\n self.assertEqual(nf(vals), f(*vals))\n self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))\n\n ts_f = torch.jit.script(nf)\n self.assertEqual(nf(vals), ts_f(vals))\n\n\n def test_imul_code_print(self):\n graph = torch.fx.Graph()\n a = graph.placeholder(\"a\")\n b = graph.placeholder(\"b\")\n graph.call_function(operator.imul, (a, b), {})\n graph.output(a)\n gm = torch.fx.GraphModule({}, graph)\n gm.recompile()\n self.assertEqual(gm(2, 3), 6)\n self.assertIn(\"a *= b\", gm.code)\n\n\ndef run_getitem_target():\n from torch.fx._symbolic_trace import _wrapped_methods_to_patch\n _wrapped_methods_to_patch.append((torch.Tensor, \"__getitem__\"))\n try:\n TestFX().getitem_inner()\n finally:\n _wrapped_methods_to_patch.pop()\n\n\nclass TestOperatorSignatures(JitTestCase):\n def setUp(self):\n # Checking for mutable operations whil tracing is feature flagged\n # Enable it in testing but not by default\n self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations\n torch.fx.proxy.TracerBase.check_mutable_operations = True\n\n def tearDown(self):\n torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag\n\n @onlyCPU\n @ops(op_db, allowed_dtypes=(torch.float,))\n def test_get_torch_func_signature_exhaustive(self, device, dtype, op):\n if not isinstance(op.op, types.BuiltinFunctionType):\n raise unittest.SkipTest(\"This path doesn't work on Python functions\")\n sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)\n schemas = get_signature_for_torch_op(op.op)\n if not schemas:\n raise RuntimeError('No Schemas Returned')\n for sample_input in sample_inputs_itr:\n # Iterate through overloads until we hit a match. 
If we exit this\n # loop via `else`, we haven't found a match\n for schema in schemas:\n try:\n bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)\n bound_args.apply_defaults()\n op(*bound_args.args, **bound_args.kwargs)\n break\n except TypeError as e:\n pass\n else:\n raise RuntimeError(f'Did not match any schemas for op {op.name}!')\n\n\nclass TestFXAPIBackwardCompatibility(JitTestCase):\n def setUp(self):\n self.maxDiff = None\n\n # Checking for mutable operations whil tracing is feature flagged\n # Enable it in testing but not by default\n self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations\n torch.fx.proxy.TracerBase.check_mutable_operations = True\n\n def tearDown(self):\n torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag\n\n\n def _fn_to_stable_annotation_str(self, obj):\n \"\"\"\n Unfortunately we have to serialize function signatures manually since\n serialization for `inspect.Signature` objects is not stable across\n python versions\n \"\"\"\n fn_name = torch.typename(obj)\n\n signature = inspect.signature(obj)\n\n sig_str = f'{fn_name}{signature}'\n\n arg_strs = []\n for k, v in signature.parameters.items():\n maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\\\n if v.annotation is not inspect.Signature.empty else ''\n\n def default_val_str(val):\n if isinstance(val, (tuple, list)):\n str_pieces = ['(' if isinstance(val, tuple) else '[']\n str_pieces.append(', '.join(default_val_str(v) for v in val))\n if isinstance(val, tuple) and len(str_pieces) == 2:\n str_pieces.append(',')\n str_pieces.append(')' if isinstance(val, tuple) else ']')\n return ''.join(str_pieces)\n\n # Need to fix up some default value strings.\n # First case: modules. Default module `repr` contains the FS path of the module.\n # Don't leak that\n if isinstance(val, types.ModuleType):\n return f'<module {val.__name__}>'\n\n # Second case: callables. Callables (such as lambdas) encode their address in\n # their string repr. 
Don't do that\n if callable(val):\n return f'<function {val.__name__}>'\n\n return str(val)\n\n if v.default is not inspect.Signature.empty:\n default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f\"'{v.default}'\"\n maybe_default = f' = {default_val_str}'\n else:\n maybe_default = ''\n maybe_stars = ''\n if v.kind == inspect.Parameter.VAR_POSITIONAL:\n maybe_stars = '*'\n elif v.kind == inspect.Parameter.VAR_KEYWORD:\n maybe_stars = '**'\n arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')\n\n return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\\\n if signature.return_annotation is not inspect.Signature.empty else ''\n\n return f'{fn_name}({\", \".join(arg_strs)}){return_annot}'\n\n def _annotation_type_to_stable_str(self, t, sig_str):\n if t is inspect.Signature.empty:\n return ''\n\n # Forward ref\n if isinstance(t, str):\n return f\"'{t}'\"\n if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):\n return t.__forward_arg__\n if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):\n return t.__forward_arg__\n\n trivial_mappings = {\n str : 'str',\n int : 'int',\n float: 'float',\n bool: 'bool',\n torch.dtype: 'torch.dtype',\n torch.Tensor: 'torch.Tensor',\n torch.device: 'torch.device',\n torch.memory_format: 'torch.memory_format',\n slice: 'slice',\n torch.nn.Module: 'torch.nn.modules.module.Module',\n torch.fx.Graph : 'torch.fx.graph.Graph',\n torch.fx.Node : 'torch.fx.node.Node',\n torch.fx.Proxy : 'torch.fx.proxy.Proxy',\n torch.fx.node.Target : 'torch.fx.node.Target',\n torch.fx.node.Argument : 'torch.fx.node.Argument',\n torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',\n torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',\n torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',\n Ellipsis : '...',\n typing.Any: 'Any',\n type(None): 'NoneType',\n None: 'None',\n typing.Iterator: 'Iterator',\n }\n\n mapping = trivial_mappings.get(t, None)\n if mapping:\n return mapping\n\n # Handle types with contained types\n contained = getattr(t, '__args__', None) or []\n\n # Callables contain a bare List for arguments\n contained = t if isinstance(t, list) else contained\n\n # Python 3.8 puts type vars into __args__ for unbound types such as Dict\n if all(isinstance(ct, typing.TypeVar) for ct in contained):\n contained = []\n\n contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]\n contained_type_str = f'[{\", \".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''\n\n\n origin = getattr(t, '__origin__', None)\n if origin is None:\n # Unbound types don't have `__origin__` in some Python versions, so fix that up here.\n origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin\n\n if origin in {tuple, typing.Tuple}:\n return f'Tuple{contained_type_str}'\n if origin in {typing.Union}:\n # Annoying hack to detect Optional\n if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):\n not_none_param = contained[0] if contained[0] is not type(None) else contained[1]\n return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'\n return f'Union{contained_type_str}'\n if origin in {dict, typing.Dict}:\n return f'Dict{contained_type_str}'\n if origin in {list, typing.List}:\n return f'List{contained_type_str}'\n if origin in {type, typing.Type}:\n return 
f'Type{contained_type_str}'\n if isinstance(t, typing.Callable):\n if len(contained) > 0 and contained[0] is not Ellipsis:\n return f'Callable[[{\", \".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'\n else:\n return f'Callable{contained_type_str}'\n\n raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'\n f'Please add support for this type and confirm with the '\n f'FX team that your signature change is valid.')\n\n\n def test_function_back_compat(self):\n \"\"\"\n Test backward compatibility for function signatures with\n @compatibility(is_backward_compatible=True). Currently this checks for\n exact signature matches, which may lead to false positives. If this\n becomes too annoying, we can refine this check to actually parse out\n the saved schema strings and check if the change is truly backward-\n incompatible.\n \"\"\"\n signature_strs = []\n\n for obj in _BACK_COMPAT_OBJECTS:\n if not isinstance(obj, type):\n signature_strs.append(self._fn_to_stable_annotation_str(obj))\n\n signature_strs.sort()\n\n try:\n self.assertExpected('\\n'.join(signature_strs), 'fx_backcompat_function_signatures')\n except AssertionError as e:\n msg = f\"{e}\\n****** ERROR ******\\nAn FX function that has been marked \" \\\n f\"as backwards-compatible has experienced a signature change. See the \" \\\n f\"above exception context for more information. If this change was \" \\\n f\"unintended, please revert it. If it was intended, check with the FX \" \\\n f\"team to ensure that the proper deprecation protocols have been followed \" \\\n f\"and subsequently --accept the change.\"\n raise AssertionError(msg)\n\n def test_class_member_back_compat(self):\n \"\"\"\n Test backward compatibility for members of classes with\n @compatibility(is_backward_compatible=True). Currently this checks for\n exact matches on the publicly visible members of the class.\n \"\"\"\n class_method_strs = []\n\n for obj in _BACK_COMPAT_OBJECTS:\n if isinstance(obj, type):\n public_members = [name for name in obj.__dict__ if not name.startswith('_')]\n class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')\n\n class_method_strs.sort()\n\n try:\n self.assertExpected('\\n'.join(class_method_strs), 'fx_backcompat_class_members')\n except AssertionError as e:\n msg = f\"{e}\\n****** ERROR ******\\nAn FX class that has been marked \" \\\n f\"as backwards-compatible has experienced change in its public members. See the \" \\\n f\"above exception context for more information. If this change was \" \\\n f\"unintended, please revert it. 
If it was intended, check with the FX \" \\\n f\"team to ensure that the proper deprecation protocols have been followed \" \\\n f\"and subsequently --accept the change.\"\n raise AssertionError(msg)\n\n def test_public_api_surface(self):\n non_back_compat_objects = {}\n\n def check_symbols_have_bc_designation(m, prefix):\n if not m.__name__.startswith('torch.fx'):\n return\n if m.__name__.startswith('torch.fx.experimental'):\n return\n for k, v in m.__dict__.items():\n if v is m:\n continue\n if k.startswith('_'):\n continue\n if isinstance(v, types.ModuleType):\n check_symbols_have_bc_designation(v, prefix + [k])\n elif isinstance(v, type) or isinstance(v, types.FunctionType):\n if v not in _MARKED_WITH_COMATIBLITY:\n non_back_compat_objects.setdefault(v)\n\n check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])\n check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])\n\n non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]\n # Only want objects in torch.fx\n non_back_compat_strs = [\n s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]\n # Only want objects in public namespaces\n non_back_compat_strs = [\n s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]\n non_back_compat_strs.sort()\n\n if len(non_back_compat_strs) != 0:\n raise AssertionError(f\"Public FX API(s) {non_back_compat_strs} introduced but not given a \"\n f\"backwards-compatibility classification! Please decorate these \"\n f\"API(s) with `@torch.fx._compatibility.compatibility` to specify \"\n f\"BC guarantees.\")\n\nclass TestFunctionalTracing(JitTestCase):\n def setUp(self):\n # Checking for mutable operations whil tracing is feature flagged\n # Enable it in testing but not by default\n self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations\n torch.fx.proxy.TracerBase.check_mutable_operations = True\n\n def tearDown(self):\n torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag\n\n IGNORE_FUNCS = (\"has_torch_function\", \"has_torch_function_unary\",\n \"has_torch_function_variadic\", \"handle_torch_function\",\n \"boolean_dispatch\")\n TO_PATCH = {\"has_torch_function\": None,\n \"has_torch_function_unary\": None,\n \"has_torch_function_variadic\": None}\n\n BUILT_IN_FUNC = (AssertionError, \"\")\n PROXY_ITERABLE = (TypeError, r\"argument of type 'Proxy' is not iterable\")\n PROXY_ITERATED = (TraceError, r\"Proxy object cannot be iterated\")\n LEN_ERROR = (RuntimeError, r\"'len' is not supported in symbolic tracing by default\")\n ARG_TYPE_MISMATCH = (TypeError, r\", not Proxy$\")\n CONTROL_FLOW = (TraceError, r\"symbolically traced variables cannot be used as inputs to control flow\")\n INTERPOLATE_ARGS_CONFLICT = (ValueError, r\"only one of size or scale_factor should be defined\")\n MUTABLE = (RuntimeError, r\"Tried to trace mutable operation\")\n\n UNTRACEABLE_FUNCTIONALS = {\n \"adaptive_avg_pool1d\": BUILT_IN_FUNC,\n \"avg_pool1d\": BUILT_IN_FUNC,\n \"avg_pool2d\": BUILT_IN_FUNC,\n \"avg_pool3d\": BUILT_IN_FUNC,\n \"bilinear\": BUILT_IN_FUNC,\n \"celu_\": BUILT_IN_FUNC,\n \"channel_shuffle\": BUILT_IN_FUNC,\n \"native_channel_shuffle\": BUILT_IN_FUNC,\n \"conv1d\": BUILT_IN_FUNC,\n \"conv2d\": BUILT_IN_FUNC,\n \"conv3d\": BUILT_IN_FUNC,\n \"conv_tbc\": BUILT_IN_FUNC,\n \"conv_transpose1d\": BUILT_IN_FUNC,\n \"conv_transpose2d\": BUILT_IN_FUNC,\n \"conv_transpose3d\": BUILT_IN_FUNC,\n 
\"cosine_similarity\": BUILT_IN_FUNC,\n \"elu_\": BUILT_IN_FUNC,\n \"gelu\": BUILT_IN_FUNC,\n \"hardshrink\": BUILT_IN_FUNC,\n \"hardtanh_\": BUILT_IN_FUNC,\n \"leaky_relu_\": BUILT_IN_FUNC,\n \"linear\": BUILT_IN_FUNC,\n \"logsigmoid\": BUILT_IN_FUNC,\n \"one_hot\": BUILT_IN_FUNC,\n \"pairwise_distance\": BUILT_IN_FUNC,\n \"pdist\": BUILT_IN_FUNC,\n \"pixel_shuffle\": BUILT_IN_FUNC,\n \"pixel_unshuffle\": BUILT_IN_FUNC,\n \"prelu\": BUILT_IN_FUNC,\n \"relu_\": BUILT_IN_FUNC,\n \"rrelu_\": BUILT_IN_FUNC,\n \"selu_\": BUILT_IN_FUNC,\n \"softplus\": BUILT_IN_FUNC,\n \"softshrink\": BUILT_IN_FUNC,\n \"threshold_\": BUILT_IN_FUNC,\n\n \"adaptive_avg_pool2d\": LEN_ERROR,\n \"adaptive_avg_pool3d\": LEN_ERROR,\n \"adaptive_max_pool2d_with_indices\": LEN_ERROR,\n \"adaptive_max_pool3d_with_indices\": LEN_ERROR,\n \"instance_norm\": CONTROL_FLOW,\n \"pad\": LEN_ERROR,\n\n \"adaptive_max_pool1d\": PROXY_ITERABLE,\n \"adaptive_max_pool2d\": PROXY_ITERABLE,\n \"adaptive_max_pool3d\": PROXY_ITERABLE,\n \"fractional_max_pool2d\": PROXY_ITERABLE,\n \"fractional_max_pool3d\": PROXY_ITERABLE,\n \"max_pool1d\": PROXY_ITERABLE,\n \"max_pool2d\": PROXY_ITERABLE,\n \"max_pool3d\": PROXY_ITERABLE,\n\n \"group_norm\": PROXY_ITERATED,\n \"lp_pool2d\": PROXY_ITERATED,\n \"max_unpool1d\": PROXY_ITERATED,\n \"max_unpool2d\": PROXY_ITERATED,\n \"max_unpool3d\": PROXY_ITERATED,\n\n \"adaptive_max_pool1d_with_indices\": ARG_TYPE_MISMATCH,\n \"fractional_max_pool2d_with_indices\": ARG_TYPE_MISMATCH,\n \"fractional_max_pool3d_with_indices\": ARG_TYPE_MISMATCH,\n \"layer_norm\": ARG_TYPE_MISMATCH,\n \"lp_pool1d\": ARG_TYPE_MISMATCH,\n\n \"affine_grid\": CONTROL_FLOW,\n \"alpha_dropout\": CONTROL_FLOW,\n \"batch_norm\": CONTROL_FLOW,\n \"binary_cross_entropy\": CONTROL_FLOW,\n \"binary_cross_entropy_with_logits\": CONTROL_FLOW,\n \"celu\": CONTROL_FLOW,\n \"cosine_embedding_loss\": CONTROL_FLOW,\n \"cross_entropy\": CONTROL_FLOW,\n \"ctc_loss\": CONTROL_FLOW,\n \"dropout\": CONTROL_FLOW,\n \"dropout2d\": CONTROL_FLOW,\n \"dropout3d\": CONTROL_FLOW,\n \"elu\": CONTROL_FLOW,\n \"embedding\": CONTROL_FLOW,\n \"embedding_bag\": CONTROL_FLOW,\n \"feature_alpha_dropout\": CONTROL_FLOW,\n \"fold\": CONTROL_FLOW,\n \"gaussian_nll_loss\": CONTROL_FLOW,\n \"glu\": CONTROL_FLOW,\n \"grid_sample\": CONTROL_FLOW,\n \"gumbel_softmax\": CONTROL_FLOW,\n \"hardsigmoid\": CONTROL_FLOW,\n \"hardswish\": CONTROL_FLOW,\n \"hardtanh\": CONTROL_FLOW,\n \"hinge_embedding_loss\": CONTROL_FLOW,\n \"huber_loss\": CONTROL_FLOW,\n \"interpolate\": CONTROL_FLOW,\n \"kl_div\": CONTROL_FLOW,\n \"l1_loss\": CONTROL_FLOW,\n \"leaky_relu\": CONTROL_FLOW,\n \"local_response_norm\": CONTROL_FLOW,\n \"margin_ranking_loss\": CONTROL_FLOW,\n \"max_pool1d_with_indices\": CONTROL_FLOW,\n \"max_pool2d_with_indices\": CONTROL_FLOW,\n \"max_pool3d_with_indices\": CONTROL_FLOW,\n \"mse_loss\": CONTROL_FLOW,\n \"multi_head_attention_forward\": CONTROL_FLOW,\n \"multi_margin_loss\": CONTROL_FLOW,\n \"multilabel_margin_loss\": CONTROL_FLOW,\n \"multilabel_soft_margin_loss\": CONTROL_FLOW,\n \"nll_loss\": CONTROL_FLOW,\n \"poisson_nll_loss\": CONTROL_FLOW,\n \"relu\": CONTROL_FLOW,\n \"relu6\": CONTROL_FLOW,\n \"rrelu\": CONTROL_FLOW,\n \"selu\": CONTROL_FLOW,\n \"silu\": CONTROL_FLOW,\n \"mish\": CONTROL_FLOW,\n \"smooth_l1_loss\": CONTROL_FLOW,\n \"soft_margin_loss\": CONTROL_FLOW,\n \"threshold\": CONTROL_FLOW,\n \"triplet_margin_loss\": CONTROL_FLOW,\n \"triplet_margin_with_distance_loss\": CONTROL_FLOW,\n \"unfold\": CONTROL_FLOW,\n \"upsample\": CONTROL_FLOW,\n\n 
\"upsample_bilinear\": INTERPOLATE_ARGS_CONFLICT,\n \"upsample_nearest\": INTERPOLATE_ARGS_CONFLICT,\n\n \"normalize\" : MUTABLE,\n }\n\n # List of nn.functionals with Tensor inputs but not with type annotation\n FUNCTIONALS_WITHOUT_ANNOTATION = (\n \"adaptive_max_pool1d\",\n \"adaptive_max_pool2d\",\n \"adaptive_max_pool3d\",\n \"fractional_max_pool2d\",\n \"fractional_max_pool3d\",\n \"max_pool1d\",\n \"max_pool2d\",\n \"max_pool3d\",\n \"gaussian_nll_loss\",\n \"upsample\",\n \"upsample_bilinear\",\n \"upsample_nearest\",\n )\n\n # Inconsistent behavior between Python 3.8 and other Python versions:\n # - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`\n # - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same\n # internal exception above\n # Use the following map to override the expected exception for Python 3.8\n UNTRACEABLE_FUNCTIONALS_PY38 = {\n \"adaptive_max_pool1d\": PROXY_ITERATED,\n \"adaptive_max_pool2d\": PROXY_ITERATED,\n \"adaptive_max_pool3d\": PROXY_ITERATED,\n \"fractional_max_pool2d\": PROXY_ITERATED,\n \"fractional_max_pool3d\": PROXY_ITERATED,\n \"max_pool1d\": PROXY_ITERATED,\n \"max_pool2d\": PROXY_ITERATED,\n \"max_pool3d\": PROXY_ITERATED,\n\n \"group_norm\": LEN_ERROR\n }\n\n @classmethod\n def _get_functional(cls):\n functional_list = []\n for f in dir(torch.nn.functional):\n if not f.islower():\n continue\n # Ignore internal functions\n if f.startswith('_'):\n continue\n # Ignore supporting functions\n if f in cls.IGNORE_FUNCS:\n continue\n fn = getattr(torch.nn.functional, f)\n # Ignore non-callable object like modules\n if not isinstance(fn, Callable):\n continue\n if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:\n try:\n sig = inspect.signature(fn)\n has_tensor_arg = False\n for arg, param in sig.parameters.items():\n if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):\n has_tensor_arg = True\n if not has_tensor_arg:\n continue\n # No signature or Object is not supported\n except ValueError:\n pass\n functional_list.append((f, fn))\n return functional_list\n\n @classmethod\n def generate_test_func(cls, func_name, fn):\n\n def functional_test(self):\n if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \\\n sys.version_info >= (3, 8) and sys.version_info < (3, 10):\n exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]\n with self.assertRaisesRegex(exc, err):\n symbolic_trace(fn)\n elif func_name in self.UNTRACEABLE_FUNCTIONALS:\n exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]\n with self.assertRaisesRegex(exc, err):\n symbolic_trace(fn)\n else:\n symbolic_trace(fn)\n return functional_test\n\n @classmethod\n def generate_tests(cls):\n functional_list = cls._get_functional()\n for func_name, fn in functional_list:\n test_name = \"test_nn_functional_\" + func_name\n functional_test = cls.generate_test_func(func_name, fn)\n setattr(cls, test_name, functional_test)\n\n @classmethod\n def setUpClass(cls):\n\n def no(*args, **kwargs):\n return False\n\n for name in cls.TO_PATCH.keys():\n cls.TO_PATCH[name] = getattr(torch.nn.functional, name)\n setattr(torch.nn.functional, name, no)\n\n @classmethod\n def tearDownClass(cls):\n for name in cls.TO_PATCH.keys():\n setattr(torch.nn.functional, name, cls.TO_PATCH[name])\n\nTestFunctionalTracing.generate_tests()\n\n\ninstantiate_device_type_tests(TestOperatorSignatures, globals())\n\n@skipIfNoTorchVision\nclass TestVisionTracing(JitTestCase):\n def setUp(self):\n # Checking for mutable operations whil tracing is feature flagged\n # Enable 
it in testing but not by default\n self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations\n torch.fx.proxy.TracerBase.check_mutable_operations = True\n\n def tearDown(self):\n torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag\n\n PROXY_ITERATED = (TraceError, r\"Proxy object cannot be iterated\")\n INCONSISTENT_TYPE = (\n RuntimeError,\n r\"Return value was annotated as having type __torch__.torchvision.models[.\\w]+ but is actually of type Tensor\"\n )\n\n UNTRACEABLE_MODELS = {\n \"fasterrcnn_resnet50_fpn\": PROXY_ITERATED,\n \"fasterrcnn_mobilenet_v3_large_320_fpn\": PROXY_ITERATED,\n \"fasterrcnn_mobilenet_v3_large_fpn\": PROXY_ITERATED,\n \"maskrcnn_resnet50_fpn\": PROXY_ITERATED,\n \"keypointrcnn_resnet50_fpn\": PROXY_ITERATED,\n \"retinanet_resnet50_fpn\": PROXY_ITERATED,\n }\n UNSCRIPTABLE_MODELS = {\n \"googlenet\": INCONSISTENT_TYPE,\n \"inception_v3\": INCONSISTENT_TYPE,\n }\n\n output_transform = {\n \"fcn_resnet50\": lambda x: x[\"out\"],\n \"fcn_resnet101\": lambda x: x[\"out\"],\n \"deeplabv3_resnet50\": lambda x: x[\"out\"],\n \"deeplabv3_resnet101\": lambda x: x[\"out\"],\n \"deeplabv3_mobilenet_v3_large\": lambda x: x[\"out\"],\n \"lraspp_mobilenet_v3_large\": lambda x: x[\"out\"],\n \"fasterrcnn_resnet50_fpn\": lambda x: x[1],\n \"fasterrcnn_mobilenet_v3_large_fpn\": lambda x: x[1],\n \"fasterrcnn_mobilenet_v3_large_320_fpn\": lambda x: x[1],\n \"maskrcnn_resnet50_fpn\": lambda x: x[1],\n \"keypointrcnn_resnet50_fpn\": lambda x: x[1],\n \"retinanet_resnet50_fpn\": lambda x: x[1],\n }\n\n @classmethod\n def generate_test_fn(cls, name, model_fn, x, kwargs):\n def run_test(self):\n model = model_fn(**kwargs)\n model = model.eval()\n if name in self.UNTRACEABLE_MODELS:\n err, exc = self.UNTRACEABLE_MODELS[name]\n with self.assertRaisesRegex(err, exc):\n graph = symbolic_trace(model)\n else:\n out_transform = self.output_transform.get(name, lambda x: x)\n graph : torch.fx.GraphModule = symbolic_trace(model)\n a = out_transform(model(x))\n b = out_transform(graph(x))\n self.assertEqual(a, b)\n\n if name in self.UNSCRIPTABLE_MODELS:\n err, exc = self.UNSCRIPTABLE_MODELS[name]\n with self.assertRaisesRegex(err, exc):\n script = torch.jit.script(graph)\n else:\n script = torch.jit.script(graph)\n c = out_transform(script(x))\n self.assertEqual(a, c)\n\n return run_test\n\n @classmethod\n def generate_classification_tests(cls):\n for k, v in torchvision_models.__dict__.items():\n if callable(v) and k[0].lower() == k[0] and k[0] != \"_\":\n test_name = 'test_torchvision_models_' + k\n x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)\n kwargs = dict(num_classes=50)\n model_test = cls.generate_test_fn(k, v, x, kwargs)\n setattr(cls, test_name, model_test)\n\n @classmethod\n def generate_segmentation_tests(cls):\n for k, v in torchvision_models.segmentation.__dict__.items():\n if callable(v) and k[0].lower() == k[0] and k[0] != \"_\":\n test_name = 'test_torchvision_models_segmentation_' + k\n x = torch.rand(1, 3, 32, 32)\n kwargs = dict(num_classes=10, pretrained_backbone=False)\n model_test = cls.generate_test_fn(k, v, x, kwargs)\n setattr(cls, test_name, model_test)\n\n @classmethod\n def generate_detection_tests(cls):\n for k, v in torchvision_models.detection.__dict__.items():\n if callable(v) and k[0].lower() == k[0] and k[0] != \"_\":\n test_name = 'test_torchvision_models_detection_' + k\n x = [torch.rand(3, 300, 300)]\n kwargs = dict(num_classes=10, pretrained_backbone=False)\n 
model_test = cls.generate_test_fn(k, v, x, kwargs)\n setattr(cls, test_name, model_test)\n\n @classmethod\n def generate_video_tests(cls):\n for k, v in torchvision_models.video.__dict__.items():\n if callable(v) and k[0].lower() == k[0] and k[0] != \"_\":\n test_name = 'test_torchvision_models_video_' + k\n x = torch.rand(1, 3, 4, 112, 112)\n kwargs = dict(num_classes=50)\n model_test = cls.generate_test_fn(k, v, x, kwargs)\n setattr(cls, test_name, model_test)\n\n @classmethod\n def generate_tests(cls):\n cls.generate_classification_tests()\n cls.generate_detection_tests()\n cls.generate_segmentation_tests()\n cls.generate_video_tests()\n\nif HAS_TORCHVISION:\n TestVisionTracing.generate_tests()\n\nif __name__ == '__main__':\n run_tests()\n"
] | [
[
"torch.jit.script",
"torch.fx.immutable_collections.immutable_dict",
"torch.classes._TorchScriptTesting._ReLUClass",
"torch.fx.wrap",
"torch.nn.LeakyReLU",
"torch.rand_like",
"torch.testing._internal.common_utils.run_tests",
"torch.rand",
"torch.nn.Conv2d",
"torch.testing._internal.common_device_type.ops",
"torch.nn.Module",
"torch.max",
"torch.neg",
"torch.fx._symbolic_trace._wrapped_methods_to_patch.pop",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.EmbeddingBag",
"torch.nn.BatchNorm2d",
"torch.fx.Tracer",
"torch.fx.CodeGen",
"torch.nn.BatchNorm1d",
"torch.randn",
"torch.testing.assert_allclose",
"torch.add",
"torch.fx.Transformer",
"torch.ops.aten.sigmoid",
"torch.fx.Graph",
"torch.fx.symbolic_trace",
"torch.ops.aten.cat",
"torch.sin",
"torch.matmul",
"torch.classes._TorchScriptTesting._ElementwiseInterpreter",
"torch.tanh",
"torch.arange",
"torch.sigmoid",
"torch.testing.assert_close",
"torch.Tensor",
"torch.fx.Node",
"torch.ones",
"torch.typename",
"torch.fx.Proxy",
"torch.fx.experimental.rewriter.RewritingTracer",
"torch.tensor",
"torch.utils._pytree.tree_flatten",
"torch.classes._TorchScriptTesting._StackString",
"torch.fx.operator_schemas.get_signature_for_torch_op",
"torch.testing.FileCheck",
"torch._assert",
"torch.fx._symbolic_trace._wrapped_methods_to_patch.append",
"torch.fx.immutable_collections.immutable_list",
"torch.foo",
"torch.sum",
"torch.testing._internal.common_utils.find_library_location",
"torch.fx._pytree.register_pytree_flatten_spec",
"torch.nn.Linear",
"torch.dot",
"torch.Size",
"torch.nn.functional.linear",
"torch.empty_like",
"torch.nn.PReLU",
"torch.fx.GraphModule",
"torch.zeros_like",
"torch.nn.Embedding",
"torch.relu",
"torch.fx.passes.shape_prop.ShapeProp",
"torch.nn.RNN",
"torch.log_",
"torch.zeros",
"torch.fx.Interpreter",
"torch.LongTensor",
"torch.nn.ReLU",
"torch.nn.Conv3d",
"torch.squeeze",
"torch.multiprocessing.Process"
]
] |
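The test_fx.py row above exercises torch.fx symbolic tracing end to end. A minimal sketch of the traced round-trip those tests assert, using only the public torch.fx API; the function f and the tensor shapes here are illustrative, not taken from the dataset:

import torch
from torch.fx import symbolic_trace

def f(a, b):
    return a + b

gm = symbolic_trace(f)                 # GraphModule with the same semantics as f
x, y = torch.randn(3), torch.randn(3)
assert torch.equal(gm(x, y), f(x, y))  # traced module matches the original
print(gm.graph)                        # IR: placeholders a, b; call_function add; output
print(gm.code)                         # the regenerated Python forward()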
deepneuralmachine/google-research | [
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231"
] | [
"bam/task_specific/task.py",
"seq2act/models/input.py",
"robust_loss/fit_partition_spline.py",
"tcc/algos/tcn.py"
] | [
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base class for tasks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport csv\nimport os\nimport tensorflow.compat.v1 as tf\n\n\nclass Example(object):\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, task_name):\n self.task_name = task_name\n\n\nclass Task(object):\n \"\"\"Override this class to add a new task.\"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, config, name, long_sequences=False):\n self.config = config\n self.name = name\n self.long_sequences = long_sequences\n\n def get_examples(self, split):\n return self.load_data(split + \".tsv\", split)\n\n def get_test_splits(self):\n return [\"test\"]\n\n def load_data(self, fname, split):\n examples = self._create_examples(\n read_tsv(os.path.join(self.config.raw_data_dir(self.name), fname),\n max_lines=50 if self.config.debug else None),\n split)\n return examples\n\n @abc.abstractmethod\n def _create_examples(self, lines, split):\n pass\n\n @abc.abstractmethod\n def get_scorer(self):\n pass\n\n @abc.abstractmethod\n def get_feature_specs(self):\n pass\n\n @abc.abstractmethod\n def featurize(self, example, is_training):\n pass\n\n @abc.abstractmethod\n def get_prediction_module(self, bert_model, features, is_training,\n percent_done):\n pass\n\n def __repr__(self):\n return \"Task(\" + self.name + \")\"\n\n\ndef read_tsv(input_file, quotechar=None, max_lines=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for i, line in enumerate(reader):\n if max_lines and i >= max_lines:\n break\n lines.append(line)\n return lines\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The input function of seq2act models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom enum import Enum\nfrom tensor2tensor.layers import common_layers\nimport tensorflow.compat.v1 as tf # tf\n\nNUM_TOKENS_PER_OBJ = 30\nNUM_TOKENS_PER_SYN = 30\n\n\nclass DataSource(Enum):\n \"\"\"The class that represents word2act data source.\"\"\"\n RICO_SCA = 'rico_sca'\n ANDROID_HOWTO = 'android_howto'\n PIXEL_HELP = 'pixel_help'\n\n @staticmethod\n def from_str(label):\n if label == 'rico_sca':\n return DataSource.RICO_SCA\n elif label == 'android_howto':\n return DataSource.ANDROID_HOWTO\n elif label == 'pixel_help':\n return DataSource.PIXEL_HELP\n else:\n raise ValueError('Unrecognized source %s' % label)\n\n\nMAX_UI_OBJECT_NUM = {\n DataSource.PIXEL_HELP: 93,\n}\n\nMAX_TOKEN_NUM = {\n DataSource.ANDROID_HOWTO: 30,\n DataSource.RICO_SCA: 30,\n DataSource.PIXEL_HELP: 153,\n}\n\n# ['connect_str', token_id(connector_str)]\n# token id based on all_source_lower_case_vocab_59429\nPADDED_CONCATENATORS = [\n [5, 0, 0],\n [115, 0, 0],\n [8, 32, 0],\n [115, 8, 32],\n [12, 0, 0],\n]\n\nCONCATENATORS_STR = [\n ', ',\n ' , ',\n ' and then ',\n ' , and then ',\n '. 
'\n]\n\n\ndef _construct_padding_info(data_source, load_dom_dist, load_extra):\n \"\"\"Constructs the padding info tuple.\"\"\"\n token_num = MAX_TOKEN_NUM[data_source]\n # Model uses this anchor padding value to mask out the padded features.\n anchor_padding_value_int = tf.cast(-1, tf.int32)\n padding_value_int = tf.cast(0, tf.int32)\n padding_value_str = tf.cast('', tf.string)\n # Tuple of (feature name, padded_shape, padded_value)\n padding_info = [\n ('task', [None], padding_value_int),\n ('rule', [], padding_value_int),\n ('verbs', [None], padding_value_int),\n ('input_refs', [None, 2], padding_value_int),\n ('obj_refs', [None, 2], padding_value_int),\n ('verb_refs', [None, 2], padding_value_int),\n ('objects', [None], padding_value_int),\n ('obj_text', [None, None, token_num], padding_value_int),\n ('obj_type', [None, None], anchor_padding_value_int),\n ('obj_clickable', [None, None], padding_value_int),\n ('obj_screen_pos', [None, None, 4], tf.cast(0, tf.int32)),\n ('obj_dom_pos', [None, None, 3], padding_value_int),\n ('agreement_count', [], padding_value_int),\n ('data_source', [], padding_value_int),\n ]\n if load_dom_dist:\n padding_info.append(('obj_dom_dist', [None, None, None], padding_value_int))\n if load_extra:\n padding_info.append(('task_id', [], padding_value_str))\n padding_info.append(('raw_task', [], padding_value_str))\n padding_info.append(('obj_raw_text', [None, None], padding_value_str))\n\n padded_shapes = {}\n padded_values = {}\n for (key, padding_shape, padding_value) in padding_info:\n padded_shapes[key] = padding_shape\n padded_values[key] = padding_value\n return padded_shapes, padded_values\n\n\ndef input_fn(data_files,\n batch_size,\n repeat=-1,\n data_source=DataSource.RICO_SCA,\n required_agreement=2,\n max_range=1000,\n max_dom_pos=2000,\n max_pixel_pos=100,\n load_dom_dist=False,\n load_extra=False,\n buffer_size=8 * 1024,\n shuffle_size=8 * 1024,\n required_rule_id_list=None,\n shuffle_repeat=True,\n mean_synthetic_length=1.0,\n stddev_synthetic_length=0.0,\n load_screen=True,\n shuffle_files=True):\n \"\"\"Retrieves batches of data for training.\n\n Adds padding to ensure all dimension in one batch are always same.\n\n Args:\n data_files: A list of file names to initialize the TFRecordDataset\n batch_size: Number for the size of the batch.\n repeat: the number of times to repeat the input data.\n data_source: A DataSource instance.\n required_agreement: the minimum agreement required.\n max_range: the max range.\n max_dom_pos: the max dom pos.\n max_pixel_pos: the max screen pixels.\n load_dom_dist: whether to load the dom distance feature.\n load_extra: whether to load the raw text data.\n buffer_size: the buffer size for prefetching.\n shuffle_size: the shuffle size.\n required_rule_id_list: the list of required rule ids.\n shuffle_repeat: whether to shuffle and repeat.\n mean_synthetic_length: the mean length for synthetic sequence.\n stddev_synthetic_length: the stddev length for synthetic sequence.\n load_screen: whether to load screen features.\n shuffle_files: shuffling file names.\n Returns:\n a tf.dataset.Dateset object.\n Raises:\n ValueError: The data_format is neither 'recordio' nor 'tfrecord'.\n \"\"\"\n if not isinstance(data_source, DataSource):\n assert False, 'data_source %s unsupported' % str(data_source)\n padded_shapes, padded_values = _construct_padding_info(\n data_source, load_dom_dist, load_extra)\n if not isinstance(data_files, (list,)):\n data_files = [data_files]\n all_files = tf.concat(\n values=[tf.matching_files(f) for 
f in data_files], axis=0)\n if repeat == -1 and shuffle_files:\n all_files = tf.random.shuffle(all_files)\n if data_files[0].endswith('.recordio'):\n dataset = tf.data.RecordIODataset(all_files)\n elif data_files[0].endswith('.tfrecord'):\n dataset = tf.data.TFRecordDataset(\n all_files, num_parallel_reads=10 if repeat == -1 else None)\n else:\n assert False, 'Data_format %s is not supported.' % data_files[0]\n\n def _map_fn(x):\n return parse_tf_example(x, data_source, max_range, max_dom_pos,\n max_pixel_pos, load_dom_dist=load_dom_dist,\n load_extra=load_extra,\n append_eos=(data_source != DataSource.RICO_SCA or\n mean_synthetic_length == 1.0),\n load_screen=load_screen)\n dataset = dataset.map(_map_fn)\n def _is_enough_agreement(example):\n return tf.greater_equal(example['agreement_count'], required_agreement)\n dataset = dataset.filter(_is_enough_agreement)\n\n def _length_filter(example):\n return tf.less(tf.shape(example['obj_refs'])[0], 20)\n dataset = dataset.filter(_length_filter)\n\n def _filter_data_by_rule(example, rule_id_list):\n return tf.reduce_any(\n [tf.equal(example['rule'], rule_id) for rule_id in rule_id_list])\n if data_source == DataSource.RICO_SCA and required_rule_id_list is not None:\n dataset = dataset.filter(\n lambda x: _filter_data_by_rule(x, required_rule_id_list))\n\n # (TODO: liyang) tf.data.experimental.bucket_by_sequence_length\n if shuffle_repeat:\n dataset = dataset.apply(tf.data.experimental.shuffle_and_repeat(\n shuffle_size, count=repeat))\n dataset = dataset.padded_batch(\n batch_size, padded_shapes=padded_shapes, padding_values=padded_values)\n if data_source == DataSource.RICO_SCA and mean_synthetic_length > 1.0:\n def _stitch_fn(x):\n return _batch_stitch(x, mean_length=mean_synthetic_length,\n stddev=stddev_synthetic_length)\n dataset = dataset.map(_stitch_fn)\n dataset = dataset.prefetch(buffer_size=buffer_size)\n return dataset\n\n\ndef hybrid_input_fn(data_files_list,\n data_source_list,\n batch_size_list,\n max_range=1000,\n max_dom_pos=2000,\n max_pixel_pos=100,\n load_dom_dist=False,\n load_extra=False,\n buffer_size=8 * 1024,\n mean_synthetic_length=1.0,\n stddev_synthetic_length=0.0,\n hybrid_batch_size=128,\n boost_input=False,\n load_screen=True,\n shuffle_size=1024):\n \"\"\"Combines multiple datasouces.\"\"\"\n mixed_dataset = None\n for data_files, data_source, batch_size in zip(\n data_files_list, data_source_list, batch_size_list):\n dataset = input_fn(data_files, batch_size, repeat=-1,\n data_source=data_source,\n required_agreement=-1,\n max_range=max_range, max_dom_pos=max_dom_pos,\n max_pixel_pos=max_pixel_pos,\n load_dom_dist=load_dom_dist,\n load_extra=load_extra,\n buffer_size=0,\n mean_synthetic_length=mean_synthetic_length,\n stddev_synthetic_length=stddev_synthetic_length,\n shuffle_repeat=False,\n load_screen=load_screen)\n if mixed_dataset is None:\n mixed_dataset = dataset\n else:\n mixed_dataset = dataset.concatenate(mixed_dataset)\n\n mixed_dataset = mixed_dataset.unbatch()\n # Boost input examples\n if boost_input:\n def _input_booster(example):\n with tf.control_dependencies([tf.rank(example['input_refs']), 2]):\n has_input = tf.reduce_any(\n tf.greater(example['input_refs'][:, 1],\n example['input_refs'][:, 0]))\n return tf.logical_or(has_input, tf.less(tf.random_uniform([]), 0.1))\n dataset = dataset.filter(_input_booster)\n # Remix single examples\n mixed_dataset = mixed_dataset.shuffle(hybrid_batch_size * shuffle_size)\n # Batch again\n padded_shapes, padded_values = _construct_padding_info(\n 
data_source_list[0], load_dom_dist, load_extra)\n mixed_dataset = mixed_dataset.padded_batch(\n hybrid_batch_size, padded_shapes=padded_shapes,\n padding_values=padded_values)\n mixed_dataset = mixed_dataset.repeat()\n mixed_dataset = mixed_dataset.prefetch(buffer_size=buffer_size)\n return mixed_dataset\n\n\ndef parse_tf_example(example_proto,\n data_source,\n max_range=100,\n max_dom_pos=2000,\n max_pixel_pos=100,\n load_dom_dist=False,\n load_extra=False,\n append_eos=True,\n load_screen=True):\n \"\"\"Parses an example TFRecord proto into dictionary of tensors.\n\n Args:\n example_proto: TFRecord format proto that contains screen information.\n data_source: A DataSource instance.\n max_range: the max range.\n max_dom_pos: the maximum dom positoin.\n max_pixel_pos: the max dom position.\n load_dom_dist: whether to load the feature.\n load_extra: whether to load the extra data for debugging.\n append_eos: whether to append eos.\n load_screen: whether to load screen features.\n Returns:\n feature: The parsed tensor dictionary with the input feature data\n label: The parsed label tensor with the input label for the feature\n \"\"\"\n feature_spec = {\n 'instruction_word_id_seq':\n tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),\n 'input_str_position_seq':\n tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),\n 'obj_desc_position_seq':\n tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),\n 'verb_str_position_seq':\n tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),\n 'agreement_count':\n tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),\n 'instruction_rule_id':\n tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True)\n }\n if load_screen:\n feature_spec['verb_id_seq'] = tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True)\n feature_spec['ui_target_id_seq'] = tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True)\n feature_spec['ui_obj_word_id_seq'] = tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True)\n feature_spec['ui_obj_type_id_seq'] = tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True)\n feature_spec['ui_obj_clickable_seq'] = tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True)\n feature_spec['ui_obj_cord_x_seq'] = tf.FixedLenSequenceFeature(\n [], tf.float32, allow_missing=True)\n feature_spec['ui_obj_cord_y_seq'] = tf.FixedLenSequenceFeature(\n [], tf.float32, allow_missing=True)\n feature_spec['ui_obj_dom_location_seq'] = tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True)\n\n if load_dom_dist:\n feature_spec['ui_obj_dom_distance'] = tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True)\n if load_extra:\n feature_spec['instruction_str'] = tf.FixedLenSequenceFeature(\n [], tf.string, allow_missing=True)\n feature_spec['task_id'] = tf.FixedLenSequenceFeature(\n [], tf.string, allow_missing=True)\n feature_spec['ui_obj_str_seq'] = tf.FixedLenSequenceFeature(\n [], tf.string, allow_missing=True)\n\n feature_dict = tf.parse_single_example(example_proto, feature_spec)\n\n for key in feature_dict:\n if feature_dict[key].dtype == tf.int64:\n feature_dict[key] = tf.cast(feature_dict[key], tf.int32)\n if data_source == DataSource.ANDROID_HOWTO:\n tf.logging.info('Parsing android_howto dataset')\n feature = _process_android_howto(feature_dict, max_range=max_range,\n load_dom_dist=load_dom_dist,\n load_extra=load_extra)\n elif data_source == DataSource.RICO_SCA:\n tf.logging.info('Parsing synthetic dataset')\n feature = _process_rico_sca(\n feature_dict, 
max_range=max_range, max_dom_pos=max_dom_pos,\n load_dom_dist=load_dom_dist,\n load_extra=load_extra,\n load_screen=load_screen)\n elif data_source == DataSource.PIXEL_HELP:\n tf.logging.info('Parsing test dataset')\n feature = _process_pixel_help(feature_dict, data_source,\n load_dom_dist=load_dom_dist,\n load_extra=load_extra)\n else:\n raise ValueError('Unsupported datasource %s' % str(data_source))\n # Remove padding from \"task\"\n feature['task'] = tf.boolean_mask(feature['task'],\n tf.not_equal(feature['task'], 0))\n feature['obj_screen_pos'] = tf.to_int32(\n feature['obj_screen_pos'] * (max_pixel_pos - 1))\n # Appending EOS and padding to match the appended length\n if append_eos:\n feature['input_refs'] = tf.pad(feature['input_refs'], [[0, 1], [0, 0]])\n feature['obj_refs'] = tf.pad(feature['obj_refs'], [[0, 1], [0, 0]])\n step_num = tf.size(feature['task'])\n feature['verb_refs'] = tf.concat(\n [feature['verb_refs'], [[step_num, step_num + 1]]], axis=0)\n feature['task'] = tf.pad(feature['task'], [[0, 1]], constant_values=1)\n feature['obj_text'] = tf.pad(feature['obj_text'], [[0, 1], [0, 0], [0, 0]])\n feature['obj_clickable'] = tf.pad(feature['obj_clickable'],\n [[0, 1], [0, 0]])\n feature['obj_type'] = tf.pad(\n feature['obj_type'], [[0, 1], [0, 0]], constant_values=-1)\n feature['obj_screen_pos'] = tf.pad(feature['obj_screen_pos'],\n [[0, 1], [0, 0], [0, 0]])\n feature['obj_dom_pos'] = tf.pad(feature['obj_dom_pos'],\n [[0, 1], [0, 0], [0, 0]])\n if load_dom_dist:\n feature['obj_dom_dist'] = tf.pad(feature['obj_dom_dist'],\n [[0, 1], [0, 0], [0, 0]])\n feature['objects'] = tf.pad(feature['objects'], [[0, 1]])\n feature['verbs'] = tf.pad(feature['verbs'], [[0, 1]])\n return feature\n\n\ndef _bound_refs(feature, max_range):\n \"\"\"Makes sure the refs are in the allowed range.\"\"\"\n for key in feature:\n if not key.endswith('_refs'):\n continue\n feature[key] = tf.where(\n tf.greater(feature[key][:, 1] - feature[key][:, 0], max_range),\n tf.stack([feature[key][:, 0], feature[key][:, 0] + max_range], axis=1),\n feature[key])\n\n\ndef _process_android_howto(feature_dict, max_range, load_dom_dist=False,\n load_extra=False):\n \"\"\"Processes webanswer feature dictionary.\n\n Args:\n feature_dict: feature dictionary\n max_range: the max range.\n load_dom_dist: whether to load the dom distance feature.\n load_extra: whether to load the extra data for debugging.\n Returns:\n A processed feature dictionary.\n \"\"\"\n\n feature = {\n 'task': tf.reshape(feature_dict['instruction_word_id_seq'], [-1]),\n 'input_refs': tf.reshape(feature_dict['input_str_position_seq'], [-1, 2]),\n 'obj_refs': tf.reshape(feature_dict['obj_desc_position_seq'], [-1, 2]),\n 'verb_refs': tf.reshape(feature_dict['verb_str_position_seq'], [-1, 2]),\n 'agreement_count': tf.reshape(feature_dict['agreement_count'], [])\n }\n if load_extra:\n feature['task_id'] = tf.constant('empty_task_id', dtype=tf.string)\n feature['raw_task'] = tf.reshape(feature_dict['instruction_str'], [])\n _bound_refs(feature, max_range)\n _load_fake_screen(feature, load_extra, load_dom_dist)\n return feature\n\n\ndef _load_fake_screen(feature, load_extra, load_dom_dist):\n \"\"\"Loads a fake screen.\"\"\"\n # Fills in fake ui object features into feature dictionary.\n step_num = tf.shape(feature['verb_refs'])[0]\n obj_num = 1\n if load_extra:\n feature['obj_raw_text'] = tf.fill([step_num, obj_num], '')\n feature['data_source'] = tf.constant(1, dtype=tf.int32)\n feature['obj_text'] = tf.zeros([step_num, obj_num, NUM_TOKENS_PER_OBJ],\n 
tf.int32)\n feature['obj_type'] = tf.cast(tf.fill([step_num, obj_num], -1), tf.int32)\n feature['obj_clickable'] = tf.zeros([step_num, obj_num], tf.int32)\n feature['obj_screen_pos'] = tf.zeros([step_num, obj_num, 4], tf.float32)\n feature['obj_dom_pos'] = tf.zeros([step_num, obj_num, 3], tf.int32)\n if load_dom_dist:\n feature['obj_dom_dist'] = tf.zeros([step_num, obj_num, obj_num], tf.int32)\n feature['objects'] = tf.zeros([step_num], tf.int32)\n feature['verbs'] = tf.zeros([step_num], tf.int32)\n feature['rule'] = tf.constant(5, dtype=tf.int32)\n\n\ndef _batch_stitch(features, mean_length=4.0, stddev=2.0):\n \"\"\"Stitches a batch of single-step data to a batch of multi-step data.\"\"\"\n batch_size = common_layers.shape_list(features['task'])[0]\n num_sequences = tf.maximum(\n tf.to_int32(tf.to_float(batch_size) / mean_length), 1)\n lengths = tf.random.truncated_normal(shape=[num_sequences],\n mean=mean_length, stddev=stddev)\n max_length = tf.reduce_max(lengths) * (\n tf.to_float(batch_size) / tf.reduce_sum(lengths))\n max_length = tf.to_int32(tf.ceil(max_length))\n total_items = max_length * num_sequences\n num_paddings = total_items - batch_size\n indices = tf.random.shuffle(tf.range(total_items))\n for key in features:\n shape_list = common_layers.shape_list(features[key])\n assert len(shape_list) >= 1\n with tf.control_dependencies([\n tf.assert_greater_equal(num_paddings, 0,\n name='num_paddings_positive')]):\n paddings = [[0, num_paddings]] + [[0, 0]] * (len(shape_list) - 1)\n features[key] = tf.pad(features[key], paddings,\n constant_values=-1 if key == 'obj_type' else 0)\n features[key] = tf.gather(features[key], indices)\n shape = [num_sequences, max_length]\n if len(shape_list) >= 2:\n shape += shape_list[1:]\n features[key] = tf.reshape(features[key], shape)\n # Remove all-padding seqs\n step_mask = tf.reduce_any(tf.greater(features['task'], 1), axis=-1)\n mask = tf.reduce_any(step_mask, axis=-1)\n step_mask = tf.boolean_mask(step_mask, mask)\n for key in features:\n features[key] = tf.boolean_mask(features[key], mask=mask)\n num_sequences = tf.shape(features['task'])[0]\n # Sort steps within each seq\n _, step_indices = tf.math.top_k(tf.to_int32(step_mask), k=max_length)\n step_indices = step_indices + tf.expand_dims(\n tf.range(num_sequences) * max_length, 1)\n step_indices = tf.reshape(step_indices, [-1])\n for key in features:\n shape_list = common_layers.shape_list(features[key])\n features[key] = tf.gather(tf.reshape(features[key], [-1] + shape_list[2:]),\n step_indices)\n features[key] = tf.reshape(features[key], shape_list)\n features = _stitch(features)\n return features\n\n\ndef _stitch(features):\n \"\"\"Stitch features on the first dimension.\"\"\"\n full_mask = tf.greater(features['task'], 1)\n step_mask = tf.reduce_any(full_mask, axis=-1)\n step_mask_exclude_last = tf.pad(step_mask,\n [[0, 0], [0, 1]],\n constant_values=False)[:, 1:]\n num_sequences = common_layers.shape_list(features['task'])[0]\n num_steps = common_layers.shape_list(features['task'])[1]\n connectors = tf.constant(PADDED_CONCATENATORS)\n # Select connectors\n connector_indices = tf.random.uniform(\n [num_sequences * num_steps], minval=0,\n maxval=len(PADDED_CONCATENATORS), dtype=tf.int32)\n selected_connectors = tf.reshape(\n tf.gather(connectors, connector_indices),\n [num_sequences, num_steps, len(PADDED_CONCATENATORS[0])])\n selected_connectors = tf.multiply(\n selected_connectors,\n tf.expand_dims(tf.to_int32(step_mask_exclude_last), 2),\n name='connector_mask')\n features['task'] = 
tf.concat([features['task'], selected_connectors], axis=-1)\n ref_offsets = tf.expand_dims(\n tf.cumsum(tf.reduce_sum(tf.to_int32(tf.greater(features['task'], 1)), -1),\n exclusive=True, axis=-1), 2)\n features['task'] = tf.reshape(features['task'], [num_sequences, -1])\n full_mask = tf.greater(features['task'], 1)\n full_mask_int = tf.to_int32(full_mask)\n indices = tf.where(tf.sequence_mask(lengths=tf.reduce_sum(full_mask_int, -1)))\n values = tf.boolean_mask(tf.reshape(features['task'], [-1]),\n tf.reshape(full_mask, [-1]))\n sparse_task = tf.sparse.SparseTensor(\n indices=indices, values=values,\n dense_shape=tf.to_int64(tf.shape(features['task'])))\n # Stitch task and raw_task\n stitched_features = {}\n stitched_features['task'] = tf.sparse_tensor_to_dense(sparse_task)\n max_len = tf.reduce_max(\n tf.reduce_sum(tf.to_int32(tf.greater(stitched_features['task'], 1)), -1))\n stitched_features['task'] = stitched_features['task'][:, :max_len]\n if 'raw_task' in features:\n connector_strs = tf.reshape(\n tf.gather(tf.constant(CONCATENATORS_STR), connector_indices),\n [num_sequences, num_steps])\n masked_connector_strs = tf.where(\n step_mask_exclude_last,\n connector_strs, tf.fill(tf.shape(connector_strs), ''))\n stitched_features['raw_task'] = tf.strings.reduce_join(\n tf.strings.reduce_join(tf.concat([\n tf.expand_dims(features['raw_task'], 2),\n tf.expand_dims(masked_connector_strs, 2)], axis=2), axis=-1), -1)\n # Stitch screen sequences\n action_lengths = tf.reduce_sum(tf.to_int32(\n tf.greater(features['verb_refs'][:, :, 0, 1],\n features['verb_refs'][:, :, 0, 0])), -1)\n max_action_length = tf.reduce_max(action_lengths)\n def _pad(tensor, padding_value=0):\n shape_list = common_layers.shape_list(tensor)\n assert len(shape_list) >= 2\n padding_list = [[0, 0], [0, 1]] + [[0, 0]] * (len(shape_list) - 2)\n return tf.pad(tensor[:, :max_action_length],\n padding_list, constant_values=padding_value)\n for key in features.keys():\n if key.endswith('_refs'):\n features[key] = tf.squeeze(features[key], 2)\n ref_mask = tf.expand_dims(tf.to_int32(\n tf.not_equal(features[key][:, :, 0],\n features[key][:, :, 1])), 2)\n stitched_features[key] = tf.multiply(\n (features[key] + ref_offsets), ref_mask, name='ref_mask')\n stitched_features[key] = _pad(stitched_features[key])\n elif key in ['verbs', 'objects', 'consumed', 'obj_dom_pos',\n 'obj_text', 'obj_type', 'obj_clickable', 'obj_screen_pos',\n 'verb_refs', 'obj_refs', 'input_refs', 'obj_dom_dist']:\n features[key] = tf.squeeze(features[key], 2)\n stitched_features[key] = features[key]\n stitched_features[key] = _pad(\n stitched_features[key],\n padding_value=-1 if key == 'obj_type' else 0)\n elif key not in ['task', 'raw_task']:\n stitched_features[key] = features[key][:, 0]\n # Append eos to 'task'\n stitched_features['task'] = tf.pad(stitched_features['task'],\n [[0, 0], [0, 1]])\n task_mask = tf.to_int32(tf.greater(stitched_features['task'], 1))\n task_eos_mask = tf.pad(task_mask, [[0, 0], [1, 0]], constant_values=1)[:, :-1]\n stitched_features['task'] = stitched_features['task'] + (\n task_eos_mask - task_mask)\n # Append eos\n verb_mask = tf.to_int32(tf.greater(stitched_features['verbs'], 1))\n verb_eos_mask = tf.pad(verb_mask, [[0, 0], [1, 0]], constant_values=1)[:, :-1]\n verb_eos = verb_eos_mask - verb_mask\n stitched_features['verbs'] = stitched_features['verbs'] + verb_eos\n # Append last step refs to 'verb_refs'\n task_lengths = tf.where(tf.equal(stitched_features['task'], 1))[:, 1]\n eos_pos = tf.to_int32(tf.stack([task_lengths, 
task_lengths + 1], axis=1))\n action_mask = tf.to_int32(\n tf.sequence_mask(action_lengths, max_action_length + 1))\n action_and_eos_mask = tf.pad(action_mask, [[0, 0], [1, 0]],\n constant_values=1)[:, :-1]\n verb_ref_eos = action_and_eos_mask - action_mask\n eos_refs = tf.multiply(\n tf.tile(tf.expand_dims(eos_pos, 1), [1, max_action_length + 1, 1]),\n tf.expand_dims(verb_ref_eos, 2), name='verb_ref_eos')\n stitched_features['verb_refs'] += eos_refs\n return stitched_features\n\n\ndef _process_rico_sca(feature_dict, max_range, max_dom_pos,\n load_dom_dist=False, load_extra=False, load_screen=True):\n \"\"\"Processes one_shot feature dictionary.\n\n Args:\n feature_dict: feature dictionary\n max_range: the max range.\n max_dom_pos: the max dom pos.\n load_dom_dist: whether to load the dom distance feature.\n load_extra: whether to load the extra data for debugging.\n load_screen: whether to load the screen features.\n Returns:\n A processed feature dictionary.\n \"\"\"\n phrase_count = tf.size(feature_dict['obj_desc_position_seq']) // 2\n feature = {\n 'task':\n tf.reshape(feature_dict['instruction_word_id_seq'],\n [phrase_count, NUM_TOKENS_PER_SYN]),\n 'input_refs':\n tf.reshape(feature_dict['input_str_position_seq'],\n [phrase_count, 1, 2]),\n 'obj_refs':\n tf.reshape(feature_dict['obj_desc_position_seq'],\n [phrase_count, 1, 2]),\n 'verb_refs':\n tf.reshape(feature_dict['verb_str_position_seq'],\n [phrase_count, 1, 2]),\n 'rule':\n tf.reshape(feature_dict['instruction_rule_id'], [phrase_count]),\n }\n selected_synthetic_action_idx = tf.random_uniform(\n shape=(), minval=0, maxval=phrase_count, dtype=tf.int32)\n for key in feature:\n feature[key] = feature[key][selected_synthetic_action_idx]\n if load_extra:\n feature['raw_task'] = tf.reshape(\n feature_dict['instruction_str'],\n [phrase_count])[selected_synthetic_action_idx]\n feature['task_id'] = tf.constant('empty_task_id', dtype=tf.string)\n if load_screen:\n feature['verbs'] = tf.reshape(\n feature_dict['verb_id_seq'],\n [phrase_count, 1])[selected_synthetic_action_idx]\n feature['objects'] = tf.reshape(\n feature_dict['ui_target_id_seq'],\n [phrase_count, 1])[selected_synthetic_action_idx]\n feature['obj_text'] = tf.reshape(feature_dict['ui_obj_word_id_seq'],\n [1, -1, NUM_TOKENS_PER_OBJ])\n feature['obj_type'] = tf.reshape(\n feature_dict['ui_obj_type_id_seq'], [1, -1])\n feature['obj_clickable'] = tf.reshape(feature_dict['ui_obj_clickable_seq'],\n [1, -1])\n def _make_obj_screen_pos():\n return tf.concat([\n tf.reshape(feature_dict['ui_obj_cord_x_seq'], [1, -1, 2]),\n tf.reshape(feature_dict['ui_obj_cord_y_seq'], [1, -1, 2])\n ], 2)\n\n feature['obj_screen_pos'] = tf.cond(\n tf.equal(\n tf.size(feature_dict['ui_obj_cord_x_seq']),\n 0), lambda: tf.fill([1, tf.shape(feature['obj_type'])[1], 4], 0.),\n _make_obj_screen_pos)\n feature['obj_dom_pos'] = tf.reshape(feature_dict['ui_obj_dom_location_seq'],\n [1, -1, 3])\n feature['obj_dom_pos'] = tf.minimum(feature['obj_dom_pos'], max_dom_pos - 1)\n if load_dom_dist:\n num_ui_obj = tf.to_int32(\n tf.sqrt(tf.to_float(tf.size(feature_dict['ui_obj_dom_distance']))))\n feature['obj_dom_dist'] = tf.reshape(feature_dict['ui_obj_dom_distance'],\n [1, num_ui_obj, num_ui_obj])\n if load_extra:\n feature['obj_raw_text'] = tf.reshape(feature_dict['ui_obj_str_seq'],\n [1, -1])\n else:\n _load_fake_screen(feature, load_extra, load_dom_dist)\n _bound_refs(feature, max_range)\n feature['data_source'] = tf.constant(0, dtype=tf.int32)\n feature['agreement_count'] = tf.constant(100, dtype=tf.int32)\n\n 
return feature\n\n\ndef _process_pixel_help(feature_dict, data_source, load_dom_dist=False,\n load_extra=False):\n \"\"\"Processes testing data feature dictionary.\n\n Args:\n feature_dict: feature dictionary\n data_source: TEST_PIXEL_HELP\n load_dom_dist: whether to load the dom distance feature.\n load_extra: whether to load the extra data for debugging.\n Returns:\n A processed feature dictionary.\n \"\"\"\n step_num = tf.size(feature_dict['verb_id_seq'])\n feature = {\n 'task':\n tf.reshape(feature_dict['instruction_word_id_seq'], [-1]),\n 'obj_text':\n tf.reshape(feature_dict['ui_obj_word_id_seq'], [\n step_num, MAX_UI_OBJECT_NUM[data_source],\n MAX_TOKEN_NUM[data_source]\n ]),\n 'obj_type':\n tf.reshape(feature_dict['ui_obj_type_id_seq'],\n [step_num, MAX_UI_OBJECT_NUM[data_source]]),\n 'obj_clickable':\n tf.reshape(feature_dict['ui_obj_clickable_seq'],\n [step_num, MAX_UI_OBJECT_NUM[data_source]]),\n # pylint: disable=g-long-ternary\n 'obj_screen_pos': (\n tf.reshape(tf.concat([\n tf.reshape(feature_dict['ui_obj_cord_x_seq'], [step_num, -1, 2]),\n tf.reshape(feature_dict['ui_obj_cord_y_seq'], [step_num, -1, 2])\n ], axis=2), [step_num, MAX_UI_OBJECT_NUM[data_source], 4])),\n 'obj_dom_pos':\n tf.reshape(feature_dict['ui_obj_dom_location_seq'],\n [step_num, MAX_UI_OBJECT_NUM[data_source], 3]),\n 'verbs':\n tf.reshape(feature_dict['verb_id_seq'], [step_num]),\n 'objects':\n tf.reshape(feature_dict['ui_target_id_seq'], [step_num]),\n 'input_refs':\n tf.reshape(feature_dict['input_str_position_seq'], [step_num, 2]),\n 'obj_refs':\n tf.reshape(feature_dict['obj_desc_position_seq'], [step_num, 2]),\n 'verb_refs': # No data for Pixel on the field\n tf.zeros([step_num, 2], tf.int32),\n 'agreement_count':\n tf.constant(100, dtype=tf.int32),\n }\n if load_dom_dist:\n feature['obj_dom_dist'] = tf.reshape(\n feature_dict['ui_obj_dom_distance'],\n [step_num, MAX_UI_OBJECT_NUM[data_source],\n MAX_UI_OBJECT_NUM[data_source]])\n feature['rule'] = tf.constant(5, dtype=tf.int32)\n if load_extra:\n feature['task_id'] = tf.reshape(feature_dict['task_id'], [])\n feature['raw_task'] = tf.reshape(feature_dict['instruction_str'], [])\n feature['obj_raw_text'] = tf.reshape(\n feature_dict['ui_obj_str_seq'],\n [step_num, MAX_UI_OBJECT_NUM[data_source]])\n feature['data_source'] = tf.constant(2, dtype=tf.int32)\n return feature\n",
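input.py above builds per-feature (padded_shape, padding_value) pairs in _construct_padding_info and hands them to padded_batch so every tensor in a batch shares one shape. A toy illustration of that mechanism with a single variable-length feature; the sequences [3, 4, 5] and [7] are made up:

import tensorflow.compat.v1 as tf

# Two variable-length sequences padded to a common length per batch,
# mirroring the ('task', [None], padding_value_int) entry above.
ds = tf.data.Dataset.from_generator(lambda: iter([[3, 4, 5], [7]]),
                                    output_types=tf.int32,
                                    output_shapes=[None])
ds = ds.padded_batch(2, padded_shapes=[None],
                     padding_values=tf.constant(0, tf.int32))
# Yields one batch: [[3, 4, 5], [7, 0, 0]]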
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"Approximate the distribution's partition function with a spline.\n\nThis script generates values for the distribution's partition function and then\nfits a cubic hermite spline to those values, which is then stored to disk.\nTo run this script, assuming you're in this directory, run:\n python -m robust_loss.fit_partition_spline_test\nThis script will likely never have to be run again, and is provided here for\ncompleteness and reproducibility, or in case someone decides to modify\ndistribution.partition_spline_curve() in the future in case they find a better\ncurve. If the user wants a more accurate spline approximation, this can be\nobtained by modifying the `x_max`, `x_scale`, and `redundancy` parameters in the\ncode below, but this should only be done with care.\n\"\"\"\n\nfrom absl import app\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nfrom robust_loss import cubic_spline\nfrom robust_loss import distribution\nfrom robust_loss import general\n\ntf.enable_v2_behavior()\n\n\ndef numerical_base_partition_function(alpha):\n \"\"\"Numerically approximate the partition function Z(alpha).\"\"\"\n # Generate values `num_samples` values in [-x_max, x_max], with more samples\n # near the origin as `power` is set to larger values.\n num_samples = 2**24 + 1 # We want an odd value so that 0 gets sampled.\n x_max = 10**10\n power = 6\n t = t = tf.linspace(\n tf.constant(-1, tf.float64), tf.constant(1, tf.float64), num_samples)\n t = tf.sign(t) * tf.abs(t)**power\n x = t * x_max\n\n # Compute losses for the values, then exponentiate the negative losses and\n # integrate with the trapezoid rule to get the partition function.\n losses = general.lossfun(x, alpha, np.float64(1))\n y = tf.math.exp(-losses)\n partition = tf.reduce_sum((y[1:] + y[:-1]) * (x[1:] - x[:-1])) / 2.\n return partition\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n # Parameters governing how the x coordinate of the spline will be laid out.\n # We will construct a spline with knots at\n # [0 : 1 / x_scale : x_max],\n # by fitting it to values sampled at\n # [0 : 1 / (x_scale * redundancy) : x_max]\n x_max = 12\n x_scale = 1024\n redundancy = 4 # Must be >= 2 for the spline to be useful.\n\n spline_spacing = 1. 
/ (x_scale * redundancy)\n x_knots = np.arange(\n 0, x_max + spline_spacing, spline_spacing, dtype=np.float64)\n table = []\n # We iterate over knots, and for each knot recover the alpha value\n # corresponding to that knot with inv_partition_spline_curve(), and then\n # with that alpha we accurately approximate its partition function using\n # numerical_base_partition_function().\n for x_knot in x_knots:\n alpha = distribution.inv_partition_spline_curve(x_knot).numpy()\n partition = numerical_base_partition_function(alpha).numpy()\n table.append((x_knot, alpha, partition))\n print(table[-1])\n\n table = np.array(table)\n x = table[:, 0]\n alpha = table[:, 1]\n y_gt = np.log(table[:, 2])\n\n # We grab the values from the true log-partition table that correpond to\n # knots, by looking for where x * x_scale is an integer.\n mask = np.abs(np.round(x * x_scale) - (x * x_scale)) <= 1e-8\n values = y_gt[mask]\n\n # Initialize `tangents` using a central differencing scheme.\n values_pad = np.concatenate([[values[0] - values[1] + values[0]], values,\n [values[-1] - values[-2] + values[-1]]], 0)\n tangents = (values_pad[2:] - values_pad[:-2]) / 2.\n\n # Construct the spline's value and tangent TF variables, constraining the last\n # knot to have a fixed value Z(infinity) and a tangent of zero.\n n = len(values)\n tangents = tf.Variable(tangents, tf.float64)\n values = tf.Variable(values, tf.float64)\n\n # Fit the spline.\n num_iters = 10001\n\n optimizer = tf.keras.optimizers.SGD(learning_rate=1e-9, momentum=0.99)\n\n trace = []\n for ii in range(num_iters):\n with tf.GradientTape() as tape:\n tape.watch([values, tangents])\n # Fix the endpoint to be a known constant with a zero tangent.\n i_values = tf.where(\n np.arange(n) == (n - 1),\n tf.ones_like(values) * 0.70526025442689566, values)\n i_tangents = tf.where(\n np.arange(n) == (n - 1), tf.zeros_like(tangents), tangents)\n i_y = cubic_spline.interpolate1d(x * x_scale, i_values, i_tangents)\n # We minimize the maximum residual, which makes for a very ugly\n # optimization problem but works well in practice.\n i_loss = tf.reduce_max(tf.abs(i_y - y_gt))\n grads = tape.gradient(i_loss, [values, tangents])\n optimizer.apply_gradients(zip(grads, [values, tangents]))\n trace.append(i_loss.numpy())\n if (ii % 200) == 0:\n print('{:5d}: {:e}'.format(ii, trace[-1]))\n\n mask = alpha <= 4\n max_error_a4 = np.max(np.abs(i_y[mask] - y_gt[mask]))\n max_error = np.max(np.abs(i_y - y_gt))\n print('Max Error (a <= 4): {:e}'.format(max_error_a4))\n print('Max Error: {:e}'.format(max_error))\n\n # Just a sanity-check on the error.\n assert max_error_a4 <= 5e-7\n assert max_error <= 5e-7\n\n # Save the spline to disk.\n np.savez(\n './data/partition_spline.npz',\n x_scale=x_scale,\n values=i_values.numpy(),\n tangents=i_tangents.numpy())\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"TCN loss for unsupervised training.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tcc.algos.algorithm import Algorithm\nfrom tcc.config import CONFIG\nfrom tcc.utils import get_cnn_feats\nfrom tcc.utils import set_learning_phase\n\n\ndef _npairs_loss(labels, embeddings_anchor, embeddings_positive, reg_lambda):\n \"\"\"Returns n-pairs metric loss.\"\"\"\n reg_anchor = tf.reduce_mean(tf.reduce_sum(tf.square(embeddings_anchor), 1))\n reg_positive = tf.reduce_mean(tf.reduce_sum(\n tf.square(embeddings_positive), 1))\n l2loss = 0.25 * reg_lambda * (reg_anchor + reg_positive)\n\n # Get per pair similarities.\n similarity_matrix = tf.matmul(\n embeddings_anchor, embeddings_positive, transpose_a=False,\n transpose_b=True)\n\n # Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.\n lshape = tf.shape(labels)\n assert lshape.shape == 1\n labels = tf.reshape(labels, [lshape[0], 1])\n\n labels_remapped = tf.cast(\n tf.equal(labels, tf.transpose(labels)), tf.float32)\n labels_remapped /= tf.reduce_sum(labels_remapped, 1, keepdims=True)\n\n # Add the softmax loss.\n xent_loss = tf.nn.softmax_cross_entropy_with_logits(\n logits=similarity_matrix, labels=labels_remapped)\n xent_loss = tf.reduce_mean(xent_loss)\n\n return l2loss + xent_loss\n\n\ndef single_sequence_loss(embs, num_steps):\n \"\"\"Returns n-pairs loss for a single sequence.\"\"\"\n\n labels = tf.range(num_steps)\n labels = tf.stop_gradient(labels)\n embeddings_anchor = embs[0::2]\n embeddings_positive = embs[1::2]\n loss = _npairs_loss(labels, embeddings_anchor, embeddings_positive,\n reg_lambda=CONFIG.TCN.REG_LAMBDA)\n return loss\n\n\nclass TCN(Algorithm):\n \"\"\"Time-contrastive Network.\"\"\"\n\n @set_learning_phase\n def call(self, data, steps, seq_lens, training):\n \"\"\"One pass through the model.\"\"\"\n cnn = self.model['cnn']\n emb = self.model['emb']\n\n if training:\n num_steps = CONFIG.TRAIN.NUM_FRAMES * CONFIG.DATA.NUM_STEPS\n else:\n num_steps = CONFIG.EVAL.NUM_FRAMES * CONFIG.DATA.NUM_STEPS\n\n # Number of steps is doubled due to sampling of positives and anchors.\n cnn_feats = get_cnn_feats(cnn, data, training, 2 * num_steps)\n\n if training:\n num_steps = CONFIG.TRAIN.NUM_FRAMES\n else:\n num_steps = CONFIG.EVAL.NUM_FRAMES\n\n embs = emb(cnn_feats, 2 * num_steps)\n embs = tf.stack(tf.split(embs, 2 * num_steps, axis=0), axis=1)\n\n return embs\n\n def compute_loss(self, embs, steps, seq_lens, global_step, training,\n frame_labels, seq_labels):\n if training:\n num_steps = CONFIG.TRAIN.NUM_FRAMES\n batch_size = CONFIG.TRAIN.BATCH_SIZE\n else:\n num_steps = CONFIG.EVAL.NUM_FRAMES\n batch_size = CONFIG.EVAL.BATCH_SIZE\n losses = []\n for i in xrange(batch_size):\n losses.append(single_sequence_loss(embs[i], num_steps))\n loss = tf.reduce_mean(tf.stack(losses))\n return 
loss\n"
] | [
[
"tensorflow.compat.v1.gfile.Open"
],
[
"tensorflow.compat.v1.not_equal",
"tensorflow.compat.v1.parse_single_example",
"tensorflow.compat.v1.data.RecordIODataset",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.multiply",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.reduce_max",
"tensorflow.compat.v1.greater",
"tensorflow.compat.v1.boolean_mask",
"tensorflow.compat.v1.sparse_tensor_to_dense",
"tensorflow.compat.v1.gather",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.random.shuffle",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.data.TFRecordDataset",
"tensorflow.compat.v1.random.truncated_normal",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.to_int32",
"tensorflow.compat.v1.pad",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.reduce_any",
"tensorflow.compat.v1.sequence_mask",
"tensorflow.compat.v1.minimum",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.data.experimental.shuffle_and_repeat",
"tensorflow.compat.v1.size",
"tensorflow.compat.v1.to_float",
"tensorflow.compat.v1.random_uniform",
"tensorflow.compat.v1.FixedLenSequenceFeature",
"tensorflow.compat.v1.range",
"tensorflow.compat.v1.greater_equal",
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.matching_files",
"tensorflow.compat.v1.ceil",
"tensorflow.compat.v1.rank",
"tensorflow.compat.v1.squeeze",
"tensorflow.compat.v1.fill",
"tensorflow.compat.v1.assert_greater_equal",
"tensorflow.compat.v1.logging.info"
],
[
"tensorflow.compat.v2.reduce_sum",
"tensorflow.compat.v2.sign",
"numpy.float64",
"numpy.round",
"numpy.abs",
"tensorflow.compat.v2.math.exp",
"tensorflow.compat.v2.GradientTape",
"numpy.arange",
"numpy.log",
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.compat.v2.ones_like",
"numpy.array",
"numpy.concatenate",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.keras.optimizers.SGD",
"tensorflow.compat.v2.Variable",
"tensorflow.compat.v2.abs"
],
[
"tensorflow.compat.v2.nn.softmax_cross_entropy_with_logits",
"tensorflow.compat.v2.reduce_sum",
"tensorflow.compat.v2.transpose",
"tensorflow.compat.v2.stop_gradient",
"tensorflow.compat.v2.stack",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.reduce_mean",
"tensorflow.compat.v2.reshape",
"tensorflow.compat.v2.square",
"tensorflow.compat.v2.range",
"tensorflow.compat.v2.split",
"tensorflow.compat.v2.matmul"
]
] |
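The `fit_partition_spline` script in the record above approximates the partition function Z(alpha) by integrating `exp(-loss)` with the trapezoid rule over a grid warped to concentrate samples near the origin. Below is a minimal NumPy sketch of that idea, assuming a stand-in `loss_fn` rather than the real `general.lossfun`, and with grid sizes scaled down from the script's `2**24 + 1` samples:

```python
# Sketch only: `loss_fn` stands in for general.lossfun; `trapezoid_partition`
# is an illustrative helper, not part of the robust_loss package.
import numpy as np

def trapezoid_partition(loss_fn, x_max=1e4, num_samples=2**16 + 1, power=6):
    # Odd sample count so that x = 0 lands exactly on the grid.
    t = np.linspace(-1.0, 1.0, num_samples)
    # Warp the grid so samples concentrate near the origin.
    x = np.sign(t) * np.abs(t) ** power * x_max
    y = np.exp(-loss_fn(x))
    # Trapezoid rule on the non-uniform grid.
    return np.sum((y[1:] + y[:-1]) * (x[1:] - x[:-1])) / 2.0

# Sanity check against a known value: for the Gaussian negative log-likelihood
# 0.5 * x**2, the partition function is sqrt(2 * pi) ~= 2.5066.
print(trapezoid_partition(lambda x: 0.5 * x**2))
```

The odd sample count guarantees x = 0 is on the grid, and the power warp spends most of the samples where the integrand is largest.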
LuoYuanke/PrivChainer | [
"758d765c7903f6913cfd58c21db069d5f2a12203",
"758d765c7903f6913cfd58c21db069d5f2a12203"
] | [
"chainer/functions/pooling/average_pooling_2d.py",
"chainer/utils/conv_nd.py"
] | [
"import numpy\n\nimport chainer\nfrom chainer.backends import cuda\nfrom chainer import function_node\nfrom chainer.functions.pooling import pooling_2d\nfrom chainer.utils import conv\n\n\nclass AveragePooling2D(pooling_2d.Pooling2D):\n\n \"\"\"Average pooling over a set of 2d planes.\"\"\"\n # TODO(beam2d): Support cover_all mode.\n\n def forward_cpu(self, x):\n self._in_shape = x[0].shape\n self._in_dtype = x[0].dtype\n\n col = conv.im2col_cpu(x[0], self.kh, self.kw, self.sy, self.sx,\n self.ph, self.pw)\n y = col.mean(axis=(2, 3))\n return y,\n\n def forward_gpu(self, x):\n if chainer.should_use_cudnn('>=auto'):\n self.retain_inputs((0,))\n return super(AveragePooling2D, self).forward_gpu(x)\n\n self._in_shape = x[0].shape\n self._in_dtype = x[0].dtype\n\n n, c, h, w = x[0].shape\n y_h = conv.get_conv_outsize(h, self.kh, self.sy, self.ph)\n y_w = conv.get_conv_outsize(w, self.kw, self.sx, self.pw)\n y = cuda.cupy.empty((n, c, y_h, y_w), dtype=x[0].dtype)\n coeff = 1. / (self.kh * self.kw)\n kern = cuda.elementwise(\n 'raw T in, int32 h, int32 w,'\n 'int32 out_h, int32 out_w, int32 kh, int32 kw,'\n 'int32 sy, int32 sx, int32 ph, int32 pw, T coeff',\n 'T out', '''\n int c0 = i / (out_h * out_w);\n int out_y = i / out_w % out_h;\n int out_x = i % out_w;\n int in_y_0 = max(0, out_y * sy - ph);\n int in_y_1 = min(h, out_y * sy + kh - ph);\n int in_x_0 = max(0, out_x * sx - pw);\n int in_x_1 = min(w, out_x * sx + kw - pw);\n\n T val = 0;\n for (int y = in_y_0; y < in_y_1; ++y) {\n int offset_y = w * (y + h * c0);\n for (int x = in_x_0; x < in_x_1; ++x) {\n val = val + in[x + offset_y];\n }\n }\n out = val * coeff;\n ''', 'avg_pool_fwd')\n kern(x[0].reduced_view(), h, w, y_h, y_w, self.kh, self.kw,\n self.sy, self.sx, self.ph, self.pw, coeff, y)\n return y,\n\n def backward(self, indexes, gy):\n return AveragePooling2DGrad(self).apply(gy)\n\n def create_pool_desc(self):\n return cuda.cudnn.create_pooling_descriptor(\n (self.kh, self.kw), (self.sy, self.sx), (self.ph, self.pw),\n cuda.cuda.cudnn.CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING)\n\n\nclass AveragePooling2DGrad(function_node.FunctionNode):\n\n def __init__(self, apool2d):\n self.kh = apool2d.kh\n self.kw = apool2d.kw\n self.sy = apool2d.sy\n self.sx = apool2d.sx\n self.ph = apool2d.ph\n self.pw = apool2d.pw\n self._used_cudnn = apool2d._used_cudnn\n if not self._used_cudnn:\n self._in_shape = apool2d._in_shape\n self._in_dtype = apool2d._in_dtype\n self.apool2d = apool2d\n\n def forward_cpu(self, gy):\n h, w = self._in_shape[2:]\n gcol = numpy.tile(gy[0][:, :, None, None],\n (1, 1, self.kh, self.kw, 1, 1))\n gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)\n gx /= self.kh * self.kw\n return gx,\n\n def forward_gpu(self, gy):\n if self._used_cudnn:\n x, = self.apool2d.get_retained_inputs()\n return self.apool2d.backward_gpu((x.data,), gy)\n n, c, h, w = self._in_shape\n y_h, y_w = gy[0].shape[2:]\n gx = cuda.cupy.empty(self._in_shape, self._in_dtype)\n coeff = 1. 
/ (self.kh * self.kw)\n cuda.elementwise(\n 'raw T gy, int32 h, int32 w,'\n 'int32 out_h, int32 out_w, int32 kh, int32 kw,'\n 'int32 sy, int32 sx, int32 ph, int32 pw, T coeff',\n 'T gx',\n '''\n int c0 = i / (h * w);\n int y = i / w % h + ph;\n int x = i % w + pw;\n int out_y_0 = max(0, (y - kh + sy) / sy);\n int out_y_1 = min(out_h, (y + sy) / sy);\n int out_x_0 = max(0, (x - kw + sx) / sx);\n int out_x_1 = min(out_w, (x + sx) / sx);\n int hc0 = out_h * c0;\n\n T val = 0;\n for (int out_y = out_y_0; out_y < out_y_1; ++out_y) {\n for (int out_x = out_x_0; out_x < out_x_1; ++out_x) {\n val = val + gy[out_x + out_w * (out_y + hc0)];\n }\n }\n gx = val * coeff;\n ''', 'avg_pool_bwd')(gy[0].reduced_view(),\n h, w, y_h, y_w, self.kh, self.kw,\n self.sy, self.sx, self.ph, self.pw, coeff,\n gx)\n return gx,\n\n def backward(self, indexes, grad_outputs):\n return AveragePooling2D(\n (self.kh, self.kw), (self.sy, self.sx), (self.ph, self.pw),\n False).apply(grad_outputs)\n\n\ndef average_pooling_2d(x, ksize, stride=None, pad=0):\n \"\"\"Spatial average pooling function.\n\n This function acts similarly to :class:`~functions.Convolution2D`, but\n it computes the average of input spatial patch for each channel\n without any parameter instead of computing the inner products.\n\n Args:\n x (~chainer.Variable): Input variable.\n ksize (int or pair of ints): Size of pooling window. ``ksize=k`` and\n ``ksize=(k, k)`` are equivalent.\n stride (int or pair of ints or None): Stride of pooling applications.\n ``stride=s`` and ``stride=(s, s)`` are equivalent. If ``None`` is\n specified, then it uses same stride as the pooling window size.\n pad (int or pair of ints): Spatial padding width for the input array.\n ``pad=p`` and ``pad=(p, p)`` are equivalent.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n .. note::\n\n This function currently does not support ``cover_all`` mode as\n :func:`max_pooling_2d`. Average pooling runs in non-cover-all mode.\n\n \"\"\"\n return AveragePooling2D(ksize, stride, pad, False).apply((x,))[0]\n",
"import itertools\nimport numpy\nimport six\n\nfrom chainer.backends import cuda\nfrom chainer.utils.conv import get_conv_outsize\nfrom chainer.utils import conv_nd_kernel\n\n\ndef as_tuple(x, n):\n if hasattr(x, '__getitem__'):\n assert len(x) == n\n return tuple(x)\n return (x,) * n\n\n\ndef im2col_nd_cpu(img, ksize, stride, pad, pval=0, cover_all=False):\n n, c = img.shape[0:2] # (n, c, d_1, d_2, ..., d_N)\n dims = img.shape[2:]\n ndim = len(dims)\n assert ndim == len(ksize) == len(stride) == len(pad)\n outs = tuple(get_conv_outsize(d, k, s, p, cover_all)\n for (d, k, s, p) in zip(dims, ksize, stride, pad))\n assert all(out > 0 for out in outs), 'Output sizes should be positive.'\n\n # Pad around image.\n pad_width = ((0, 0), (0, 0)) + tuple(\n (p, p + s - 1) for (s, p) in zip(stride, pad))\n img = numpy.pad(img, pad_width, mode='constant', constant_values=(pval,))\n\n # Make patch array with which we will compute correlation with filter.\n # shape: (n, c, k_1, k_2, ..., k_N, out_1, out_2, ..., out_N)\n shape = (n, c) + ksize + outs\n col = numpy.ndarray(shape, dtype=img.dtype)\n\n # Fill the patch array.\n colon = slice(None)\n for kxs in itertools.product(*[six.moves.range(k) for k in ksize]):\n # col[:, :, kx_1, kx_2, ..., kx_N, :, :, ..., :]\n col_index = (colon, colon) + kxs + (colon,) * ndim\n # img[:, :, kx_1:kx_lim_1:s_1, ..., kx_N:kx_lim_N:s_N]\n kx_lims = tuple(kx + s * out\n for (kx, s, out) in zip(kxs, stride, outs))\n img_index = (colon, colon) + tuple(\n slice(kx, kx_lim, s)\n for (kx, kx_lim, s) in zip(kxs, kx_lims, stride))\n col[col_index] = img[img_index]\n\n return col\n\n\ndef im2col_nd_gpu(img, ksize, stride, pad, cover_all=False):\n n, c = img.shape[0:2] # (n, c, d_1, d_2, ..., d_N)\n dims = img.shape[2:]\n ndim = len(dims)\n assert ndim == len(ksize) == len(stride) == len(pad)\n outs = tuple(get_conv_outsize(d, k, s, p, cover_all)\n for (d, k, s, p) in zip(dims, ksize, stride, pad))\n assert all(out > 0 for out in outs), 'Output sizes should be positive.'\n\n # col_shape: (n, c, k_1, k_2, ..., k_N, out_1, out_2, ..., out_N)\n shape = (n, c) + ksize + outs\n col = cuda.cupy.empty(shape, dtype=img.dtype)\n\n in_params, out_params, operation, name = \\\n conv_nd_kernel.Im2colNDKernel.generate(ndim)\n\n cuda.elementwise(in_params, out_params, operation, name)(\n img.reduced_view(), *(dims + outs + ksize + stride + pad + (col,)))\n\n return col\n\n\ndef col2im_nd_cpu(col, stride, pad, dims):\n n, c = col.shape[:2] # (n, c, kx_1, ..., kx_N, out_1, ..., out_N)\n mid = (len(col.shape) - 2) // 2 + 2\n ksize = col.shape[2:mid]\n outs = col.shape[mid:]\n colon = slice(None)\n assert len(outs) == len(ksize) == len(stride) == len(pad) == len(dims)\n\n # Image with padded size.\n img_shape = (n, c) + tuple(d + 2 * p + s - 1\n for (d, p, s) in zip(dims, pad, stride))\n img = numpy.zeros(img_shape, dtype=col.dtype)\n for kxs in itertools.product(*[six.moves.range(k) for k in ksize]):\n # (:, :, kx_1:kx_lim_1:s_1, ..., kx_N:kx_lim_N:s_N)\n kx_lims = tuple(kx + s * out\n for (kx, s, out) in zip(kxs, stride, outs))\n img_index = (colon, colon) + tuple(\n slice(kx, kx_lim, s)\n for (kx, kx_lim, s) in zip(kxs, kx_lims, stride))\n # (:, :, kx_1, kx_2, ..., kx_N, :, :, ..., :)\n col_index = (colon, colon) + kxs + (colon,) * len(outs)\n img[img_index] += col[col_index]\n\n # (:, :, p_1:d_1 + p_1, p_2:d_2 + p_2, ..., p_N:d_N + p_N]\n img_index = (colon, colon) + tuple(\n slice(p, d + p) for (p, d) in zip(pad, dims))\n return img[img_index]\n\n\ndef col2im_nd_gpu(col, stride, pad, dims):\n 
n, c = col.shape[:2] # (n, c, k_1, ..., k_N, out_1, ..., out_N)\n mid = (len(col.shape) - 2) // 2 + 2\n ksize = col.shape[2:mid]\n outs = col.shape[mid:]\n ndim = len(dims)\n assert len(outs) == len(ksize) == len(stride) == len(pad) == ndim\n\n img_shape = (n, c) + dims # (n, c, d_1, d_2, ..., d_N)\n img = cuda.cupy.empty(img_shape, dtype=col.dtype)\n\n in_params, out_params, operation, name = \\\n conv_nd_kernel.Col2imNDKernel.generate(ndim)\n\n cuda.elementwise(in_params, out_params, operation, name)(\n col.reduced_view(), *(dims + outs + ksize + stride + pad + (img,)))\n\n return img\n"
] | [
[
"numpy.tile"
],
[
"numpy.ndarray",
"numpy.pad",
"numpy.zeros"
]
] |
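The pooling and im2col routines in the PrivChainer record above all size their outputs through `conv.get_conv_outsize`. A self-contained sketch of that arithmetic follows; the helper name is illustrative, not the Chainer API, and assumes the standard convolution output-size formula:

```python
# Illustrative helper mirroring the arithmetic of
# chainer.utils.conv.get_conv_outsize (without dilation).
def conv_outsize(size, k, s, p, cover_all=False):
    if cover_all:
        # cover_all keeps windows that only partially cover the input.
        return (size + p * 2 - k + s - 1) // s + 1
    return (size + p * 2 - k) // s + 1

# size 8, kernel 3, stride 2, no padding:
assert conv_outsize(8, 3, 2, 0) == 3                   # windows at 0, 2, 4
assert conv_outsize(8, 3, 2, 0, cover_all=True) == 4   # extra tail window
```

This is why `im2col_nd_cpu` pads each spatial axis by `p + s - 1` on the trailing side: the extra `s - 1` leaves room for the partially covered window that `cover_all` adds.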
jwillis0720/pybody | [
"2d7c68650ac1ef5f3003ccb67171898eac1f63eb"
] | [
"src/sadie/renumbering/result.py"
] | [
"import logging\nimport pandas as pd\nfrom ast import literal_eval\n\nfrom .constants import NUMBERING_RESULTS\nfrom sadie.numbering.scheme_numbering import scheme_numbering\n\nlogger = logging.getLogger(\"NUMBERING\")\n\n\nclass NumberingResults(pd.DataFrame):\n def __init__(self, *args, scheme=\"\", region_definition=\"\", allowed_chains=[], allowed_species=[], **kwargs):\n # use the __init__ method from DataFrame to ensure\n # that we're inheriting the correct behavior\n super(NumberingResults, self).__init__(*args, **kwargs)\n # self[\"scheme\"] = scheme\n # self[\"region_definition\"] = region_definition\n # self[\"allowed_species\"] = \",\".join(allowed_species)\n # self[\"allowed_chains\"] = \",\".join(allowed_chains)\n # self._add_segment_regions()\n\n @property\n def _constructor(self):\n return NumberingResults\n\n def get_alignment_table(self) -> pd.DataFrame:\n \"\"\"Get a numbered alignment table from the numbering and insertions\n\n Returns\n -------\n pd.DataFrame\n A dataframe with Id, chain_type, scheme and numbering. Values are the amino acid sequences\n \"\"\"\n all_dataframes = []\n\n # I'm not sure if there is a more effiecient way to do this other than iterate through the df and pivot each row\n for index in range(len(self)):\n all_dataframes.append(self._pivot_alignment(self.iloc[index]))\n all_dataframes = pd.concat(all_dataframes)\n all_dataframes = all_dataframes.sort_index(axis=1, level=[0, 1])\n all_dataframes.columns = list(map(lambda x: str(x[0]) + x[1], all_dataframes.columns.values))\n all_dataframes = all_dataframes.reset_index()\n return self[[\"Id\", \"chain_type\", \"scheme\"]].merge(all_dataframes, on=\"Id\").copy()\n\n def _get_region(self, row, start, end, segment_name):\n with_segment = \"\".join(\n list(\n map(\n lambda x: x[-1],\n list(\n filter(\n lambda x: x[0] >= start and x[0] <= end,\n list(\n zip(\n row[\"Numbering\"],\n row[\"Insertion\"],\n row[\"Numbered_Sequence\"],\n )\n ),\n )\n ),\n )\n )\n )\n without_segment = with_segment.replace(\"-\", \"\")\n return pd.Series(\n {\n f\"{segment_name}_gaps\": with_segment,\n f\"{segment_name}_no_gaps\": without_segment,\n }\n )\n\n def _add_segment_regions(self) -> \"NumberingResults\":\n \"\"\"Private method to delineate the framework and cdr boundaries from the numbering\n\n Returns\n -------\n NumberingResults\n Instance of NumberingResults\n \"\"\"\n return_frames = []\n for group, sub_df in self.groupby([\"scheme\", \"region_definition\", \"Chain\"]):\n numbering = group[0]\n chain = {\"H\": \"heavy\", \"KL\": \"light\"}[group[-1]]\n boundaries = group[1]\n numbering_lookup = scheme_numbering[numbering][chain][boundaries]\n for region in [\n \"fwr1_aa\",\n \"cdr1_aa\",\n \"fwr2_aa\",\n \"cdr2_aa\",\n \"fwr3_aa\",\n \"cdr3_aa\",\n \"fwr4_aa\",\n ]:\n _start = numbering_lookup[f\"{region}_start\"]\n _end = numbering_lookup[f\"{region}_end\"]\n sub_df = sub_df.join(self.apply(lambda x: self._get_region(x, _start, _end, region), axis=1))\n return_frames.append(sub_df)\n segmented_df = pd.concat(return_frames).reset_index(drop=True)\n # everything preceding the antibody\n segmented_df[\"leader\"] = segmented_df[[\"sequence\", \"seqstart_index\"]].apply(lambda x: x[0][: x[1]], axis=1)\n\n # everything following the antibody. 
keyword tail will clash with pandas\n segmented_df[\"follow\"] = segmented_df[[\"sequence\", \"seqend_index\"]].apply(lambda x: x[0][x[1] + 1 :], axis=1)\n return segmented_df\n\n def _pivot_alignment(self, row: pd.Series) -> pd.DataFrame:\n \"\"\"Private method to pivot a segmented row into an alignment series\n\n Parameters\n ----------\n row : pd.Series\n indidual Numbering result row\n\n Returns\n -------\n pivoted dataframe\n \"\"\"\n pivoted_df = (\n pd.DataFrame(\n zip(row[\"Numbering\"], row[\"Insertion\"], row[\"Numbered_Sequence\"]),\n columns=[\"numbering\", \"insertion\", \"sequence\"],\n )\n .assign(Id=row[\"Id\"])\n .pivot(\"Id\", [\"numbering\", \"insertion\"], \"sequence\")\n )\n return pivoted_df\n\n def get_sanatized_antibodies(self):\n # drop sequences that don't start at the first amino acid and dont end at the last amino acid.\n return self[(self[\"seqstart_index\"] == 0) & (self[\"seqend_index\"] == self[\"sequence\"].str.len() - 1)]\n\n @staticmethod\n def read_csv(*args, **kwargs):\n return NumberingResults(\n pd.read_csv(\n *args,\n index_col=0,\n dtype=NUMBERING_RESULTS,\n converters={\"Numbering\": literal_eval, \"Insertion\": literal_eval, \"Numbered_Sequence\": literal_eval},\n **kwargs,\n )\n )\n\n def drop_bad_numbering(self) -> \"NumberingResults\":\n return self[(self[\"seqstart_index\"] == 0) & (self[\"seqend_index\"] == self[\"sequence\"].str.len() - 1)]\n"
] | [
[
"pandas.read_csv",
"pandas.Series",
"pandas.concat"
]
] |
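`NumberingResults` in the record above subclasses `pd.DataFrame` and overrides the `_constructor` property so that slicing, merging, and filtering return `NumberingResults` rather than plain DataFrames. A minimal sketch of the pattern, using a hypothetical `AnnotatedFrame` class:

```python
import pandas as pd

class AnnotatedFrame(pd.DataFrame):
    """Toy stand-in for NumberingResults."""

    @property
    def _constructor(self):
        # pandas calls this whenever an operation builds a new frame,
        # so derived results keep the subclass (and its custom methods).
        return AnnotatedFrame

df = AnnotatedFrame({"Id": ["a", "b"], "score": [1, 2]})
subset = df[df["score"] > 1]
assert isinstance(subset, AnnotatedFrame)
```

Without the override, `df[mask]` would return a plain `pd.DataFrame` and methods like `drop_bad_numbering` would be lost after the first filtering operation.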
rafaelcostafrf/UAV_3d_virtual_env | [
"bccaa52ec97fff5c0a17e1351a09f913d91c4c7b"
] | [
"environment/controller/ppo_test.py"
] | [
"import sys\nfrom quadrotor_env import quad, render, animation\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.distributions import MultivariateNormal\nimport numpy as np\nfrom quadrotor_env import quad, render, animation\nfrom model import ActorCritic\n\n\"\"\"\nMECHANICAL ENGINEERING POST-GRADUATE PROGRAM\nUNIVERSIDADE FEDERAL DO ABC - SANTO ANDRÉ, BRASIL\n\nNOME: RAFAEL COSTA FERNANDES\nRA: 21201920754\nE−MAIL: [email protected]\n\nDESCRIPTION:\n PPO testing algorithm (no training, only forward passes)\n\"\"\"\n\ntime_int_step = 0.01\nmax_timesteps = 1000\nT = 5\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nenv = quad(time_int_step, max_timesteps, euler=0, direct_control=1, deep_learning=1, T=T, debug=0)\nstate_dim = env.deep_learning_in_size\npolicy = ActorCritic(state_dim, action_dim=4, action_std=0).to(device)\n\n\n#LOAD TRAINED POLICY\ntry:\n policy.load_state_dict(torch.load('PPO_continuous_solved_drone.pth',map_location=device))\n print('Saved policy loaded')\nexcept:\n print('Could not load policy')\n sys.exit(1)\n\n#PLOTTER SETUP\nprint_states = [0, 2, 4, 6, 7, 8, 9, 10, 11, 12]\nplot_labels = ['x', 'y', 'z', 'phi', 'theta', 'psi', 'f1', 'f2', 'f3', 'f4']\nline_styles = ['-', '-', '-', '--', '--', '--', ':', ':', ':', ':',]\nplotter = render(print_states, plot_labels, line_styles, depth_plot_list=0, animate=0)\n\n\n\n# DO ONE RANDOM EPISODE\nplotter.clear()\nstate = env.reset()\nfirst_state = np.concatenate((env.previous_state[0:6],env.ang,np.zeros(4)))\nplotter.add(0,first_state)\ndone = False\nt=0\nwhile not done:\n t+=time_int_step\n action = policy.actor(torch.FloatTensor(state).to(device)).cpu().detach().numpy()\n state, _, done = env.step(action)\n plot_state = np.concatenate((env.state[0:6],env.ang,action))\n plotter.add(t,plot_state)\nprint('Env Solved, printing...')\nplotter.plot()\n# plotter.depth_plot()\nan = animation()\nan.animate(plotter.states)\nplotter.clear()\n\n"
] | [
[
"torch.FloatTensor",
"torch.load",
"numpy.zeros",
"torch.cuda.is_available",
"numpy.concatenate"
]
] |
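The `ppo_test.py` script above runs pure inference: it loads saved weights with `load_state_dict` and steps the environment with detached forward passes. A hedged sketch of the same rollout pattern, with `policy` and `env` as placeholders for `ActorCritic` and `quad`, using a single `torch.no_grad()` context instead of per-call `.detach()`:

```python
import torch

def run_episode(policy, env, time_step=0.01, device="cpu"):
    """Roll out one episode with a trained policy; no gradients needed."""
    state = env.reset()
    done, t = False, 0.0
    with torch.no_grad():  # inference only: skip building the autograd graph
        while not done:
            t += time_step
            obs = torch.as_tensor(state, dtype=torch.float32, device=device)
            action = policy.actor(obs).cpu().numpy()
            # quad.step returns (state, reward, done) in the script above.
            state, _, done = env.step(action)
    return t
```

Wrapping the whole loop in `no_grad()` achieves what the original gets from `.detach()`, while also avoiding the intermediate graph allocations.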
techthiyanes/transformers | [
"705d65368fb28246534ef636fe62c008f4fb2682"
] | [
"tests/wav2vec2/test_modeling_flax_wav2vec2.py"
] | [
"# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport math\nimport unittest\n\nimport numpy as np\nfrom datasets import load_dataset\n\nfrom transformers import Wav2Vec2Config, is_flax_available\nfrom transformers.testing_utils import (\n is_librosa_available,\n is_pyctcdecode_available,\n require_flax,\n require_librosa,\n require_pyctcdecode,\n require_soundfile,\n slow,\n)\n\nfrom ..test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, random_attention_mask\n\n\nif is_flax_available():\n import jax\n import jax.numpy as jnp\n import optax\n from flax.traverse_util import flatten_dict\n from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Processor\n from transformers.models.wav2vec2.modeling_flax_wav2vec2 import (\n FlaxWav2Vec2ForCTC,\n FlaxWav2Vec2ForPreTraining,\n FlaxWav2Vec2GumbelVectorQuantizer,\n FlaxWav2Vec2Model,\n _compute_mask_indices,\n _sample_negative_indices,\n )\n\n\nif is_pyctcdecode_available():\n from transformers import Wav2Vec2ProcessorWithLM\n\n\nif is_librosa_available():\n import librosa\n\n\nclass FlaxWav2Vec2ModelTester:\n def __init__(\n self,\n parent,\n batch_size=13,\n seq_length=1024, # speech is longer\n is_training=False,\n hidden_size=24,\n feat_extract_norm=\"layer\",\n feat_extract_dropout=0.0,\n feat_extract_activation=\"gelu\",\n conv_dim=(32, 32, 32),\n conv_stride=(4, 4, 4),\n conv_kernel=(8, 8, 8),\n conv_bias=False,\n num_conv_pos_embeddings=16,\n num_conv_pos_embedding_groups=2,\n num_hidden_layers=4,\n num_attention_heads=2,\n hidden_dropout_prob=0.1, # this is most likely not correctly set yet\n intermediate_size=20,\n layer_norm_eps=1e-5,\n hidden_act=\"gelu\",\n initializer_range=0.02,\n vocab_size=32,\n do_stable_layer_norm=True,\n scope=None,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.is_training = is_training\n self.hidden_size = hidden_size\n self.feat_extract_norm = feat_extract_norm\n self.feat_extract_dropout = feat_extract_dropout\n self.feat_extract_activation = feat_extract_activation\n self.conv_dim = conv_dim\n self.conv_stride = conv_stride\n self.conv_kernel = conv_kernel\n self.conv_bias = conv_bias\n self.num_conv_pos_embeddings = num_conv_pos_embeddings\n self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_dropout_prob = hidden_dropout_prob\n self.intermediate_size = intermediate_size\n self.layer_norm_eps = layer_norm_eps\n self.hidden_act = hidden_act\n self.initializer_range = initializer_range\n self.vocab_size = vocab_size\n self.do_stable_layer_norm = do_stable_layer_norm\n self.scope = scope\n\n output_seq_length = self.seq_length\n for kernel, stride in zip(self.conv_kernel, self.conv_stride):\n output_seq_length = (output_seq_length - (kernel - 1)) / stride\n self.output_seq_length = int(math.ceil(output_seq_length))\n 
self.encoder_seq_length = self.output_seq_length\n\n def prepare_config_and_inputs(self):\n input_values = floats_tensor([self.batch_size, self.seq_length], self.vocab_size)\n attention_mask = random_attention_mask([self.batch_size, self.seq_length])\n\n config = Wav2Vec2Config(\n do_stable_layer_norm=self.do_stable_layer_norm,\n hidden_size=self.hidden_size,\n feat_extract_norm=self.feat_extract_norm,\n feat_extract_dropout=self.feat_extract_dropout,\n feat_extract_activation=self.feat_extract_activation,\n conv_dim=self.conv_dim,\n conv_stride=self.conv_stride,\n conv_kernel=self.conv_kernel,\n conv_bias=self.conv_bias,\n num_conv_pos_embeddings=self.num_conv_pos_embeddings,\n num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,\n num_hidden_layers=self.num_hidden_layers,\n num_attention_heads=self.num_attention_heads,\n hidden_dropout_prob=self.hidden_dropout_prob,\n intermediate_size=self.intermediate_size,\n layer_norm_eps=self.layer_norm_eps,\n hidden_act=self.hidden_act,\n initializer_range=self.initializer_range,\n vocab_size=self.vocab_size,\n )\n\n return config, input_values, attention_mask\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n config, input_values, attention_mask = config_and_inputs\n inputs_dict = {\"input_values\": input_values, \"attention_mask\": attention_mask}\n return config, inputs_dict\n\n\n@require_flax\nclass FlaxWav2Vec2ModelTest(FlaxModelTesterMixin, unittest.TestCase):\n all_model_classes = (\n (FlaxWav2Vec2Model, FlaxWav2Vec2ForCTC, FlaxWav2Vec2ForPreTraining) if is_flax_available() else ()\n )\n\n def setUp(self):\n self.model_tester = FlaxWav2Vec2ModelTester(self)\n\n def test_train(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n input_values = inputs_dict[\"input_values\"]\n attention_mask = inputs_dict[\"attention_mask\"]\n\n model = FlaxWav2Vec2ForPreTraining(config)\n\n features_shape = (\n input_values.shape[0],\n model._get_feat_extract_output_lengths(np.array(input_values.shape[1])),\n )\n\n batch_size, sequence_length = features_shape[:2]\n\n mask_prob = 0.5\n mask_length = 4\n mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)\n\n dropout_rng, gumbel_rng = jax.random.split(jax.random.PRNGKey(0))\n\n output = model(\n input_values,\n attention_mask=attention_mask,\n mask_time_indices=mask_time_indices,\n train=True,\n dropout_rng=dropout_rng,\n gumbel_rng=gumbel_rng,\n )[0]\n\n self.assertTrue(output.shape == (batch_size, sequence_length, model.config.proj_codevector_dim))\n\n # overwrite because of `input_values`\n def test_forward_signature(self):\n config, _ = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n signature = inspect.signature(model.__call__)\n # signature.parameters is an OrderedDict => so arg_names order is deterministic\n arg_names = [*signature.parameters.keys()]\n\n expected_arg_names = [\"input_values\", \"attention_mask\"]\n self.assertListEqual(arg_names[:2], expected_arg_names)\n\n # overwrite because of `input_values`\n def test_jit_compilation(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n with self.subTest(model_class.__name__):\n prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)\n model = model_class(config)\n\n @jax.jit\n def model_jitted(input_values, 
attention_mask=None, **kwargs):\n return model(input_values=input_values, attention_mask=attention_mask, **kwargs)\n\n with self.subTest(\"JIT Enabled\"):\n jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()\n\n with self.subTest(\"JIT Disabled\"):\n with jax.disable_jit():\n outputs = model_jitted(**prepared_inputs_dict).to_tuple()\n\n self.assertEqual(len(outputs), len(jitted_outputs))\n for jitted_output, output in zip(jitted_outputs, outputs):\n\n self.assertEqual(jitted_output.shape, output.shape)\n\n def test_freeze_feature_encoder(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n input_values = inputs_dict[\"input_values\"]\n attention_mask = inputs_dict[\"attention_mask\"]\n\n model = FlaxWav2Vec2ForPreTraining(config)\n params = model.params\n\n # dummy loss function\n def compute_loss(\n params, input_values, attention_mask, freeze_feature_encoder: bool = False, epsilon: float = 1e-8\n ):\n outputs = model(\n input_values,\n attention_mask=attention_mask,\n freeze_feature_encoder=freeze_feature_encoder,\n params=params,\n )\n # compute cosine similarity of projected and projected_quantized states\n cosine_sim = optax.cosine_similarity(\n outputs.projected_states, outputs.projected_quantized_states, epsilon=epsilon\n )\n loss = cosine_sim.sum()\n return loss, outputs.to_tuple()\n\n # transform the loss function to get the gradients\n grad_fn = jax.value_and_grad(compute_loss, has_aux=True)\n\n # compute loss, outputs and gradients for unfrozen model\n (loss, outputs), grads = grad_fn(params, input_values, attention_mask, freeze_feature_encoder=False)\n\n # compare to loss, outputs and gradients for frozen model\n (loss_frozen, outputs_frozen), grads_frozen = grad_fn(\n params, input_values, attention_mask, freeze_feature_encoder=True\n )\n\n # ensure that the outputs and losses remain precisely equal\n for output, output_frozen in zip(outputs, outputs_frozen):\n self.assertTrue((output == output_frozen).all())\n self.assertEqual(loss, loss_frozen)\n\n grads = flatten_dict(grads)\n grads_frozen = flatten_dict(grads_frozen)\n\n # ensure that the dicts of gradients contain the same keys\n self.assertEqual(grads.keys(), grads_frozen.keys())\n\n # ensure that the gradients of the feature extractor layers are precisely zero when frozen and contain non-zero entries when unfrozen\n feature_extractor_grads = tuple(grads[k] for k in grads if \"feature_extractor\" in k)\n feature_extractor_grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if \"feature_extractor\" in k)\n\n for feature_extractor_grad, feature_extractor_grad_frozen in zip(\n feature_extractor_grads, feature_extractor_grads_frozen\n ):\n self.assertTrue((feature_extractor_grad_frozen == 0.0).all())\n self.assertTrue((feature_extractor_grad > 0.0).any())\n\n # ensure that the gradients of all unfrozen layers remain equal, i.e. 
all layers excluding the frozen 'feature_extractor'\n grads = tuple(grads[k] for k in grads if \"feature_extractor\" not in k)\n grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if \"feature_extractor\" not in k)\n\n for grad, grad_frozen in zip(grads, grads_frozen):\n self.assertTrue((grad == grad_frozen).all())\n\n @slow\n def test_model_from_pretrained(self):\n for model_class_name in self.all_model_classes:\n model = model_class_name.from_pretrained(\"facebook/wav2vec2-large-960h-lv60-self\", from_pt=True)\n outputs = model(np.ones((1, 1024), dtype=\"f4\"))\n self.assertIsNotNone(outputs)\n\n\n@require_flax\nclass FlaxWav2Vec2UtilsTest(unittest.TestCase):\n def test_compute_mask_indices(self):\n batch_size = 4\n sequence_length = 60\n mask_prob = 0.5\n mask_length = 1\n\n mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)\n\n self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)])\n\n def test_compute_mask_indices_overlap(self):\n batch_size = 4\n sequence_length = 80\n mask_prob = 0.5\n mask_length = 4\n\n mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)\n\n # because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal\n for batch_sum in mask.sum(axis=-1):\n self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)\n\n def test_compute_mask_indices_attn_mask_overlap(self):\n batch_size = 4\n sequence_length = 80\n mask_prob = 0.5\n mask_length = 4\n\n attention_mask = np.ones((batch_size, sequence_length), dtype=np.int32)\n attention_mask[:2, sequence_length // 2 :] = 0\n\n mask = _compute_mask_indices(\n (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask\n )\n\n for batch_sum in mask.sum(axis=-1):\n self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)\n\n self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0)\n\n def test_compute_perplexity(self):\n probs = np.arange(100).reshape(2, 5, 10) / 100\n\n ppl = FlaxWav2Vec2GumbelVectorQuantizer._compute_perplexity(probs)\n self.assertTrue(abs(ppl.item() - 141.4291) < 1e-3)\n\n # mask half of the input\n mask = np.ones((2,), dtype=np.bool)\n mask[0] = 0\n\n ppl = FlaxWav2Vec2GumbelVectorQuantizer._compute_perplexity(probs, mask)\n self.assertTrue(abs(ppl.item() - 58.6757) < 1e-3)\n\n def test_sample_negatives(self):\n batch_size = 2\n sequence_length = 10\n hidden_size = 4\n num_negatives = 3\n\n features = (np.arange(sequence_length * hidden_size) // hidden_size).reshape(\n sequence_length, hidden_size\n ) # each value in vector consits of same value\n features = np.broadcast_to(features[None, :], (batch_size, sequence_length, hidden_size))\n\n negative_indices = _sample_negative_indices(features.shape, num_negatives)\n\n features = features.reshape(-1, hidden_size) # BTC => (BxT)C\n # take negative vectors from sampled indices\n sampled_negatives = features[negative_indices.reshape(-1)]\n negatives = sampled_negatives.reshape(batch_size, sequence_length, num_negatives, hidden_size).transpose(\n 2, 0, 1, 3\n )\n\n self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size))\n\n # make sure no negatively sampled vector is actually a positive one\n for negative in negatives:\n self.assertTrue(((negative - features.reshape(negative.shape)) == 0).sum() == 0.0)\n\n # make sure that full vectors are sampled and not values of vectors\n # => this means that `unique()` yields a 
single value for `hidden_size` dim\n self.assertEqual(np.unique(negatives, axis=-1).shape, (num_negatives, batch_size, sequence_length, 1))\n\n def test_sample_negatives_with_attn_mask(self):\n batch_size = 2\n sequence_length = 10\n hidden_size = 4\n num_negatives = 3\n\n features = (np.arange(sequence_length * hidden_size) // hidden_size).reshape(\n sequence_length, hidden_size\n ) # each value in vector consits of same value\n\n # second half of last input tensor is padded\n attention_mask = np.ones((batch_size, sequence_length), dtype=np.int8)\n attention_mask[-1, sequence_length // 2 :] = 0\n\n forbidden_indices = (\n np.arange(sequence_length // 2, sequence_length, dtype=np.int32) + (batch_size - 1) * sequence_length\n ).tolist()\n\n features = np.broadcast_to(features[None, :], (batch_size, sequence_length, hidden_size))\n\n negative_indices = _sample_negative_indices(features.shape, num_negatives, attention_mask=attention_mask)\n\n # make sure that no padding tokens are sampled\n self.assertTrue(all([idx not in negative_indices for idx in forbidden_indices]))\n\n features = features.reshape(-1, hidden_size) # BTC => (BxT)C\n # take negative vectors from sampled indices\n sampled_negatives = features[negative_indices.reshape(-1)]\n negatives = sampled_negatives.reshape(batch_size, sequence_length, num_negatives, hidden_size).transpose(\n 2, 0, 1, 3\n )\n\n self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size))\n\n # make sure no negatively sampled vector is actually a positive one\n for negative in negatives:\n self.assertTrue(((negative - features.reshape(negative.shape)) == 0).sum() == 0.0)\n\n # make sure that full vectors are sampled and not just slices of vectors\n # => this means that `unique()` yields a single value for `hidden_size` dim\n self.assertEqual(np.unique(negatives, axis=-1).shape, (num_negatives, batch_size, sequence_length, 1))\n\n\n@require_flax\n@require_soundfile\n@slow\nclass FlaxWav2Vec2ModelIntegrationTest(unittest.TestCase):\n def _load_datasamples(self, num_samples):\n ds = load_dataset(\"hf-internal-testing/librispeech_asr_dummy\", \"clean\", split=\"validation\")\n # automatic decoding with librispeech\n speech_samples = ds.sort(\"id\").filter(\n lambda x: x[\"id\"] in [f\"1272-141231-000{i}\" for i in range(num_samples)]\n )[:num_samples][\"audio\"]\n\n return [x[\"array\"] for x in speech_samples]\n\n def test_inference_ctc_robust_batched(self):\n model = FlaxWav2Vec2ForCTC.from_pretrained(\"facebook/wav2vec2-large-960h-lv60-self\", from_pt=True)\n processor = Wav2Vec2Processor.from_pretrained(\"facebook/wav2vec2-large-960h-lv60-self\", do_lower_case=True)\n\n input_speech = self._load_datasamples(4)\n\n inputs = processor(input_speech, return_tensors=\"np\", padding=True)\n\n input_values = inputs.input_values\n attention_mask = inputs.attention_mask\n\n logits = model(input_values, attention_mask=attention_mask).logits\n\n predicted_ids = jnp.argmax(logits, axis=-1)\n predicted_trans = processor.batch_decode(predicted_ids)\n\n EXPECTED_TRANSCRIPTIONS = [\n \"a man said to the universe sir i exist\",\n \"sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore\",\n \"the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around him with the thousands of spectators were trivialities not worth thinking about\",\n \"his instant panic was followed by a small sharp blow high on his chest\",\n ]\n self.assertListEqual(predicted_trans, 
EXPECTED_TRANSCRIPTIONS)\n\n def test_inference_pretrained(self):\n model = FlaxWav2Vec2ForPreTraining.from_pretrained(\"facebook/wav2vec2-large-lv60\", from_pt=True)\n feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(\n \"facebook/wav2vec2-large-lv60\", return_attention_mask=True\n )\n input_speech = self._load_datasamples(2)\n\n inputs_dict = feature_extractor(input_speech, return_tensors=\"np\", padding=True)\n\n features_shape = (\n inputs_dict[\"input_values\"].shape[0],\n model._get_feat_extract_output_lengths(np.array(inputs_dict[\"input_values\"].shape[1])),\n )\n\n mask_time_indices = _compute_mask_indices(\n features_shape,\n model.config.mask_time_prob,\n model.config.mask_time_length,\n min_masks=2,\n )\n\n outputs = model(\n inputs_dict.input_values,\n attention_mask=inputs_dict.attention_mask,\n mask_time_indices=mask_time_indices,\n )\n\n # compute cosine similarity\n cosine_sim = optax.cosine_similarity(\n outputs.projected_states, outputs.projected_quantized_states, epsilon=1e-8\n )\n\n # retrieve cosine sim of masked features\n cosine_sim_masked = cosine_sim[mask_time_indices]\n\n # ... now compare to randomly initialized model\n\n config = Wav2Vec2Config.from_pretrained(\"facebook/wav2vec2-large-lv60\")\n model_rand = FlaxWav2Vec2ForPreTraining(config)\n\n outputs_rand = model_rand(\n inputs_dict.input_values,\n attention_mask=inputs_dict.attention_mask,\n mask_time_indices=mask_time_indices,\n )\n\n # compute cosine similarity\n cosine_sim_rand = optax.cosine_similarity(\n outputs_rand.projected_states, outputs_rand.projected_quantized_states\n )\n\n # retrieve cosine sim of masked features\n cosine_sim_masked_rand = cosine_sim_rand[mask_time_indices]\n\n # a pretrained wav2vec2 model has learned to predict the quantized latent states\n # => the cosine similarity between quantized states and predicted states > 0.5\n # a random wav2vec2 model has not learned to predict the quantized latent states\n # => the cosine similarity between quantized states and predicted states is very likely < 0.1\n self.assertTrue(cosine_sim_masked.mean().item() - 5 * cosine_sim_masked_rand.mean().item() > 0)\n\n @require_pyctcdecode\n @require_librosa\n def test_wav2vec2_with_lm(self):\n ds = load_dataset(\"common_voice\", \"es\", split=\"test\", streaming=True)\n sample = next(iter(ds))\n\n resampled_audio = librosa.resample(sample[\"audio\"][\"array\"], 48_000, 16_000)\n\n model = FlaxWav2Vec2ForCTC.from_pretrained(\"patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm\")\n processor = Wav2Vec2ProcessorWithLM.from_pretrained(\"patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm\")\n\n input_values = processor(resampled_audio, return_tensors=\"np\").input_values\n\n logits = model(input_values).logits\n\n transcription = processor.batch_decode(np.array(logits)).text\n\n self.assertEqual(transcription[0], \"bien y qué regalo vas a abrir primero\")\n"
] | [
[
"numpy.ones",
"numpy.arange",
"numpy.broadcast_to",
"numpy.array",
"numpy.unique"
]
] |
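`test_freeze_feature_encoder` in the record above checks parameter freezing by differentiating a loss with `jax.value_and_grad(..., has_aux=True)` and comparing gradients between the frozen and unfrozen runs. A toy sketch of that idiom, assuming a linear layer in place of `FlaxWav2Vec2ForPreTraining` and `jax.lax.stop_gradient` as the freezing mechanism:

```python
import jax
import jax.numpy as jnp

def loss_fn(params, x, freeze=False):
    # stop_gradient mimics freeze_feature_encoder: the forward pass is
    # unchanged, but no gradient flows into the frozen parameters.
    w = jax.lax.stop_gradient(params["w"]) if freeze else params["w"]
    pred = x @ w
    return jnp.sum(pred ** 2), pred  # (loss, aux), as with outputs.to_tuple()

params = {"w": jnp.ones((3, 2))}
x = jnp.arange(6.0).reshape(2, 3)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(loss, _), grads = grad_fn(params, x, freeze=False)
(loss_frozen, _), grads_frozen = grad_fn(params, x, freeze=True)

assert loss == loss_frozen                  # outputs stay identical
assert (grads_frozen["w"] == 0.0).all()     # frozen grads are exactly zero
assert (grads["w"] != 0.0).any()            # unfrozen grads are not
```

This mirrors the structure of the real test: identical losses and outputs across the two runs, zero gradients for the frozen subtree, and matching gradients everywhere else.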
templeblock/automl | [
"0a73e836fd4a9d22919cb1ff5af9ca30082fa4b2"
] | [
"efficientdet/det_model_fn.py"
] | [
"# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Model function definition, including both architecture and loss.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport re\nfrom absl import logging\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nimport coco_metric\nimport efficientdet_arch\nimport hparams_config\nimport iou_utils\nimport nms_np\nimport retinanet_arch\nimport utils\nfrom keras import anchors\nfrom keras import postprocess\n\n_DEFAULT_BATCH_SIZE = 64\n\n\ndef update_learning_rate_schedule_parameters(params):\n \"\"\"Updates params that are related to the learning rate schedule.\"\"\"\n # params['batch_size'] is per-shard within model_fn if strategy=tpu.\n batch_size = (\n params['batch_size'] * params['num_shards']\n if params['strategy'] == 'tpu' else params['batch_size'])\n # Learning rate is proportional to the batch size\n params['adjusted_learning_rate'] = (\n params['learning_rate'] * batch_size / _DEFAULT_BATCH_SIZE)\n steps_per_epoch = params['num_examples_per_epoch'] / batch_size\n params['lr_warmup_step'] = int(params['lr_warmup_epoch'] * steps_per_epoch)\n params['first_lr_drop_step'] = int(params['first_lr_drop_epoch'] *\n steps_per_epoch)\n params['second_lr_drop_step'] = int(params['second_lr_drop_epoch'] *\n steps_per_epoch)\n params['total_steps'] = int(params['num_epochs'] * steps_per_epoch)\n params['steps_per_epoch'] = steps_per_epoch\n\n\ndef stepwise_lr_schedule(adjusted_learning_rate, lr_warmup_init, lr_warmup_step,\n first_lr_drop_step, second_lr_drop_step, global_step):\n \"\"\"Handles linear scaling rule, gradual warmup, and LR decay.\"\"\"\n # lr_warmup_init is the starting learning rate; the learning rate is linearly\n # scaled up to the full learning rate after `lr_warmup_step` before decaying.\n logging.info('LR schedule method: stepwise')\n linear_warmup = (\n lr_warmup_init +\n (tf.cast(global_step, dtype=tf.float32) / lr_warmup_step *\n (adjusted_learning_rate - lr_warmup_init)))\n learning_rate = tf.where(global_step < lr_warmup_step, linear_warmup,\n adjusted_learning_rate)\n lr_schedule = [[1.0, lr_warmup_step], [0.1, first_lr_drop_step],\n [0.01, second_lr_drop_step]]\n for mult, start_global_step in lr_schedule:\n learning_rate = tf.where(global_step < start_global_step, learning_rate,\n adjusted_learning_rate * mult)\n return learning_rate\n\n\ndef cosine_lr_schedule(adjusted_lr, lr_warmup_init, lr_warmup_step, total_steps,\n step):\n logging.info('LR schedule method: cosine')\n linear_warmup = (\n lr_warmup_init + (tf.cast(step, dtype=tf.float32) / lr_warmup_step *\n (adjusted_lr - lr_warmup_init)))\n decay_steps = tf.cast(total_steps - lr_warmup_step, tf.float32)\n cosine_lr = 0.5 * adjusted_lr * (\n 1 + tf.cos(np.pi * tf.cast(step, tf.float32) / decay_steps))\n return tf.where(step < 
lr_warmup_step, linear_warmup, cosine_lr)\n\n\ndef polynomial_lr_schedule(adjusted_lr, lr_warmup_init, lr_warmup_step, power,\n total_steps, step):\n logging.info('LR schedule method: polynomial')\n linear_warmup = (\n lr_warmup_init + (tf.cast(step, dtype=tf.float32) / lr_warmup_step *\n (adjusted_lr - lr_warmup_init)))\n polynomial_lr = adjusted_lr * tf.pow(\n 1 - (tf.cast(step, tf.float32) / total_steps), power)\n return tf.where(step < lr_warmup_step, linear_warmup, polynomial_lr)\n\n\ndef learning_rate_schedule(params, global_step):\n \"\"\"Learning rate schedule based on global step.\"\"\"\n lr_decay_method = params['lr_decay_method']\n if lr_decay_method == 'stepwise':\n return stepwise_lr_schedule(params['adjusted_learning_rate'],\n params['lr_warmup_init'],\n params['lr_warmup_step'],\n params['first_lr_drop_step'],\n params['second_lr_drop_step'], global_step)\n\n if lr_decay_method == 'cosine':\n return cosine_lr_schedule(params['adjusted_learning_rate'],\n params['lr_warmup_init'],\n params['lr_warmup_step'], params['total_steps'],\n global_step)\n\n if lr_decay_method == 'polynomial':\n return polynomial_lr_schedule(params['adjusted_learning_rate'],\n params['lr_warmup_init'],\n params['lr_warmup_step'],\n params['poly_lr_power'],\n params['total_steps'], global_step)\n\n if lr_decay_method == 'constant':\n return params['adjusted_learning_rate']\n\n raise ValueError('unknown lr_decay_method: {}'.format(lr_decay_method))\n\n\ndef focal_loss(y_pred, y_true, alpha, gamma, normalizer, label_smoothing=0.0):\n \"\"\"Compute the focal loss between `logits` and the golden `target` values.\n\n Focal loss = -(1-pt)^gamma * log(pt)\n where pt is the probability of being classified to the true class.\n\n Args:\n y_pred: A float32 tensor of size [batch, height_in, width_in,\n num_predictions].\n y_true: A float32 tensor of size [batch, height_in, width_in,\n num_predictions].\n alpha: A float32 scalar multiplying alpha to the loss from positive examples\n and (1-alpha) to the loss from negative examples.\n gamma: A float32 scalar modulating loss from hard and easy examples.\n normalizer: Divide loss by this value.\n label_smoothing: Float in [0, 1]. 
If > `0` then smooth the labels.\n\n Returns:\n loss: A float32 scalar representing normalized total loss.\n \"\"\"\n with tf.name_scope('focal_loss'):\n alpha = tf.convert_to_tensor(alpha, dtype=y_pred.dtype)\n gamma = tf.convert_to_tensor(gamma, dtype=y_pred.dtype)\n\n # compute focal loss multipliers before label smoothing, such that it will\n # not blow up the loss.\n pred_prob = tf.sigmoid(y_pred)\n p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))\n alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)\n modulating_factor = (1.0 - p_t) ** gamma\n\n # apply label smoothing for cross_entropy for each entry.\n y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing\n ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)\n\n # compute the final loss and return\n return alpha_factor * modulating_factor * ce / normalizer\n\n\ndef _box_loss(box_outputs, box_targets, num_positives, delta=0.1):\n \"\"\"Computes box regression loss.\"\"\"\n # delta is typically around the mean value of regression target.\n # for instances, the regression targets of 512x512 input with 6 anchors on\n # P3-P7 pyramid is about [0.1, 0.1, 0.2, 0.2].\n normalizer = num_positives * 4.0\n mask = tf.not_equal(box_targets, 0.0)\n box_loss = tf.losses.huber_loss(\n box_targets,\n box_outputs,\n weights=mask,\n delta=delta,\n reduction=tf.losses.Reduction.SUM)\n box_loss /= normalizer\n return box_loss\n\n\ndef _box_iou_loss(box_outputs, box_targets, num_positives, iou_loss_type):\n \"\"\"Computes box iou loss.\"\"\"\n normalizer = num_positives * 4.0\n box_iou_loss = iou_utils.iou_loss(box_outputs, box_targets, iou_loss_type)\n box_iou_loss = tf.reduce_sum(box_iou_loss) / normalizer\n return box_iou_loss\n\n\ndef detection_loss(cls_outputs, box_outputs, labels, params):\n \"\"\"Computes total detection loss.\n\n Computes total detection loss including box and class loss from all levels.\n Args:\n cls_outputs: an OrderDict with keys representing levels and values\n representing logits in [batch_size, height, width, num_anchors].\n box_outputs: an OrderDict with keys representing levels and values\n representing box regression targets in [batch_size, height, width,\n num_anchors * 4].\n labels: the dictionary that returned from dataloader that includes\n groundtruth targets.\n params: the dictionary including training parameters specified in\n default_haprams function in this file.\n\n Returns:\n total_loss: an integer tensor representing total loss reducing from\n class and box losses from all levels.\n cls_loss: an integer tensor representing total class loss.\n box_loss: an integer tensor representing total box regression loss.\n box_iou_loss: an integer tensor representing total box iou loss.\n \"\"\"\n # Sum all positives in a batch for normalization and avoid zero\n # num_positives_sum, which would lead to inf loss during training\n num_positives_sum = tf.reduce_sum(labels['mean_num_positives']) + 1.0\n levels = cls_outputs.keys()\n\n cls_losses = []\n box_losses = []\n for level in levels:\n # Onehot encoding for classification labels.\n cls_targets_at_level = tf.one_hot(labels['cls_targets_%d' % level],\n params['num_classes'])\n\n if params['data_format'] == 'channels_first':\n bs, _, width, height, _ = cls_targets_at_level.get_shape().as_list()\n cls_targets_at_level = tf.reshape(cls_targets_at_level,\n [bs, -1, width, height])\n else:\n bs, width, height, _, _ = cls_targets_at_level.get_shape().as_list()\n cls_targets_at_level = tf.reshape(cls_targets_at_level,\n 
[bs, width, height, -1])\n box_targets_at_level = labels['box_targets_%d' % level]\n\n cls_loss = focal_loss(\n cls_outputs[level],\n cls_targets_at_level,\n params['alpha'],\n params['gamma'],\n normalizer=num_positives_sum,\n label_smoothing=params['label_smoothing'])\n\n if params['data_format'] == 'channels_first':\n cls_loss = tf.reshape(cls_loss,\n [bs, -1, width, height, params['num_classes']])\n else:\n cls_loss = tf.reshape(cls_loss,\n [bs, width, height, -1, params['num_classes']])\n cls_loss *= tf.cast(\n tf.expand_dims(tf.not_equal(labels['cls_targets_%d' % level], -2), -1),\n tf.float32)\n cls_losses.append(tf.reduce_sum(cls_loss))\n\n if params['box_loss_weight']:\n box_losses.append(\n _box_loss(\n box_outputs[level],\n box_targets_at_level,\n num_positives_sum,\n delta=params['delta']))\n\n if params['iou_loss_type']:\n input_anchors = anchors.Anchors(params['min_level'], params['max_level'],\n params['num_scales'],\n params['aspect_ratios'],\n params['anchor_scale'],\n params['image_size'])\n box_output_list = [tf.reshape(box_outputs[i], [-1, 4]) for i in levels]\n box_outputs = tf.concat(box_output_list, axis=0)\n box_target_list = [\n tf.reshape(labels['box_targets_%d' % level], [-1, 4])\n for level in levels\n ]\n box_targets = tf.concat(box_target_list, axis=0)\n anchor_boxes = tf.tile(input_anchors.boxes, [params['batch_size'], 1])\n box_outputs = anchors.decode_box_outputs(box_outputs, anchor_boxes)\n box_targets = anchors.decode_box_outputs(box_targets, anchor_boxes)\n box_iou_loss = _box_iou_loss(box_outputs, box_targets, num_positives_sum,\n params['iou_loss_type'])\n\n else:\n box_iou_loss = 0\n\n # Sum per level losses to total loss.\n cls_loss = tf.add_n(cls_losses)\n box_loss = tf.add_n(box_losses) if box_losses else 0\n\n total_loss = (\n cls_loss +\n params['box_loss_weight'] * box_loss +\n params['iou_loss_weight'] * box_iou_loss)\n\n return total_loss, cls_loss, box_loss, box_iou_loss\n\n\ndef reg_l2_loss(weight_decay, regex=r'.*(kernel|weight):0$'):\n \"\"\"Return regularization l2 loss loss.\"\"\"\n var_match = re.compile(regex)\n return weight_decay * tf.add_n([\n tf.nn.l2_loss(v)\n for v in tf.trainable_variables()\n if var_match.match(v.name)\n ])\n\n\ndef _model_fn(features, labels, mode, params, model, variable_filter_fn=None):\n \"\"\"Model definition entry.\n\n Args:\n features: the input image tensor with shape [batch_size, height, width, 3].\n The height and width are fixed and equal.\n labels: the input labels in a dictionary. The labels include class targets\n and box targets which are dense label maps. The labels are generated from\n get_input_fn function in data/dataloader.py\n mode: the mode of TPUEstimator including TRAIN, EVAL, and PREDICT.\n params: the dictionary defines hyperparameters of model. 
The default\n settings are in default_hparams function in this file.\n model: the model outputs class logits and box regression outputs.\n variable_filter_fn: the filter function that takes trainable_variables and\n returns the variable list after applying the filter rule.\n\n Returns:\n tpu_spec: the TPUEstimatorSpec to run training, evaluation, or prediction.\n\n Raises:\n RuntimeError: if both ckpt and backbone_ckpt are set.\n \"\"\"\n utils.image('input_image', features)\n training_hooks = []\n\n def _model_outputs(inputs):\n # Convert params (dict) to Config for easier access.\n return model(inputs, config=hparams_config.Config(params))\n\n precision = utils.get_precision(params['strategy'], params['mixed_precision'])\n cls_outputs, box_outputs = utils.build_model_with_precision(\n precision, _model_outputs, features, params['is_training_bn'])\n\n levels = cls_outputs.keys()\n for level in levels:\n cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)\n box_outputs[level] = tf.cast(box_outputs[level], tf.float32)\n\n # First check if it is in PREDICT mode.\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n 'image': features,\n }\n for level in levels:\n predictions['cls_outputs_%d' % level] = cls_outputs[level]\n predictions['box_outputs_%d' % level] = box_outputs[level]\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Set up training loss and learning rate.\n update_learning_rate_schedule_parameters(params)\n global_step = tf.train.get_or_create_global_step()\n learning_rate = learning_rate_schedule(params, global_step)\n\n # cls_loss and box_loss are for logging. only total_loss is optimized.\n det_loss, cls_loss, box_loss, box_iou_loss = detection_loss(\n cls_outputs, box_outputs, labels, params)\n reg_l2loss = reg_l2_loss(params['weight_decay'])\n total_loss = det_loss + reg_l2loss\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n utils.scalar('lrn_rate', learning_rate)\n utils.scalar('trainloss/cls_loss', cls_loss)\n utils.scalar('trainloss/box_loss', box_loss)\n utils.scalar('trainloss/det_loss', det_loss)\n utils.scalar('trainloss/reg_l2_loss', reg_l2loss)\n utils.scalar('trainloss/loss', total_loss)\n if params['iou_loss_type']:\n utils.scalar('trainloss/box_iou_loss', box_iou_loss)\n train_epochs = tf.cast(global_step, tf.float32) / params['steps_per_epoch']\n utils.scalar('train_epochs', train_epochs)\n\n moving_average_decay = params['moving_average_decay']\n if moving_average_decay:\n ema = tf.train.ExponentialMovingAverage(\n decay=moving_average_decay, num_updates=global_step)\n ema_vars = utils.get_ema_vars()\n if params['strategy'] == 'horovod':\n import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top\n learning_rate = learning_rate * hvd.size()\n if mode == tf.estimator.ModeKeys.TRAIN:\n if params['optimizer'].lower() == 'sgd':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate, momentum=params['momentum'])\n elif params['optimizer'].lower() == 'adam':\n optimizer = tf.train.AdamOptimizer(learning_rate)\n else:\n raise ValueError('optimizers should be adam or sgd')\n\n if params['strategy'] == 'tpu':\n optimizer = tf.tpu.CrossShardOptimizer(optimizer)\n elif params['strategy'] == 'horovod':\n optimizer = hvd.DistributedOptimizer(optimizer)\n training_hooks = [hvd.BroadcastGlobalVariablesHook(0)]\n\n # Batch norm requires update_ops to be added as a train_op dependency.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n var_list = tf.trainable_variables()\n if variable_filter_fn:\n var_list = 
variable_filter_fn(var_list)\n\n if params.get('clip_gradients_norm', 0) > 0:\n logging.info('clip gradients norm by %f', params['clip_gradients_norm'])\n grads_and_vars = optimizer.compute_gradients(total_loss, var_list)\n with tf.name_scope('clip'):\n grads = [gv[0] for gv in grads_and_vars]\n tvars = [gv[1] for gv in grads_and_vars]\n clipped_grads, gnorm = tf.clip_by_global_norm(\n grads, params['clip_gradients_norm'])\n utils.scalar('gnorm', gnorm)\n grads_and_vars = list(zip(clipped_grads, tvars))\n\n with tf.control_dependencies(update_ops):\n train_op = optimizer.apply_gradients(grads_and_vars, global_step)\n else:\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(\n total_loss, global_step, var_list=var_list)\n\n if moving_average_decay:\n with tf.control_dependencies([train_op]):\n train_op = ema.apply(ema_vars)\n\n else:\n train_op = None\n\n eval_metrics = None\n if mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(**kwargs):\n \"\"\"Returns a dictionary that has the evaluation metrics.\"\"\"\n if params['nms_configs'].get('pyfunc', True):\n detections_bs = []\n for index in range(kwargs['boxes'].shape[0]):\n nms_configs = params['nms_configs']\n detections = tf.numpy_function(\n functools.partial(nms_np.per_class_nms, nms_configs=nms_configs),\n [\n kwargs['boxes'][index],\n kwargs['scores'][index],\n kwargs['classes'][index],\n tf.slice(kwargs['image_ids'], [index], [1]),\n tf.slice(kwargs['image_scales'], [index], [1]),\n params['num_classes'],\n nms_configs['max_output_size'],\n ], tf.float32)\n detections_bs.append(detections)\n else:\n # These two branches should be equivalent, but currently they are not.\n # TODO(tanmingxing): enable the non_pyfunc path after bug fix.\n nms_boxes, nms_scores, nms_classes, _ = postprocess.per_class_nms(\n params, kwargs['boxes'], kwargs['scores'], kwargs['classes'],\n kwargs['image_scales'])\n img_ids = tf.cast(\n tf.expand_dims(kwargs['image_ids'], -1), nms_scores.dtype)\n detections_bs = [\n img_ids * tf.ones_like(nms_scores),\n nms_boxes[:, :, 1],\n nms_boxes[:, :, 0],\n nms_boxes[:, :, 3] - nms_boxes[:, :, 1],\n nms_boxes[:, :, 2] - nms_boxes[:, :, 0],\n nms_scores,\n nms_classes,\n ]\n detections_bs = tf.stack(detections_bs, axis=-1, name='detections')\n\n if params.get('testdev_dir', None):\n logging.info('Eval testdev_dir %s', params['testdev_dir'])\n eval_metric = coco_metric.EvaluationMetric(\n testdev_dir=params['testdev_dir'])\n coco_metrics = eval_metric.estimator_metric_fn(detections_bs,\n tf.zeros([1]))\n else:\n logging.info('Eval val with groundtruths %s.', params['val_json_file'])\n eval_metric = coco_metric.EvaluationMetric(\n filename=params['val_json_file'])\n coco_metrics = eval_metric.estimator_metric_fn(\n detections_bs, kwargs['groundtruth_data'])\n\n # Add metrics to output.\n cls_loss = tf.metrics.mean(kwargs['cls_loss_repeat'])\n box_loss = tf.metrics.mean(kwargs['box_loss_repeat'])\n output_metrics = {\n 'cls_loss': cls_loss,\n 'box_loss': box_loss,\n }\n output_metrics.update(coco_metrics)\n return output_metrics\n\n cls_loss_repeat = tf.reshape(\n tf.tile(tf.expand_dims(cls_loss, 0), [\n params['batch_size'],\n ]), [params['batch_size'], 1])\n box_loss_repeat = tf.reshape(\n tf.tile(tf.expand_dims(box_loss, 0), [\n params['batch_size'],\n ]), [params['batch_size'], 1])\n\n cls_outputs = postprocess.to_list(cls_outputs)\n box_outputs = postprocess.to_list(box_outputs)\n params['nms_configs']['max_nms_inputs'] = anchors.MAX_DETECTION_POINTS\n boxes, scores, classes =
postprocess.pre_nms(params, cls_outputs,\n box_outputs)\n metric_fn_inputs = {\n 'cls_loss_repeat': cls_loss_repeat,\n 'box_loss_repeat': box_loss_repeat,\n 'image_ids': labels['source_ids'],\n 'groundtruth_data': labels['groundtruth_data'],\n 'image_scales': labels['image_scales'],\n 'boxes': boxes,\n 'scores': scores,\n 'classes': classes,\n }\n eval_metrics = (metric_fn, metric_fn_inputs)\n\n checkpoint = params.get('ckpt') or params.get('backbone_ckpt')\n\n if checkpoint and mode == tf.estimator.ModeKeys.TRAIN:\n # Initialize the model from an EfficientDet or backbone checkpoint.\n if params.get('ckpt') and params.get('backbone_ckpt'):\n raise RuntimeError(\n '--backbone_ckpt and --checkpoint are mutually exclusive')\n\n if params.get('backbone_ckpt'):\n var_scope = params['backbone_name'] + '/'\n if params['ckpt_var_scope'] is None:\n # Use backbone name as default checkpoint scope.\n ckpt_scope = params['backbone_name'] + '/'\n else:\n ckpt_scope = params['ckpt_var_scope'] + '/'\n else:\n # Load every var in the given checkpoint\n var_scope = ckpt_scope = '/'\n\n def scaffold_fn():\n \"\"\"Loads pretrained model through scaffold function.\"\"\"\n logging.info('restore variables from %s', checkpoint)\n\n var_map = utils.get_ckpt_var_map(\n ckpt_path=checkpoint,\n ckpt_scope=ckpt_scope,\n var_scope=var_scope,\n skip_mismatch=params['skip_mismatch'])\n\n tf.train.init_from_checkpoint(checkpoint, var_map)\n\n return tf.train.Scaffold()\n elif mode == tf.estimator.ModeKeys.EVAL and moving_average_decay:\n\n def scaffold_fn():\n \"\"\"Load moving average variables for eval.\"\"\"\n logging.info('Load EMA vars with ema_decay=%f', moving_average_decay)\n restore_vars_dict = ema.variables_to_restore(ema_vars)\n saver = tf.train.Saver(restore_vars_dict)\n return tf.train.Scaffold(saver=saver)\n else:\n scaffold_fn = None\n\n if params['strategy'] != 'tpu':\n # Profile every 1K steps.\n profile_hook = tf.train.ProfilerHook(\n save_steps=1000, output_dir=params['model_dir'])\n training_hooks.append(profile_hook)\n\n # Report memory allocation if OOM\n class OomReportingHook(tf.estimator.SessionRunHook):\n\n def before_run(self, run_context):\n return tf.estimator.SessionRunArgs(\n fetches=[],\n options=tf.RunOptions(report_tensor_allocations_upon_oom=True))\n\n training_hooks.append(OomReportingHook())\n\n return tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n eval_metrics=eval_metrics,\n host_call=utils.get_tpu_host_call(global_step, params),\n scaffold_fn=scaffold_fn,\n training_hooks=training_hooks)\n\n\ndef retinanet_model_fn(features, labels, mode, params):\n \"\"\"RetinaNet model.\"\"\"\n variable_filter_fn = functools.partial(\n retinanet_arch.remove_variables, resnet_depth=params['resnet_depth'])\n return _model_fn(\n features,\n labels,\n mode,\n params,\n model=retinanet_arch.retinanet,\n variable_filter_fn=variable_filter_fn)\n\n\ndef efficientdet_model_fn(features, labels, mode, params):\n \"\"\"EfficientDet model.\"\"\"\n variable_filter_fn = functools.partial(\n efficientdet_arch.freeze_vars, pattern=params['var_freeze_expr'])\n return _model_fn(\n features,\n labels,\n mode,\n params,\n model=efficientdet_arch.efficientdet,\n variable_filter_fn=variable_filter_fn)\n\n\ndef get_model_arch(model_name='efficientdet-d0'):\n \"\"\"Get model architecture for a given model name.\"\"\"\n if 'retinanet' in model_name:\n return retinanet_arch.retinanet\n\n if 'efficientdet' in model_name:\n return efficientdet_arch.efficientdet\n\n raise 
ValueError('Invalid model name {}'.format(model_name))\n\n\ndef get_model_fn(model_name='efficientdet-d0'):\n \"\"\"Get model fn for a given model name.\"\"\"\n if 'retinanet' in model_name:\n return retinanet_model_fn\n\n if 'efficientdet' in model_name:\n return efficientdet_model_fn\n\n raise ValueError('Invalid model name {}'.format(model_name))\n"
] | [
[
"tensorflow.compat.v1.not_equal",
"tensorflow.compat.v1.RunOptions",
"tensorflow.compat.v1.losses.huber_loss",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.train.Saver",
"tensorflow.compat.v1.train.Scaffold",
"tensorflow.compat.v1.where",
"tensorflow.compat.v1.train.init_from_checkpoint",
"tensorflow.compat.v1.name_scope",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.train.ExponentialMovingAverage",
"tensorflow.compat.v1.add_n",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.tile",
"tensorflow.compat.v1.estimator.EstimatorSpec",
"tensorflow.compat.v1.slice",
"tensorflow.compat.v1.one_hot",
"tensorflow.compat.v1.train.ProfilerHook",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.sigmoid",
"tensorflow.compat.v1.metrics.mean",
"tensorflow.compat.v1.clip_by_global_norm",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.tpu.CrossShardOptimizer",
"tensorflow.compat.v1.train.MomentumOptimizer",
"tensorflow.compat.v1.ones_like",
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.compat.v1.nn.l2_loss",
"tensorflow.compat.v1.train.get_or_create_global_step",
"tensorflow.compat.v1.convert_to_tensor",
"tensorflow.compat.v1.get_collection"
]
] |
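The `focal_loss` in the `det_model_fn.py` entry above combines an alpha class-balancing factor, a `(1 - p_t) ** gamma` modulating factor, and a label-smoothed sigmoid cross-entropy, all divided by a positives-count normalizer. A minimal NumPy sketch of the same arithmetic (the function name and test values here are illustrative, not from the source):

```python
import numpy as np

def focal_loss_np(y_pred, y_true, alpha=0.25, gamma=1.5,
                  normalizer=1.0, label_smoothing=0.0):
    """NumPy sketch of the sigmoid focal loss above: alpha and the
    modulating factor use the raw labels; label smoothing only enters
    the cross-entropy term."""
    pred_prob = 1.0 / (1.0 + np.exp(-y_pred))  # sigmoid
    p_t = y_true * pred_prob + (1 - y_true) * (1 - pred_prob)
    alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
    modulating_factor = (1.0 - p_t) ** gamma
    y_s = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
    # numerically stable sigmoid cross-entropy with logits
    ce = np.maximum(y_pred, 0) - y_pred * y_s + np.log1p(np.exp(-np.abs(y_pred)))
    return alpha_factor * modulating_factor * ce / normalizer

# A well-classified positive (logit 4.0) is down-weighted by orders of
# magnitude relative to a misclassified one (logit -4.0).
print(focal_loss_np(np.array([4.0, -4.0]), np.array([1.0, 1.0])))
```

The modulating factor is the whole point of the loss: confident, correct predictions contribute almost nothing, so training focuses on the hard examples.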
holajoa/keras-YOLOv3-model-set | [
"c15b8a2f48371c063f6482b25593dc70d5956323"
] | [
"yolo3/models/yolo3_resnet50.py"
] | [
"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"YOLO_v3 ResNet50 Model Defined in Keras.\"\"\"\r\n\r\nfrom tensorflow.keras.layers import UpSampling2D, Concatenate\r\nfrom tensorflow.keras.models import Model\r\nfrom tensorflow.keras.applications.resnet import ResNet50\r\n\r\nfrom yolo3.models.layers import yolo3_predictions, yolo3lite_predictions, tiny_yolo3_predictions, tiny_yolo3lite_predictions\r\n\r\n\r\ndef yolo3_resnet50_body(inputs, num_anchors, num_classes):\r\n \"\"\"Create YOLO_V3 ResNet50 model CNN body in Keras.\"\"\"\r\n resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)\r\n print('backbone layers number: {}'.format(len(resnet50.layers)))\r\n\r\n # input: 416 x 416 x 3\r\n # conv5_block3_out: 13 x 13 x 2048\r\n # conv4_block6_out: 26 x 26 x 1024\r\n # conv3_block4_out: 52 x 52 x 512\r\n\r\n # f1 :13 x 13 x 2048\r\n f1 = resnet50.get_layer('conv5_block3_out').output\r\n # f2: 26 x 26 x 1024\r\n f2 = resnet50.get_layer('conv4_block6_out').output\r\n # f3 : 52 x 52 x 512\r\n f3 = resnet50.get_layer('conv3_block4_out').output\r\n\r\n f1_channel_num = 1024\r\n f2_channel_num = 512\r\n f3_channel_num = 256\r\n\r\n y1, y2, y3 = yolo3_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)\r\n\r\n return Model(inputs = inputs, outputs=[y1,y2,y3])\r\n\r\n\r\ndef yolo3lite_resnet50_body(inputs, num_anchors, num_classes):\r\n '''Create YOLO_v3 Lite ResNet50 model CNN body in keras.'''\r\n resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)\r\n print('backbone layers number: {}'.format(len(resnet50.layers)))\r\n\r\n # input: 416 x 416 x 3\r\n # conv5_block3_out: 13 x 13 x 2048\r\n # conv4_block6_out: 26 x 26 x 1024\r\n # conv3_block4_out: 52 x 52 x 512\r\n\r\n # f1 :13 x 13 x 2048\r\n f1 = resnet50.get_layer('conv5_block3_out').output\r\n # f2: 26 x 26 x 1024\r\n f2 = resnet50.get_layer('conv4_block6_out').output\r\n # f3 : 52 x 52 x 512\r\n f3 = resnet50.get_layer('conv3_block4_out').output\r\n\r\n f1_channel_num = 1024\r\n f2_channel_num = 512\r\n f3_channel_num = 256\r\n\r\n y1, y2, y3 = yolo3lite_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)\r\n\r\n return Model(inputs = inputs, outputs=[y1,y2,y3])\r\n\r\n\r\ndef yolo3lite_spp_resnet50_body(inputs, num_anchors, num_classes):\r\n '''Create YOLO_v3 Lite SPP ResNet50 model CNN body in keras.'''\r\n resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)\r\n print('backbone layers number: {}'.format(len(resnet50.layers)))\r\n\r\n # input: 416 x 416 x 3\r\n # conv5_block3_out: 13 x 13 x 2048\r\n # conv4_block6_out: 26 x 26 x 1024\r\n # conv3_block4_out: 52 x 52 x 512\r\n\r\n # f1 :13 x 13 x 2048\r\n f1 = resnet50.get_layer('conv5_block3_out').output\r\n # f2: 26 x 26 x 1024\r\n f2 = resnet50.get_layer('conv4_block6_out').output\r\n # f3 : 52 x 52 x 512\r\n f3 = resnet50.get_layer('conv3_block4_out').output\r\n\r\n f1_channel_num = 1024\r\n f2_channel_num = 512\r\n f3_channel_num = 256\r\n\r\n y1, y2, y3 = yolo3lite_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes, use_spp=True)\r\n\r\n return Model(inputs = inputs, outputs=[y1,y2,y3])\r\n\r\n\r\ndef tiny_yolo3_resnet50_body(inputs, num_anchors, num_classes):\r\n '''Create Tiny YOLO_v3 ResNet50 model CNN body in keras.'''\r\n resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)\r\n print('backbone layers number: 
{}'.format(len(resnet50.layers)))\r\n\r\n # input: 416 x 416 x 3\r\n # conv5_block3_out: 13 x 13 x 2048\r\n # conv4_block6_out: 26 x 26 x 1024\r\n # conv3_block4_out: 52 x 52 x 512\r\n\r\n # f1 :13 x 13 x 2048\r\n f1 = resnet50.get_layer('conv5_block3_out').output\r\n # f2: 26 x 26 x 1024\r\n f2 = resnet50.get_layer('conv4_block6_out').output\r\n\r\n f1_channel_num = 1024\r\n f2_channel_num = 512\r\n\r\n y1, y2 = tiny_yolo3_predictions((f1, f2), (f1_channel_num, f2_channel_num), num_anchors, num_classes)\r\n\r\n return Model(inputs, [y1,y2])\r\n\r\n\r\ndef tiny_yolo3lite_resnet50_body(inputs, num_anchors, num_classes):\r\n '''Create Tiny YOLO_v3 Lite ResNet50 model CNN body in keras.'''\r\n resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)\r\n print('backbone layers number: {}'.format(len(resnet50.layers)))\r\n\r\n # input: 416 x 416 x 3\r\n # conv5_block3_out: 13 x 13 x 2048\r\n # conv4_block6_out: 26 x 26 x 1024\r\n # conv3_block4_out: 52 x 52 x 512\r\n\r\n # f1 :13 x 13 x 2048\r\n f1 = resnet50.get_layer('conv5_block3_out').output\r\n # f2: 26 x 26 x 1024\r\n f2 = resnet50.get_layer('conv4_block6_out').output\r\n\r\n f1_channel_num = 1024\r\n f2_channel_num = 512\r\n\r\n y1, y2 = tiny_yolo3lite_predictions((f1, f2), (f1_channel_num, f2_channel_num), num_anchors, num_classes)\r\n\r\n return Model(inputs, [y1,y2])\r\n\r\n"
] | [
[
"tensorflow.keras.models.Model",
"tensorflow.keras.applications.resnet.ResNet50"
]
] |
ruthlorenz/ESMValTool | [
"c3c61b5341037d01c776c3524c0dd4c767507a3d"
] | [
"esmvaltool/diag_scripts/ocean/diagnostic_profiles.py"
] | [
"\"\"\"\nDiagnostic:\n\nDiagnostic to produce images of the profile over time from a cube.\nThese plost show cube value (ie temperature) on the x-axis, and depth/height\non the y axis. The colour scale is the annual mean of the cube data.\n\nNote that this diagnostic assumes that the preprocessors do the bulk of the\nhard work, and that the cube received by this diagnostic (via the settings.yml\nand metadata.yml files) has a time component, and depth component, but no\nlatitude or longitude coordinates.\n\nAn approproate preprocessor for a 3D+time field would be:\npreprocessors:\n prep_profile:\n extract_volume:\n long1: 0.\n long2: 20.\n lat1: -30.\n lat2: 30.\n z_min: 0.\n z_max: 3000.\n average_region:\n coord1: longitude\n coord2: latitude\n\nThis tool is part of the ocean diagnostic tools package in the ESMValTool.\n\nAuthor: Lee de Mora (PML)\n [email protected]\n\"\"\"\nimport logging\nimport os\nimport sys\nimport matplotlib\nmatplotlib.use('Agg') # noqa\nimport matplotlib.pyplot as plt\n\nimport iris\nimport iris.quickplot as qplt\n\nimport diagnostic_tools as diagtools\nfrom esmvaltool.diag_scripts.shared import run_diagnostic\n\n# This part sends debug statements to stdout\nlogger = logging.getLogger(os.path.basename(__file__))\nlogging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\n\n\ndef determine_profiles_str(cube):\n \"\"\"\n Determine a string from the cube, to describe the profile.\n\n Used in image titles, descriptions and filenames.\n \"\"\"\n options = ['latitude', 'longitude']\n for option in options:\n coord = cube.coord(option)\n if len(coord.points) > 1:\n continue\n value = coord.points.mean()\n if option == 'latitude':\n return str(value) + ' N'\n if option == 'longitude':\n if value > 180.:\n return str(value - 360.) 
+ ' W'\n return str(value) + ' E'\n return ''\n\n\ndef make_profiles_plots(\n cfg,\n metadata,\n filename,\n):\n \"\"\"\n Make a simple profile plot for an individual model.\n\n The cfg is the opened global config,\n metadata is the metadata dictionairy\n filename is the preprocessing model file.\n \"\"\"\n # Load cube and set up units\n cube = iris.load_cube(filename)\n cube = diagtools.bgc_units(cube, metadata['short_name'])\n\n # Make annual means from:\n cube = cube.aggregated_by('year', iris.analysis.MEAN)\n\n # Is this data is a multi-model dataset?\n multi_model = metadata['dataset'].find('MultiModel') > -1\n\n #\n times = cube.coord('time')\n times_float = diagtools.timecoord_to_float(times)\n time_0 = times_float[0]\n\n cmap = plt.cm.get_cmap('jet')\n\n plot_details = {}\n for time_index, time in enumerate(times_float):\n\n color = cmap((time - time_0) / (times_float[-1] - time_0))\n\n qplt.plot(cube[time_index, :], cube[time_index, :].coord('depth'),\n c=color)\n\n plot_details[time_index] = {'c': color, 'ls': '-', 'lw': 1,\n 'label': str(int(time))}\n\n # Add title to plot\n title = ' '.join([\n metadata['dataset'],\n metadata['long_name'],\n ])\n plt.title(title)\n\n # Add Legend outside right.\n diagtools.add_legend_outside_right(plot_details, plt.gca())\n\n # Load image format extention\n image_extention = diagtools.get_image_format(cfg)\n\n # Determine image filename:\n if multi_model:\n path = diagtools.folder(\n cfg['plot_dir']) + os.path.basename(filename).replace(\n '.nc', '_profile' + image_extention)\n else:\n path = diagtools.get_image_path(\n cfg,\n metadata,\n suffix='profile' + image_extention,\n )\n\n # Saving files:\n if cfg['write_plots']:\n logger.info('Saving plots to %s', path)\n plt.savefig(path)\n\n plt.close()\n\n\ndef main(cfg):\n \"\"\"\n Load the config file, and send it to the plot maker.\n\n The cfg is the opened global config.\n \"\"\"\n for index, metadata_filename in enumerate(cfg['input_files']):\n logger.info(\n 'metadata filename:\\t%s',\n metadata_filename\n )\n\n metadatas = diagtools.get_input_files(cfg, index=index)\n for filename in sorted(metadatas.keys()):\n\n logger.info('-----------------')\n logger.info(\n 'model filenames:\\t%s',\n filename,\n )\n\n ######\n # Time series of individual model\n make_profiles_plots(cfg, metadatas[filename], filename)\n\n logger.info('Success')\n\n\nif __name__ == '__main__':\n with run_diagnostic() as config:\n main(config)\n"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.title",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.close",
"matplotlib.use"
]
] |
vlad17/BlitzML | [
"f13e089acf7435416bec17e87e5b3130426fc2cd"
] | [
"test/python/test_problem_options.py"
] | [
"import unittest\nimport blitzml\nimport numpy as np\n\nfrom common import captured_output\n\nclass TestProblemOptions(unittest.TestCase):\n def setUp(self):\n A = np.arange(20).reshape(5, 4)\n b = np.arange(5).astype(np.float64)\n self.prob = blitzml.LassoProblem(A, b)\n\n def tearDown(self):\n del self.prob\n\n def test_min_time(self):\n self.assertLessEqual(self.prob._min_time, 0.)\n self.prob._min_time = 2.0\n self.assertEqual(self.prob._min_time, 2.0)\n\n def test_max_time(self):\n self.assertGreaterEqual(self.prob._max_time, 3600.)\n self.prob._max_time = 5.0\n self.assertEqual(self.prob._max_time, 5.0)\n\n def test_max_iterations(self):\n self.assertGreaterEqual(self.prob._max_iterations, 100)\n self.prob._max_iterations = 10\n self.assertEqual(self.prob._max_iterations, 10)\n\n def test_tolerance(self):\n self.assertGreater(self.prob._stopping_tolerance, 0.)\n self.prob._stopping_tolerance = 0.\n self.assertEqual(self.prob._stopping_tolerance, 0.)\n self.prob._stopping_tolerance = 0.1\n self.assertEqual(self.prob._stopping_tolerance, 0.1)\n\n def test_verbose(self):\n self.assertEqual(self.prob._verbose, False)\n self.prob._verbose = True\n self.assertEqual(self.prob._verbose, True)\n\n def test_use_screening(self):\n self.assertEqual(self.prob._use_screening, True)\n self.prob._use_screening = False\n self.assertEqual(self.prob._use_screening, False)\n\n def test_use_working_sets(self):\n self.assertEqual(self.prob._use_working_sets, True)\n self.prob._use_working_sets = False\n self.assertEqual(self.prob._use_working_sets, False)\n\n def test_suppress_warnings(self):\n bad_log_dir = \"path/to/bad_log/dir/zxc8aj3n\"\n with captured_output() as out:\n self.prob.solve(self.prob.compute_max_l1_penalty(),\n log_directory=bad_log_dir)\n self.assertIn(\"Warning\", out[0])\n\n blitzml.suppress_warnings()\n\n with captured_output() as out:\n self.prob.solve(self.prob.compute_max_l1_penalty(),\n log_directory=bad_log_dir)\n self.assertNotIn(\"Warning\", out[0])\n\n blitzml.unsuppress_warnings()\n\n"
] | [
[
"numpy.arange"
]
] |
5joono/Swin-Transformer | [
"b5b7e85aa11ad72b2bec2d458fa78066e4c3d0f2"
] | [
"multiprune_plusone/multiprune_plusone.py"
] | [
"import os\nimport numpy as np\nimport pandas as pd\nos.environ['MKL_THREADING_LAYER'] = 'GNU'\n\n# df = pd.DataFrame(columns=['multiprune', 'headstr', 'pluslayer', 'plushead', 'acc1'])\n# df.to_csv(\"multiprune_plusone.csv\",index=False)\n\nprevheadlist = [set([7]),set([11]),set([0]),set([7]),set([9]),set([9])]\nplusheadlist = [set(range(12))-{7},set(range(12))-{11},set(range(12))-{0},set(range(12))-{7},set(range(12))-{9},set(range(12))-{9}]\n\nfor multiprune in range(1,12):\n \n headstr = []\n for oneset in prevheadlist:\n setstr = [str(int(s)) for s in oneset]\n setstr = '+'.join(setstr)\n headstr.append(setstr)\n headstr = '.'.join(headstr)\n \n for pluslayer in range(6):\n for plushead in plusheadlist[pluslayer]:\n os.system(f'python -m torch.distributed.launch --nproc_per_node 1 --master_port 12345 main.py --eval --cfg configs/swin_tiny_patch4_window7_224.yaml --resume swin_tiny_patch4_window7_224.pth --data-path data/imagenet/ --prune {multiprune}_{headstr}_{pluslayer}_{plushead}')\n \n df = pd.read_csv(\"multiprune_plusone.csv\")\n df = df[(df.multiprune == multiprune) & (df.pluslayer == pluslayer)]\n df = df.apply(pd.to_numeric, errors = 'coerce')\n max_acc1_idx = df.idxmax().acc1\n plusheadlist[pluslayer].remove(df.loc[max_acc1_idx].plushead)\n prevheadlist[pluslayer].add(df.loc[max_acc1_idx].plushead)\n"
] | [
[
"pandas.read_csv"
]
] |
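The `multiprune_plusone.py` script above implements a greedy forward selection over attention heads: each round it launches an evaluation run for every remaining candidate head per layer, reads `acc1` back from the CSV, and permanently keeps the best head. A generic sketch of that selection loop, with a stand-in `score_fn` replacing the subprocess-plus-CSV round trip (both names are hypothetical):

```python
def greedy_forward_select(kept, candidates, score_fn, rounds):
    """Each round, score kept | {h} for every remaining candidate h and
    permanently keep the best one, mirroring the CSV-driven head
    selection in the script above (score_fn stands in for the acc1
    lookup)."""
    for _ in range(rounds):
        best = max(candidates, key=lambda h: score_fn(kept | {h}))
        candidates.discard(best)
        kept.add(best)
    return kept

# toy scoring: prefer the set with the largest head-index sum
print(greedy_forward_select({7}, set(range(12)) - {7}, sum, rounds=3))
```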
duanzhiihao/mycv | [
"184b52f7a5c1b6f603122d4f4050952b65ba0ead"
] | [
"mycv/train.py"
] | [
"from mycv.utils.general import disable_multithreads\ndisable_multithreads()\nimport os\nfrom pathlib import Path\nimport argparse\nfrom tqdm import tqdm\nimport math\nimport torch\nimport torch.cuda.amp as amp\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nimport wandb\n\nfrom mycv.utils.general import increment_dir\nfrom mycv.utils.torch_utils import set_random_seeds, ModelEMA\nfrom mycv.datasets.imagenet import ImageNetCls, imagenet_val\n\n\ndef cal_acc(p: torch.Tensor, labels: torch.LongTensor):\n assert not p.requires_grad and p.device == labels.device\n assert p.dim() == 2 and p.shape[0] == labels.shape[0]\n _, p_cls = torch.max(p, dim=1)\n tp = (p_cls == labels)\n acc = tp.sum() / len(tp)\n return acc\n\n\ndef train():\n # ====== set the run settings ======\n parser = argparse.ArgumentParser()\n parser.add_argument('--project', type=str, default='imagenet')\n parser.add_argument('--group', type=str, default='mini200')\n parser.add_argument('--model', type=str, default='csp_s')\n parser.add_argument('--resume', type=str, default='')\n parser.add_argument('--batch_size', type=int, default=128)\n parser.add_argument('--amp', type=bool, default=True)\n parser.add_argument('--ema', type=bool, default=True)\n parser.add_argument('--optimizer', type=str, default='SGD', choices=['Adam', 'SGD'])\n parser.add_argument('--epochs', type=int, default=100)\n parser.add_argument('--metric', type=str, default='top1', choices=['top1'])\n parser.add_argument('--device', type=int, default=0)\n parser.add_argument('--workers', type=int, default=4)\n parser.add_argument('--local_rank', type=int, default=-1, help='DDP arg, do not modify')\n parser.add_argument('--wbmode', action='store_true')\n cfg = parser.parse_args()\n # model\n cfg.img_size = 224\n cfg.input_norm = False\n cfg.sync_bn = False\n # optimizer\n cfg.lr = 0.01\n cfg.momentum = 0.9\n cfg.weight_decay = 0.0001\n cfg.nesterov = False\n # lr scheduler\n cfg.lrf = 0.2 # min lr factor\n cfg.lr_warmup_epochs = 1\n # EMA\n # cfg.ema_decay = 0.999\n cfg.ema_warmup_epochs = 4\n # Main process\n IS_MAIN = (cfg.local_rank in [-1, 0])\n\n # check arguments\n metric: str = cfg.metric.lower()\n epochs: int = cfg.epochs\n local_rank: int = cfg.local_rank\n world_size: int = int(os.environ.get('WORLD_SIZE', 1))\n assert local_rank == int(os.environ.get('RANK', -1)), 'Only support single node'\n assert cfg.batch_size % world_size == 0, 'batch_size must be multiple of device count'\n batch_size: int = cfg.batch_size // world_size\n if IS_MAIN:\n print(cfg, '\\n')\n print('Batch size on each single GPU =', batch_size, '\\n')\n # fix random seeds for reproducibility\n set_random_seeds(1)\n torch.backends.cudnn.benchmark = True\n # device setting\n assert torch.cuda.is_available()\n if local_rank == -1: # Single GPU\n device = torch.device(f'cuda:{cfg.device}')\n else: # DDP mode\n assert torch.cuda.device_count() > local_rank and torch.distributed.is_available()\n torch.cuda.set_device(local_rank)\n device = torch.device('cuda', local_rank)\n torch.distributed.init_process_group(\n backend='nccl', init_method='env://', world_size=world_size, rank=local_rank\n )\n print(f'Local rank: {local_rank}, using device {device}:', 'device property:',\n torch.cuda.get_device_properties(device))\n\n # Dataset\n if IS_MAIN:\n print('Initializing Datasets and Dataloaders...')\n if cfg.group == 'default':\n train_split = 'train'\n val_split = 'val'\n cfg.num_class = 1000\n elif cfg.group == 'mini200':\n 
train_split = 'train200_600'\n val_split = 'val200_600'\n cfg.num_class = 200\n else:\n raise ValueError()\n # training set\n trainset = ImageNetCls(train_split, img_size=cfg.img_size, input_norm=cfg.input_norm)\n sampler = torch.utils.data.distributed.DistributedSampler(\n trainset, num_replicas=world_size, rank=local_rank, shuffle=True\n ) if local_rank != -1 else None\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=batch_size, shuffle=(sampler is None), sampler=sampler,\n num_workers=cfg.workers, pin_memory=True\n )\n # test set\n testloader = torch.utils.data.DataLoader(\n ImageNetCls(split=val_split, img_size=cfg.img_size, input_norm=cfg.input_norm),\n batch_size=batch_size, shuffle=False, num_workers=cfg.workers//2,\n pin_memory=True, drop_last=False\n )\n\n # Initialize model\n if cfg.model == 'res50':\n from mycv.models.cls.resnet import resnet50\n model = resnet50(num_classes=cfg.num_class)\n elif cfg.model == 'res101':\n from mycv.models.cls.resnet import resnet101\n model = resnet101(num_classes=cfg.num_class)\n elif cfg.model.startswith('yolov5'):\n from mycv.models.yolov5.cls import YOLOv5Cls\n assert cfg.model[-1] in ['s', 'm', 'l']\n model = YOLOv5Cls(model=cfg.model[-1], num_class=cfg.num_class)\n elif cfg.model.startswith('csp'):\n from mycv.models.yolov5.cls import CSP\n assert cfg.model[-1] in ['s', 'm', 'l']\n model = CSP(model=cfg.model[-1], num_class=cfg.num_class)\n else:\n raise NotImplementedError()\n model = model.to(device)\n # loss function\n loss_func = torch.nn.CrossEntropyLoss(reduction='mean')\n\n # different optimization setting for different layers\n pgb, pgw = [], []\n for k, v in model.named_parameters():\n if ('.bn' in k) or ('.bias' in k): # batchnorm or bias\n pgb.append(v)\n else: # conv weights\n assert '.weight' in k\n pgw.append(v)\n parameters = [\n {'params': pgb, 'lr': cfg.lr, 'weight_decay': 0.0},\n {'params': pgw, 'lr': cfg.lr, 'weight_decay': cfg.weight_decay}\n ]\n if IS_MAIN:\n print('Parameter groups:', [len(pg['params']) for pg in parameters])\n del pgb, pgw\n\n # optimizer\n if cfg.optimizer == 'SGD':\n optimizer = torch.optim.SGD(parameters, lr=cfg.lr,\n momentum=cfg.momentum, nesterov=cfg.nesterov)\n elif cfg.optimizer == 'Adam':\n optimizer = torch.optim.Adam(parameters, lr=cfg.lr)\n else:\n raise ValueError()\n # AMP\n scaler = amp.GradScaler(enabled=cfg.amp)\n\n log_parent = Path(f'runs/{cfg.project}')\n wb_id = None\n results = {metric: 0}\n if cfg.resume:\n # resume\n run_name = cfg.resume\n log_dir = log_parent / run_name\n assert log_dir.is_dir()\n checkpoint = torch.load(log_dir / 'last.pt')\n model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n scaler.load_state_dict(checkpoint['scaler'])\n start_epoch = checkpoint['epoch'] + 1\n cur_fitness = best_fitness = checkpoint.get(metric, 0)\n if IS_MAIN:\n wb_id = open(log_dir / 'wandb_id.txt', 'r').read()\n else:\n # new experiment\n run_name = increment_dir(dir_root=log_parent, name=cfg.model)\n log_dir = log_parent / run_name # wandb logging dir\n if IS_MAIN:\n os.makedirs(log_dir, exist_ok=False)\n print(str(model), file=open(log_dir / 'model.txt', 'w'))\n start_epoch = 0\n cur_fitness = best_fitness = 0\n\n # initialize wandb\n if IS_MAIN:\n wbrun = wandb.init(project=cfg.project, group=cfg.group, name=run_name, config=cfg,\n dir='runs/', resume='allow', id=wb_id, mode=cfg.wbmode)\n cfg = wbrun.config\n cfg.log_dir = log_dir\n cfg.wandb_id = wbrun.id\n if not (log_dir / 'wandb_id.txt').exists():\n with 
open(log_dir / 'wandb_id.txt', 'w') as f:\n f.write(wbrun.id)\n else:\n wbrun = None\n\n # lr scheduler\n def warmup_cosine(x):\n warmup_iter = cfg.lr_warmup_epochs * len(trainloader)\n if x < warmup_iter:\n factor = x / warmup_iter\n else:\n _cur = x - warmup_iter + 1\n _total = epochs * len(trainloader)\n factor = cfg.lrf + 0.5 * (1 - cfg.lrf) * (1 + math.cos(_cur * math.pi / _total))\n return factor\n scheduler = LambdaLR(optimizer, lr_lambda=warmup_cosine, last_epoch=start_epoch - 1)\n\n # SyncBatchNorm\n if local_rank != -1 and cfg.sync_bn:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n\n # Exponential moving average\n if IS_MAIN and cfg.ema:\n emas = [\n ModelEMA(model, decay=0.99),\n ModelEMA(model, decay=0.999),\n ModelEMA(model, decay=0.9999)\n ]\n for ema in emas:\n ema.updates = start_epoch * len(trainloader) # set EMA updates\n ema.warmup = cfg.ema_warmup_epochs * len(trainloader) # set EMA warmup\n else:\n emas = None\n\n # DDP mode\n if local_rank != -1:\n model = DDP(model, device_ids=[local_rank], output_device=local_rank)\n\n # ======================== start training ========================\n niter = s = None\n for epoch in range(start_epoch, epochs):\n model.train()\n if local_rank != -1:\n trainloader.sampler.set_epoch(epoch)\n optimizer.zero_grad()\n\n pbar = enumerate(trainloader)\n train_loss, train_acc = 0.0, 0.0\n if IS_MAIN:\n pbar_title = ('%-10s' * 6) % (\n 'Epoch', 'GPU_mem', 'lr', 'tr_loss', 'tr_acc', metric\n )\n print('\\n' + pbar_title) # title\n pbar = tqdm(pbar, total=len(trainloader))\n for i, (imgs, labels) in pbar:\n # debugging\n # if True:\n # import matplotlib.pyplot as plt\n # from mycv.datasets.food101 import CLASS_NAMES\n # for im, lbl in zip(imgs, labels):\n # im = im * trainset._input_std + trainset._input_mean\n # im = im.permute(1,2,0).numpy()\n # print(CLASS_NAMES[lbl])\n # plt.imshow(im); plt.show()\n imgs = imgs.to(device=device)\n labels = labels.to(device=device)\n\n # forward\n with amp.autocast(enabled=cfg.amp):\n p = model(imgs)\n loss = loss_func(p, labels) * imgs.shape[0]\n if local_rank != -1:\n loss = loss * world_size\n # loss is averaged within image, sumed over batch, and sumed over gpus\n # backward, update\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n optimizer.zero_grad()\n if emas:\n for ema in emas:\n ema.update(model)\n # Scheduler\n scheduler.step()\n\n # logging\n if IS_MAIN:\n niter = epoch * len(trainloader) + i\n cur_lr = optimizer.param_groups[0]['lr']\n loss = loss.detach().cpu().item()\n acc = cal_acc(p.detach(), labels)\n train_loss = (train_loss*i + loss) / (i+1)\n train_acc = (train_acc*i + acc) / (i+1)\n mem = torch.cuda.max_memory_allocated(device) / 1e9\n s = ('%-10s' * 2 + '%-10.4g' * 4) % (\n f'{epoch}/{epochs-1}', f'{mem:.3g}G',\n cur_lr, train_loss, 100*train_acc, 100*cur_fitness\n )\n pbar.set_description(s)\n torch.cuda.reset_peak_memory_stats()\n # Weights & Biases logging\n if niter % 100 == 0:\n wbrun.log({\n 'general/lr': cur_lr,\n 'metric/train_loss': train_loss,\n 'metric/train_acc': train_acc,\n 'ema/n_updates': emas[0].updates if emas is not None else 0,\n 'ema0/decay': emas[0].get_decay() if emas is not None else 0,\n 'ema1/decay': emas[1].get_decay() if emas is not None else 0,\n 'ema2/decay': emas[2].get_decay() if emas is not None else 0,\n }, step=niter)\n # logging end\n # ----Mini batch end\n # ----Epoch end\n # If DDP mode, synchronize model parameters on all gpus\n if local_rank != -1:\n 
model._sync_params_and_buffers(authoritative_rank=0)\n\n # Evaluation\n if IS_MAIN:\n # results is like {'top1': xxx, 'top5': xxx}\n _log_dic = {'general/epoch': epoch}\n results = imagenet_val(model, split=val_split, testloader=testloader)\n _log_dic.update({'metric/plain_val_'+k: v for k,v in results.items()})\n\n res_emas = torch.zeros(len(emas))\n if emas is not None:\n for ei, ema in enumerate(emas):\n results = imagenet_val(ema.ema, split=val_split, testloader=testloader)\n _log_dic.update({f'metric/ema{ei}_val_'+k: v for k,v in results.items()})\n res_emas[ei] = results[metric]\n # select best result among all emas\n _idx = torch.argmax(res_emas)\n cur_fitness = res_emas[_idx]\n _save_model = emas[_idx].ema\n best_decay = emas[_idx].final_decay\n else:\n cur_fitness = results[metric]\n _save_model = model\n best_decay = 0\n # wandb log\n wbrun.log(_log_dic, step=niter)\n # Write evaluation results\n res = s + '||' + '%10.4g' * 1 % (results[metric])\n with open(log_dir / 'results.txt', 'a') as f:\n f.write(res + '\\n')\n # save last checkpoint\n checkpoint = {\n 'model' : _save_model.state_dict(),\n 'optimizer' : optimizer.state_dict(),\n 'scaler' : scaler.state_dict(),\n 'epoch' : epoch,\n metric : cur_fitness,\n 'best_decay': best_decay\n }\n torch.save(checkpoint, log_dir / 'last.pt')\n # save best checkpoint\n if cur_fitness > best_fitness:\n best_fitness = cur_fitness\n torch.save(checkpoint, log_dir / 'best.pt')\n del checkpoint\n # ----Epoch end\n # ----Training end\n\n\nif __name__ == '__main__':\n train()\n\n # from mycv.models.cls.resnet import resnet50\n # model = resnet50(num_classes=1000)\n # weights = torch.load('weights/resnet50-19c8e357.pth')\n # model.load_state_dict(weights)\n # model = model.cuda()\n # model.eval()\n # results = imagenet_val(model, img_size=224, batch_size=64, workers=4)\n # print(results['top1'])\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.cuda.is_available",
"torch.max",
"torch.cuda.amp.GradScaler",
"torch.distributed.init_process_group",
"torch.distributed.is_available",
"torch.save",
"torch.cuda.device_count",
"torch.optim.Adam",
"torch.cuda.reset_peak_memory_stats",
"torch.cuda.max_memory_allocated",
"torch.device",
"torch.cuda.set_device",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm",
"torch.optim.SGD",
"torch.load",
"torch.argmax",
"torch.optim.lr_scheduler.LambdaLR",
"torch.nn.parallel.DistributedDataParallel",
"torch.utils.data.distributed.DistributedSampler",
"torch.cuda.get_device_properties",
"torch.nn.CrossEntropyLoss",
"torch.cuda.amp.autocast"
]
] |
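The `warmup_cosine` lambda in `mycv/train.py` above ramps the learning-rate multiplier linearly from 0 to 1 over the warmup iterations, then decays it along a cosine toward the floor `cfg.lrf`. A standalone sketch of that multiplier (the function name is illustrative, not from the source):

```python
import math

def warmup_cosine_factor(step, warmup_iters, total_iters, lrf=0.2):
    """LR multiplier matching the LambdaLR schedule above: linear
    warmup to 1.0, then cosine decay toward the floor lrf."""
    if step < warmup_iters:
        return step / warmup_iters
    cur = step - warmup_iters + 1
    return lrf + 0.5 * (1 - lrf) * (1 + math.cos(cur * math.pi / total_iters))

# ~0 at the first step, ~1.0 right after warmup, ~lrf at the last step
print(warmup_cosine_factor(0, 500, 50000),
      warmup_cosine_factor(500, 500, 50000),
      warmup_cosine_factor(49999, 500, 50000))
```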
geophysics-ubonn/crtomo_tools | [
"a01b4d31d7250bc729605ae4dc035f108168128e"
] | [
"examples/01_modelling/plot_06_synthetic_4d.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nGenerating a 4D synthetic data set with noise.\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nA 2D space, time and frequency data set is generated for testing purposes in\nreda.\n\"\"\"\n###############################################################################\n# imports\nimport os\nfrom glob import glob\n\nimport numpy as np\n\nimport crtomo\nimport reda\n\n###############################################################################\n# Generate the forward models\nfrequencies = np.logspace(-3, 3, 5)\ngrid = crtomo.crt_grid(\n 'data_synthetic_4d/elem.dat', 'data_synthetic_4d/elec.dat'\n)\n\n# this context manager makes sure that all output is relative to the given\n# directory\nwith reda.CreateEnterDirectory('output_synthetic_4d'):\n for nr, anomaly_z_pos in enumerate(range(0, -10, -3)):\n outdir = 'modV_{:02}'.format(nr)\n if os.path.isdir(outdir):\n continue\n sinv = crtomo.eitMan(grid=grid, frequencies=frequencies)\n sinv.add_homogeneous_model(100, 0)\n sinv.set_area_to_single_colecole(\n 18, 22, anomaly_z_pos -2.0, anomaly_z_pos,\n [100, 0.1, 0.04, 0.6]\n )\n r = sinv.plot_forward_models()\n r['rmag']['fig'].savefig('forward_rmag_{:02}.pdf'.format(nr))\n r['rpha']['fig'].savefig('forward_rpha_{:02}.pdf'.format(nr))\n for f, td in sinv.tds.items():\n td.configs.gen_dipole_dipole(skipc=0, nr_voltage_dipoles=40)\n td.configs.gen_reciprocals(append=True)\n r = sinv.measurements()\n\n sinv.save_measurements_to_directory(outdir)\n\n # plot pseudosections\n Vdirs = sorted(glob('modV*'))\n for nr, Vdir in enumerate(Vdirs):\n seit = reda.sEIT()\n seit.import_crtomo(Vdir)\n seit.compute_K_analytical(spacing=1)\n seit.plot_pseudosections(\n 'r', return_fig=True\n ).savefig('ps_r_{:02}.jpg'.format(nr), dpi=300)\n seit.plot_pseudosections(\n 'rho_a', return_fig=True\n ).savefig('ps_rho_a_{:02}.jpg'.format(nr), dpi=300)\n seit.plot_pseudosections(\n 'rpha', return_fig=True\n ).savefig('ps_rpha_{:02}.jpg'.format(nr), dpi=300)\n\n\n###############################################################################\n# now generate noisy data\n\n# this context manager makes sure that all output is relative to the given\n# directory\nwith reda.CreateEnterDirectory('output_synthetic_4d'):\n Vdirs = sorted(glob('modV*'))\n for nr, Vdir in enumerate(Vdirs):\n seit = reda.sEIT()\n seit.import_crtomo(Vdir)\n seit.compute_K_analytical(spacing=1)\n # use different seeds for different time steps\n np.random.seed(34 + nr)\n noise = np.random.normal(loc=0, scale=1, size=seit.data.shape[0])\n r_save = seit.data['r'].values.copy()\n seit.data['r'] = r_save + noise * r_save / 8000.0 * np.log(seit.data['k'])\n seit.data['rho_a'] = seit.data['r'] * seit.data['k']\n seit.plot_pseudosections(\n 'rho_a', return_fig=True\n ).savefig('noisy_ps_rho_a_{:02}.jpg'.format(nr), dpi=300)\n rpha_save = seit.data['rpha'].values.copy()\n noise_rpha = np.random.normal(loc=0, scale=1, size=seit.data.shape[0])\n seit.data['rpha'] = rpha_save + noise_rpha * rpha_save / 10.0\n seit.plot_pseudosections(\n 'rpha', return_fig=True\n ).savefig('ps_rpha_{:02}.jpg'.format(nr), dpi=300)\n seit.export_to_crtomo_multi_frequency(Vdir + '_noisy')\n"
] | [
[
"numpy.random.normal",
"numpy.log",
"numpy.random.seed",
"numpy.logspace"
]
] |
singh-karanpal/Capstone | [
"807ca3f70276a0dd17244a123a759a914d358424"
] | [
"src/models/model_evaluate.py"
] | [
"# author: Carlina Kim, Karanpal Singh, Sukriti Trehan, Victor Cuspinera\n# date: 2020-06-21\n\n'''This script will read the saved theme/subtheme model(s), padded validation sets and y validation sets for model evaluation, \nand will save the evaluation results in the specified directory.\n\nThere are 2 parameters Input Path and Output Path where you want to save the evaluation results.\n\nUsage: model_evaluate.py --level='theme' --output_dir=<destination_dir_path>\n\nExample:\n python src/models/model_evaluate.py --level='theme' --output_dir=reports/\n python src/models/model_evaluate.py --level='subtheme' --output_dir=reports/\n\nOptions:\n--input_dir=<input_dir_path> Directory name for the padded documents and embeddings\n--output_dir=<destination_dir_path> Directory for saving evaluated results\n'''\n\nimport pandas as pd\nimport numpy as np\nfrom docopt import docopt\n\nfrom sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, precision_recall_curve\nimport matplotlib.pyplot as plt\n\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\nopt = docopt(__doc__)\n\nprint(\"\\n-----START: model_evaluate.py-----\\n\")\n\ndef main(level, output_dir):\n \"\"\"\n Takes the input level and calls model_evaluate class with \n output_dir as argument \n \"\"\"\n me = model_evaluate()\n me.get_evaluations(level=level, output_dir=output_dir)\n print('Thanks for your patience, the evaluation process has finished!\\n')\n print('----END: model_evaluate.py----\\n')\n return\n\nclass model_evaluate:\n # Loads data and evaluates saved theme model and subtheme models on validation set\n \n def eval_metrics(self, model_name, x_valid, y_valid, level='theme'):\n \"\"\"\n Evaluates model results on different threshold levels and produces data table/\n precision recall curves\n\n Parameters\n -----------\n model_name: (TensforFlow Saved model)\n x_valid: (pandas dataframe) dataframe with validation comments\n y_valid: (numpy array) array with labels\n level: (string) Takes value 'theme' or 'subtheme' to evaluate accordingly\n\n Returns\n -------\n Pandas DataFrame or matplotlib plot\n dataframe with evaluation metrics including precision, recall, f1 score at\n different threshold values\n \"\"\"\n pred_values = model_name.predict(x_valid)\n\n if level == 'theme':\n precision_dict = dict()\n recall_dict = dict()\n thresh_dict = dict()\n\n precision_dict[\"BiGRU + Fasttext\"], recall_dict[\"BiGRU + Fasttext\"], thresh_dict[\"BiGRU + Fasttext\"] = precision_recall_curve(y_valid.ravel(), pred_values.ravel())\n\n labels = []\n labels = list(precision_dict.keys())\n\n plt.figure()\n plt.step(recall_dict['BiGRU + Fasttext'], precision_dict['BiGRU + Fasttext'], where='post', color='orange')\n\n plt.xlabel('Recall', fontsize=18)\n plt.ylabel('Precision', fontsize=18)\n plt.axhline(y=0.743643, xmin=0, xmax=0.71, ls='--', color=\"cornflowerblue\")\n plt.axvline(x=0.705382, ymin=0, ymax=0.71, ls='--', color=\"cornflowerblue\")\n plt.ylim([0.0, 1.05])\n plt.xlim([0.0, 1.0])\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n plt.legend(labels, loc=(1.01, .79), prop=dict(size=14))\n plt.title('Precision Recall Curves for best performing model', fontsize = 18)\n plt.savefig('reports/figures/pr_curve_valid_theme.png')\n\n # PRECISION & RECALL\n predictions_results = []\n\n thresholds=[0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n\n for val in thresholds:\n pred=pred_values.copy()\n pred[pred>=val]=1\n pred[pred<val]=0\n\n accuracy = accuracy_score(y_valid, pred, normalize=True, 
sample_weight=None)\n precision = precision_score(y_valid, pred, average='micro')\n recall = recall_score(y_valid, pred, average='micro')\n f1 = f1_score(y_valid, pred, average='micro')\n case= {'Threshold': val,\n 'Accuracy': accuracy,\n 'Precision': precision,\n 'Recall': recall,\n 'F1-measure': f1}\n predictions_results.append(case)\n\n return pd.DataFrame(predictions_results)\n \n def get_evaluations(self, level, output_dir):\n \"\"\"\n Evaluates models by using eval_metrics function\n \"\"\"\n if level == 'theme':\n print(\"**Loading data**\")\n x_valid = np.load('data/interim/question1_models/advance/X_valid_padded.npy')\n y_valid = np.load('data/interim/question1_models/advance/y_valid.npy')\n print(\"**Loading the saved theme model**\")\n model = tf.keras.models.load_model('models/Theme_Model/theme_model')\n print(\"**Predicting on validation set using saved model and evaluating metrics**\")\n results = self.eval_metrics(model_name = model, x_valid = x_valid, y_valid = y_valid)\n print(\"**Saving results**\")\n results.to_csv(output_dir + '/tables/theme_tables/theme_valid_eval.csv')\n print(\"Evaluations saved to reports/\")\n\n else:\n print(\"Loading data and evaluating the subthemes model on validation set\")\n themes = ['CPD', 'CB', 'EWC', 'Exec', 'FWE',\n 'SP', 'RE', 'Sup', 'SW', 'TEPE', 'VMG', 'OTH']\n\n for label in themes:\n print(\"****Label:\", label, \"****\")\n print(\"**Loading data**\")\n x_valid = np.load('data/interim/subthemes/' + str(label) + '/X_valid_padded.npy')\n # self.x_valids.append(x_valid)\n y_valid = np.load('data/interim/subthemes/' + str(label) + '/y_valid.npy')\n # self.y_valids.append(y_valid)\n print(\"**Loading the saved subtheme model**\")\n model = tf.keras.models.load_model('models/Subtheme_Models/' + str(label).lower() + '_model')\n # self.models.append(model)\n print(\"**Predicting on validation set using saved model and evaluating metrics**\")\n results = self.eval_metrics(model_name = model, x_valid = x_valid, y_valid = y_valid, level = 'subtheme')\n print(\"**Saving results**\")\n results.to_csv(output_dir + '/tables/subtheme_tables' + str(label).lower() + '_valid_eval.csv')\n print(\"Process of subtheme\", label, \"model completed\\n\")\n print(\"Evaluations saved to reports/tables\")\n\nif __name__ == \"__main__\":\n main(opt[\"--level\"], opt[\"--output_dir\"])\n"
] | [
[
"sklearn.metrics.precision_score",
"matplotlib.pyplot.ylabel",
"tensorflow.compat.v1.keras.models.load_model",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.title",
"sklearn.metrics.f1_score",
"sklearn.metrics.recall_score",
"numpy.load",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.axhline",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.step",
"matplotlib.pyplot.ylim",
"tensorflow.compat.v1.disable_v2_behavior",
"pandas.DataFrame",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xlabel"
]
] |
ImageMarkup/isic-archive | [
"d221af3368baf3a06ecab67e69e9d0077426c8f9"
] | [
"isic_archive/models/segmentation_helpers/scikit.py"
] | [
"import collections\nimport io\nfrom typing import BinaryIO, Tuple, Union\nimport warnings\n\nimport numpy\nimport skimage.io\nimport skimage.measure\nimport skimage.morphology\nimport skimage.segmentation\nimport skimage.transform\n\nfrom .base import BaseSegmentationHelper\n\n\nclass ScikitSegmentationHelper(BaseSegmentationHelper):\n @classmethod\n def loadImage(cls, imageDataStream: Union[BinaryIO, str]) -> numpy.ndarray:\n \"\"\"\n Load an image into an RGB array.\n\n :param imageDataStream: A file-like object containing the encoded\n (JPEG, etc.) image data or a file path.\n :return: A Numpy array with the RGB image data.\n \"\"\"\n imageData = skimage.io.imread(imageDataStream, plugin='pil')\n\n if len(imageData.shape) == 1 and imageData.shape[0] > 1:\n # Some images seem to have a 2nd (or 3rd+) layer, which should be ignored\n # https://github.com/scikit-image/scikit-image/issues/2154\n # The first element within the result should be the main image\n imageData = imageData[0]\n\n if len(imageData.shape) == 3 and imageData.shape[2] == 4:\n # cv2.floodFill doesn't work correctly with array views, so copy\n imageData = imageData[:, :, :3].copy()\n return imageData\n\n @classmethod\n def writeImage(cls, image, encoding='png', width=None):\n if width is not None:\n factor = float(width) / image.shape[1]\n image = skimage.transform.rescale(image, factor)\n\n imageStream = io.BytesIO()\n with warnings.catch_warnings():\n # Ignore warnings about low contrast images, as masks are often empty\n warnings.filterwarnings('ignore', r'^.* is a low contrast image$', UserWarning)\n # The 'pil' plugin is about 40% faster than the default 'imageio' plugin\n # The 'pil' plugin uses 'format_str' as an argument, not 'format'\n skimage.io.imsave(imageStream, image, plugin='pil', format_str=encoding)\n imageStream.seek(0)\n return imageStream\n\n @classmethod\n def segment(cls, image: numpy.ndarray, seedCoord: Tuple[int, int], tolerance: int\n ) -> numpy.ndarray:\n \"\"\"\n Do a flood-fill segmentation of an image, yielding a single contiguous region with no holes.\n\n :param image: A Numpy array with the image to be segmented.\n :param seedCoord: (X, Y) coordinates of the segmentation seed point.\n :param tolerance: The intensity tolerance value for the segmentation.\n :return: The mask image of the segmented region, with values 0 or 255.\n \"\"\"\n maskImage = cls._floodFill(\n image,\n seedCoord,\n tolerance)\n\n # Now, fill in any holes in the maskImage\n # First, add a padded border, allowing the next operation to reach\n # around edge-touching components\n maskImage = numpy.pad(maskImage, 1, 'constant', constant_values=1)\n maskImageBackground = cls._floodFill(\n maskImage,\n # The seed point is a part of the padded border of maskImage\n seedCoord=(0, 0),\n # The seed point and border will have a value of 1, but we want to\n # also include the actual mask background, which has a value of 0\n tolerance=1)\n # Remove the extra padding\n maskImageBackground = maskImageBackground[1:-1, 1:-1]\n # Flip the background, to get the mask with holes removed\n maskImage = numpy.invert(maskImageBackground)\n\n return maskImage\n\n @classmethod\n def _clippedAdd(cls, array, value):\n typeInfo = numpy.iinfo(array.dtype)\n newArray = array.astype(int)\n newArray += value\n return newArray.clip(typeInfo.min, typeInfo.max).astype(array.dtype)\n\n @classmethod\n def _floodFill(\n cls, image: numpy.ndarray, seedCoord: Tuple[int, int], tolerance: int,\n connectivity: int = 8) -> numpy.ndarray:\n \"\"\"\n Segment an image 
into a region connected to a seed point, using OpenCV.\n\n :param image: The image to be segmented.\n :param seedCoord: The point inside the connected region where the\n segmentation will start.\n :param tolerance: The maximum color/intensity difference between the\n seed point and a point in the connected region.\n :param connectivity: (optional) The number of allowed connectivity\n propagation directions. Allowed values are:\n * 4 for edge pixels\n * 8 for edge and corner pixels\n :returns: A binary label mask, with an extra 1-pixel wide padded border.\n The values are either ``0`` or ``fillValue``.\n \"\"\"\n seedValue = image[seedCoord[1], seedCoord[0]]\n seedValueMin = cls._clippedAdd(seedValue, -tolerance)\n seedValueMax = cls._clippedAdd(seedValue, tolerance)\n\n if connectivity == 4:\n connectivityArg = 1\n elif connectivity == 8:\n connectivityArg = 2\n else:\n raise ValueError('Unknown connectivity value.')\n\n binaryImage = numpy.logical_and(\n image >= seedValueMin,\n image <= seedValueMax\n )\n if len(image.shape) == 3:\n # Reduce RGB components, requiring all to be within threshold\n binaryImage = numpy.all(binaryImage, 2)\n\n labelImage = skimage.measure.label(\n binaryImage.astype(int),\n return_num=False,\n connectivity=connectivityArg\n )\n del binaryImage\n\n maskImage = numpy.equal(\n labelImage, labelImage[seedCoord[1], seedCoord[0]])\n del labelImage\n maskImage = maskImage.astype(numpy.uint8) * 255\n\n return maskImage\n\n @classmethod\n def _structuringElement(cls, shape, radius, elementType=bool):\n size = (radius * 2) + 1\n\n if shape == 'circle':\n element = skimage.morphology.disk(radius, elementType)\n elif shape == 'cross':\n element = numpy.zeros((size, size), elementType)\n element[:, size // 2] = elementType(True)\n element[size // 2, :] = elementType(True)\n elif shape == 'square':\n element = skimage.morphology.square(size, elementType)\n else:\n raise ValueError('Unknown element shape value.')\n\n return element\n\n @classmethod\n def _binaryOpening(cls, image, elementShape='circle', elementRadius=5):\n element = cls._structuringElement(elementShape, elementRadius, bool)\n\n morphedImage = skimage.morphology.binary_opening(\n image=image,\n selem=element\n )\n return morphedImage\n\n @classmethod\n def _collapseCoords(cls, coords):\n collapsedCoords = [coords[0]]\n collapsedCoords.extend([\n coord\n for prevCoord, coord, nextCoord in zip(\n coords[0:], coords[1:], coords[2:])\n if numpy.cross(nextCoord - prevCoord, coord - prevCoord) != 0\n ])\n collapsedCoords.append(coords[-1])\n collapsedCoords = numpy.array(collapsedCoords)\n return collapsedCoords\n\n @classmethod\n def maskToContour(cls, maskImage: numpy.ndarray) -> numpy.ndarray:\n \"\"\"\n Extract the contour line within a segmented label mask, using Scikit-Image.\n\n :param maskImage: A binary label mask of numpy.uint8.\n :return: An array of point pairs.\n \"\"\"\n if maskImage.dtype != numpy.uint8:\n raise TypeError('maskImage must be an array of uint8.')\n\n coords = skimage.measure.find_contours(\n # TODO: threshold image more efficiently\n array=maskImage.astype(bool).astype(numpy.double),\n level=0.5,\n fully_connected='low',\n positive_orientation='low'\n )\n coords = numpy.fliplr(coords[0])\n coords = cls._collapseCoords(coords)\n return coords\n\n @classmethod\n def contourToMask(cls, imageShape: Tuple[int, int], coords: numpy.ndarray) -> numpy.ndarray:\n \"\"\"\n Convert a contour line to a label mask.\n\n :param imageShape: The [Y, X] shape of the image.\n :param coords: An array of 
point pairs.\n :return: A binary label mask of numpy.uint8.\n \"\"\"\n maskImage = skimage.measure.grid_points_in_poly(\n shape=imageShape,\n verts=numpy.fliplr(coords)\n ).astype(numpy.uint8)\n maskImage *= 255\n return maskImage\n\n @classmethod\n def _slic(cls, image, numSegments=None, segmentSize=None):\n compactness = 0.01 # make superpixels highly deformable\n maxIter = 10\n sigma = 2.0\n\n if numSegments and segmentSize:\n raise ValueError(\n 'Only one of numSegments or segmentSize may be set.')\n elif numSegments:\n pass\n elif segmentSize:\n numSegments = (image.shape[0] * image.shape[1]) / (segmentSize ** 2)\n else:\n raise ValueError('One of numSegments or segmentSize must be set.')\n\n labelImage = skimage.segmentation.slic(\n image,\n n_segments=numSegments,\n compactness=compactness,\n max_iter=maxIter,\n sigma=sigma,\n enforce_connectivity=True,\n min_size_factor=0.5,\n slic_zero=True\n )\n return labelImage\n\n class _PersistentCounter(object):\n def __init__(self):\n self.value = 0\n\n def __call__(self):\n ret = self.value\n self.value += 1\n return ret\n\n @classmethod\n def _uint64ToRGB(cls, val):\n return numpy.dstack((\n val.astype(numpy.uint8),\n (val >> numpy.uint64(8)).astype(numpy.uint8),\n (val >> numpy.uint64(16)).astype(numpy.uint8)\n ))\n\n @classmethod\n def _RGBTounit64(cls, val: numpy.ndarray) -> numpy.ndarray:\n \"\"\"\n Decode an RGB representation of a superpixel label into its native scalar value.\n\n :param val: A single pixel, or a 3-channel image.\n This is an numpy.ndarray of uint8, with a shape [3] or [n, m, 3].\n \"\"\"\n return \\\n (val[..., 0].astype(numpy.uint64)) + \\\n (val[..., 1].astype(numpy.uint64) << numpy.uint64(8)) + \\\n (val[..., 2].astype(numpy.uint64) << numpy.uint64(16))\n\n @classmethod\n def superpixels(cls, image):\n superpixelLabels = cls._slic(image, numSegments=1000)\n superpixels = cls._uint64ToRGB(superpixelLabels)\n return superpixels\n\n @classmethod\n def superpixels_legacy(cls, image, coords):\n maskImage = cls.contourToMask(image.shape[:2], coords)\n\n from .opencv import OpenCVSegmentationHelper\n # This operation is much faster in OpenCV\n maskImage = OpenCVSegmentationHelper._binaryOpening(\n maskImage.astype(numpy.uint8),\n elementShape='circle',\n elementRadius=5\n ).astype(bool)\n\n insideImage = image.copy()\n insideImage[numpy.logical_not(maskImage)] = 0\n insideSuperpixelLabels = cls._slic(insideImage, segmentSize=20)\n\n outsideImage = image.copy()\n outsideImage[maskImage] = 0\n outsideSuperpixelLabels = cls._slic(outsideImage, segmentSize=60)\n\n # https://stackoverflow.com/questions/16210738/implementation-of-numpy-in1d-for-2d-arrays\n insideSuperpixelMask = numpy.in1d(\n insideSuperpixelLabels.flat,\n numpy.unique(insideSuperpixelLabels[maskImage])\n ).reshape(insideSuperpixelLabels.shape)\n\n combinedSuperpixelLabels = outsideSuperpixelLabels.copy()\n combinedSuperpixelLabels[insideSuperpixelMask] = \\\n insideSuperpixelLabels[insideSuperpixelMask] + \\\n outsideSuperpixelLabels.max() + 10000\n\n labelValues = collections.defaultdict(cls._PersistentCounter())\n for value in numpy.nditer(combinedSuperpixelLabels,\n op_flags=['readwrite']):\n value[...] = labelValues[value.item()]\n\n combinedSuperpixels = cls._uint64ToRGB(combinedSuperpixelLabels)\n return combinedSuperpixels\n"
] | [
[
"numpy.zeros",
"numpy.invert",
"numpy.equal",
"numpy.logical_and",
"numpy.fliplr",
"numpy.cross",
"numpy.uint64",
"numpy.nditer",
"numpy.logical_not",
"numpy.iinfo",
"numpy.all",
"numpy.array",
"numpy.pad",
"numpy.unique"
]
] |
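The `_uint64ToRGB` / `_RGBTounit64` pair in the record above packs each superpixel label into the three channels of an RGB image, little-endian, eight bits per channel, so labels below 2**24 survive a round trip through an ordinary image file. A minimal sketch of that round trip; the helper names `labels_to_rgb` / `rgb_to_labels` are illustrative, not part of the recorded file:

```python
import numpy as np

def labels_to_rgb(labels: np.ndarray) -> np.ndarray:
    # Split each uint64 label little-endian into three uint8 channels,
    # mirroring _uint64ToRGB above.
    return np.dstack((
        labels.astype(np.uint8),                     # bits 0-7
        (labels >> np.uint64(8)).astype(np.uint8),   # bits 8-15
        (labels >> np.uint64(16)).astype(np.uint8),  # bits 16-23
    ))

def rgb_to_labels(rgb: np.ndarray) -> np.ndarray:
    # Inverse of labels_to_rgb, mirroring _RGBTounit64 above.
    return (rgb[..., 0].astype(np.uint64)
            + (rgb[..., 1].astype(np.uint64) << np.uint64(8))
            + (rgb[..., 2].astype(np.uint64) << np.uint64(16)))

labels = (np.arange(12, dtype=np.uint64) * 70000).reshape(3, 4)  # spans >1 byte
assert np.array_equal(rgb_to_labels(labels_to_rgb(labels)), labels)
```

The decode must widen to uint64 before shifting; shifting the uint8 channels directly would overflow anything past the low byte.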
yypurpose/mmdetection | [
"ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c"
] | [
"mmdet/models/backbones/res2net.py"
] | [
"import math\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.utils.checkpoint as cp\r\nfrom mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,\r\n kaiming_init)\r\nfrom mmcv.runner import load_checkpoint\r\nfrom torch.nn.modules.batchnorm import _BatchNorm\r\n\r\nfrom mmdet.utils import get_root_logger\r\nfrom ..builder import BACKBONES\r\nfrom .resnet import Bottleneck as _Bottleneck\r\nfrom .resnet import ResNet\r\n\r\n\r\nclass Bottle2neck(_Bottleneck):\r\n expansion = 4\r\n\r\n def __init__(self,\r\n inplanes,\r\n planes,\r\n scales=4,\r\n base_width=26,\r\n base_channels=64,\r\n stage_type='normal',\r\n **kwargs):\r\n \"\"\"Bottle2neck block for Res2Net.\r\n\r\n If style is \"pytorch\", the stride-two layer is the 3x3 conv layer, if\r\n it is \"caffe\", the stride-two layer is the first 1x1 conv layer.\r\n \"\"\"\r\n super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)\r\n assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'\r\n width = int(math.floor(self.planes * (base_width / base_channels)))\r\n\r\n self.norm1_name, norm1 = build_norm_layer(\r\n self.norm_cfg, width * scales, postfix=1)\r\n self.norm3_name, norm3 = build_norm_layer(\r\n self.norm_cfg, self.planes * self.expansion, postfix=3)\r\n\r\n self.conv1 = build_conv_layer(\r\n self.conv_cfg,\r\n self.inplanes,\r\n width * scales,\r\n kernel_size=1,\r\n stride=self.conv1_stride,\r\n bias=False)\r\n self.add_module(self.norm1_name, norm1)\r\n\r\n if stage_type == 'stage' and self.conv2_stride != 1:\r\n self.pool = nn.AvgPool2d(\r\n kernel_size=3, stride=self.conv2_stride, padding=1)\r\n convs = []\r\n bns = []\r\n\r\n fallback_on_stride = False\r\n if self.with_dcn:\r\n fallback_on_stride = self.dcn.pop('fallback_on_stride', False)\r\n if not self.with_dcn or fallback_on_stride:\r\n for i in range(scales - 1):\r\n convs.append(\r\n build_conv_layer(\r\n self.conv_cfg,\r\n width,\r\n width,\r\n kernel_size=3,\r\n stride=self.conv2_stride,\r\n padding=self.dilation,\r\n dilation=self.dilation,\r\n bias=False))\r\n bns.append(\r\n build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])\r\n self.convs = nn.ModuleList(convs)\r\n self.bns = nn.ModuleList(bns)\r\n else:\r\n assert self.conv_cfg is None, 'conv_cfg must be None for DCN'\r\n for i in range(scales - 1):\r\n convs.append(\r\n build_conv_layer(\r\n self.dcn,\r\n width,\r\n width,\r\n kernel_size=3,\r\n stride=self.conv2_stride,\r\n padding=self.dilation,\r\n dilation=self.dilation,\r\n bias=False))\r\n bns.append(\r\n build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])\r\n self.convs = nn.ModuleList(convs)\r\n self.bns = nn.ModuleList(bns)\r\n\r\n self.conv3 = build_conv_layer(\r\n self.conv_cfg,\r\n width * scales,\r\n self.planes * self.expansion,\r\n kernel_size=1,\r\n bias=False)\r\n self.add_module(self.norm3_name, norm3)\r\n\r\n self.stage_type = stage_type\r\n self.scales = scales\r\n self.width = width\r\n delattr(self, 'conv2')\r\n delattr(self, self.norm2_name)\r\n\r\n def forward(self, x):\r\n \"\"\"Forward function.\"\"\"\r\n\r\n def _inner_forward(x):\r\n identity = x\r\n\r\n out = self.conv1(x)\r\n out = self.norm1(out)\r\n out = self.relu(out)\r\n\r\n if self.with_plugins:\r\n out = self.forward_plugin(out, self.after_conv1_plugin_names)\r\n\r\n spx = torch.split(out, self.width, 1)\r\n sp = self.convs[0](spx[0].contiguous())\r\n sp = self.relu(self.bns[0](sp))\r\n out = sp\r\n for i in range(1, self.scales - 1):\r\n if self.stage_type == 'stage':\r\n sp = spx[i]\r\n else:\r\n sp = sp + 
spx[i]\r\n sp = self.convs[i](sp.contiguous())\r\n sp = self.relu(self.bns[i](sp))\r\n out = torch.cat((out, sp), 1)\r\n\r\n if self.stage_type == 'normal' or self.conv2_stride == 1:\r\n out = torch.cat((out, spx[self.scales - 1]), 1)\r\n elif self.stage_type == 'stage':\r\n out = torch.cat((out, self.pool(spx[self.scales - 1])), 1)\r\n\r\n if self.with_plugins:\r\n out = self.forward_plugin(out, self.after_conv2_plugin_names)\r\n\r\n out = self.conv3(out)\r\n out = self.norm3(out)\r\n\r\n if self.with_plugins:\r\n out = self.forward_plugin(out, self.after_conv3_plugin_names)\r\n\r\n if self.downsample is not None:\r\n identity = self.downsample(x)\r\n\r\n out += identity\r\n\r\n return out\r\n\r\n if self.with_cp and x.requires_grad:\r\n out = cp.checkpoint(_inner_forward, x)\r\n else:\r\n out = _inner_forward(x)\r\n\r\n out = self.relu(out)\r\n\r\n return out\r\n\r\n\r\nclass Res2Layer(nn.Sequential):\r\n \"\"\"Res2Layer to build Res2Net style backbone.\r\n\r\n Args:\r\n block (nn.Module): block used to build ResLayer.\r\n inplanes (int): inplanes of block.\r\n planes (int): planes of block.\r\n num_blocks (int): number of blocks.\r\n stride (int): stride of the first block. Default: 1\r\n avg_down (bool): Use AvgPool instead of stride conv when\r\n downsampling in the bottle2neck. Default: False\r\n conv_cfg (dict): dictionary to construct and config conv layer.\r\n Default: None\r\n norm_cfg (dict): dictionary to construct and config norm layer.\r\n Default: dict(type='BN')\r\n scales (int): Scales used in Res2Net. Default: 4\r\n base_width (int): Basic width of each scale. Default: 26\r\n \"\"\"\r\n\r\n def __init__(self,\r\n block,\r\n inplanes,\r\n planes,\r\n num_blocks,\r\n stride=1,\r\n avg_down=True,\r\n conv_cfg=None,\r\n norm_cfg=dict(type='BN'),\r\n scales=4,\r\n base_width=26,\r\n **kwargs):\r\n self.block = block\r\n\r\n downsample = None\r\n if stride != 1 or inplanes != planes * block.expansion:\r\n downsample = nn.Sequential(\r\n nn.AvgPool2d(\r\n kernel_size=stride,\r\n stride=stride,\r\n ceil_mode=True,\r\n count_include_pad=False),\r\n build_conv_layer(\r\n conv_cfg,\r\n inplanes,\r\n planes * block.expansion,\r\n kernel_size=1,\r\n stride=1,\r\n bias=False),\r\n build_norm_layer(norm_cfg, planes * block.expansion)[1],\r\n )\r\n\r\n layers = []\r\n layers.append(\r\n block(\r\n inplanes=inplanes,\r\n planes=planes,\r\n stride=stride,\r\n downsample=downsample,\r\n conv_cfg=conv_cfg,\r\n norm_cfg=norm_cfg,\r\n scales=scales,\r\n base_width=base_width,\r\n stage_type='stage',\r\n **kwargs))\r\n inplanes = planes * block.expansion\r\n for i in range(1, num_blocks):\r\n layers.append(\r\n block(\r\n inplanes=inplanes,\r\n planes=planes,\r\n stride=1,\r\n conv_cfg=conv_cfg,\r\n norm_cfg=norm_cfg,\r\n scales=scales,\r\n base_width=base_width,\r\n **kwargs))\r\n super(Res2Layer, self).__init__(*layers)\r\n\r\n\r\[email protected]_module()\r\nclass Res2Net(ResNet):\r\n \"\"\"Res2Net backbone.\r\n\r\n Args:\r\n scales (int): Scales used in Res2Net. Default: 4\r\n base_width (int): Basic width of each scale. Default: 26\r\n depth (int): Depth of res2net, from {50, 101, 152}.\r\n in_channels (int): Number of input image channels. Default: 3.\r\n num_stages (int): Res2net stages. Default: 4.\r\n strides (Sequence[int]): Strides of the first block of each stage.\r\n dilations (Sequence[int]): Dilation of each stage.\r\n out_indices (Sequence[int]): Output from which stages.\r\n style (str): `pytorch` or `caffe`. 
If set to \"pytorch\", the stride-two\r\n layer is the 3x3 conv layer, otherwise the stride-two layer is\r\n the first 1x1 conv layer.\r\n deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv\r\n avg_down (bool): Use AvgPool instead of stride conv when\r\n downsampling in the bottle2neck.\r\n frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\r\n -1 means not freezing any parameters.\r\n norm_cfg (dict): Dictionary to construct and config norm layer.\r\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\r\n freeze running stats (mean and var). Note: Effect on Batch Norm\r\n and its variants only.\r\n plugins (list[dict]): List of plugins for stages, each dict contains:\r\n\r\n - cfg (dict, required): Cfg dict to build plugin.\r\n - position (str, required): Position inside block to insert\r\n plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.\r\n - stages (tuple[bool], optional): Stages to apply plugin, length\r\n should be same as 'num_stages'.\r\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\r\n memory while slowing down the training speed.\r\n zero_init_residual (bool): Whether to use zero init for last norm layer\r\n in resblocks to let them behave as identity.\r\n\r\n Example:\r\n >>> from mmdet.models import Res2Net\r\n >>> import torch\r\n >>> self = Res2Net(depth=50, scales=4, base_width=26)\r\n >>> self.eval()\r\n >>> inputs = torch.rand(1, 3, 32, 32)\r\n >>> level_outputs = self.forward(inputs)\r\n >>> for level_out in level_outputs:\r\n ... print(tuple(level_out.shape))\r\n (1, 256, 8, 8)\r\n (1, 512, 4, 4)\r\n (1, 1024, 2, 2)\r\n (1, 2048, 1, 1)\r\n \"\"\"\r\n\r\n arch_settings = {\r\n 50: (Bottle2neck, (3, 4, 6, 3)),\r\n 101: (Bottle2neck, (3, 4, 23, 3)),\r\n 152: (Bottle2neck, (3, 8, 36, 3))\r\n }\r\n\r\n def __init__(self,\r\n scales=4,\r\n base_width=26,\r\n style='pytorch',\r\n deep_stem=True,\r\n avg_down=True,\r\n **kwargs):\r\n self.scales = scales\r\n self.base_width = base_width\r\n super(Res2Net, self).__init__(\r\n style='pytorch', deep_stem=True, avg_down=True, **kwargs)\r\n\r\n def make_res_layer(self, **kwargs):\r\n return Res2Layer(\r\n scales=self.scales,\r\n base_width=self.base_width,\r\n base_channels=self.base_channels,\r\n **kwargs)\r\n\r\n def init_weights(self, pretrained=None):\r\n \"\"\"Initialize the weights in backbone.\r\n\r\n Args:\r\n pretrained (str, optional): Path to pre-trained weights.\r\n Defaults to None.\r\n \"\"\"\r\n if isinstance(pretrained, str):\r\n logger = get_root_logger()\r\n load_checkpoint(self, pretrained, strict=False, logger=logger)\r\n elif pretrained is None:\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n kaiming_init(m)\r\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\r\n constant_init(m, 1)\r\n\r\n if self.dcn is not None:\r\n for m in self.modules():\r\n if isinstance(m, Bottle2neck):\r\n # dcn in Res2Net bottle2neck is in ModuleList\r\n for n in m.convs:\r\n if hasattr(n, 'conv_offset'):\r\n constant_init(n.conv_offset, 0)\r\n\r\n if self.zero_init_residual:\r\n for m in self.modules():\r\n if isinstance(m, Bottle2neck):\r\n constant_init(m.norm3, 0)\r\n else:\r\n raise TypeError('pretrained must be a str or None')\r\n"
] | [
[
"torch.utils.checkpoint.checkpoint",
"torch.split",
"torch.nn.ModuleList",
"torch.nn.AvgPool2d",
"torch.cat"
]
] |
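The heart of `Bottle2neck.forward` in the record above is the hierarchical split: the 1x1 output is divided into `scales` channel groups, each 3x3 conv receives the sum of its group and the previous conv's output, and the last group passes through untouched (the 'normal' stage type). A minimal standalone sketch of just that mixing step, assuming plain `Conv2d`/`BatchNorm2d` in place of mmcv's `build_conv_layer`/`build_norm_layer`; the class name `Res2Split` is illustrative:

```python
import torch
import torch.nn as nn

class Res2Split(nn.Module):
    """Hierarchical multi-scale mixing, as in Bottle2neck's 'normal' path."""

    def __init__(self, width: int, scales: int = 4):
        super().__init__()
        self.width, self.scales = width, scales
        # scales - 1 convs: the last channel group has no conv of its own.
        self.convs = nn.ModuleList(
            nn.Conv2d(width, width, 3, padding=1, bias=False)
            for _ in range(scales - 1))
        self.bns = nn.ModuleList(nn.BatchNorm2d(width) for _ in range(scales - 1))
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        spx = torch.split(x, self.width, dim=1)      # scales groups of `width`
        sp = self.relu(self.bns[0](self.convs[0](spx[0])))
        out = sp
        for i in range(1, self.scales - 1):
            # Each branch sees its own group plus the previous branch's output,
            # growing the effective receptive field scale by scale.
            sp = self.relu(self.bns[i](self.convs[i](sp + spx[i])))
            out = torch.cat((out, sp), dim=1)
        return torch.cat((out, spx[-1]), dim=1)      # last group: identity path

x = torch.randn(1, 4 * 26, 8, 8)                     # scales=4, width=26
assert Res2Split(width=26)(x).shape == x.shape
```

The recorded block adds the stride/pooling 'stage' variant, optional DCN convs, and plugins on top of this skeleton, but the split-add-concat loop is the part that distinguishes Res2Net from a plain ResNet bottleneck.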
untzag/WrightTools | [
"05480d2f91ceeca422d9e5ac381fce1840207cb0"
] | [
"WrightTools/data/_data.py"
] | [
"\"\"\"Central data class and associated.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport collections\nimport operator\nimport functools\nimport warnings\n\nimport numpy as np\n\nimport h5py\n\nimport scipy\nfrom scipy.interpolate import griddata, interp1d\n\nfrom .._group import Group\nfrom .. import collection as wt_collection\nfrom .. import exceptions as wt_exceptions\nfrom .. import kit as wt_kit\nfrom .. import units as wt_units\nfrom ._axis import Axis, identifier_to_operator\nfrom ._channel import Channel\nfrom ._constant import Constant\nfrom ._variable import Variable\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"Data\"]\n\n\n# --- class ---------------------------------------------------------------------------------------\n\n\nclass Data(Group):\n \"\"\"Multidimensional dataset.\"\"\"\n\n class_name = \"Data\"\n\n def __init__(self, *args, **kwargs):\n self._axes = []\n self._constants = []\n Group.__init__(self, *args, **kwargs)\n # populate axes, constants from attrs string\n for identifier in self.attrs.get(\"axes\", []):\n if hasattr(identifier, \"decode\"):\n identifier = identifier.decode()\n expression, units = identifier.split(\"{\")\n units = units.replace(\"}\", \"\").strip()\n if units == \"None\":\n units = None\n # Should not be needed for wt5 >= 1.0.3, kept for opening older wt5 files.\n for i in identifier_to_operator.keys():\n expression = expression.replace(i, identifier_to_operator[i])\n expression = expression.replace(\" \", \"\") # remove all whitespace\n axis = Axis(self, expression, units)\n self._axes.append(axis)\n for identifier in self.attrs.get(\"constants\", []):\n if hasattr(identifier, \"decode\"):\n identifier = identifier.decode()\n expression, units = identifier.split(\"{\")\n units = units.replace(\"}\", \"\").strip()\n if units == \"None\":\n units = None\n for i in identifier_to_operator.keys():\n expression = expression.replace(i, identifier_to_operator[i])\n expression = expression.replace(\" \", \"\") # remove all whitespace\n const = Constant(self, expression, units)\n self._constants.append(const)\n self._current_axis_identities_in_natural_namespace = []\n if self.file.mode is not None and self.file.mode != \"r\":\n self._on_constants_updated()\n self._on_axes_updated()\n # the following are populated if not already recorded\n self.channel_names\n self.source\n self.variable_names\n\n def __repr__(self) -> str:\n return \"<WrightTools.Data '{0}' {1} at {2}>\".format(\n self.natural_name, str(self.axis_names), \"::\".join([self.filepath, self.name])\n )\n\n @property\n def axes(self) -> tuple:\n return tuple(self._axes)\n\n @property\n def axis_expressions(self) -> tuple:\n \"\"\"Axis expressions.\"\"\"\n return tuple(a.expression for a in self._axes)\n\n @property\n def axis_names(self) -> tuple:\n \"\"\"Axis names.\"\"\"\n return tuple(a.natural_name for a in self._axes)\n\n @property\n def constants(self) -> tuple:\n return tuple(self._constants)\n\n @property\n def constant_expressions(self) -> tuple:\n \"\"\"Axis expressions.\"\"\"\n return tuple(a.expression for a in self._constants)\n\n @property\n def constant_names(self) -> tuple:\n \"\"\"Axis names.\"\"\"\n return tuple(a.natural_name for a in self._constants)\n\n @property\n def channel_names(self) -> tuple:\n \"\"\"Channel names.\"\"\"\n if \"channel_names\" not in self.attrs.keys():\n self.attrs[\"channel_names\"] = 
np.array([], dtype=\"S\")\n return tuple(s.decode() for s in self.attrs[\"channel_names\"])\n\n @channel_names.setter\n def channel_names(self, value):\n \"\"\"Set channel names.\"\"\"\n self.attrs[\"channel_names\"] = np.array(value, dtype=\"S\")\n\n @property\n def channels(self) -> tuple:\n \"\"\"Channels.\"\"\"\n return tuple(self[n] for n in self.channel_names)\n\n @property\n def datasets(self) -> tuple:\n \"\"\"Datasets.\"\"\"\n return tuple(v for _, v in self.items() if isinstance(v, h5py.Dataset))\n\n @property\n def kind(self):\n \"\"\"Kind.\"\"\"\n if \"kind\" not in self.attrs.keys():\n self.attrs[\"kind\"] = \"None\"\n value = self.attrs[\"kind\"]\n return value if not value == \"None\" else None\n\n @property\n def ndim(self) -> int:\n \"\"\"Get number of dimensions.\"\"\"\n try:\n assert self._ndim is not None\n except (AssertionError, AttributeError):\n if len(self.variables) == 0:\n self._ndim = 0\n else:\n self._ndim = self.variables[0].ndim\n finally:\n return self._ndim\n\n @property\n def shape(self) -> tuple:\n \"\"\"Shape.\"\"\"\n try:\n assert self._shape is not None\n except (AssertionError, AttributeError):\n self._shape = wt_kit.joint_shape(*self.variables)\n finally:\n return self._shape\n\n @property\n def size(self) -> int:\n \"\"\"Size.\"\"\"\n return functools.reduce(operator.mul, self.shape)\n\n @property\n def source(self):\n \"\"\"Source.\"\"\"\n if \"source\" not in self.attrs.keys():\n self.attrs[\"source\"] = \"None\"\n value = self.attrs[\"source\"]\n return value if not value == \"None\" else None\n\n @property\n def units(self) -> tuple:\n \"\"\"All axis units.\"\"\"\n return tuple(a.units for a in self._axes)\n\n @property\n def constant_units(self) -> tuple:\n \"\"\"All constant units.\"\"\"\n return tuple(a.units for a in self._constants)\n\n @property\n def variable_names(self) -> tuple:\n \"\"\"Variable names.\"\"\"\n if \"variable_names\" not in self.attrs.keys():\n self.attrs[\"variable_names\"] = np.array([], dtype=\"S\")\n return tuple(s.decode() for s in self.attrs[\"variable_names\"])\n\n @variable_names.setter\n def variable_names(self, value):\n \"\"\"Set variable names.\"\"\"\n self.attrs[\"variable_names\"] = np.array(value, dtype=\"S\")\n\n @property\n def variables(self) -> tuple:\n \"\"\"Variables.\"\"\"\n try:\n assert self._variables is not None\n except (AssertionError, AttributeError):\n self._variables = [self[n] for n in self.variable_names]\n finally:\n return tuple(self._variables)\n\n @property\n def _leaf(self):\n return \"{0} {1}\".format(self.natural_name, self.shape)\n\n def _on_axes_updated(self):\n \"\"\"Method to run when axes are changed in any way.\n\n Propagates updated axes properly.\n \"\"\"\n # update attrs\n self.attrs[\"axes\"] = np.array([a.identity.encode() for a in self._axes], dtype=\"S\")\n # remove old attributes\n while len(self._current_axis_identities_in_natural_namespace) > 0:\n key = self._current_axis_identities_in_natural_namespace.pop(0)\n try:\n delattr(self, key)\n except AttributeError:\n pass # already gone\n # populate new attributes\n for a in self._axes:\n key = a.natural_name\n setattr(self, key, a)\n self._current_axis_identities_in_natural_namespace.append(key)\n\n def _on_constants_updated(self):\n \"\"\"Method to run when constants are changed in any way.\n\n Propagates updated constants properly.\n \"\"\"\n # update attrs\n self.attrs[\"constants\"] = np.array(\n [a.identity.encode() for a in self._constants], dtype=\"S\"\n )\n\n def _print_branch(self, prefix, depth, verbose):\n 
def print_leaves(prefix, lis, vline=True):\n for i, item in enumerate(lis):\n if vline:\n a = \"│ \"\n else:\n a = \" \"\n if i + 1 == len(lis):\n b = \"└── \"\n else:\n b = \"├── \"\n s = prefix + a + b + \"{0}: {1}\".format(i, item._leaf)\n print(s)\n\n if verbose:\n # axes\n print(prefix + \"├── axes\")\n print_leaves(prefix, self.axes)\n # constants\n print(prefix + \"├── constants\")\n print_leaves(prefix, self.constants)\n # variables\n print(prefix + \"├── variables\")\n print_leaves(prefix, self.variables)\n # channels\n print(prefix + \"└── channels\")\n print_leaves(prefix, self.channels, vline=False)\n else:\n # axes\n s = \"axes: \"\n s += \", \".join([\"{0} ({1})\".format(a.expression, a.units) for a in self.axes])\n print(prefix + \"├── \" + s)\n # constants\n s = \"constants: \"\n s += \", \".join(\n [\"{0} ({1} {2})\".format(a.expression, a.value, a.units) for a in self.constants]\n )\n print(prefix + \"├── \" + s)\n # channels\n s = \"channels: \"\n s += \", \".join(self.channel_names)\n print(prefix + \"└── \" + s)\n\n def bring_to_front(self, channel):\n \"\"\"Bring a specific channel to the zero-indexed position in channels.\n\n All other channels get pushed back but remain in order.\n\n Parameters\n ----------\n channel : int or str\n Channel index or name.\n \"\"\"\n channel_index = wt_kit.get_index(self.channel_names, channel)\n new = list(self.channel_names)\n new.insert(0, new.pop(channel_index))\n self.channel_names = new\n\n def chop(self, *args, at={}, parent=None, verbose=True) -> wt_collection.Collection:\n \"\"\"Divide the dataset into its lower-dimensionality components.\n\n Parameters\n ----------\n axis : str or int (args)\n Axes of the returned data objects. Strings refer to the names of\n axes in this object, integers refer to their index. Provide multiple\n axes to return multidimensional data objects.\n at : dict (optional)\n Choice of position along an axis. Keys are axis names, values are lists\n ``[position, input units]``. If exact position does not exist,\n the closest valid position is used.\n parent : WrightTools Collection instance (optional)\n Collection to place the new \"chop\" collection within. Default is\n None (new parent).\n verbose : bool (optional)\n Toggle talkback. 
Default is True.\n\n Returns\n -------\n WrightTools Collection\n Collection of chopped data objects.\n\n Examples\n --------\n >>> data.axis_names\n ['d2', 'w1', 'w2']\n\n Get all w1 wigners.\n\n >>> datas = data.chop('d2', 'w1')\n >>> len(datas)\n 51\n\n Get 2D frequency at d2=0 fs.\n\n >>> datas = data.chop('w1', 'w2', at={'d2': [0, 'fs']})\n >>> len(datas)\n 0\n >>> datas[0].axis_names\n ['w1', 'w2']\n >>> datas[0].d2[:]\n 0.\n\n See Also\n --------\n collapse\n Collapse the dataset along one axis.\n split\n Split the dataset while maintaining its dimensionality.\n \"\"\"\n from ._axis import operators, operator_to_identifier\n\n # parse args\n args = list(args)\n for i, arg in enumerate(args):\n if isinstance(arg, int):\n args[i] = self._axes[arg].natural_name\n elif isinstance(arg, str):\n # same normalization that occurs in the natural_name @property\n arg = arg.strip()\n for op in operators:\n arg = arg.replace(op, operator_to_identifier[op])\n args[i] = wt_kit.string2identifier(arg)\n\n # normalize the at keys to the natural name\n for k in [ak for ak in at.keys() if type(ak) == str]:\n for op in operators:\n if op in k:\n nk = k.replace(op, operator_to_identifier[op])\n at[nk] = at[k]\n at.pop(k)\n k = nk\n\n # get output collection\n out = wt_collection.Collection(name=\"chop\", parent=parent)\n # get output shape\n kept = args + [ak for ak in at.keys() if type(ak) == str]\n kept_axes = [self._axes[self.axis_names.index(a)] for a in kept]\n removed_axes = [a for a in self._axes if a not in kept_axes]\n removed_shape = wt_kit.joint_shape(*removed_axes)\n if removed_shape == ():\n removed_shape = (1,) * self.ndim\n removed_shape = list(removed_shape)\n for i in at.keys():\n if type(i) == int:\n removed_shape[i] = 1\n for ax in kept_axes:\n if ax.shape.count(1) == ax.ndim - 1:\n removed_shape[ax.shape.index(ax.size)] = 1\n removed_shape = tuple(removed_shape)\n # iterate\n i = 0\n for idx in np.ndindex(removed_shape):\n idx = np.array(idx, dtype=object)\n idx[np.array(removed_shape) == 1] = slice(None)\n for axis, point in at.items():\n if type(axis) == int:\n idx[axis] = point\n continue\n point, units = point\n destination_units = self._axes[self.axis_names.index(axis)].units\n point = wt_units.converter(point, units, destination_units)\n axis_index = self.axis_names.index(axis)\n axis = self._axes[axis_index]\n idx_index = np.array(axis.shape) > 1\n if np.sum(idx_index) > 1:\n raise wt_exceptions.MultidimensionalAxisError(\"chop\", axis.natural_name)\n idx_index = list(idx_index).index(True)\n idx[idx_index] = np.argmin(np.abs(axis[tuple(idx)] - point))\n data = out.create_data(name=\"chop%03i\" % i)\n for v in self.variables:\n kwargs = {}\n kwargs[\"name\"] = v.natural_name\n kwargs[\"values\"] = v[idx]\n kwargs[\"units\"] = v.units\n kwargs[\"label\"] = v.label\n kwargs.update(v.attrs)\n data.create_variable(**kwargs)\n for c in self.channels:\n kwargs = {}\n kwargs[\"name\"] = c.natural_name\n kwargs[\"values\"] = c[idx]\n kwargs[\"units\"] = c.units\n kwargs[\"label\"] = c.label\n kwargs[\"signed\"] = c.signed\n kwargs.update(c.attrs)\n data.create_channel(**kwargs)\n new_axes = [a.expression for a in kept_axes if a.expression not in at.keys()]\n new_axis_units = [a.units for a in kept_axes if a.expression not in at.keys()]\n data.transform(*new_axes)\n for const in self.constant_expressions:\n data.create_constant(const, verbose=False)\n for ax in self.axis_expressions:\n if ax not in new_axes:\n data.create_constant(ax, verbose=False)\n for j, units in 
enumerate(new_axis_units):\n data.axes[j].convert(units)\n i += 1\n out.flush()\n # return\n if verbose:\n print(\"chopped data into %d piece(s)\" % len(out), \"in\", new_axes)\n return out\n\n def gradient(self, axis, *, channel=0):\n \"\"\"\n Compute the gradient along one axis.\n\n New channels have names ``<channel name>_<axis name>_gradient``.\n\n Parameters\n ----------\n axis : int or str\n The axis to differentiate along.\n If given as an integer, the axis in the underlying array is used,\n and unitary spacing is assumed.\n If given as a string, the axis must exist, and be a 1D array-aligned axis.\n (i.e. have a shape with a single value which is not ``1``)\n The axis to collapse along is inferred from the shape of the axis.\n channel : int or str\n The channel to differentiate.\n Default is the first channel.\n \"\"\"\n # get axis index --------------------------------------------------------------------------\n if isinstance(axis, int):\n axis_index = axis\n elif isinstance(axis, str):\n index = self.axis_names.index(axis)\n axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]\n if len(axes) > 1:\n raise wt_exceptions.MultidimensionalAxisError(axis, \"collapse\")\n elif len(axes) == 0:\n raise wt_exceptions.ValueError(\n \"Axis '{}' is a single point, cannot compute gradient\".format(axis)\n )\n axis_index = axes[0]\n else:\n raise wt_exceptions.TypeError(\"axis: expected {int, str}, got %s\" % type(axis))\n\n channel_index = wt_kit.get_index(self.channel_names, channel)\n channel = self.channel_names[channel_index]\n\n if self[channel].shape[axis_index] == 1:\n raise wt_exceptions.ValueError(\n \"Channel '{}' has a single point along Axis '{}', cannot compute gradient\".format(\n channel, axis\n )\n )\n rtype = np.result_type(self[channel].dtype, float)\n new = self.create_channel(\n \"{}_{}_gradient\".format(channel, axis),\n values=np.empty(self[channel].shape, dtype=rtype),\n )\n\n channel = self[channel]\n if axis == axis_index:\n new[:] = np.gradient(channel[:], axis=axis_index)\n else:\n new[:] = np.gradient(channel[:], self[axis].points, axis=axis_index)\n\n def moment(self, axis, channel=0, moment=1, *, resultant=None):\n \"\"\"Take the nth moment the dataset along one axis, adding lower rank channels.\n\n New channels have names ``<channel name>_<axis name>_moment_<moment num>``.\n\n Moment 0 is the integral of the slice.\n Moment 1 is the weighted average or \"Center of Mass\", normalized by the integral\n Moment 2 is the variance, the central moment about the center of mass,\n normalized by the integral\n Moments 3+ are central moments about the center of mass, normalized by the integral\n and by the standard deviation to the power of the moment.\n\n Moments, especially higher order moments, are susceptible to noise and baseline.\n It is recommended when used with real data to use :meth:`WrightTools.data.Channel.clip`\n in conjunction with moments to reduce effects of noise.\n\n Parameters\n ----------\n axis : int or str\n The axis to take the moment along.\n If given as an integer, the axis with that index is used.\n If given as a string, the axis with that name is used.\n The axis must exist, and be a 1D array-aligned axis.\n (i.e. 
have a shape with a single value which is not ``1``)\n The collapsed axis must be monotonic to produce correct results.\n The axis to collapse along is inferred from the shape of the axis.\n channel : int or str\n The channel to take the moment.\n If given as an integer, the channel with that index is used.\n If given as a string, the channel with that name is used.\n The channel must have values along the axis\n (i.e. its shape must not be ``1`` in the dimension for which the axis is not ``1``)\n Default is 0, the first channel.\n moment : int or tuple of int\n The moments to take.\n One channel will be created for each number given.\n Default is 1, the center of mass.\n resultant : tuple of int\n The resultant shape after the moment operation.\n By default, it is intuited by the axis along which the moment is being taken.\n This default only works if that axis is 1D, so resultant is required if a\n multidimensional axis is passed as the first argument.\n The requirement of monotonicity applies on a per pixel basis.\n\n See Also\n --------\n collapse\n Reduce dimensionality by some mathematical operation\n clip\n Set values above/below a threshold to a particular value\n WrightTools.kit.joint_shape\n Useful for setting `resultant` kwarg based off of axes not collapsed.\n \"\"\"\n # get axis index --------------------------------------------------------------------------\n axis_index = None\n if resultant is not None:\n for i, (s, r) in enumerate(zip(wt_kit.joint_shape(*self.axes), resultant)):\n if s != r and r == 1 and axis_index is None:\n axis_index = i\n elif s == r:\n continue\n else:\n raise wt_exceptions.ValueError(\n f\"Invalid resultant shape '{resultant}' for shape {wt_kit.joint_shape(*self.axes)}. \"\n + \"Consider using `wt.kit.joint_shape` to join non-collapsed axes.\"\n )\n\n index = wt_kit.get_index(self.axis_names, axis)\n if axis_index is None:\n axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]\n if len(axes) > 1:\n raise wt_exceptions.MultidimensionalAxisError(axis, \"moment\")\n elif len(axes) == 0:\n raise wt_exceptions.ValueError(\n \"Axis {} is a single point, cannot compute moment\".format(axis)\n )\n axis_index = axes[0]\n\n warnings.warn(\"moment\", category=wt_exceptions.EntireDatasetInMemoryWarning)\n\n channel_index = wt_kit.get_index(self.channel_names, channel)\n channel = self.channel_names[channel_index]\n\n if self[channel].shape[axis_index] == 1:\n raise wt_exceptions.ValueError(\n \"Channel '{}' has a single point along Axis '{}', cannot compute moment\".format(\n channel, axis\n )\n )\n\n new_shape = list(self[channel].shape)\n new_shape[axis_index] = 1\n\n channel = self[channel]\n axis_inp = axis\n axis = self.axes[index]\n x = axis[:]\n if np.any(np.isnan(x)):\n raise wt_exceptions.ValueError(\"Axis '{}' includes NaN\".format(axis_inp))\n y = np.nan_to_num(channel[:])\n\n try:\n moments = tuple(moment)\n except TypeError:\n moments = (moment,)\n\n multiplier = 1\n if 0 in moments:\n # May be possible to optimize, probably doesn't need the sum\n # only matters for integral, all others normalize by integral\n multiplier = np.sign(\n np.sum(np.diff(x, axis=axis_index), axis=axis_index, keepdims=True)\n )\n\n for moment in moments:\n about = 0\n norm = 1\n if moment > 0:\n norm = np.trapz(y, x, axis=axis_index)\n norm = np.array(norm)\n norm.shape = new_shape\n if moment > 1:\n about = np.trapz(x * y, x, axis=axis_index)\n about = np.array(about)\n about.shape = new_shape\n about /= norm\n if moment > 2:\n sigma = np.trapz((x - 
about) ** 2 * y, x, axis=axis_index)\n sigma = np.array(sigma)\n sigma.shape = new_shape\n sigma /= norm\n sigma **= 0.5\n norm *= sigma ** moment\n\n values = np.trapz((x - about) ** moment * y, x, axis=axis_index)\n values = np.array(values)\n values.shape = new_shape\n values /= norm\n if moment == 0:\n values *= multiplier\n self.create_channel(\n \"{}_{}_{}_{}\".format(channel.natural_name, axis_inp, \"moment\", moment),\n values=values,\n )\n\n def collapse(self, axis, method=\"sum\"):\n \"\"\"Collapse the dataset along one axis, adding lower rank channels.\n\n New channels have names ``<channel name>_<axis name>_<method>``.\n\n Parameters\n ----------\n axis : int or str\n The axis to collapse along.\n If given as an integer, the axis in the underlying array is used.\n If given as a string, the axis must exist, and be a 1D array-aligned axis.\n (i.e. have a shape with a single value which is not ``1``)\n The axis to collapse along is inferred from the shape of the axis.\n method : {'average', 'sum', 'max', 'min'} (optional)\n The method of collapsing the given axis. Method may also be list\n of methods corresponding to the channels of the object. Default\n is sum. NaNs are ignored.\n Can also be a list, allowing for different treatment for varied channels.\n In this case, None indicates that no change to that channel should occur.\n\n See Also\n --------\n chop\n Divide the dataset into its lower-dimensionality components.\n split\n Split the dataset while maintaining its dimensionality.\n moment\n Take the moment along a particular axis\n \"\"\"\n if method in (\"int\", \"integrate\"):\n warnings.warn(\n \"integrate method of collapse is deprecated, use moment(moment=0) instead\",\n wt_exceptions.VisibleDeprecationWarning,\n )\n for channel in self.channel_names:\n try:\n self.moment(axis, channel, moment=0)\n self.rename_channels(\n **{self.channel_names[-1]: f\"{channel}_{axis}_{method}\"}, verbose=False\n )\n except wt_exceptions.ValueError:\n pass # may have some channels which fail, do so silently\n return\n # get axis index --------------------------------------------------------------------------\n if isinstance(axis, int):\n axis_index = axis\n elif isinstance(axis, str):\n index = self.axis_names.index(axis)\n axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]\n if len(axes) > 1:\n raise wt_exceptions.MultidimensionalAxisError(axis, \"collapse\")\n elif len(axes) == 0:\n raise wt_exceptions.ValueError(\n \"Axis {} is a single point, cannot collapse\".format(axis)\n )\n axis_index = axes[0]\n else:\n raise wt_exceptions.TypeError(\"axis: expected {int, str}, got %s\" % type(axis))\n\n new_shape = list(self.shape)\n new_shape[axis_index] = 1\n func = {\n \"sum\": np.nansum,\n \"max\": np.nanmax,\n \"maximum\": np.nanmax,\n \"min\": np.nanmin,\n \"minimum\": np.nanmin,\n \"ave\": np.nanmean,\n \"average\": np.nanmean,\n \"mean\": np.nanmean,\n }\n\n # methods ---------------------------------------------------------------------------------\n if isinstance(method, str):\n methods = [method for _ in self.channels]\n if isinstance(method, list):\n if len(method) == len(self.channels):\n methods = method\n else:\n raise wt_exceptions.ValueError(\n \"method argument must have same number of elements as there are channels\"\n )\n for m in methods:\n if m not in func.keys():\n raise wt_exceptions.ValueError(\"method '{}' not recognized\".format(m))\n\n warnings.warn(\"collapse\", category=wt_exceptions.EntireDatasetInMemoryWarning)\n\n # collapse 
--------------------------------------------------------------------------------\n for method, channel in zip(methods, self.channel_names):\n if method is None:\n continue\n\n if self[channel].shape[axis_index] == 1:\n continue # Cannot collapse any further, don't clutter data object\n\n new_shape = list(self[channel].shape)\n new_shape[axis_index] = 1\n rtype = self[channel].dtype\n if method in [\"ave\", \"average\", \"mean\"]:\n rtype = np.result_type(self[channel].dtype, float)\n\n new = self.create_channel(\n \"{}_{}_{}\".format(channel, axis, method),\n values=np.empty(new_shape, dtype=rtype),\n units=self[channel].units,\n )\n\n new[:] = func[method](self[channel], axis=axis_index, keepdims=True)\n\n def convert(self, destination_units, *, convert_variables=False, verbose=True):\n \"\"\"Convert all compatable axes and constants to given units.\n\n Parameters\n ----------\n destination_units : str\n Destination units.\n convert_variables : boolean (optional)\n Toggle conversion of stored arrays. Default is False\n verbose : bool (optional)\n Toggle talkback. Default is True.\n\n See Also\n --------\n Axis.convert\n Convert a single axis object to compatable units. Call on an\n axis object in data.axes.\n \"\"\"\n # apply to all compatible axes\n for axis in self.axes:\n if wt_units.is_valid_conversion(axis.units, destination_units):\n orig = axis.units\n axis.convert(destination_units, convert_variables=convert_variables)\n if verbose:\n print(\n \"axis {} converted from {} to {}\".format(\n axis.expression, orig, destination_units\n )\n )\n # apply to all compatible constants\n for constant in self.constants:\n if wt_units.is_valid_conversion(constant.units, destination_units):\n orig = constant.units\n constant.convert(destination_units, convert_variables=convert_variables)\n if verbose:\n print(\n \"constant {} converted from {} to {}\".format(\n constant.expression, orig, destination_units\n )\n )\n if convert_variables:\n for var in self.variables:\n if wt_units.is_valid_conversion(var.units, destination_units):\n orig = var.units\n var.convert(destination_units)\n if verbose:\n print(\n \"variable {} converted from {} to {}\".format(\n var.natural_name, orig, destination_units\n )\n )\n self._on_axes_updated()\n self._on_constants_updated()\n\n def create_channel(\n self, name, values=None, *, shape=None, units=None, dtype=None, **kwargs\n ) -> Channel:\n \"\"\"Append a new channel.\n\n Parameters\n ----------\n name : string\n Unique name for this channel.\n values : array (optional)\n Array. If None, an empty array equaling the data shape is\n created. Default is None.\n shape : tuple of int\n Shape to use. Must broadcast with the full shape.\n Only used if `values` is None.\n Default is the full shape of self.\n units : string (optional)\n Channel units. 
Default is None.\n dtype : numpy.dtype (optional)\n dtype to use for dataset, default is np.float64.\n Only used if `values` is None.\n kwargs : dict\n Additional keyword arguments passed to Channel instantiation.\n\n Returns\n -------\n Channel\n Created channel.\n \"\"\"\n if name in self.channel_names:\n warnings.warn(name, wt_exceptions.ObjectExistsWarning)\n return self[name]\n elif name in self.variable_names:\n raise wt_exceptions.NameNotUniqueError(name)\n\n require_kwargs = {\"chunks\": True}\n if values is None:\n if shape is None:\n require_kwargs[\"shape\"] = self.shape\n else:\n require_kwargs[\"shape\"] = shape\n if dtype is None:\n require_kwargs[\"dtype\"] = np.dtype(np.float64)\n else:\n require_kwargs[\"dtype\"] = dtype\n if require_kwargs[\"dtype\"].kind in \"fcmM\":\n require_kwargs[\"fillvalue\"] = np.nan\n else:\n require_kwargs[\"fillvalue\"] = 0\n else:\n require_kwargs[\"data\"] = values\n require_kwargs[\"shape\"] = values.shape\n require_kwargs[\"dtype\"] = values.dtype\n if np.prod(require_kwargs[\"shape\"]) == 1:\n require_kwargs[\"chunks\"] = None\n # create dataset\n dataset_id = self.require_dataset(name=name, **require_kwargs).id\n channel = Channel(self, dataset_id, units=units, **kwargs)\n # finish\n self.attrs[\"channel_names\"] = np.append(self.attrs[\"channel_names\"], name.encode())\n return channel\n\n def create_variable(\n self, name, values=None, *, shape=None, units=None, dtype=None, **kwargs\n ) -> Variable:\n \"\"\"Add new child variable.\n\n Parameters\n ----------\n name : string\n Unique identifier.\n values : array-like (optional)\n Array to populate variable with. If None, an variable will be filled with NaN.\n Default is None.\n shape : tuple of int\n Shape to use. must broadcast with the full shape.\n Only used if `values` is None.\n Default is the full shape of self.\n units : string (optional)\n Variable units. Default is None.\n dtype : numpy.dtype (optional)\n dtype to use for dataset, default is np.float64.\n Only used if `values` is None.\n kwargs\n Additional kwargs to variable instantiation.\n\n Returns\n -------\n WrightTools Variable\n New child variable.\n \"\"\"\n if name in self.variable_names:\n warnings.warn(name, wt_exceptions.ObjectExistsWarning)\n return self[name]\n elif name in self.channel_names:\n raise wt_exceptions.NameNotUniqueError(name)\n if values is None:\n if shape is None:\n shape = self.shape\n if dtype is None:\n dtype = np.dtype(np.float64)\n if dtype.kind in \"fcmM\":\n fillvalue = np.nan\n else:\n fillvalue = 0\n else:\n shape = values.shape\n dtype = values.dtype\n fillvalue = None\n # create dataset\n id = self.require_dataset(\n name=name, data=values, shape=shape, dtype=dtype, fillvalue=fillvalue\n ).id\n variable = Variable(self, id, units=units, **kwargs)\n # finish\n self._variables = None\n self.attrs[\"variable_names\"] = np.append(self.attrs[\"variable_names\"], name.encode())\n return variable\n\n def get_nadir(self, channel=0) -> tuple:\n \"\"\"Get the coordinates, in units, of the minimum in a channel.\n\n Parameters\n ----------\n channel : int or str (optional)\n Channel. 
Default is 0.\n\n Returns\n -------\n generator of numbers\n Coordinates in units for each axis.\n \"\"\"\n # get channel\n if isinstance(channel, int):\n channel_index = channel\n elif isinstance(channel, str):\n channel_index = self.channel_names.index(channel)\n else:\n raise TypeError(\"channel: expected {int, str}, got %s\" % type(channel))\n channel = self.channels[channel_index]\n # get indicies\n idx = channel.argmin()\n # finish\n return tuple(a[idx] for a in self._axes)\n\n def get_zenith(self, channel=0) -> tuple:\n \"\"\"Get the coordinates, in units, of the maximum in a channel.\n\n Parameters\n ----------\n channel : int or str (optional)\n Channel. Default is 0.\n\n Returns\n -------\n generator of numbers\n Coordinates in units for each axis.\n \"\"\"\n # get channel\n if isinstance(channel, int):\n channel_index = channel\n elif isinstance(channel, str):\n channel_index = self.channel_names.index(channel)\n else:\n raise TypeError(\"channel: expected {int, str}, got %s\" % type(channel))\n channel = self.channels[channel_index]\n # get indicies\n idx = channel.argmax()\n # finish\n return tuple(a[idx] for a in self._axes)\n\n def heal(self, channel=0, method=\"linear\", fill_value=np.nan, verbose=True):\n \"\"\"\n Remove nans from channel using interpolation.\n\n Parameters\n ----------\n channel : int or str (optional)\n Channel to heal. Default is 0.\n method : {'linear', 'nearest', 'cubic'} (optional)\n The interpolation method. Note that cubic interpolation is only\n possible for 1D and 2D data. See `griddata`__ for more information.\n Default is linear.\n fill_value : number-like (optional)\n The value written to pixels that cannot be filled by interpolation.\n Default is nan.\n verbose : bool (optional)\n Toggle talkback. Default is True.\n\n\n __ http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html\n\n\n .. note:: Healing may take several minutes for large datasets.\n Interpolation time goes as nearest, linear, then cubic.\n\n\n \"\"\"\n warnings.warn(\"heal\", category=wt_exceptions.EntireDatasetInMemoryWarning)\n timer = wt_kit.Timer(verbose=False)\n with timer:\n # channel\n if isinstance(channel, int):\n channel_index = channel\n elif isinstance(channel, str):\n channel_index = self.channel_names.index(channel)\n else:\n raise TypeError(\"channel: expected {int, str}, got %s\" % type(channel))\n channel = self.channels[channel_index]\n values = self.channels[channel_index][:]\n points = [axis[:] for axis in self._axes]\n xi = tuple(np.meshgrid(*points, indexing=\"ij\"))\n # 'undo' gridding\n arr = np.zeros((len(self._axes) + 1, values.size))\n for i in range(len(self._axes)):\n arr[i] = xi[i].flatten()\n arr[-1] = values.flatten()\n # remove nans\n arr = arr[:, ~np.isnan(arr).any(axis=0)]\n # grid data wants tuples\n tup = tuple([arr[i] for i in range(len(arr) - 1)])\n # grid data\n out = griddata(tup, arr[-1], xi, method=method, fill_value=fill_value)\n self.channels[channel_index][:] = out\n # print\n if verbose:\n print(\n \"channel {0} healed in {1} seconds\".format(\n channel.name, np.around(timer.interval, decimals=3)\n )\n )\n\n def level(self, channel, axis, npts, *, verbose=True):\n \"\"\"Subtract the average value of npts at the edge of a given axis.\n\n Parameters\n ----------\n channel : int or str\n Channel to level.\n axis : int\n Axis to level along.\n npts : int\n Number of points to average for each slice. 
Positive numbers\n take points at leading indicies and negative numbers take points\n at trailing indicies.\n verbose : bool (optional)\n Toggle talkback. Default is True.\n \"\"\"\n warnings.warn(\"level\", category=wt_exceptions.EntireDatasetInMemoryWarning)\n channel_index = wt_kit.get_index(self.channel_names, channel)\n channel = self.channels[channel_index]\n # verify npts not zero\n npts = int(npts)\n if npts == 0:\n raise wt_exceptions.ValueError(\"npts must not be zero\")\n # get subtrahend\n ss = [slice(None)] * self.ndim\n if npts > 0:\n ss[axis] = slice(0, npts, None)\n else:\n ss[axis] = slice(npts, None, None)\n subtrahend = np.nanmean(channel[ss], axis=axis)\n if self.ndim > 1:\n subtrahend = np.expand_dims(subtrahend, axis=axis)\n # level\n channel -= subtrahend\n # finish\n channel._null = 0\n if verbose:\n print(\"channel {0} leveled along axis {1}\".format(channel.natural_name, axis))\n\n def map_variable(\n self, variable, points, input_units=\"same\", *, name=None, parent=None, verbose=True\n ) -> \"Data\":\n \"\"\"Map points of an axis to new points using linear interpolation.\n\n Out-of-bounds points are written nan.\n\n Parameters\n ----------\n variable : string\n The variable to map onto.\n points : array-like or int\n If array, the new points. If int, new points will have the same\n limits, with int defining the number of evenly spaced points\n between.\n input_units : str (optional)\n The units of the new points. Default is same, which assumes\n the new points have the same units as the axis.\n name : string (optional)\n The name of the new data object. If None, generated from\n natural_name. Default is None.\n parent : WrightTools.Collection (optional)\n Parent of new data object. If None, data is made at root of a\n new temporary file.\n verbose : bool (optional)\n Toggle talkback. 
Default is True.\n\n Returns\n -------\n WrightTools.Data\n New data object.\n \"\"\"\n # get variable index\n variable_index = wt_kit.get_index(self.variable_names, variable)\n variable = self.variables[variable_index]\n # get points\n if isinstance(points, int):\n points = np.linspace(variable.min(), variable.max(), points)\n points = np.array(points)\n # points dimensionality\n if points.ndim < variable.ndim:\n for i, d in enumerate(variable.shape):\n if d == 1:\n points = np.expand_dims(points, axis=i)\n # convert points\n if input_units == \"same\":\n pass\n else:\n points = wt_units.converter(points, input_units, variable.units)\n # construct new data object\n special = [\"name\", \"axes\", \"constants\", \"channel_names\", \"variable_names\"]\n kwargs = {k: v for k, v in self.attrs.items() if k not in special}\n if name is None:\n name = \"{0}_{1}_mapped\".format(self.natural_name, variable.natural_name)\n kwargs[\"name\"] = name\n kwargs[\"parent\"] = parent\n out = Data(**kwargs)\n # mapped variable\n values = points\n out.create_variable(values=values, **variable.attrs)\n # orthogonal variables\n for v in self.variables:\n if wt_kit.orthogonal(v.shape, variable.shape):\n out.create_variable(values=v[:], **v.attrs)\n out.transform(*self.axis_expressions)\n # interpolate\n if self.ndim == 1:\n\n def interpolate(dataset, points):\n function = scipy.interpolate.interp1d(variable[:], dataset[:], bounds_error=False)\n return function(points)\n\n else:\n pts = np.array([a.full.flatten() for a in self.axes]).T\n out_pts = np.array([a.full.flatten() for a in out.axes]).T\n\n def interpolate(dataset, points):\n values = dataset.full.flatten()\n function = scipy.interpolate.LinearNDInterpolator(pts, values, rescale=True)\n new = function(out_pts)\n new.shape = out.shape\n return new\n\n for v in self.variables:\n if v.natural_name not in out.variable_names:\n out.create_variable(values=interpolate(v, points), **v.attrs)\n out.variable_names = self.variable_names # enforce old order\n out._variables = None # force regeneration of variables @property\n for channel in self.channels:\n out.create_channel(values=interpolate(channel, points), **channel.attrs)\n # finish\n if verbose:\n print(\"data mapped from {0} to {1}\".format(self.shape, out.shape))\n return out\n\n def offset(\n self,\n points,\n offsets,\n along,\n offset_axis,\n units=\"same\",\n offset_units=\"same\",\n mode=\"valid\",\n method=\"linear\",\n verbose=True,\n ):\n \"\"\"Offset one axis based on another axis' values.\n\n Useful for correcting instrumental artifacts such as zerotune.\n\n Parameters\n ----------\n points : 1D array-like\n Points.\n offsets : 1D array-like\n Offsets.\n along : str or int\n Axis that points array lies along.\n offset_axis : str or int\n Axis to offset using offsets.\n units : str (optional)\n Units of points array.\n offset_units : str (optional)\n Units of offsets aray.\n mode : {'valid', 'full', 'old'} (optional)\n Define how far the new axis will extend. Points outside of valid\n interpolation range will be written nan.\n method : {'linear', 'nearest', 'cubic'} (optional)\n The interpolation method. Note that cubic interpolation is only\n possible for 1D and 2D data. See `griddata`__ for more information.\n Default is linear.\n verbose : bool (optional)\n Toggle talkback. 
Default is True.\n\n\n __ http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html\n\n >>> points # an array of w1 points\n >>> offsets # an array of d1 corrections\n >>> data.offset(points, offsets, 'w1', 'd1')\n\n \"\"\"\n raise NotImplementedError\n # axis ------------------------------------------------------------------------------------\n if isinstance(along, int):\n axis_index = along\n elif isinstance(along, str):\n axis_index = self.axis_names.index(along)\n else:\n raise TypeError(\"along: expected {int, str}, got %s\" % type(along))\n axis = self._axes[axis_index]\n # values & points -------------------------------------------------------------------------\n # get values, points, units\n if units == \"same\":\n input_units = axis.units\n else:\n input_units = units\n # check offsets is 1D or 0D\n if len(offsets.shape) == 1:\n pass\n else:\n raise RuntimeError(\"values must be 1D or 0D in offset!\")\n # check if units is compatible, convert\n dictionary = getattr(wt_units, axis.units_kind)\n if input_units in dictionary.keys():\n pass\n else:\n raise RuntimeError(\"units incompatible in offset!\")\n points = wt_units.converter(points, input_units, axis.units)\n # create correction array\n function = interp1d(points, offsets, bounds_error=False)\n corrections = function(axis[:])\n # remove nans\n finite_indicies = np.where(np.isfinite(corrections))[0]\n left_pad_width = finite_indicies[0]\n right_pad_width = len(corrections) - finite_indicies[-1] - 1\n corrections = np.pad(\n corrections[np.isfinite(corrections)],\n (int(left_pad_width), int(right_pad_width)),\n mode=\"edge\",\n )\n # do correction ---------------------------------------------------------------------------\n # transpose so axis is last\n transpose_order = np.arange(len(self._axes))\n transpose_order[axis_index] = len(self._axes) - 1\n transpose_order[-1] = axis_index\n self.transpose(transpose_order, verbose=False)\n # get offset axis index\n if isinstance(offset_axis, int):\n offset_axis_index = offset_axis\n elif isinstance(offset_axis, str):\n offset_axis_index = self.axis_names.index(offset_axis)\n else:\n raise TypeError(\"offset_axis: expected {int, str}, got %s\" % type(offset_axis))\n # new points\n new_points = [a[:] for a in self._axes]\n old_offset_axis_points = self._axes[offset_axis_index][:]\n spacing = abs(\n (old_offset_axis_points.max() - old_offset_axis_points.min())\n / float(len(old_offset_axis_points))\n )\n if mode == \"old\":\n new_offset_axis_points = old_offset_axis_points\n elif mode == \"valid\":\n _max = old_offset_axis_points.max() + corrections.min()\n _min = old_offset_axis_points.min() + corrections.max()\n n = int(abs(np.ceil((_max - _min) / spacing)))\n new_offset_axis_points = np.linspace(_min, _max, n)\n elif mode == \"full\":\n _max = old_offset_axis_points.max() + corrections.max()\n _min = old_offset_axis_points.min() + corrections.min()\n n = np.ceil((_max - _min) / spacing)\n new_offset_axis_points = np.linspace(_min, _max, n)\n new_points[offset_axis_index] = new_offset_axis_points\n new_xi = tuple(np.meshgrid(*new_points, indexing=\"ij\"))\n xi = tuple(np.meshgrid(*[a[:] for a in self._axes], indexing=\"ij\"))\n for channel in self.channels:\n # 'undo' gridding\n arr = np.zeros((len(self._axes) + 1, channel[:].size))\n for i in range(len(self._axes)):\n arr[i] = xi[i].flatten()\n arr[-1] = channel[:].flatten()\n # do corrections\n corrections = list(corrections)\n corrections = corrections * int((len(arr[0]) / len(corrections)))\n 
arr[offset_axis_index] += corrections\n # grid data\n tup = tuple([arr[i] for i in range(len(arr) - 1)])\n # note that rescale is crucial in this operation\n out = griddata(tup, arr[-1], new_xi, method=method, fill_value=np.nan, rescale=True)\n channel[:] = out\n self._axes[offset_axis_index][:] = new_offset_axis_points\n # transpose out\n self.transpose(transpose_order, verbose=False)\n\n def print_tree(self, *, verbose=True):\n \"\"\"Print a ascii-formatted tree representation of the data contents.\"\"\"\n print(\"{0} ({1})\".format(self.natural_name, self.filepath))\n self._print_branch(\"\", depth=0, verbose=verbose)\n\n def prune(self, keep_channels=True, *, verbose=True):\n \"\"\"Remove unused variables and (optionally) channels from the Data object.\n\n Unused variables are those that are not included in either axes or constants.\n Unused channels are those not specified in keep_channels, or the first channel.\n\n Parameters\n ----------\n keep_channels : boolean or int or str or tuple\n If False, removes all but the first channel.\n If int or str, removes all but that index/name channel.\n If tuple, removes all channels except those in the tuple by index or name.\n Default is True: do not delete channels\n verbose : boolean\n Toggle talkback. Default is True.\n \"\"\"\n for v in self.variables:\n for var in wt_kit.flatten_list([ax.variables for ax in self._axes + self._constants]):\n if v == var:\n break\n else:\n self.remove_variable(v.natural_name, implied=False, verbose=verbose)\n if keep_channels is not True:\n try:\n if isinstance(keep_channels, str):\n raise TypeError\n indexes = tuple(keep_channels)\n except TypeError:\n indexes = (keep_channels,)\n\n for i, ch in enumerate(self.channels):\n if i not in indexes and not ch.natural_name in indexes:\n self.remove_channel(ch.natural_name, verbose=verbose)\n\n def remove_channel(self, channel, *, verbose=True):\n \"\"\"Remove channel from data.\n\n Parameters\n ----------\n channel : int or str\n Channel index or name to remove.\n verbose : boolean (optional)\n Toggle talkback. Default is True.\n \"\"\"\n channel_index = wt_kit.get_index(self.channel_names, channel)\n new = list(self.channel_names)\n name = new.pop(channel_index)\n del self[name]\n self.channel_names = new\n if verbose:\n print(\"channel {0} removed\".format(name))\n\n def remove_variable(self, variable, *, implied=True, verbose=True):\n \"\"\"Remove variable from data.\n\n Parameters\n ----------\n variable : int or str\n Variable index or name to remove.\n implied : boolean (optional)\n Toggle deletion of other variables that start with the same\n name. Default is True.\n verbose : boolean (optional)\n Toggle talkback. 
Default is True.\n \"\"\"\n if isinstance(variable, int):\n variable = self.variable_names[variable]\n # find all of the implied variables\n removed = []\n if implied:\n for n in self.variable_names:\n if n.startswith(variable):\n removed.append(n)\n else:\n removed = [variable]\n # check that axes will not be ruined\n for n in removed:\n for a in self._axes:\n if n in [v.natural_name for v in a.variables]:\n message = \"{0} is contained in axis {1}\".format(n, a.expression)\n raise RuntimeError(message)\n for c in self._constants:\n if n in [v.natural_name for v in c.variables]:\n warnings.warn(\n \"Variable being removed used in a constant\",\n wt_exceptions.WrightToolsWarning,\n )\n\n # do removal\n for n in removed:\n variable_index = wt_kit.get_index(self.variable_names, n)\n new = list(self.variable_names)\n name = new.pop(variable_index)\n del self[name]\n self.variable_names = new\n self._variables = None\n # finish\n if verbose:\n print(\"{0} variable(s) removed:\".format(len(removed)))\n for n in removed:\n print(\" {0}\".format(n))\n\n def rename_channels(self, *, verbose=True, **kwargs):\n \"\"\"Rename a set of channels.\n\n Parameters\n ----------\n kwargs\n Keyword arguments of the form current:'new'.\n verbose : boolean (optional)\n Toggle talkback. Default is True\n \"\"\"\n # ensure that items will remain unique\n changed = kwargs.keys()\n for k, v in kwargs.items():\n if v not in changed and v in self.keys():\n raise wt_exceptions.NameNotUniqueError(v)\n # compile references to items that are changing\n new = {}\n for k, v in kwargs.items():\n obj = self[k]\n index = self.channel_names.index(k)\n # rename\n new[v] = obj, index\n Group._instances.pop(obj.fullpath, None)\n obj.natural_name = str(v)\n # remove old references\n del self[k]\n # apply new references\n names = list(self.channel_names)\n for v, value in new.items():\n obj, index = value\n self[v] = obj\n names[index] = v\n self.channel_names = names\n # finish\n if verbose:\n print(\"{0} channel(s) renamed:\".format(len(kwargs)))\n for k, v in kwargs.items():\n print(\" {0} --> {1}\".format(k, v))\n\n def rename_variables(self, *, implied=True, verbose=True, **kwargs):\n \"\"\"Rename a set of variables.\n\n Parameters\n ----------\n kwargs\n Keyword arguments of the form current:'new'.\n implied : boolean (optional)\n Toggle inclusion of other variables that start with the same\n name. Default is True.\n verbose : boolean (optional)\n Toggle talkback. 
Default is True\n \"\"\"\n # find all of the implied variables\n kwargs = collections.OrderedDict(kwargs)\n if implied:\n new = collections.OrderedDict()\n for k, v in kwargs.items():\n for n in self.variable_names:\n if n.startswith(k):\n new[n] = n.replace(k, v, 1)\n kwargs = new\n # ensure that items will remain unique\n changed = kwargs.keys()\n for k, v in kwargs.items():\n if v not in changed and v in self.keys():\n raise wt_exceptions.NameNotUniqueError(v)\n # compile references to items that are changing\n new = {}\n for k, v in kwargs.items():\n obj = self[k]\n index = self.variable_names.index(k)\n # rename\n new[v] = obj, index\n Group._instances.pop(obj.fullpath, None)\n obj.natural_name = str(v)\n # remove old references\n del self[k]\n # apply new references\n names = list(self.variable_names)\n for v, value in new.items():\n obj, index = value\n self[v] = obj\n names[index] = v\n self.variable_names = names\n units = self.units\n new = list(self.axis_expressions)\n for i, v in enumerate(kwargs.keys()):\n for j, n in enumerate(new):\n new[j] = n.replace(v, \"{%i}\" % i)\n for i, n in enumerate(new):\n new[i] = n.format(*kwargs.values())\n self.transform(*new)\n for a, u in zip(self._axes, units):\n a.convert(u)\n units = self.constant_units\n new = list(self.constant_expressions)\n for i, v in enumerate(kwargs.keys()):\n for j, n in enumerate(new):\n new[j] = n.replace(v, \"{%i}\" % i)\n for i, n in enumerate(new):\n new[i] = n.format(*kwargs.values())\n self.set_constants(*new)\n for c, u in zip(self._constants, units):\n c.convert(u)\n # finish\n if verbose:\n print(\"{0} variable(s) renamed:\".format(len(kwargs)))\n for k, v in kwargs.items():\n print(\" {0} --> {1}\".format(k, v))\n\n def share_nans(self):\n \"\"\"Share not-a-numbers between all channels.\n\n If any channel is nan at a given index, all channels will be nan\n at that index after this operation.\n\n Uses the share_nans method found in wt.kit.\n \"\"\"\n\n def f(_, s, channels):\n outs = wt_kit.share_nans(*[c[s] for c in channels])\n for c, o in zip(channels, outs):\n c[s] = o\n\n self.channels[0].chunkwise(f, self.channels)\n\n def smooth(self, factors, channel=None, verbose=True) -> \"Data\":\n \"\"\"Smooth a channel using an n-dimenional kaiser window.\n\n Note, all arrays are loaded into memory.\n\n For more info see `Kaiser_window`__ wikipedia entry.\n\n __ https://en.wikipedia.org/wiki/Kaiser_window\n\n Parameters\n ----------\n factors : int or list of int\n The smoothing factor. You may provide a list of smoothing factors\n for each axis.\n channel : int or str or None (optional)\n The channel to smooth. If None, all channels will be smoothed.\n Default is None.\n verbose : bool (optional)\n Toggle talkback. 
Default is True.\n \"\"\"\n warnings.warn(\"smooth\", category=wt_exceptions.EntireDatasetInMemoryWarning)\n # get factors -----------------------------------------------------------------------------\n\n if isinstance(factors, list):\n pass\n else:\n dummy = np.zeros(len(self._axes))\n dummy[::] = factors\n factors = list(dummy)\n # get channels ----------------------------------------------------------------------------\n if channel is None:\n channels = self.channels\n else:\n if isinstance(channel, int):\n channel_index = channel\n elif isinstance(channel, str):\n channel_index = self.channel_names.index(channel)\n else:\n raise TypeError(\"channel: expected {int, str}, got %s\" % type(channel))\n channels = [self.channels[channel_index]]\n # smooth ----------------------------------------------------------------------------------\n for channel in channels:\n values = channel[:]\n for axis_index in range(len(factors)):\n factor = factors[axis_index]\n # transpose so the axis of interest is last\n transpose_order = range(len(values.shape))\n # replace axis_index with zero\n transpose_order = [\n len(values.shape) - 1 if i == axis_index else i for i in transpose_order\n ]\n transpose_order[len(values.shape) - 1] = axis_index\n values = values.transpose(transpose_order)\n # get kaiser window\n beta = 5.0\n w = np.kaiser(2 * factor + 1, beta)\n # for all slices...\n for index in np.ndindex(values[..., 0].shape):\n current_slice = values[index]\n temp_slice = np.pad(current_slice, int(factor), mode=str(\"edge\"))\n values[index] = np.convolve(temp_slice, w / w.sum(), mode=str(\"valid\"))\n # transpose out\n values = values.transpose(transpose_order)\n # return array to channel object\n channel[:] = values\n if verbose:\n print(\"smoothed data\")\n\n def split(\n self, expression, positions, *, units=None, parent=None, verbose=True\n ) -> wt_collection.Collection:\n \"\"\"\n Split the data object along a given expression, in units.\n\n Parameters\n ----------\n expression : int or str\n The expression to split along. If given as an integer, the axis at that index\n is used.\n positions : number-type or 1D array-type\n The position(s) to split at, in units.\n units : str (optional)\n The units of the given positions. Default is same, which assumes\n input units are identical to first variable units.\n parent : WrightTools.Collection (optional)\n The parent collection in which to place the 'split' collection.\n Default is a new Collection.\n verbose : bool (optional)\n Toggle talkback. 
Default is True.\n\n Returns\n -------\n WrightTools.collection.Collection\n A Collection of data objects.\n The order of the objects is such that the axis points retain their original order.\n\n See Also\n --------\n chop\n Divide the dataset into its lower-dimensionality components.\n collapse\n Collapse the dataset along one axis.\n \"\"\"\n # axis ------------------------------------------------------------------------------------\n old_expr = self.axis_expressions\n old_units = self.units\n out = wt_collection.Collection(name=\"split\", parent=parent)\n if isinstance(expression, int):\n if units is None:\n units = self._axes[expression].units\n expression = self._axes[expression].expression\n elif isinstance(expression, str):\n pass\n else:\n raise TypeError(\"expression: expected {int, str}, got %s\" % type(expression))\n\n self.transform(expression)\n if units:\n self.convert(units, verbose=False)\n\n try:\n positions = [-np.inf] + sorted(list(positions)) + [np.inf]\n except TypeError:\n positions = [-np.inf, positions, np.inf]\n\n values = self._axes[0].full\n masks = [(values >= lo) & (values < hi) for lo, hi in wt_kit.pairwise(positions)]\n omasks = []\n cuts = []\n for mask in masks:\n try:\n omasks.append(wt_kit.mask_reduce(mask))\n cuts.append([i == 1 for i in omasks[-1].shape])\n # Ensure at least one axis is kept\n if np.all(cuts[-1]):\n cuts[-1][0] = False\n except ValueError:\n omasks.append(None)\n cuts.append(None)\n for i in range(len(positions) - 1):\n out.create_data(\"split%03i\" % i)\n\n for var in self.variables:\n for i, (imask, omask, cut) in enumerate(zip(masks, omasks, cuts)):\n if omask is None:\n # Zero length split\n continue\n omask = wt_kit.enforce_mask_shape(omask, var.shape)\n omask.shape = tuple([s for s, c in zip(omask.shape, cut) if not c])\n out_arr = np.full(omask.shape, np.nan)\n imask = wt_kit.enforce_mask_shape(imask, var.shape)\n out_arr[omask] = var[:][imask]\n out[i].create_variable(values=out_arr, **var.attrs)\n\n for ch in self.channels:\n for i, (imask, omask, cut) in enumerate(zip(masks, omasks, cuts)):\n if omask is None:\n # Zero length split\n continue\n omask = wt_kit.enforce_mask_shape(omask, ch.shape)\n omask.shape = tuple([s for s, c in zip(omask.shape, cut) if not c])\n out_arr = np.full(omask.shape, np.nan)\n imask = wt_kit.enforce_mask_shape(imask, ch.shape)\n out_arr[omask] = ch[:][imask]\n out[i].create_channel(values=out_arr, **ch.attrs)\n\n if verbose:\n for d in out.values():\n try:\n d.transform(expression)\n except IndexError:\n continue\n\n print(\"split data into {0} pieces along <{1}>:\".format(len(positions) - 1, expression))\n for i, (lo, hi) in enumerate(wt_kit.pairwise(positions)):\n new_data = out[i]\n if new_data.shape == ():\n print(\" {0} : None\".format(i))\n else:\n new_axis = new_data.axes[0]\n print(\n \" {0} : {1:0.2f} to {2:0.2f} {3} {4}\".format(\n i, lo, hi, self.axes[0].units, new_axis.shape\n )\n )\n\n for d in out.values():\n try:\n d.transform(*old_expr)\n keep = []\n keep_units = []\n for ax, u in zip(d.axes, old_units):\n if ax.size > 1:\n keep.append(ax.expression)\n keep_units.append(u)\n else:\n d.create_constant(ax.expression, verbose=False)\n d.transform(*keep)\n for ax, u in zip(d.axes, keep_units):\n ax.convert(u)\n except IndexError:\n continue\n tempax = Axis(d, expression)\n if all(\n np.all(\n np.sum(~np.isnan(tempax.masked), axis=tuple(set(range(tempax.ndim)) - {j}))\n <= 1\n )\n for j in range(tempax.ndim)\n ):\n d.create_constant(expression, verbose=False)\n 
self.transform(*old_expr)\n for ax, u in zip(self.axes, old_units):\n ax.convert(u)\n\n return out\n\n def transform(self, *axes, verbose=True):\n \"\"\"Transform the data.\n\n Parameters\n ----------\n axes : strings\n Expressions for the new set of axes.\n verbose : boolean (optional)\n Toggle talkback. Default is True\n\n See Also\n --------\n set_constants\n Similar method except for constants\n \"\"\"\n # TODO: ensure that transform does not break data\n # create\n new = []\n newt = \"newt\" in self.axis_expressions\n current = {a.expression: a for a in self._axes}\n for expression in axes:\n axis = current.get(expression, Axis(self, expression))\n new.append(axis)\n self._axes = new\n # units\n for a in self._axes:\n if a.units is None:\n a.convert(a.variables[0].units)\n # finish\n self.flush()\n self._on_axes_updated()\n nownewt = \"newt\" in self.axis_expressions\n if verbose and nownewt and not newt:\n print(\"Look she turned me into a newt\")\n elif verbose and newt and not nownewt:\n print(\"I got better\")\n\n def set_constants(self, *constants, verbose=True):\n \"\"\"Set the constants associated with the data.\n\n Parameters\n ----------\n constants : str\n Expressions for the new set of constants.\n verbose : boolean (optional)\n Toggle talkback. Default is True\n\n See Also\n --------\n transform\n Similar method except for axes.\n create_constant\n Add an individual constant.\n remove_constant\n Remove an individual constant.\n \"\"\"\n # create\n new = []\n current = {c.expression: c for c in self._constants}\n for expression in constants:\n constant = current.get(expression, Constant(self, expression))\n new.append(constant)\n self._constants = new\n # units\n for c in self._constants:\n if c.units is None:\n c.convert(c.variables[0].units)\n # finish\n self.flush()\n self._on_constants_updated()\n\n def create_constant(self, expression, *, verbose=True):\n \"\"\"Append a constant to the stored list.\n\n Parameters\n ----------\n expression : str\n Expression for the new constant.\n verbose : boolean (optional)\n Toggle talkback. Default is True\n\n See Also\n --------\n set_constants\n Remove and replace all constants.\n remove_constant\n Remove an individual constant.\n \"\"\"\n if expression in self.constant_expressions:\n wt_exceptions.ObjectExistsWarning.warn(expression)\n return self.constants[self.constant_expressions.index(expression)]\n constant = Constant(self, expression)\n if constant.units is None:\n constant.convert(constant.variables[0].units)\n self._constants.append(constant)\n self.flush()\n self._on_constants_updated()\n if verbose:\n print(\"Constant '{}' added\".format(constant.expression))\n return constant\n\n def remove_constant(self, constant, *, verbose=True):\n \"\"\"Remove a constant from the stored list.\n\n Parameters\n ----------\n constant : str or Constant or int\n Expression for the new constant.\n verbose : boolean (optional)\n Toggle talkback. 
Default is True\n\n See Also\n --------\n set_constants\n Remove and replace all constants.\n create_constant\n Add an individual constant.\n \"\"\"\n if isinstance(constant, (str, int)):\n constant_index = wt_kit.get_index(self.constant_expressions, constant)\n elif isinstance(constant, Constant):\n constant_index = wt_kit.get_index(self.constants, constant)\n constant = self._constants[constant_index]\n self._constants.pop(constant_index)\n self.flush()\n self._on_constants_updated()\n if verbose:\n print(\"Constant '{}' removed\".format(constant.expression))\n\n def zoom(self, factor, order=1, verbose=True):\n \"\"\"Zoom the data array using spline interpolation of the requested order.\n\n The number of points along each axis is increased by factor.\n See `scipy ndimage`__ for more info.\n\n __ http://docs.scipy.org/doc/scipy/reference/\n generated/scipy.ndimage.interpolation.zoom.html\n\n Parameters\n ----------\n factor : float\n The number of points along each axis will increase by this factor.\n order : int (optional)\n The order of the spline used to interpolate onto new points.\n verbose : bool (optional)\n Toggle talkback. Default is True.\n \"\"\"\n raise NotImplementedError\n import scipy.ndimage\n\n # axes\n for axis in self._axes:\n axis[:] = scipy.ndimage.interpolation.zoom(axis[:], factor, order=order)\n # channels\n for channel in self.channels:\n channel[:] = scipy.ndimage.interpolation.zoom(channel[:], factor, order=order)\n # return\n if verbose:\n print(\"data zoomed to new shape:\", self.shape)\n"
] | [
[
"scipy.interpolate.griddata",
"numpy.sum",
"scipy.interpolate.interp1d",
"numpy.diff",
"numpy.dtype",
"numpy.kaiser",
"numpy.trapz",
"numpy.meshgrid",
"numpy.nan_to_num",
"numpy.isfinite",
"numpy.ndindex",
"numpy.nanmean",
"scipy.ndimage.interpolation.zoom",
"numpy.expand_dims",
"numpy.isnan",
"numpy.around",
"numpy.linspace",
"numpy.ceil",
"scipy.interpolate.LinearNDInterpolator",
"numpy.all",
"numpy.prod",
"numpy.array",
"numpy.empty",
"numpy.gradient",
"numpy.result_type",
"numpy.full"
]
] |
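The data.py record closes with its API list above. Among those calls, numpy.kaiser is the one doing the real work in the Data.smooth method earlier in this record. As a minimal sketch of that per-axis smoothing step (the helper name kaiser_smooth_1d is mine, not part of WrightTools), the pad-then-convolve logic in one dimension looks like:

import numpy as np

def kaiser_smooth_1d(values, factor, beta=5.0):
    # Kaiser window of width 2*factor + 1, normalized to unit sum,
    # as in Data.smooth (which hard-codes beta = 5.0).
    w = np.kaiser(2 * factor + 1, beta)
    # Edge-pad by `factor` samples so 'valid'-mode convolution
    # returns an array of the original length.
    padded = np.pad(values, int(factor), mode="edge")
    return np.convolve(padded, w / w.sum(), mode="valid")

rng = np.random.default_rng(0)
noisy = np.sin(np.linspace(0, 2 * np.pi, 200)) + 0.1 * rng.standard_normal(200)
smoothed = kaiser_smooth_1d(noisy, factor=5)
assert smoothed.shape == noisy.shape

Data.smooth applies this same window along each axis in turn, transposing the axis of interest to the last position first, which is why a 1-D helper captures the core of the n-dimensional operation.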
ahmedsabie/tensorflow | [
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd"
] | [
"tensorflow/python/framework/func_graph.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"FuncGraph and related functionality.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections as py_collections\nimport itertools\nimport weakref\n\nimport numpy as np\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import execute\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.eager.graph_only_ops import graph_placeholder\nfrom tensorflow.python.framework import auto_control_deps\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.framework import type_spec\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import custom_gradient\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import memory\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import object_identity\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util import tf_decorator\n\nALLOWLIST_COLLECTIONS = [\n ops.GraphKeys.GLOBAL_VARIABLES,\n ops.GraphKeys.LOCAL_VARIABLES,\n ops.GraphKeys.TRAINABLE_VARIABLES,\n variable_scope._VARSTORE_KEY, # pylint: disable=protected-access\n variable_scope._VARSCOPESTORE_KEY # pylint: disable=protected-access\n]\n\n\n_EAGER_CONST_THRESHOLD = 128\n\n\nclass UnknownArgument(object):\n \"\"\"Signifies an argument which is not currently handled.\"\"\"\n pass\n\n\ndef convert_structure_to_signature(structure, arg_names=None):\n \"\"\"Convert a potentially nested structure to a signature.\n\n Args:\n structure: Structure to convert, where top level collection is a list or a\n tuple.\n arg_names: Optional list of arguments that has equal number of elements as\n `structure` and is used for naming corresponding TensorSpecs.\n\n Returns:\n Identical structure that has TensorSpec objects instead of Tensors and\n UnknownArgument instead of any unsupported types.\n \"\"\"\n def encode_arg(arg, path):\n \"\"\"A representation for this argument, for converting into signatures.\"\"\"\n if isinstance(arg, ops.Tensor):\n user_specified_name = None\n try:\n user_specified_name = compat.as_str(\n arg.op.get_attr(\"_user_specified_name\"))\n except ValueError:\n pass\n\n if path and user_specified_name and user_specified_name != path[0]:\n # The user has explicitly named 
the argument differently than the name\n # of the function argument.\n name = user_specified_name\n else:\n name = \"/\".join(str(p) for p in path)\n return tensor_spec.TensorSpec(arg.shape, arg.dtype, name)\n if isinstance(arg, composite_tensor.CompositeTensor):\n # TODO(b/133606651) Do we need to inject arg_name?\n return arg._type_spec # pylint: disable=protected-access\n if isinstance(arg, resource_variable_ops.BaseResourceVariable):\n name = \"/\".join(str(p) for p in path)\n return resource_variable_ops.VariableSpec(arg.shape, arg.dtype, name)\n if isinstance(arg, (\n int,\n float,\n bool,\n str,\n type(None),\n dtypes.DType,\n tensor_spec.TensorSpec,\n type_spec.TypeSpec,\n )):\n return arg\n return UnknownArgument()\n\n # We are using the flattened paths to name the TensorSpecs. We need an\n # explicit name for them downstream.\n flattened = nest.flatten_with_tuple_paths(structure)\n if arg_names:\n if len(arg_names) != len(structure):\n raise ValueError(\n \"Passed in arg_names don't match actual signature (%s).\" % arg_names)\n # Replace all top-level names with their actual arg_names. If a path before\n # was \"(2,'a',1)\", it will become \"(arg_names[2],'a',1)\".\n flattened = [\n ((arg_names[path[0]],) + path[1:], arg) for path, arg in flattened\n ]\n\n mapped = [encode_arg(arg, path) for path, arg in flattened]\n return nest.pack_sequence_as(structure, mapped)\n\n\nclass FuncGraph(ops.Graph):\n \"\"\"Graph representing a function body.\n\n Attributes:\n name: The name of the function.\n inputs: Placeholder tensors representing the inputs to this function. The\n tensors are in this FuncGraph. This represents \"regular\" inputs as well as\n captured inputs (i.e. the values of self.captures), with the regular\n inputs coming first.\n outputs: Tensors that will be returned by this function. The tensors are in\n this FuncGraph.\n control_outputs: Operations that must be executed before the function\n represented by this graph can be said to have been executed.\n structured_input_signature: A tuple of (args, kwargs), which are both\n possibly-nested python objects that were received by this function. Note\n that these structures might contain Python `None`s.\n structured_outputs: A possibly-nested python object which will be returned\n by this function. The Tensors in this structure are the same as those of\n self.outputs. Note that this structure might contain Python `None`s.\n variables: Variables that should be watched during function execution.\n outer_graph: The graph this function is defined in. May be another FuncGraph\n or the global default Graph.\n captures: Maps external tensor -> internal tensor (i.e. input placeholder).\n The entries are in the order they were captured.\n control_captures: Set of external ops on which this graph has a control\n dependency.\n seed: The graph-level random seed.\n capture_by_value: If True, the func graph will capture Variables by value\n instead of reference.\n \"\"\"\n\n def __init__(self, name, collections=None, capture_by_value=None):\n \"\"\"Construct a new FuncGraph.\n\n The graph will inherit its graph key, collections, seed, and distribution\n strategy stack from the current context or graph.\n\n Args:\n name: the name of the function.\n collections: a dictionary of collections this FuncGraph should start\n with. 
If not specified (None), the FuncGraph will read (but not write\n to) the outer graph's collections that are not allowlisted, and both\n read and write to the outer graph's collections that are allowlisted.\n The current allowlisted collections are the global variables, the\n local variables, and the trainable variables.\n Defaults to None.\n capture_by_value: An optional boolean. If True, the func graph will\n capture Variables by value instead of reference. By default inherit\n from outer graphs, and failing that will default to False.\n \"\"\"\n super(FuncGraph, self).__init__()\n\n self.name = name\n self.inputs = []\n self.outputs = []\n self.control_outputs = []\n self.control_captures = set()\n self.structured_input_signature = None\n self.structured_outputs = None\n self._weak_variables = []\n self._watched_variables = object_identity.ObjectIdentityWeakSet()\n self.is_control_flow_graph = False\n\n outer_graph = ops.get_default_graph()\n self._weak_outer_graph = weakref.ref(outer_graph)\n while outer_graph.building_function:\n outer_graph = outer_graph.outer_graph\n # If self._weak_outer_graph is deleted, we revert to the outermost Graph\n # active when the FuncGraph was traced. This will not be a FuncGraph.\n self._fallback_outer_graph = outer_graph\n self._captures = py_collections.OrderedDict()\n # If not None, records the names of output args of this function. Used to\n # preserve the output names in the signature of a serialized+deserialized\n # function. Private at the moment mostly because it's often out of date.\n self._output_names = None\n # Maps arbitrary key -> (closure, nest of placeholders), where at function\n # call time the value of closure() will be used to feed the nest of\n # placeholders.\n self._deferred_captures = py_collections.OrderedDict()\n # Inherit capture-by-value from outer graph.\n if capture_by_value is not None:\n self.capture_by_value = capture_by_value\n elif self.outer_graph is not None and isinstance(\n self.outer_graph, FuncGraph):\n self.capture_by_value = self.outer_graph.capture_by_value\n else:\n self.capture_by_value = False\n\n self._building_function = True\n # Map from resource tensor name to last op (in program order) which uses\n # this tensor. Used to enforce that execution order matches program order\n # for resource tensors.\n self._last_op_using_resource_tensor = {}\n\n graph = self.outer_graph\n\n if context.executing_eagerly():\n self.seed = context.global_seed()\n # [for tf-data user migration from TF1.0 to 2.0] seed_used keep track of\n # any None op_seed for random_op in the function, in which case we end up\n # using function seed, which could be unintended behavior for the op.\n self._seed_used = False\n else:\n self.seed = graph.seed\n self._seed_used = False\n # TODO(allenl): Figure out if we can remove colocation stack\n # specialization (currently used in cond_v2), here and in the cache key.\n self._colocation_stack = graph._colocation_stack.copy() # pylint: disable=protected-access\n\n if collections is None:\n for collection_name in graph.get_all_collection_keys():\n if collection_name not in ALLOWLIST_COLLECTIONS:\n self._collections[collection_name] = graph.get_collection(\n collection_name)\n for collection_name in ALLOWLIST_COLLECTIONS:\n self._collections[collection_name] = graph.get_collection_ref(\n collection_name)\n else:\n self._collections = collections\n\n # Keep track of whether this FuncGraph is exportable to SavedModel. 
Use\n # `graph.mark_as_unsaveable(reason)` to mark this FuncGraph and any\n # dependent functions as unsaveable.\n self._saveable = True\n self._saving_errors = set()\n\n # Keep track of callbacks to run when this graph exits default scope\n self._scope_exit_callbacks = None\n\n def __str__(self):\n return \"FuncGraph(name=%s, id=%s)\" % (self.name, id(self))\n\n def watch_variable(self, v):\n \"\"\"Marks the variable v as accessed while building this graph.\"\"\"\n while self is not None and isinstance(self, FuncGraph):\n self._watched_variables.add(v)\n self = self.outer_graph\n\n def capture_call_time_value(self, closure, spec, key=None):\n \"\"\"Creates a placeholder which at call time has the value closure().\n\n Useful, for example, to respect TensorFlow context managers, which are often\n dynamically scoped.\n\n Args:\n closure: function which takes no arguments, to be evaluated at function\n call time, returning a nest of tensors compatible with `spec`.\n spec: nest of TypeSpec for the value to capture.\n key: optional. If not None, multiple calls to lazy_capture with the same\n key in the same graph will return the same placeholder, and the\n first closure will be used at function call time.\n\n Returns:\n Nest of placeholders which, at function call time, will be fed with the\n result of calling closure().\n\n Raises:\n ValueError: at function call time, if the return value of closure() is\n not compatible with `spec`.\n \"\"\"\n if key is None:\n key = object()\n if key not in self._deferred_captures:\n\n def convert_to_placeholder(s):\n if not isinstance(s, tensor_spec.DenseSpec):\n raise TypeError(\n \"Expected a nest of `TypeSpec` objects, found %s of type %s.\" %\n (s, type(s)))\n return array_ops.placeholder(dtype=s.dtype, shape=s.shape)\n\n placeholder = nest.map_structure(\n convert_to_placeholder, spec, expand_composites=True)\n\n def wrapped_closure():\n ret_nest = closure()\n nest.assert_same_structure(spec, ret_nest, expand_composites=True)\n # This uses the tensor dtype defined in `spec` when converting values\n # in `ret_nest` to tensors.\n # pylint: disable=protected-access\n y = nest.map_structure(lambda s, r: s._to_components(r), spec, ret_nest,\n expand_composites=False)\n # pylint: enable=protected-access\n return nest.flatten(y, expand_composites=True)\n\n self._deferred_captures[key] = (wrapped_closure, placeholder)\n return self._deferred_captures[key][1]\n\n def control_dependencies(self, control_inputs):\n \"\"\"Handles control dependencies.\n\n FuncGraph wraps Graph's control_dependencies logic by first filtering out\n any external tensors / operations and storing them in the graph's\n control_captures member. Any consumers of this function graph must then\n decide how to handle the control captures.\n\n Args:\n control_inputs: A list of `Operation` or `Tensor` objects which\n must be executed or computed before running the operations\n defined in the context. 
Can also be `None` to clear the control\n dependencies.\n\n Returns:\n A context manager that specifies control dependencies for all\n operations constructed within the context.\n\n Raises:\n TypeError: If `control_inputs` is not a list of `Operation` or\n `Tensor` objects.\n \"\"\"\n if control_inputs is None:\n return super(FuncGraph, self).control_dependencies(control_inputs)\n\n filtered_control_inputs = []\n for c in control_inputs:\n # Check for _UnreadVariable\n if (isinstance(c, ops.IndexedSlices) or\n (hasattr(c, \"_handle\") and hasattr(c, \"op\"))):\n c = c.op\n graph_element = ops._as_graph_element(c) # pylint: disable=protected-access\n if graph_element is None:\n graph_element = c\n if graph_element is not None and getattr(\n graph_element, \"graph\", None) is not self:\n self.control_captures.add(graph_element)\n else:\n filtered_control_inputs.append(graph_element)\n return super(FuncGraph, self).control_dependencies(filtered_control_inputs)\n\n def as_default(self):\n outer_cm = super(FuncGraph, self).as_default()\n\n @tf_contextlib.contextmanager\n def inner_cm():\n \"\"\"Context manager for copying distribute.Strategy scope information.\"\"\"\n # pylint: disable=protected-access\n # TODO(b/112906995, nareshmodi): distribution strategy depends on\n # inheriting this stack from the default graph even in eager mode. Maybe\n # it should be part of the eager context? This would also allow us to\n # remove a get_default_graph() call from the function cache lookup.\n graph = ops.get_default_graph()\n old_strategy_stack = self._distribution_strategy_stack\n self._distribution_strategy_stack = list(\n graph._distribution_strategy_stack)\n\n # We ignore device placements from any outer scopes while tracing the\n # function when possible, to avoid hard-coding them in the function\n # graph. \"Default\" placements come from the PartitionedCallOp's placement,\n # so that the same trace of the Python function may be placed on several\n # different devices and saved functions may be placed on new devices when\n # restored.\n # However, we need to preserve the outer device stack in the following\n # cases in non eager context:\n # 1. device stack is callable\n # 2. 
When using distribution strategy with legacy graph mode.\n old_device_stack = self._device_function_stack\n if (not context.executing_eagerly() and\n (device_stack_has_callable(graph._device_function_stack) or\n (self._distribution_strategy_stack and\n not ops.executing_eagerly_outside_functions()))):\n # Hard-code devices from device functions in the function body\n self._device_function_stack = graph._device_function_stack.copy()\n\n old_creator_stack = self._variable_creator_stack\n self._variable_creator_stack = graph._variable_creator_stack\n # Inherit the graph key, since this is used for matching variables in\n # optimizers.\n old_graph_key = self._graph_key\n self._graph_key = graph._graph_key\n # pylint: enable=protected-access\n\n old_scope_exit_callbacks = self._scope_exit_callbacks\n self._scope_exit_callbacks = []\n\n with outer_cm as g:\n try:\n yield g\n finally:\n try:\n for fn in self._scope_exit_callbacks:\n fn()\n finally:\n self._scope_exit_callbacks = old_scope_exit_callbacks\n self._distribution_strategy_stack = old_strategy_stack\n self._device_function_stack = old_device_stack\n self._variable_creator_stack = old_creator_stack\n self._graph_key = old_graph_key\n return inner_cm()\n\n @property\n def outer_graph(self):\n \"\"\"The Graph this FuncGraph is nested in.\n\n Functions may capture Tensors from graphs they are nested in (transitive).\n\n Returns:\n A Graph object. Initially set to the current default graph when the\n FuncGraph was created. If the previous `outer_graph` was deleted because\n the function that owns it was deleted, `outer_graph` is reset to the\n outermost default graph active when the FuncGraph was created. This\n FuncGraph won't have captured anything from the new `outer_graph` (and\n likely not from the previous setting, since that would have created a\n strong reference), but it is returned so that FuncGraphs always have a\n parent.\n \"\"\"\n current = self._weak_outer_graph()\n if current is None:\n return self._fallback_outer_graph\n return current\n\n @outer_graph.setter\n def outer_graph(self, new_outer_graph):\n \"\"\"Sets `outer_graph` to `new_outer_graph`.\"\"\"\n self._weak_outer_graph = weakref.ref(new_outer_graph)\n\n @property\n def output_types(self):\n return [t.dtype for t in self.outputs]\n\n @property\n def output_shapes(self):\n return [t.shape for t in self.outputs]\n\n @property\n def trainable_variables(self):\n \"\"\"A sequence of trainable variables accessed by this FuncGraph.\n\n Note that functions keep only weak references to variables. Calling the\n function after a variable it accesses has been deleted is an error.\n\n Returns:\n Sequence of trainable variables for this func graph.\n \"\"\"\n return tuple(v for v in self.variables if v.trainable)\n\n @property\n def variables(self):\n \"\"\"A sequence of variables accessed by this FuncGraph.\n\n Note that functions keep only weak references to variables. Calling the\n function after a variable it accesses has been deleted is an error.\n\n Returns:\n Sequence of variables for this func graph.\n \"\"\"\n def deref(weak_v):\n v = weak_v()\n if v is None:\n raise AssertionError(\n \"Called a function referencing variables which have been deleted. \"\n \"This likely means that function-local variables were created and \"\n \"not referenced elsewhere in the program. 
This is generally a \"\n \"mistake; consider storing variables in an object attribute on \"\n \"first call.\")\n return v\n\n return tuple(deref(v) for v in self._weak_variables)\n\n @variables.setter\n def variables(self, var_list):\n self._weak_variables = [weakref.ref(v) for v in var_list]\n\n def _capture_by_value(\n self,\n op_type,\n inputs,\n dtypes, # pylint: disable=redefined-outer-name\n input_types=None,\n name=None,\n attrs=None,\n op_def=None,\n compute_device=True):\n # When capturing by value, do the read outside\n reverse_captures = dict((id(v), k) for k, v in self.captures)\n uncaptured_inputs = [reverse_captures.get(id(t), t) for t in inputs]\n with ops.init_scope():\n if context.executing_eagerly():\n attr_list = (\"dtype\", int(attrs[\"dtype\"].type))\n value, = execute.execute(\n compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,\n context.context())\n else:\n op = ops.get_default_graph()._create_op_internal( # pylint: disable=protected-access\n op_type,\n uncaptured_inputs,\n dtypes,\n input_types,\n name,\n attrs,\n op_def,\n compute_device)\n value = op.outputs[0]\n captured_value = self.capture(value)\n return captured_value.op\n\n def _create_op_internal(\n self,\n op_type,\n inputs,\n dtypes=None, # pylint: disable=redefined-outer-name\n input_types=None,\n name=None,\n attrs=None,\n op_def=None,\n compute_device=True):\n \"\"\"Like Graph.create_op, except handles external input tensors.\n\n This overload adds functionality to create_op to \"capture\" any external\n input tensors, i.e. tensors from the eager context or outer function graphs\n if this is a nested function. See `capture` for more information.\n\n Args:\n op_type: The `Operation` type to create. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n inputs: A list of `Tensor` objects that will be inputs to the `Operation`.\n dtypes: (Optional) A list of `DType` objects that will be the types of the\n tensors that the operation produces.\n input_types: (Optional.) A list of `DType`s that will be the types of\n the tensors that the operation consumes. By default, uses the base\n `DType` of each input in `inputs`. Operations that expect\n reference-typed inputs must specify `input_types` explicitly.\n name: (Optional.) A string name for the operation. If not specified, a\n name is generated based on `op_type`.\n attrs: (Optional.) A dictionary where the key is the attribute name (a\n string) and the value is the respective `attr` attribute of the\n `NodeDef` proto that will represent the operation (an `AttrValue`\n proto).\n op_def: (Optional.) The `OpDef` proto that describes the `op_type` that\n the operation will have.\n compute_device: (Optional.) If True, device functions will be executed\n to compute the device property of the Operation.\n\n Returns:\n An `Operation` object.\n \"\"\"\n if self.capture_by_value and op_type in [\"ReadVariableOp\",\n \"ResourceGather\"]:\n return self._capture_by_value(op_type, inputs, dtypes, input_types, name,\n attrs, op_def, compute_device)\n\n # This capturing logic interacts poorly with control flow contexts which\n # want to replace inputs of ops far too late in the process. This can lead\n # the context to get confused and try to create an Enter for an Enter. 
We\n # can detect this here and skip the additional Enter which can confuse loop\n # validation logic.\n if op_type == \"Enter\" and inputs[0].op.type == \"Enter\":\n if inputs[0].op.get_attr(\"frame_name\") == attrs[\"frame_name\"].s:\n return inputs[0].op\n # Calling AddValue on the control flow contexts to force creation of the\n # backward accumulators in the original graph before we create placeholders\n # to capture the inputs.\n ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access\n # Use a different list to avoid modifying the original inputs list.\n captured_inputs = []\n for inp in inputs:\n # TPU Estimator defines a control flow context with no AddValue method.\n if ctxt is not None and hasattr(ctxt, \"AddValue\"):\n inp = ctxt.AddValue(inp)\n inp = self.capture(inp)\n captured_inputs.append(inp)\n return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access\n op_type, captured_inputs, dtypes, input_types, name, attrs, op_def,\n compute_device)\n\n def capture(self, tensor, name=None, shape=None):\n \"\"\"Captures `tensor` if it's external to this graph.\n\n If `tensor` is from a different graph, returns a placeholder for it.\n `tensor` and the placeholder will appear in self.captures, and the\n placeholder will appear in self.inputs. Multiple calls to this method with\n the same `tensor` argument will return the same placeholder. If `tensor` is\n from this graph, returns `tensor`.\n\n Args:\n tensor: Tensor. May be from this FuncGraph or a different graph.\n name: Optional name if a placeholder is created.\n shape: Optional shape if a placeholder is created.\n\n Returns:\n Tensor from this FuncGraph.\n\n Raises:\n InaccessibleTensorError: if any tensors are accessed in a manner that\n bypasses the mechanisms required for the data dependencies to be correctly\n wired.\n \"\"\"\n if isinstance(tensor, ops.EagerTensor):\n if name is None:\n name = str(ops.uid())\n\n # Small EagerTensors are captured with Const ops\n if (tensor.dtype in dtypes.TF_VALUE_DTYPES and\n np.prod(tensor.shape) <= _EAGER_CONST_THRESHOLD):\n return self.capture_eager_tensor(tensor, name)\n\n # Large EagerTensors and resources are captured with Placeholder ops\n return self._capture_helper(tensor, name, shape)\n if tensor.graph is not self:\n if name is None:\n name = tensor.op.name\n inner_graph = tensor.graph\n while inner_graph is not None and isinstance(inner_graph, FuncGraph):\n if inner_graph is self:\n raise errors.InaccessibleTensorError(\n \"The tensor '%s' cannot be accessed here: it is defined\"\n \" in another function or code block. Use return values,\"\n \" explicit Python locals or TensorFlow collections to access\"\n \" it. 
Defined in: %s; accessed from: %s.\\n\"\n % (tensor, tensor.graph, self))\n inner_graph = inner_graph.outer_graph\n return self._capture_helper(tensor, name)\n return tensor\n\n def _capture_helper(self, tensor, name, shape=None):\n capture = self._captures.get(id(tensor))\n if capture is None:\n placeholder = _create_substitute_placeholder(\n tensor, name=name, dtype=tensor.dtype, shape=shape)\n # Record the composite device as an attribute to the placeholder.\n # This attribute would be propogated into the arg_attr of the FunctionDef.\n # Currently, a packed eager tensor is always placed on a CompositeDevice.\n if isinstance(tensor, ops.EagerTensor) and tensor.is_packed:\n placeholder.op._set_attr( # pylint: disable=protected-access\n \"_composite_device\",\n attr_value_pb2.AttrValue(s=compat.as_bytes(tensor.device)))\n self.add_capture(tensor, placeholder)\n else:\n placeholder = capture[1]\n tape.record_operation(\"captured_value\", [placeholder], [tensor],\n backward_function=lambda x: [x],\n forward_function=lambda x: [x])\n return placeholder\n\n @property\n def captures(self):\n \"\"\"Order list of tuples containing external and internal captures.\"\"\"\n return self._captures.values()\n\n def add_capture(self, tensor, placeholder):\n \"\"\"Capture a specific tensor and utilize the provided placeholder.\n\n Args:\n tensor: Tensor to captures.\n placeholder: Provided placeholder for the tensor.\n \"\"\"\n self._captures[id(tensor)] = (tensor, placeholder)\n self.inputs.append(placeholder)\n\n def replace_capture(self, tensor, placeholder):\n \"\"\"Replace already existing capture.\"\"\"\n self._captures[id(tensor)] = (tensor, placeholder)\n\n def reset_captures(self, capture_list):\n \"\"\"Set the captures with the provided list of captures & placeholder.\"\"\"\n self._captures = py_collections.OrderedDict()\n for tensor, placeholder in capture_list:\n self._captures[id(tensor)] = (tensor, placeholder)\n\n def pop_capture(self, tensor):\n \"\"\"Remove the capture and return the generated placeholder.\"\"\"\n capture = self._captures.pop(id(tensor), None)\n if capture is None:\n return None\n\n return capture[1]\n\n def clear_captures(self):\n # TODO(b/115366440): Delete this method when a custom OrderedDict is added.\n # Clearing captures using clear() leaves some cycles around.\n while self._captures:\n self._captures.popitem()\n memory.dismantle_ordered_dict(self._captures)\n while self._deferred_captures:\n self._deferred_captures.popitem()\n memory.dismantle_ordered_dict(self._deferred_captures)\n\n def capture_distributed_variable(self, variable, placeholder):\n \"\"\"Add given distributed variable to captures with given placeholder.\"\"\"\n self._captures[id(variable)] = (variable, placeholder)\n tape.record_operation(\"captured_value\", [placeholder], [variable],\n backward_function=lambda x: [x],\n forward_function=lambda x: [x])\n\n def capture_eager_tensor(self, tensor, name):\n capture = self._captures.get(id(tensor))\n if capture is None:\n # We clear all control dependencies and place the Const op on the same\n # device as the source tensor. The device placement may be relaxed at\n # a later date.\n with ops.control_dependencies(None), self.device(tensor.device):\n constant_value = tensor_util.constant_value(tensor)\n if constant_value is None:\n # Some eager tensors, e.g. parallel tensors, are not convertible to a\n # single constant. 
We'll use a placeholder for this case.\n return self._capture_helper(tensor, name)\n graph_const = constant_op.constant(constant_value, dtype=tensor.dtype,\n shape=tensor.shape, name=name)\n self.add_capture(tensor, graph_const)\n else:\n graph_const = capture[1]\n tape.record_operation(\"captured_value\", [graph_const], [tensor],\n backward_function=lambda x: [x],\n forward_function=lambda x: [x])\n return graph_const\n\n def captured(self, tensor):\n \"\"\"Check if the specified tensor has been captured.\"\"\"\n return id(tensor) in self._captures\n\n @property\n def external_captures(self):\n \"\"\"External tensors captured by this function.\"\"\"\n return [c[0] for c in self._captures.values()]\n\n @property\n def internal_captures(self):\n \"\"\"Placeholders in this function corresponding captured tensors.\"\"\"\n return [c[1] for c in self._captures.values()]\n\n @property\n def deferred_external_captures(self):\n \"\"\"Ordered nest of tensors whose placeholders will be fed at call time.\"\"\"\n return [c[0] for c in self._deferred_captures.values()]\n\n @property\n def deferred_internal_captures(self):\n \"\"\"List of nest of placeholders which at call time will be fed.\"\"\"\n return [c[1] for c in self._deferred_captures.values()]\n\n @property\n def variable_captures(self):\n \"\"\"Map of python object ids of variables to variables which are captured.\"\"\"\n return {\n id(self._captures[id(v)][1]): v\n for v in self.variables\n if id(v) in self._captures\n }\n\n def mark_as_unsaveable(self, error_message):\n \"\"\"Marks this FuncGraph as unsaveable.\n\n Any attempts to export this FuncGraph will raise an error with the specified\n message.\n\n Args:\n error_message: List or string containing the error message to be raised\n when saving this FuncGraph to SavedModel.\n \"\"\"\n self._saveable = False\n if isinstance(error_message, str):\n error_message = [error_message]\n self._saving_errors.update(error_message)\n\n @property\n def saveable(self):\n \"\"\"Returns whether this FuncGraph is saveable.\"\"\"\n return self._saveable\n\n @property\n def saving_errors(self):\n \"\"\"Returns set of errors preventing this FuncGraph from being saved.\"\"\"\n return self._saving_errors\n\n def _add_scope_exit_callback(self, fn):\n \"\"\"Add a function to call when this graph exits the default scope.\"\"\"\n if not callable(fn):\n raise TypeError(\"fn is not callable: {}\".format(fn))\n if self._scope_exit_callbacks is None:\n raise RuntimeError(\n \"Attempting to add a scope exit callback, but the default graph is \"\n \"not the context scope graph. Did you forget to call \"\n \"'with graph.as_default(): ...'?\")\n self._scope_exit_callbacks.append(fn)\n\n\ndef func_graph_from_py_func(name,\n python_func,\n args,\n kwargs,\n signature=None,\n func_graph=None,\n autograph=False,\n autograph_options=None,\n add_control_dependencies=True,\n arg_names=None,\n op_return_value=None,\n collections=None,\n capture_by_value=None,\n override_flat_arg_shapes=None):\n \"\"\"Returns a `FuncGraph` generated from `python_func`.\n\n Args:\n name: an identifier for the function.\n python_func: the Python function to trace.\n args: the positional args with which the Python function should be called;\n ignored if a signature is provided.\n kwargs: the keyword args with which the Python function should be called;\n ignored if a signature is provided.\n signature: a possibly nested sequence of `TensorSpecs` specifying the shapes\n and dtypes of the arguments. 
When a signature is provided, `args` and\n `kwargs` are ignored, and `python_func` is traced with Tensors conforming\n to `signature`. If `None`, the shapes and dtypes are inferred from the\n inputs.\n func_graph: Optional. An instance of FuncGraph. If provided, we will use\n this graph else a new one is built and returned.\n autograph: whether to use autograph to compile `python_func`.\n See https://www.tensorflow.org/guide/autograph for more information.\n autograph_options: additional knobs to control when `autograph=True`.\n See https://www.tensorflow.org/guide/autograph for more information.\n add_control_dependencies: If True, automatically adds control dependencies\n to ensure program order matches execution order and stateful ops always\n execute.\n arg_names: Optional list of argument names, used to give input placeholders\n recognizable names.\n op_return_value: Optional. A Tensor. If set and `python_func` returns\n Operations, those return values will be replaced with this value. If not\n set, returning an Operation triggers an error.\n collections: a dictionary of collections this FuncGraph should start\n with. If not specified (None), the FuncGraph will read (but not write to)\n the outer graph's collections that are not allowlisted, and both\n read and write to the outer graph's collections that are allowlisted.\n The current allowlisted collections are the global variables, the\n local variables, and the trainable variables.\n Defaults to None.\n capture_by_value: An optional boolean. If True, the func graph will capture\n Variables by value instead of reference. By default inherit from outer\n graphs, and failing that will default to False.\n override_flat_arg_shapes: An optional list of instances that are either\n `None` or `TensorShape`. The length must match that of\n `nest.flatten((args, kwargs), expand_composites=True)`. 
The entries\n containing value `None` must match entries in flattened arguments\n containing non-tensors, while entries containing a `TensorShape` must\n match entries in the flattened arguments containing tensors.\n\n Returns:\n A FuncGraph.\n\n Raises:\n TypeError: If any of `python_func`'s return values is neither `None` nor a\n `Tensor`.\n ValueError: If both `signature` and `override_flat_arg_shapes` are\n passed in.\n \"\"\"\n if op_return_value is not None:\n assert isinstance(op_return_value, ops.Tensor), op_return_value\n if func_graph is None:\n func_graph = FuncGraph(name, collections=collections,\n capture_by_value=capture_by_value)\n assert isinstance(func_graph, FuncGraph)\n if add_control_dependencies:\n deps_control_manager = auto_control_deps.AutomaticControlDependencies()\n else:\n deps_control_manager = ops.NullContextmanager()\n\n with func_graph.as_default(), deps_control_manager as deps_ctx:\n current_scope = variable_scope.get_variable_scope()\n default_use_recource = current_scope.use_resource\n current_scope.set_use_resource(True)\n\n if signature is not None and override_flat_arg_shapes is not None:\n raise ValueError(\n \"Passed both signature and override_flat_arg_shapes: %s and %s.\"\n % (signature, override_flat_arg_shapes))\n\n if signature is not None:\n args = signature\n kwargs = {}\n\n # Creates and names placeholders for all arguments.\n if override_flat_arg_shapes is not None:\n flat_args = nest.flatten(args, expand_composites=True)\n arg_shapes = override_flat_arg_shapes[:len(flat_args)]\n kwarg_shapes = override_flat_arg_shapes[len(flat_args):]\n else:\n arg_shapes = None\n kwarg_shapes = None\n func_args = _get_defun_inputs_from_args(\n args, arg_names, flat_shapes=arg_shapes)\n func_kwargs = _get_defun_inputs_from_kwargs(\n kwargs, flat_shapes=kwarg_shapes)\n\n # Convert all Tensors into TensorSpecs before saving the structured inputs.\n # If storing pure concrete functions that are not called through polymorphic\n # functions, we don't have access to FunctionSpec, so we need to call the\n # TensorSpecs by their `arg_names` for later binding.\n func_graph.structured_input_signature = (\n convert_structure_to_signature(func_args, arg_names),\n convert_structure_to_signature(func_kwargs))\n\n flat_func_args = nest.flatten(func_args, expand_composites=True)\n flat_func_kwargs = nest.flatten(func_kwargs, expand_composites=True)\n # Temporarily set inputs to allow graph building code to inspect\n # them. Reassigned below.\n func_graph.inputs = [arg for arg in flat_func_args + flat_func_kwargs\n if isinstance(arg, ops.Tensor)]\n\n # Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.\n # Variables to help check whether mutation happens in calling the function\n # Copy the recursive list, tuple and map structure, but not base objects\n func_args_before = nest.pack_sequence_as(func_args, flat_func_args,\n expand_composites=True)\n func_kwargs_before = nest.pack_sequence_as(\n func_kwargs, flat_func_kwargs, expand_composites=True)\n\n def convert(x):\n \"\"\"Converts a function output to a Tensor.\"\"\"\n if x is None:\n return None\n if op_return_value is not None and isinstance(x, ops.Operation):\n # TODO(b/79881896): we currently can't capture external control deps, so\n # this won't work if x needs to be captured (i.e. 
if python_func returns\n # captured Operations).\n with ops.control_dependencies([x]):\n x = array_ops.identity(op_return_value)\n elif not isinstance(x, tensor_array_ops.TensorArray):\n try:\n x = ops.convert_to_tensor_or_composite(x)\n except (ValueError, TypeError):\n raise TypeError(\n \"To be compatible with tf.eager.defun, Python functions \"\n \"must return zero or more Tensors; in compilation of %s, found \"\n \"return value of type %s, which is not a Tensor.\" %\n (str(python_func), type(x)))\n if add_control_dependencies:\n x = deps_ctx.mark_as_return(x)\n return x\n\n try:\n if autograph:\n from tensorflow.python import autograph # pylint: disable=g-import-not-at-top\n _, original_func = tf_decorator.unwrap(python_func)\n\n def wrapper(*args, **kwargs):\n \"\"\"Calls a converted version of original_func.\"\"\"\n # TODO(mdan): Push this block higher in tf.function's call stack.\n try:\n return autograph.converted_call(\n original_func,\n args,\n kwargs,\n options=autograph.ConversionOptions(\n recursive=True,\n optional_features=autograph_options,\n user_requested=True,\n ))\n except Exception as e: # pylint:disable=broad-except\n if hasattr(e, \"ag_error_metadata\"):\n raise e.ag_error_metadata.to_exception(e)\n else:\n raise\n\n # Wrapping around a decorator allows checks like tf_inspect.getargspec\n # to be accurate.\n converted_func = tf_decorator.make_decorator(original_func, wrapper)\n python_func = tf_decorator.rewrap(python_func, original_func,\n converted_func)\n\n else:\n _, original_func = tf_decorator.unwrap(python_func)\n\n func_outputs = python_func(*func_args, **func_kwargs)\n\n # invariant: `func_outputs` contains only Tensors, CompositeTensors,\n # TensorArrays and `None`s.\n func_outputs = nest.map_structure(convert, func_outputs,\n expand_composites=True)\n\n check_mutation(func_args_before, func_args, original_func)\n check_mutation(func_kwargs_before, func_kwargs, original_func)\n finally:\n current_scope.set_use_resource(default_use_recource)\n\n # Variables in `func_args`, `func_kwargs` should be explicit inputs\n # to the function, not captured inputs.\n graph_variables = list(func_graph._watched_variables) # pylint: disable=protected-access\n arg_variables = object_identity.ObjectIdentitySet()\n inputs = []\n for arg in (nest.flatten(func_args, expand_composites=True) +\n nest.flatten(func_kwargs, expand_composites=True)):\n if isinstance(arg, resource_variable_ops.BaseResourceVariable):\n # Even if an argument variable was not used in the function, we've\n # already manually captured the resource Tensor when creating argument\n # placeholders.\n resource_placeholder = func_graph.pop_capture(arg.handle)\n if resource_placeholder is None:\n continue\n arg_variables.add(arg)\n inputs.append(resource_placeholder)\n elif isinstance(arg, ops.Tensor):\n inputs.append(arg)\n variables = [v for v in graph_variables if v not in arg_variables]\n func_graph.inputs = (\n inputs + func_graph.internal_captures + nest.flatten(\n func_graph.deferred_internal_captures, expand_composites=True))\n func_graph.structured_outputs = func_outputs\n # Returning a closed-over tensor does not trigger convert_to_tensor.\n func_graph.outputs.extend(\n func_graph.capture(x)\n for x in flatten(func_graph.structured_outputs)\n if x is not None)\n\n func_graph.variables = variables\n\n if add_control_dependencies:\n func_graph.control_outputs.extend(deps_control_manager.ops_which_must_run)\n func_graph.collective_manager_ids_used = (\n 
deps_control_manager.collective_manager_ids_used)\n\n return func_graph\n\n\ndef maybe_captured(tensor):\n \"\"\"If t is a captured value placeholder, returns the original captured value.\n\n Args:\n tensor: Tensor.\n\n Returns:\n A tensor, potentially from a different Graph/FuncGraph.\n \"\"\"\n if (not isinstance(tensor, ops.EagerTensor) and\n tensor.op.graph.building_function and tensor.op.type == \"Placeholder\"):\n for input_t, placeholder_t in tensor.op.graph.captures:\n if tensor == placeholder_t:\n return maybe_captured(input_t)\n # pylint: enable=protected-access\n return tensor\n\n\ndef device_stack_has_callable(device_stack):\n \"\"\"Checks whether a device stack contains a callable.\"\"\"\n return any(callable(spec._device_name_or_function) # pylint: disable=protected-access\n for spec in device_stack.peek_objs())\n\n\ndef check_mutation(n1, n2, func):\n \"\"\"Check if two list of arguments are exactly the same.\"\"\"\n func_name = getattr(func, \"__name__\", func)\n\n errmsg = (\"{}() should not modify its Python input arguments.\"\n \" Check if it modifies any lists or dicts passed as\"\n \" arguments. Modifying a copy is allowed.\".format(func_name))\n try:\n # TODO(mdan): Compare more robustly so that argument names can be reported.\n nest.assert_same_structure(n1, n2, expand_composites=True)\n except ValueError:\n raise ValueError(errmsg)\n\n for arg1, arg2 in zip(nest.flatten(n1, expand_composites=True),\n nest.flatten(n2, expand_composites=True)):\n if arg1 is not arg2:\n raise ValueError(errmsg)\n\n\n# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.\ndef flatten(sequence):\n \"\"\"Like nest.flatten w/ expand_composites, but returns flow for TensorArrays.\n\n Args:\n sequence: A nested structure of Tensors, CompositeTensors, and\n TensorArrays.\n\n Returns:\n A list of tensors.\n \"\"\"\n flat_sequence = nest.flatten(sequence, expand_composites=True)\n return [\n item.flow if isinstance(item, tensor_array_ops.TensorArray) else item\n for item in flat_sequence]\n\n\n# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.\ndef pack_sequence_as(structure, flat_sequence):\n \"\"\"Like `nest.pack_sequence_as` but also builds TensorArrays from flows.\n\n Args:\n structure: The structure to pack into. 
May contain Tensors,\n CompositeTensors, or TensorArrays.\n flat_sequence: An iterable containing tensors.\n\n Returns:\n A nested structure.\n\n Raises:\n AssertionError if `structure` and `flat_sequence` are not compatible.\n \"\"\"\n flat_sequence = list(flat_sequence)\n flattened_structure = nest.flatten(structure, expand_composites=True)\n if len(flattened_structure) != len(flat_sequence):\n raise ValueError(\"Mismatch in element count\")\n for i in range(len(flat_sequence)):\n if isinstance(flattened_structure[i], tensor_array_ops.TensorArray):\n flat_sequence[i] = tensor_array_ops.build_ta_with_new_flow(\n old_ta=flattened_structure[i], flow=flat_sequence[i])\n return nest.pack_sequence_as(structure, flat_sequence, expand_composites=True)\n\n\ndef _create_substitute_placeholder(value, name=None, dtype=None, shape=None):\n \"\"\"Creates a placeholder for `value` and propagates shape info to it.\"\"\"\n # Note: setting ops.control_dependencies(None) ensures we always put\n # capturing placeholders outside of any control flow context.\n if shape is None:\n shape = value.shape\n with ops.control_dependencies(None):\n placeholder = graph_placeholder(\n dtype=dtype or value.dtype, shape=shape, name=name)\n custom_gradient.copy_handle_data(value, placeholder)\n return placeholder\n\n\ndef _get_defun_inputs_from_args(args, names, flat_shapes=None):\n \"\"\"Maps Python function positional args to graph-construction inputs.\"\"\"\n return _get_defun_inputs(\n args, names, structure=args, flat_shapes=flat_shapes)\n\n\ndef _get_composite_tensor_spec(x):\n \"\"\"Returns the TypeSpec for x if it's a composite tensor, or x otherwise.\"\"\"\n return (x._type_spec # pylint: disable=protected-access\n if isinstance(x, composite_tensor.CompositeTensor) else x)\n\n\ndef _get_defun_inputs(args, names, structure, flat_shapes=None):\n \"\"\"Maps python function args to graph-construction inputs.\n\n Args:\n args: A flat list of user-specified arguments.\n names: A list of strings with user-specified argument names, same length as\n `args`. May be `None`, in which case a generic name is used.\n structure: The original argument list or dictionary.\n flat_shapes: A flat list of values that are either `None` or\n instances of `TensorShape`. If provided, then length must match\n that of `nest.flatten(args, expand_composites=True)`; and locations where\n `args` are instances of `Tensor` must have a corresponding `TensorShape`\n in `flat_shapes`. May be `None`, in which case exact shapes are read\n directly from the args.\n\n Returns:\n Placeholders with the same structure as `structure`.\n\n Raises:\n RuntimeError: if `flat_shapes` is provided, but\n `len(flat_shapes) != len(nest.flatten(args, expand_composites=True))`.\n RuntimeError: if a shape from `flat_shapes` is not None\n for an argument that is not a `Tensor`, `TensorSpec`,\n or `ResourceVariable`.\n \"\"\"\n func_graph = ops.get_default_graph()\n function_inputs = []\n if names is None:\n names = [None] * len(args)\n if flat_shapes is None:\n shapes_iter = itertools.repeat(None)\n else:\n len_flat_args = len(nest.flatten(args, expand_composites=True))\n if len_flat_args != len(flat_shapes):\n raise RuntimeError(\n \"Length of fully flat shapes (%d) must match that of \"\n \"flatten(args) (%d). args: %s, flat_shapes: %s\"\n % (len(flat_shapes),\n len_flat_args,\n args,\n flat_shapes))\n shapes_iter = iter(flat_shapes)\n for arg_value, name in zip(args, names):\n\n # Replace any composite tensors with their TypeSpecs. 
This is important\n # for ensuring that shape information that's not preserved by the TypeSpec\n # (such as the number of values in a SparseTensor) gets properly masked.\n arg_value = nest.map_structure(_get_composite_tensor_spec, arg_value)\n\n flattened = nest.flatten(arg_value, expand_composites=True)\n\n for arg in flattened:\n # We have a shape entry for each arg, regardless of whether it's a real\n # Tensor or not. For non-tensor entries it should be None.\n shape = next(shapes_iter)\n if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):\n arg_is_spec = isinstance(arg, tensor_spec.TensorSpec)\n if arg_is_spec and arg.name:\n requested_name = arg.name\n else:\n requested_name = name\n placeholder_shape = shape if shape is not None else arg.shape\n try:\n placeholder = graph_placeholder(\n arg.dtype, placeholder_shape,\n name=requested_name)\n except ValueError:\n # Sometimes parameter names are not valid op names, so fall back to\n # unnamed placeholders.\n placeholder = graph_placeholder(arg.dtype, placeholder_shape)\n if not arg_is_spec:\n custom_gradient.copy_handle_data(arg, placeholder)\n if name is not None:\n # Record the requested/user-specified name in case it's different than\n # the uniquified name, for validation when exporting signatures.\n placeholder.op._set_attr( # pylint: disable=protected-access\n \"_user_specified_name\",\n attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))\n function_inputs.append(placeholder)\n elif isinstance(arg, (resource_variable_ops.BaseResourceVariable,\n resource_variable_ops.VariableSpec)):\n if isinstance(arg, resource_variable_ops.VariableSpec):\n name = arg.name or name\n with func_graph.outer_graph.as_default():\n placeholder = graph_placeholder(dtypes.resource, arg.shape,\n name=name)\n\n arg = resource_variable_ops.BaseResourceVariable(\n name=name,\n shape=arg.shape,\n dtype=arg.dtype,\n handle=placeholder,\n handle_name=name)\n # Capture arg variables to create placeholders for them. These will be\n # removed as captures after the function is traced (since otherwise we'd\n # just add it back with a new placeholder when the variable was\n # referenced).\n placeholder = func_graph.capture(arg.handle, name=name)\n placeholder.op._set_attr( # pylint: disable=protected-access\n \"_user_specified_name\",\n attr_value_pb2.AttrValue(s=compat.as_bytes(name)))\n function_inputs.append(arg)\n else:\n if shape is not None:\n raise RuntimeError(\n \"Expected provided shape override to be None for arg that isn't \"\n \"a Tensor, but saw arg: '%s', shape: '%s'. args: %s\"\n % (arg, shape, args))\n function_inputs.append(arg)\n return nest.pack_sequence_as(structure, function_inputs,\n expand_composites=True)\n\n\ndef _get_defun_inputs_from_kwargs(kwargs, flat_shapes):\n \"\"\"Maps Python function keyword args to graph-construction inputs.\"\"\"\n if kwargs:\n names, args = zip(*sorted(kwargs.items()))\n else:\n names = []\n args = []\n return _get_defun_inputs(\n args, names, structure=kwargs, flat_shapes=flat_shapes)\n\n\ndef dismantle_func_graph(func_graph):\n \"\"\"Removes reference cycles in `func_graph` FuncGraph.\n\n Helpful for making sure the garbage collector doesn't need to run when\n the FuncGraph goes out of scope, e.g. in tests using defun with\n @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).\n\n Args:\n func_graph: A `FuncGraph` object to destroy. 
`func_graph` is unusable\n after this function.\n \"\"\"\n func_graph.clear_captures()\n ops.dismantle_graph(func_graph)\n\n\ndef override_func_graph_name_scope(func_graph, name_scope):\n func_graph._name_stack = name_scope # pylint: disable=protected-access\n"
] | [
[
"tensorflow.python.framework.ops.NullContextmanager",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.util.tf_decorator.make_decorator",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.eager.graph_only_ops.graph_placeholder",
"tensorflow.python.ops.resource_variable_ops.VariableSpec",
"tensorflow.python.util.nest.assert_same_structure",
"tensorflow.python.framework.ops.convert_to_tensor_or_composite",
"tensorflow.python.framework.ops.uid",
"tensorflow.python.util.object_identity.ObjectIdentitySet",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.custom_gradient.copy_handle_data",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.autograph.ConversionOptions",
"tensorflow.python.eager.context.global_seed",
"tensorflow.python.framework.ops.dismantle_graph",
"tensorflow.python.framework.errors.InaccessibleTensorError",
"tensorflow.python.eager.tape.record_operation",
"tensorflow.python.framework.ops._as_graph_element",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.framework.auto_control_deps.AutomaticControlDependencies",
"tensorflow.python.ops.tensor_array_ops.build_ta_with_new_flow",
"tensorflow.python.util.memory.dismantle_ordered_dict",
"tensorflow.python.util.tf_decorator.unwrap",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.eager.context.context",
"tensorflow.python.util.object_identity.ObjectIdentityWeakSet",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.ops.resource_variable_ops.BaseResourceVariable",
"numpy.prod",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.util.tf_decorator.rewrap",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.util.nest.flatten_with_tuple_paths",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions"
]
] |
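The `flatten` and `pack_sequence_as` helpers in the func_graph source above are thin wrappers over `nest.flatten`/`nest.pack_sequence_as` with `expand_composites=True`, plus a `TensorArray`-flow special case. A minimal sketch of the underlying round trip, using the public `tf.nest` API rather than the internal `nest` module; the example structure is arbitrary:

```python
# Sketch of the flatten/pack round trip the helpers above build on,
# via the public tf.nest API; the nested structure is illustrative only.
import tensorflow as tf

structure = {"a": tf.constant([1.0, 2.0]), "b": (tf.constant(3.0),)}

# Flatten into a flat list of component tensors (composites expanded).
flat = tf.nest.flatten(structure, expand_composites=True)

# Rebuild the original nesting from the flat list.
rebuilt = tf.nest.pack_sequence_as(structure, flat, expand_composites=True)

# Raises ValueError on mismatch, the same check check_mutation() relies on.
tf.nest.assert_same_structure(structure, rebuilt, expand_composites=True)
```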
dendisuhubdy/MinkowskiEngine | [
"a1cdcba68ef925bfefed2fe161f62e1ec78573b9"
] | [
"MinkowskiEngine/MinkowskiFunctional.py"
] | [
"import torch.nn.functional as F\n\nfrom SparseTensor import SparseTensor\n\n\ndef relu(input):\n output = F.relu(input.F)\n return SparseTensor(\n output, coords_key=input.coords_key, coords_manager=input.coords_man)\n"
] | [
[
"torch.nn.functional.relu"
]
] |
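The MinkowskiFunctional snippet above shows the general pattern for lifting a dense pointwise op to a sparse tensor: apply it to the feature matrix `input.F`, then rewrap with the original coordinate bookkeeping. A hedged sketch of the same pattern for another pointwise op; `sigmoid` here is a hypothetical addition mirroring `relu`, not part of the file above:

```python
# Hypothetical companion to relu() above: apply a pointwise op to the
# feature matrix and keep the coordinate key/manager unchanged.
import torch

from SparseTensor import SparseTensor  # same import path as the file above


def sigmoid(input):
    output = torch.sigmoid(input.F)
    return SparseTensor(
        output, coords_key=input.coords_key, coords_manager=input.coords_man)
```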
QuantumHardware/qiskit-experiments | [
"c09cf35bb922419354955abe8d536a97a9ea286b",
"c09cf35bb922419354955abe8d536a97a9ea286b"
] | [
"test/calibration/experiments/test_drag.py",
"qiskit_experiments/curve_analysis/visualization/fit_result_plotters.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Test drag calibration experiment.\"\"\"\n\nfrom test.base import QiskitExperimentsTestCase\nimport unittest\nimport numpy as np\n\nfrom qiskit.circuit import Parameter\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.pulse import DriveChannel, Drag\nimport qiskit.pulse as pulse\nfrom qiskit.qobj.utils import MeasLevel\nfrom qiskit import transpile\n\nfrom qiskit_experiments.exceptions import CalibrationError\nfrom qiskit_experiments.library import RoughDrag, RoughDragCal\nfrom qiskit_experiments.test.mock_iq_backend import DragBackend\nfrom qiskit_experiments.calibration_management.basis_gate_library import FixedFrequencyTransmon\nfrom qiskit_experiments.calibration_management import Calibrations\n\n\nclass TestDragEndToEnd(QiskitExperimentsTestCase):\n \"\"\"Test the drag experiment.\"\"\"\n\n def setUp(self):\n \"\"\"Setup some schedules.\"\"\"\n super().setUp()\n\n beta = Parameter(\"β\")\n\n with pulse.build(name=\"xp\") as xp:\n pulse.play(Drag(duration=160, amp=0.208519, sigma=40, beta=beta), DriveChannel(0))\n\n self.x_plus = xp\n self.test_tol = 0.05\n\n def test_reps(self):\n \"\"\"Test that setting reps raises and error if reps is not of length three.\"\"\"\n\n drag = RoughDrag(0, self.x_plus)\n\n with self.assertRaises(CalibrationError):\n drag.set_experiment_options(reps=[1, 2, 3, 4])\n\n def test_end_to_end(self):\n \"\"\"Test the drag experiment end to end.\"\"\"\n\n backend = DragBackend(gate_name=\"Drag(xp)\")\n\n drag = RoughDrag(1, self.x_plus)\n\n expdata = drag.run(backend)\n self.assertExperimentDone(expdata)\n result = expdata.analysis_results(1)\n\n self.assertTrue(abs(result.value.n - backend.ideal_beta) < self.test_tol)\n self.assertEqual(result.quality, \"good\")\n\n # Small leakage will make the curves very flat, in this case one should\n # rather increase beta.\n backend = DragBackend(error=0.0051, gate_name=\"Drag(xp)\")\n\n drag = RoughDrag(0, self.x_plus)\n drag.analysis.set_options(p0={\"beta\": 1.2})\n exp_data = drag.run(backend)\n self.assertExperimentDone(exp_data)\n result = exp_data.analysis_results(1)\n\n self.assertTrue(abs(result.value.n - backend.ideal_beta) < self.test_tol)\n self.assertEqual(result.quality, \"good\")\n\n # Large leakage will make the curves oscillate quickly.\n backend = DragBackend(error=0.05, gate_name=\"Drag(xp)\")\n\n drag = RoughDrag(1, self.x_plus, betas=np.linspace(-4, 4, 31))\n drag.set_run_options(shots=200)\n drag.analysis.set_options(p0={\"beta\": 1.8, \"freq0\": 0.08, \"freq1\": 0.16, \"freq2\": 0.32})\n exp_data = drag.run(backend)\n self.assertExperimentDone(exp_data)\n result = exp_data.analysis_results(1)\n\n meas_level = exp_data.metadata[\"job_metadata\"][-1][\"run_options\"][\"meas_level\"]\n\n self.assertEqual(meas_level, MeasLevel.CLASSIFIED)\n self.assertTrue(abs(result.value.n - backend.ideal_beta) < self.test_tol)\n self.assertEqual(result.quality, \"good\")\n\n\nclass TestDragCircuits(QiskitExperimentsTestCase):\n \"\"\"Test the circuits of the drag calibration.\"\"\"\n\n def setUp(self):\n \"\"\"Setup some 
schedules.\"\"\"\n super().setUp()\n\n beta = Parameter(\"β\")\n\n with pulse.build(name=\"xp\") as xp:\n pulse.play(Drag(duration=160, amp=0.208519, sigma=40, beta=beta), DriveChannel(0))\n\n self.x_plus = xp\n\n def test_default_circuits(self):\n \"\"\"Test the default circuit.\"\"\"\n\n backend = DragBackend(error=0.005, gate_name=\"Drag(xp)\")\n\n drag = RoughDrag(0, self.x_plus)\n drag.set_experiment_options(reps=[2, 4, 8])\n drag.backend = DragBackend(gate_name=\"Drag(xp)\")\n circuits = drag.circuits()\n\n for idx, expected in enumerate([4, 8, 16]):\n ops = transpile(circuits[idx * 51], backend).count_ops()\n self.assertEqual(ops[\"Drag(xp)\"], expected)\n\n def test_raise_multiple_parameter(self):\n \"\"\"Check that the experiment raises with unassigned parameters.\"\"\"\n\n beta = Parameter(\"β\")\n amp = Parameter(\"amp\")\n\n with pulse.build(name=\"xp\") as xp:\n pulse.play(Drag(duration=160, amp=amp, sigma=40, beta=beta), DriveChannel(0))\n\n with self.assertRaises(QiskitError):\n RoughDrag(1, xp, betas=np.linspace(-3, 3, 21))\n\n\nclass TestRoughDragCalUpdate(QiskitExperimentsTestCase):\n \"\"\"Test that a Drag calibration experiment properly updates the calibrations.\"\"\"\n\n def setUp(self):\n \"\"\"Setup the tests\"\"\"\n super().setUp()\n\n library = FixedFrequencyTransmon()\n\n self.backend = DragBackend(gate_name=\"Drag(x)\")\n self.cals = Calibrations.from_backend(self.backend, library)\n self.test_tol = 0.05\n\n def test_update(self):\n \"\"\"Test that running RoughDragCal updates the calibrations.\"\"\"\n\n qubit = 0\n prev_beta = self.cals.get_parameter_value(\"β\", (0,), \"x\")\n self.assertEqual(prev_beta, 0)\n\n expdata = RoughDragCal(qubit, self.cals, backend=self.backend).run()\n self.assertExperimentDone(expdata)\n\n new_beta = self.cals.get_parameter_value(\"β\", (0,), \"x\")\n self.assertTrue(abs(new_beta - self.backend.ideal_beta) < self.test_tol)\n self.assertTrue(abs(new_beta) > self.test_tol)\n\n def test_dragcal_experiment_config(self):\n \"\"\"Test RoughDragCal config can round trip\"\"\"\n exp = RoughDragCal(0, self.cals, backend=self.backend)\n loaded_exp = RoughDragCal.from_config(exp.config())\n self.assertNotEqual(exp, loaded_exp)\n self.assertTrue(self.json_equiv(exp, loaded_exp))\n\n @unittest.skip(\"Calibration experiments are not yet JSON serializable\")\n def test_dragcal_roundtrip_serializable(self):\n \"\"\"Test round trip JSON serialization\"\"\"\n exp = RoughDragCal(0, self.cals)\n self.assertRoundTripSerializable(exp, self.json_equiv)\n\n def test_drag_experiment_config(self):\n \"\"\"Test RoughDrag config can roundtrip\"\"\"\n with pulse.build(name=\"xp\") as sched:\n pulse.play(pulse.Drag(160, 0.5, 40, Parameter(\"β\")), pulse.DriveChannel(0))\n exp = RoughDrag(0, backend=self.backend, schedule=sched)\n loaded_exp = RoughDrag.from_config(exp.config())\n self.assertNotEqual(exp, loaded_exp)\n self.assertTrue(self.json_equiv(exp, loaded_exp))\n\n @unittest.skip(\"Schedules are not yet JSON serializable\")\n def test_drag_roundtrip_serializable(self):\n \"\"\"Test round trip JSON serialization\"\"\"\n with pulse.build(name=\"xp\") as sched:\n pulse.play(pulse.Drag(160, 0.5, 40, Parameter(\"β\")), pulse.DriveChannel(0))\n exp = RoughDrag(0, backend=self.backend, schedule=sched)\n self.assertRoundTripSerializable(exp, self.json_equiv)\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nA collection of functions that draw formatted curve analysis results.\n\nFor example, this visualization contains not only fit curves and raw data points,\nbut also some extra fitting information, such as fit values of some interesting parameters\nand goodness of the fitting represented by chi-squared. These extra information can be\nalso visualized as a fit report.\n\nNote that plotter is a class that only has a class method to draw the image.\nThis is just like a function, but allows serialization via Enum.\n\"\"\"\n\nfrom collections import defaultdict\nfrom typing import List, Dict, Optional\n\nimport uncertainties\nimport numpy as np\nfrom matplotlib.ticker import FuncFormatter\nfrom qiskit.utils import detach_prefix\n\nfrom qiskit_experiments.curve_analysis.curve_data import SeriesDef, FitData, CurveData\nfrom qiskit_experiments.framework import AnalysisResultData\nfrom qiskit_experiments.framework.matplotlib import get_non_gui_ax\nfrom .curves import plot_scatter, plot_errorbar, plot_curve_fit\nfrom .style import PlotterStyle\n\n\nclass MplDrawSingleCanvas:\n \"\"\"A plotter to draw a single canvas figure for fit result.\"\"\"\n\n @classmethod\n def draw(\n cls,\n series_defs: List[SeriesDef],\n raw_samples: List[CurveData],\n fit_samples: List[CurveData],\n tick_labels: Dict[str, str],\n fit_data: FitData,\n result_entries: List[AnalysisResultData],\n style: Optional[PlotterStyle] = None,\n axis: Optional[\"matplotlib.axes.Axes\"] = None,\n ) -> \"pyplot.Figure\":\n \"\"\"Create a fit result of all curves in the single canvas.\n\n Args:\n series_defs: List of definition for each curve.\n raw_samples: List of raw sample data for each curve.\n fit_samples: List of formatted sample data for each curve.\n tick_labels: Dictionary of axis label information. Axis units and label for x and y\n value should be explained.\n fit_data: fit data generated by the analysis.\n result_entries: List of analysis result data entries.\n style: Optional. A configuration object to modify the appearance of the figure.\n axis: Optional. 
A matplotlib Axis object.\n\n Returns:\n A matplotlib figure of the curve fit result.\n \"\"\"\n if axis is None:\n axis = get_non_gui_ax()\n\n # update image size to experiment default\n figure = axis.get_figure()\n figure.set_size_inches(*style.figsize)\n else:\n figure = axis.get_figure()\n\n # draw all curves on the same canvas\n for series_def, raw_samp, fit_samp in zip(series_defs, raw_samples, fit_samples):\n draw_single_curve_mpl(\n axis=axis,\n series_def=series_def,\n raw_sample=raw_samp,\n fit_sample=fit_samp,\n fit_data=fit_data,\n style=style,\n )\n\n # add legend\n if len(series_defs) > 1:\n axis.legend(loc=style.legend_loc)\n\n # get axis scaling factor\n for this_axis in (\"x\", \"y\"):\n sub_axis = getattr(axis, this_axis + \"axis\")\n unit = tick_labels[this_axis + \"val_unit\"]\n label = tick_labels[this_axis + \"label\"]\n if unit:\n maxv = np.max(np.abs(sub_axis.get_data_interval()))\n scaled_maxv, prefix = detach_prefix(maxv, decimal=3)\n prefactor = scaled_maxv / maxv\n # pylint: disable=cell-var-from-loop\n sub_axis.set_major_formatter(FuncFormatter(lambda x, p: f\"{x * prefactor: .3g}\"))\n sub_axis.set_label_text(f\"{label} [{prefix}{unit}]\", fontsize=style.axis_label_size)\n else:\n sub_axis.set_label_text(label, fontsize=style.axis_label_size)\n axis.ticklabel_format(axis=this_axis, style=\"sci\", scilimits=(-3, 3))\n\n if tick_labels[\"xlim\"]:\n axis.set_xlim(tick_labels[\"xlim\"])\n\n if tick_labels[\"ylim\"]:\n axis.set_ylim(tick_labels[\"ylim\"])\n\n # write analysis report\n if fit_data:\n report_str = write_fit_report(result_entries)\n report_str += r\"Fit $\\chi^2$ = \" + f\"{fit_data.reduced_chisq: .4g}\"\n\n report_handler = axis.text(\n *style.fit_report_rpos,\n report_str,\n ha=\"center\",\n va=\"top\",\n size=style.fit_report_text_size,\n transform=axis.transAxes,\n )\n\n bbox_props = dict(boxstyle=\"square, pad=0.3\", fc=\"white\", ec=\"black\", lw=1, alpha=0.8)\n report_handler.set_bbox(bbox_props)\n\n axis.tick_params(labelsize=style.tick_label_size)\n axis.grid(True)\n\n return figure\n\n\nclass MplDrawMultiCanvasVstack:\n \"\"\"A plotter to draw a vertically stacked multi canvas figure for fit result.\"\"\"\n\n @classmethod\n def draw(\n cls,\n series_defs: List[SeriesDef],\n raw_samples: List[CurveData],\n fit_samples: List[CurveData],\n tick_labels: Dict[str, str],\n fit_data: FitData,\n result_entries: List[AnalysisResultData],\n style: Optional[PlotterStyle] = None,\n axis: Optional[\"matplotlib.axes.Axes\"] = None,\n ) -> \"pyplot.Figure\":\n \"\"\"Create a fit result of all curves in the single canvas.\n\n Args:\n series_defs: List of definition for each curve.\n raw_samples: List of raw sample data for each curve.\n fit_samples: List of formatted sample data for each curve.\n tick_labels: Dictionary of axis label information. Axis units and label for x and y\n value should be explained.\n fit_data: fit data generated by the analysis.\n result_entries: List of analysis result data entries.\n style: Optional. A configuration object to modify the appearance of the figure.\n axis: Optional. A matplotlib Axis object.\n\n Returns:\n A matplotlib figure of the curve fit result.\n \"\"\"\n if axis is None:\n axis = get_non_gui_ax()\n\n # update image size to experiment default\n figure = axis.get_figure()\n figure.set_size_inches(*style.figsize)\n else:\n figure = axis.get_figure()\n\n # get canvas number\n n_subplots = max(series_def.canvas for series_def in series_defs) + 1\n\n # use inset axis. 
this allows us to draw multiple canvases on a given single axis object\n inset_ax_h = (1 - (0.05 * (n_subplots - 1))) / n_subplots\n inset_axes = [\n axis.inset_axes(\n [0, 1 - (inset_ax_h + 0.05) * n_axis - inset_ax_h, 1, inset_ax_h],\n transform=axis.transAxes,\n zorder=1,\n )\n for n_axis in range(n_subplots)\n ]\n\n # show x label only in the bottom canvas\n for inset_axis in inset_axes[:-1]:\n inset_axis.set_xticklabels([])\n inset_axes[-1].get_shared_x_axes().join(*inset_axes)\n\n # remove original axis frames\n axis.spines.right.set_visible(False)\n axis.spines.left.set_visible(False)\n axis.spines.top.set_visible(False)\n axis.spines.bottom.set_visible(False)\n axis.set_xticks([])\n axis.set_yticks([])\n\n # collect data source per canvas\n plot_map = defaultdict(list)\n for curve_ind, series_def in enumerate(series_defs):\n plot_map[series_def.canvas].append(curve_ind)\n\n y_labels = tick_labels[\"ylabel\"].split(\",\")\n if len(y_labels) == 1:\n y_labels = y_labels * n_subplots\n\n for ax_ind, curve_inds in plot_map.items():\n inset_axis = inset_axes[ax_ind]\n\n for curve_ind in curve_inds:\n draw_single_curve_mpl(\n axis=inset_axis,\n series_def=series_defs[curve_ind],\n raw_sample=raw_samples[curve_ind],\n fit_sample=fit_samples[curve_ind],\n fit_data=fit_data,\n style=style,\n )\n\n # add legend to each inset axis\n if len(curve_inds) > 1:\n inset_axis.legend(loc=style.legend_loc)\n\n # format y axis tick value of each inset axis\n yaxis = getattr(inset_axis, \"yaxis\")\n unit = tick_labels[\"yval_unit\"]\n label = y_labels[ax_ind]\n if unit:\n maxv = np.max(np.abs(yaxis.get_data_interval()))\n scaled_maxv, prefix = detach_prefix(maxv, decimal=3)\n prefactor = scaled_maxv / maxv\n # pylint: disable=cell-var-from-loop\n yaxis.set_major_formatter(FuncFormatter(lambda x, p: f\"{x * prefactor: .3g}\"))\n yaxis.set_label_text(f\"{label} [{prefix}{unit}]\", fontsize=style.axis_label_size)\n else:\n inset_axis.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(-3, 3))\n yaxis.set_label_text(label, fontsize=style.axis_label_size)\n\n if tick_labels[\"ylim\"]:\n inset_axis.set_ylim(tick_labels[\"ylim\"])\n\n # format x axis\n xaxis = getattr(inset_axes[-1], \"xaxis\")\n unit = tick_labels[\"xval_unit\"]\n label = tick_labels[\"xlabel\"]\n if unit:\n maxv = np.max(np.abs(xaxis.get_data_interval()))\n scaled_maxv, prefix = detach_prefix(maxv, decimal=3)\n prefactor = scaled_maxv / maxv\n # pylint: disable=cell-var-from-loop\n xaxis.set_major_formatter(FuncFormatter(lambda x, p: f\"{x * prefactor: .3g}\"))\n xaxis.set_label_text(f\"{label} [{prefix}{unit}]\", fontsize=style.axis_label_size)\n else:\n axis.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(-3, 3))\n xaxis.set_label_text(label, fontsize=style.axis_label_size)\n\n if tick_labels[\"xlim\"]:\n inset_axes[-1].set_xlim(tick_labels[\"xlim\"])\n\n # write analysis report\n if fit_data:\n report_str = write_fit_report(result_entries)\n report_str += r\"Fit $\\chi^2$ = \" + f\"{fit_data.reduced_chisq: .4g}\"\n\n report_handler = axis.text(\n *style.fit_report_rpos,\n report_str,\n ha=\"center\",\n va=\"top\",\n size=style.fit_report_text_size,\n transform=axis.transAxes,\n )\n\n bbox_props = dict(boxstyle=\"square, pad=0.3\", fc=\"white\", ec=\"black\", lw=1, alpha=0.8)\n report_handler.set_bbox(bbox_props)\n\n axis.tick_params(labelsize=style.tick_label_size)\n axis.grid(True)\n\n return figure\n\n\ndef draw_single_curve_mpl(\n axis: \"matplotlib.axes.Axes\",\n series_def: SeriesDef,\n raw_sample: CurveData,\n fit_sample: 
CurveData,\n fit_data: FitData,\n style: PlotterStyle,\n):\n \"\"\"A function that draws a single curve on the given plotter canvas.\n\n Args:\n axis: Drawer canvas.\n series_def: Definition of the curve to draw.\n raw_sample: Raw sample data.\n fit_sample: Formatted sample data.\n fit_data: Fitting parameter collection.\n style: Style sheet for plotting.\n \"\"\"\n\n # plot raw data if data is formatted\n if not np.array_equal(raw_sample.y, fit_sample.y):\n plot_scatter(xdata=raw_sample.x, ydata=raw_sample.y, ax=axis, zorder=0)\n\n # plot formatted data\n if np.all(np.isnan(fit_sample.y_err)):\n sigma = None\n else:\n sigma = np.nan_to_num(fit_sample.y_err)\n\n plot_errorbar(\n xdata=fit_sample.x,\n ydata=fit_sample.y,\n sigma=sigma,\n ax=axis,\n label=series_def.name,\n marker=series_def.plot_symbol,\n color=series_def.plot_color,\n zorder=1,\n linestyle=\"\",\n )\n\n # plot fit curve\n if fit_data:\n plot_curve_fit(\n func=series_def.fit_func,\n result=fit_data,\n ax=axis,\n color=series_def.plot_color,\n zorder=2,\n fit_uncertainty=style.plot_sigma,\n )\n\n\ndef write_fit_report(result_entries: List[AnalysisResultData]) -> str:\n \"\"\"A function that generates fit reports documentation from list of data.\n\n Args:\n result_entries: List of data entries.\n\n Returns:\n Documentation of fit reports.\n \"\"\"\n analysis_description = \"\"\n\n def format_val(float_val: float) -> str:\n if np.abs(float_val) < 1e-3 or np.abs(float_val) > 1e3:\n return f\"{float_val: .4e}\"\n return f\"{float_val: .4g}\"\n\n for res in result_entries:\n if isinstance(res.value, uncertainties.UFloat):\n fitval = res.value\n unit = res.extra.get(\"unit\", None)\n if unit:\n # unit is defined. do detaching prefix, i.e. 1000 Hz -> 1 kHz\n val, val_prefix = detach_prefix(fitval.nominal_value, decimal=3)\n val_unit = val_prefix + unit\n value_repr = f\"{val: .3g}\"\n\n # write error bar if it is finite value\n if fitval.std_dev is not None and np.isfinite(fitval.std_dev):\n # with stderr\n err, err_prefix = detach_prefix(fitval.std_dev, decimal=3)\n err_unit = err_prefix + unit\n if val_unit == err_unit:\n # same value scaling, same prefix\n value_repr += f\" \\u00B1 {err: .2f} {val_unit}\"\n else:\n # different value scaling, different prefix\n value_repr += f\" {val_unit} \\u00B1 {err: .2f} {err_unit}\"\n else:\n # without stderr, just append unit\n value_repr += f\" {val_unit}\"\n else:\n # unit is not defined. raw value formatting is performed.\n value_repr = format_val(fitval.nominal_value)\n if np.isfinite(fitval.std_dev):\n # with stderr\n value_repr += f\" \\u00B1 {format_val(fitval.std_dev)}\"\n\n analysis_description += f\"{res.name} = {value_repr}\\n\"\n\n return analysis_description\n"
] | [
[
"numpy.linspace"
],
[
"matplotlib.ticker.FuncFormatter",
"numpy.abs",
"numpy.array_equal",
"numpy.isnan",
"numpy.nan_to_num",
"numpy.isfinite"
]
] |
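The drag tests above repeatedly build the same parameterized schedule: a Drag pulse whose `beta` is left as a free `Parameter` so the experiment can sweep it. Pulled out on its own for reference, with the duration/amp/sigma values copied from the test `setUp`:

```python
# The parameterized "xp" schedule the drag tests construct; the numeric
# values are the ones used in the tests above.
import qiskit.pulse as pulse
from qiskit.circuit import Parameter
from qiskit.pulse import Drag, DriveChannel

beta = Parameter("β")

with pulse.build(name="xp") as xp:
    pulse.play(Drag(duration=160, amp=0.208519, sigma=40, beta=beta),
               DriveChannel(0))
```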
thyneb19/lux | [
"07a282d6a5f60c05942d866fa6f33636c3428abc"
] | [
"tests/test_type.py"
] | [
"# Copyright 2019-2020 The Lux Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .context import lux\nimport pytest\nimport random\nimport pandas as pd\nimport warnings\n\n\n# Suite of test that checks if data_type inferred correctly by Lux\ndef test_check_cars():\n lux.config.set_SQL_connection(\"\")\n df = pd.read_csv(\"lux/data/car.csv\")\n df.maintain_metadata()\n assert df.data_type[\"Name\"] == \"nominal\"\n assert df.data_type[\"MilesPerGal\"] == \"quantitative\"\n assert df.data_type[\"Cylinders\"] == \"nominal\"\n assert df.data_type[\"Displacement\"] == \"quantitative\"\n assert df.data_type[\"Horsepower\"] == \"quantitative\"\n assert df.data_type[\"Weight\"] == \"quantitative\"\n assert df.data_type[\"Acceleration\"] == \"quantitative\"\n assert df.data_type[\"Year\"] == \"temporal\"\n assert df.data_type[\"Origin\"] == \"nominal\"\n\n\ndef test_check_int_id():\n df = pd.read_csv(\n \"https://github.com/lux-org/lux-datasets/blob/master/data/instacart_sample.csv?raw=true\"\n )\n df._repr_html_()\n inverted_data_type = lux.config.executor.invert_data_type(df.data_type)\n assert len(inverted_data_type[\"id\"]) == 3\n assert (\n \"<code>order_id</code>, <code>product_id</code>, <code>user_id</code> is not visualized since it resembles an ID field.\"\n in df._message.to_html()\n )\n\n\ndef test_check_str_id():\n df = pd.read_csv(\"https://github.com/lux-org/lux-datasets/blob/master/data/churn.csv?raw=true\")\n df._repr_html_()\n assert (\n \"<code>customerID</code> is not visualized since it resembles an ID field.</li>\"\n in df._message.to_html()\n )\n\n\ndef test_check_hpi():\n df = pd.read_csv(\"https://github.com/lux-org/lux-datasets/blob/master/data/hpi.csv?raw=true\")\n df.maintain_metadata()\n\n assert df.data_type == {\n \"HPIRank\": \"quantitative\",\n \"Country\": \"geographical\",\n \"SubRegion\": \"nominal\",\n \"AverageLifeExpectancy\": \"quantitative\",\n \"AverageWellBeing\": \"quantitative\",\n \"HappyLifeYears\": \"quantitative\",\n \"Footprint\": \"quantitative\",\n \"InequalityOfOutcomes\": \"quantitative\",\n \"InequalityAdjustedLifeExpectancy\": \"quantitative\",\n \"InequalityAdjustedWellbeing\": \"quantitative\",\n \"HappyPlanetIndex\": \"quantitative\",\n \"GDPPerCapita\": \"quantitative\",\n \"Population\": \"quantitative\",\n }\n\n\ndef test_check_airbnb():\n df = pd.read_csv(\"https://github.com/lux-org/lux-datasets/blob/master/data/airbnb_nyc.csv?raw=true\")\n df.maintain_metadata()\n assert df.data_type == {\n \"id\": \"id\",\n \"name\": \"nominal\",\n \"host_id\": \"id\",\n \"host_name\": \"nominal\",\n \"neighbourhood_group\": \"nominal\",\n \"neighbourhood\": \"nominal\",\n \"latitude\": \"quantitative\",\n \"longitude\": \"quantitative\",\n \"room_type\": \"nominal\",\n \"price\": \"quantitative\",\n \"minimum_nights\": \"quantitative\",\n \"number_of_reviews\": \"quantitative\",\n \"last_review\": \"temporal\",\n \"reviews_per_month\": \"quantitative\",\n \"calculated_host_listings_count\": \"quantitative\",\n \"availability_365\": 
\"quantitative\",\n }\n\n\ndef test_check_airports():\n df = pd.read_csv(\n \"https://raw.githubusercontent.com/altair-viz/vega_datasets/master/vega_datasets/_data/airports.csv\"\n )\n df.maintain_metadata()\n assert df.data_type == {\n \"iata\": \"id\",\n \"name\": \"nominal\",\n \"city\": \"nominal\",\n \"state\": \"geographical\",\n \"country\": \"geographical\",\n \"latitude\": \"quantitative\",\n \"longitude\": \"quantitative\",\n }\n\n\ndef test_check_datetime():\n df = pd.DataFrame(\n {\n \"a\": [\"2020-01-01\"],\n \"b\": [\"20-01-01\"],\n \"c\": [\"20-jan-01\"],\n \"d\": [\"20-january-01\"],\n \"e\": [\"2020 January 01\"],\n \"f\": [\"2020 January 01 00:00:00 pm PT\"],\n \"g\": [\"2020 January 01 13:00:00\"],\n \"h\": [\"2020 January 01 23:59:59 GTC-6\"],\n }\n )\n df.maintain_metadata()\n assert df.data_type == {\n \"a\": \"temporal\",\n \"b\": \"temporal\",\n \"c\": \"temporal\",\n \"d\": \"temporal\",\n \"e\": \"temporal\",\n \"f\": \"temporal\",\n \"g\": \"temporal\",\n \"h\": \"temporal\",\n }\n\n\ndef test_check_datetime_numeric_values():\n car_df = pd.read_csv(\"lux/data/car.csv\")\n car_df = car_df.rename(columns={\"Year\": \"blah\"})\n car_df.maintain_metadata()\n assert car_df.data_type[\"blah\"] == \"temporal\"\n\n spotify_df = pd.read_csv(\n \"https://raw.githubusercontent.com/lux-org/lux-datasets/master/data/spotify.csv\"\n )\n spotify_df = spotify_df.rename(columns={\"year\": \"blah\"})\n spotify_df.maintain_metadata()\n assert spotify_df.data_type[\"blah\"] == \"temporal\"\n assert spotify_df.data_type[\"release_date\"] == \"temporal\"\n\n\ndef test_check_stock():\n df = pd.read_csv(\"https://github.com/lux-org/lux-datasets/blob/master/data/stocks.csv?raw=true\")\n df.maintain_metadata()\n assert df.data_type == {\n \"symbol\": \"nominal\",\n \"monthdate\": \"temporal\",\n \"price\": \"quantitative\",\n }, \"Stock dataset type detection error\"\n\n\ndef test_check_college():\n df = pd.read_csv(\"lux/data/college.csv\")\n df.maintain_metadata()\n assert df.data_type == {\n \"Name\": \"nominal\",\n \"PredominantDegree\": \"nominal\",\n \"HighestDegree\": \"nominal\",\n \"FundingModel\": \"nominal\",\n \"Region\": \"nominal\",\n \"Geography\": \"nominal\",\n \"AdmissionRate\": \"quantitative\",\n \"ACTMedian\": \"quantitative\",\n \"SATAverage\": \"quantitative\",\n \"AverageCost\": \"quantitative\",\n \"Expenditure\": \"quantitative\",\n \"AverageFacultySalary\": \"quantitative\",\n \"MedianDebt\": \"quantitative\",\n \"AverageAgeofEntry\": \"quantitative\",\n \"MedianFamilyIncome\": \"quantitative\",\n \"MedianEarnings\": \"quantitative\",\n }\n\n\ndef test_float_categorical():\n values = [\n {\"A\": 6.0, \"B\": 1.0, \"C\": 1.0, \"D\": 3.0, \"E\": 2.0, \"F\": 5.0},\n {\"A\": 5.0, \"B\": 2.0, \"C\": 2.0, \"D\": 2.0, \"E\": 2.0, \"F\": 3.0},\n {\"A\": 3.0, \"B\": 6.0, \"C\": 3.0, \"D\": 3.0, \"E\": 2.0, \"F\": 5.0},\n {\"A\": 6.0, \"B\": 3.0, \"C\": 3.0, \"D\": 2.0, \"E\": 2.0, \"F\": 2.0},\n {\"A\": 7.0, \"B\": 4.0, \"C\": 2.0, \"D\": 2.0, \"E\": 2.0, \"F\": 4.0},\n {\"A\": 5.0, \"B\": 3.0, \"C\": 6.0, \"D\": 3.0, \"E\": 3.0, \"F\": 4.0},\n {\"A\": 3.0, \"B\": 4.0, \"C\": 3.0, \"D\": 6.0, \"E\": 5.0, \"F\": 5.0},\n {\"A\": 3.0, \"B\": 3.0, \"C\": 2.0, \"D\": 2.0, \"E\": 4.0, \"F\": 5.0},\n {\"A\": 3.0, \"B\": 2.0, \"C\": 2.0, \"D\": 2.0, \"E\": 2.0, \"F\": 4.0},\n {\"A\": 1.0, \"B\": 2.0, \"C\": 2.0, \"D\": 2.0, \"E\": 2.0, \"F\": 6.0},\n {\"A\": 3.0, \"B\": 3.0, \"C\": 2.0, \"D\": 3.0, \"E\": 3.0, \"F\": 5.0},\n {\"A\": 7.0, \"B\": 1.0, \"C\": 1.0, \"D\": 2.0, \"E\": 
2.0, \"F\": 3.0},\n {\"A\": 6.0, \"B\": 2.0, \"C\": 2.0, \"D\": 2.0, \"E\": 2.0, \"F\": 3.0},\n {\"A\": 2.0, \"B\": 3.0, \"C\": 2.0, \"D\": 3.0, \"E\": 3.0, \"F\": 4.0},\n {\"A\": 6.0, \"B\": 2.0, \"C\": 3.0, \"D\": 3.0, \"E\": 3.0, \"F\": 5.0},\n ]\n df = pd.DataFrame(values)\n df.maintain_metadata()\n inverted_data_type = lux.config.executor.invert_data_type(df.data_type)\n assert inverted_data_type[\"nominal\"] == [\n \"A\",\n \"B\",\n \"C\",\n \"D\",\n \"E\",\n \"F\",\n ], \"Float column should be detected as categorical\"\n for x in list(df.dtypes):\n assert x == \"float64\", \"Source dataframe preserved as float dtype\"\n\n\ndef test_set_data_type():\n df = pd.read_csv(\n \"https://github.com/lux-org/lux-datasets/blob/master/data/real_estate_tutorial.csv?raw=true\"\n )\n with pytest.warns(UserWarning) as w:\n df._repr_html_()\n assert \"starter template that you can use\" in str(w[-1].message)\n assert \"df.set_data_type\" in str(w[-1].message)\n\n df.set_data_type({\"Month\": \"nominal\", \"Year\": \"nominal\"})\n assert df.data_type[\"Month\"] == \"nominal\"\n assert df.data_type[\"Year\"] == \"nominal\"\n with warnings.catch_warnings() as w:\n warnings.simplefilter(\"always\")\n df._repr_html_()\n assert not w\n\n\ndef test_set_data_type_invalid():\n df = pd.read_csv(\n \"https://github.com/lux-org/lux-datasets/blob/master/data/real_estate_tutorial.csv?raw=true\"\n )\n with pytest.raises(ValueError):\n df.set_data_type({\"Month\": \"nomnal\", \"Year\": \"nomnal\"})\n\n\ndef test_set_wrong_data_type():\n df = pd.read_csv(\n \"https://github.com/lux-org/lux-datasets/blob/master/data/real_estate_tutorial.csv?raw=true\"\n )\n df.set_data_type({\"Year\": \"quantitative\"})\n assert df.data_type[\"Year\"] == \"quantitative\"\n\n\ndef test_id_with_label():\n df = pd.read_csv(\n \"https://github.com/lux-org/lux-datasets/blob/master/data/state_timeseries.csv?raw=true\"\n )\n df.maintain_metadata()\n assert df.data_type == {\"Date\": \"temporal\", \"State\": \"geographical\", \"Value\": \"quantitative\"}\n\n\ndef test_ID_random():\n \"\"\"Tests whether a ID column not satisfying other properties of an ID gets recognized.\"\"\"\n values = [\n {\"ID\": random.randint(0, 1000), \"A\": 6.0, \"B\": 1.0, \"C\": 1.0, \"D\": 3.0, \"E\": 2.0, \"F\": 5.0}\n for x in range(1000)\n ]\n df = pd.DataFrame(values)\n df.maintain_metadata()\n assert df.data_type == {\n \"ID\": \"quantitative\",\n \"A\": \"nominal\",\n \"B\": \"nominal\",\n \"C\": \"nominal\",\n \"D\": \"nominal\",\n \"E\": \"nominal\",\n \"F\": \"nominal\",\n }\n\n\ndef test_ID():\n \"\"\"Tests different ways of writing id\"\"\"\n values = [{\"ID\": x, \"A\": 6.0, \"B\": 1.0, \"C\": 1.0, \"D\": 3.0, \"E\": 2.0, \"F\": 5.0} for x in range(1000)]\n df = pd.DataFrame(values)\n df.maintain_metadata()\n assert df.data_type == {\n \"ID\": \"id\",\n \"A\": \"nominal\",\n \"B\": \"nominal\",\n \"C\": \"nominal\",\n \"D\": \"nominal\",\n \"E\": \"nominal\",\n \"F\": \"nominal\",\n }\n\n\ndef test_id_aug_test():\n \"\"\"Tests in a different dataset\n Reference: https://www.kaggle.com/arashnic/hr-analytics-job-change-of-data-scientists\n \"\"\"\n df = pd.read_csv(\"https://github.com/lux-org/lux-datasets/blob/master/data/aug_test.csv?raw=true\")\n df.maintain_metadata()\n assert df.data_type == {\n \"enrollee_id\": \"id\",\n \"city\": \"nominal\",\n \"city_development_index\": \"quantitative\",\n \"gender\": \"nominal\",\n \"relevent_experience\": \"nominal\",\n \"enrolled_university\": \"nominal\",\n \"education_level\": \"nominal\",\n 
\"major_discipline\": \"nominal\",\n \"experience\": \"nominal\",\n \"company_size\": \"nominal\",\n \"company_type\": \"nominal\",\n \"last_new_job\": \"nominal\",\n \"training_hours\": \"quantitative\",\n }\n\n\ndef test_id_music_data():\n \"\"\"Tests in a different dataset if a column not named as an ID is recognized as an identification.\n Reference: https://www.kaggle.com/yamaerenay/spotify-dataset-19212020-160k-tracks\n \"\"\"\n df = pd.read_csv(\"https://github.com/lux-org/lux-datasets/blob/master/data/spotify.csv?raw=true\")\n df[\"unique_num\"] = df[\"id\"]\n df.drop(columns=[\"id\"])\n df.maintain_metadata()\n assert df.data_type == {\n \"valence\": \"quantitative\",\n \"year\": \"temporal\",\n \"acousticness\": \"quantitative\",\n \"artists\": \"nominal\",\n \"danceability\": \"quantitative\",\n \"duration_ms\": \"quantitative\",\n \"energy\": \"quantitative\",\n \"explicit\": \"nominal\",\n \"unique_num\": \"id\",\n \"instrumentalness\": \"quantitative\",\n \"key\": \"nominal\",\n \"liveness\": \"quantitative\",\n \"loudness\": \"quantitative\",\n \"mode\": \"nominal\",\n \"name\": \"nominal\",\n \"popularity\": \"quantitative\",\n \"release_date\": \"temporal\",\n \"speechiness\": \"quantitative\",\n \"tempo\": \"quantitative\",\n \"id\": \"id\",\n }\n\n\ndef test_id_absenteeism_data():\n \"\"\" Tests whether an id named column is not recognized because even though it is named an id, it is not with its nature. \"\"\"\n df = pd.read_csv(\"https://github.com/lux-org/lux-datasets/blob/master/data/absenteeism.csv?raw=true\")\n df.maintain_metadata()\n assert df.data_type == {\n \"ID\": \"quantitative\",\n \"Reason for absence\": \"quantitative\",\n \"Month of absence\": \"nominal\",\n \"Day of the week\": \"nominal\",\n \"Seasons\": \"nominal\",\n \"Transportation expense\": \"quantitative\",\n \"Distance from Residence to Work\": \"quantitative\",\n \"Service time\": \"nominal\",\n \"Age\": \"quantitative\",\n \"Work load Average/day \": \"quantitative\",\n \"Hit target\": \"nominal\",\n \"Disciplinary failure\": \"nominal\",\n \"Education\": \"nominal\",\n \"Son\": \"nominal\",\n \"Social drinker\": \"nominal\",\n \"Social smoker\": \"nominal\",\n \"Pet\": \"nominal\",\n \"Weight\": \"quantitative\",\n \"Height\": \"nominal\",\n \"Body mass index\": \"nominal\",\n \"Absenteeism time in hours\": \"nominal\",\n }\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
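Nearly every test in the Lux suite above follows the same three-step pattern: load a frame, call `maintain_metadata()` to trigger type inference, then compare `df.data_type` against an expected mapping. A minimal sketch of that pattern, with the dataset path and expected types taken from the first test:

```python
# Minimal version of the check pattern used throughout the tests above.
import lux  # the tests import this via a local .context module
import pandas as pd

df = pd.read_csv("lux/data/car.csv")
df.maintain_metadata()  # triggers Lux's data-type inference

assert df.data_type["MilesPerGal"] == "quantitative"
assert df.data_type["Origin"] == "nominal"
```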
leipzig/gatk-sv | [
"bf3704bd1d705339577530e267cd4d1b2f77a17f"
] | [
"src/sv-pipeline/pre_SVCalling_and_QC/raw_vcf_qc/calc_num_svs_pick_outlier.py"
] | [
"#!/usr/bin/env python\n\nimport sys\nfrom typing import Sequence, Set\nimport argparse\nimport numpy\nimport pandas\n\n\n_zero_svs_are_outliers = True\n_outlier_std_threshold = 5.0\n_column_order = [\"CHROM\", \"SVTYPE\", \"Mean\", \"Median\", \"STD\",\n \"Outlier_Sample\", \"Outlier_Number\", \"Outlier_Cate\"]\n\n\ndef read_statfile(statfile: str) -> pandas.DataFrame:\n \"\"\"\n Special function needed to read in stats data table because\n a) pandas doesn't understand that the '#' means header\n b) there are multiple stats files concatenated together, resulting in headers being randomly mixed in\n Args:\n statfile: str\n File name with concatenated tab-separated tables of variant stats\n Returns:\n stats_data: pandas.DataFrame\n Table of variant stats\n \"\"\"\n with open(statfile, 'r') as f_in:\n # get column header from first line, stripping '#'\n columns = f_in.readline().lstrip('#').split()\n # read rest of tsv file, using these columns as header and ignoring any future lines starting with '#'\n return pandas.read_csv(statfile, sep='\\t', comment='#', names=columns)\n\n\ndef pick_outliers_by_group(\n chrom: str,\n sv_type: str,\n check_stats: pandas.DataFrame,\n all_samples: Set[str],\n zero_svs_are_outliers: bool = _zero_svs_are_outliers,\n outlier_std_threshold: float = _outlier_std_threshold\n) -> pandas.DataFrame:\n \"\"\"\n For given combination of contig and SV type, find samples that have outlier number of SVs. Return table of outliers\n along with statistics about SV count.\n Args:\n chrom: str\n Contig for checking SV counts\n sv_type: str\n SV type for checking SV counts\n check_stats: pandas.DataFrame\n Table with SV counts on this contig with this sv_type\n all_samples: Set[str]\n Set of all sample IDs in cohort\n zero_svs_are_outliers: bool\n Whether to treat samples with no counts as automatic outliers, or explicitly code as zero counts\n outlier_std_threshold: float\n Threshold for outlier status as multiple of standard deviation of SV counts\n Returns:\n outliers: pandas.DataFrame\n Table of outliers\n \"\"\"\n # find samples that are missing: they have 0 SVs of this type on this contig\n missing_samples = pandas.DataFrame(\n tuple(\n {\"CHROM\": chrom, \"SVTYPE\": sv_type, \"SAMPLE\": sample_id, \"NUM\": 0}\n for sample_id in all_samples.difference(check_stats[\"SAMPLE\"])\n )\n )\n\n if zero_svs_are_outliers:\n # THIS IS THE ORIGINAL PIPELINE BEHAVIOR\n # compute basic stats about observed nonzero SV counts\n count_mean = check_stats[\"NUM\"].mean()\n count_median = check_stats[\"NUM\"].median()\n count_std = check_stats[\"NUM\"].std()\n # Amongst samples that have SVs, find counts deviating by more than set multiple of std from the median\n is_outlier = numpy.abs(\n check_stats[\"NUM\"] - count_median) > outlier_std_threshold * count_std\n # Treat missing samples as outliers.\n outliers = pandas.concat(\n (missing_samples, check_stats.loc[is_outlier]), axis=0)\n else:\n # THIS FINDS FEWER, MORE MEANINGFUL OUTLIERS\n # Which samples are missing / included but have zero counts is unpredictable.\n # 1) concatenate all samples together\n check_stats = pandas.concat((check_stats, missing_samples), axis=0)\n # 2) compute stats from non-zero SV counts\n nonzero = check_stats[\"NUM\"] > 0\n count_mean = check_stats.loc[nonzero, \"NUM\"].mean()\n count_median = check_stats.loc[nonzero, \"NUM\"].median()\n count_std = check_stats.loc[nonzero, \"NUM\"].std()\n # 3) check outliers by usual means from those stats\n # Set threshold to be set multiple of greater of: std of 
counts, sqrt(median of counts)\n # (i.e. greater of std or expected Poisson std)\n # Find counts those deviating by more than threshold from the median (including zeros)\n is_outlier = (\n numpy.abs(check_stats[\"NUM\"] - count_median) >\n outlier_std_threshold * numpy.maximum(count_std, numpy.sqrt(count_median))\n )\n outliers = check_stats.loc[is_outlier].copy()\n\n if outliers.empty:\n return pandas.DataFrame([], columns=_column_order)\n # augment outlier table with some statistics\n outliers[\"Mean\"] = count_mean\n outliers[\"Median\"] = count_median\n outliers[\"STD\"] = count_std\n outliers[\"Outlier_Cate\"] = numpy.where(\n outliers[\"NUM\"] > count_median, \"high\", \"low\")\n # rename and re-order columns\n return outliers.rename({\"NUM\": \"Outlier_Number\", \"SAMPLE\": \"Outlier_Sample\"}, axis=1).reindex(_column_order, axis=1)\n\n\ndef pick_outliers(\n stats_data: pandas.DataFrame,\n zero_svs_are_outliers: bool = _zero_svs_are_outliers,\n outlier_std_threshold: float = _outlier_std_threshold\n) -> pandas.DataFrame:\n \"\"\"\n Find samples that have outlier number of SVs when broken down by contig and SV type. Return table of outliers\n along with statistics about SV count.\n Args:\n stats_data: pandas.DataFrame\n Table with SV counts\n zero_svs_are_outliers: bool\n Whether to treat samples with no counts as automatic outliers, or explicitly code as zero counts\n outlier_std_threshold: float\n Threshold for outlier status as multiple of standard deviation of SV counts\n Returns:\n outliers: pandas.DataFrame\n Table of outliers\n \"\"\"\n # get set of all samples in stats data\n all_samples = set(stats_data[\"SAMPLE\"])\n\n # loop over unique combinations of contig and sv type\n # find outliers from each unique combination\n # and concatenate those outliers into one table\n outliers = pandas.concat(\n tuple(\n pick_outliers_by_group(\n chrom=chrom, sv_type=sv_type, check_stats=check_stats, all_samples=all_samples,\n zero_svs_are_outliers=zero_svs_are_outliers, outlier_std_threshold=outlier_std_threshold\n )\n for (chrom, sv_type), check_stats in stats_data.groupby(\n [\"CHROM\", \"SVTYPE\"], sort=False, as_index=False, group_keys=False\n )\n ),\n axis=0\n )\n return outliers\n\n\ndef write_outliers_file(\n outliers: pandas.DataFrame,\n outname: str,\n outlier_type: str\n):\n \"\"\"\n Write outliers of the appropriate type (\"low\" or \"high\") to TSV file.\n Args:\n outliers: pandas.DataFrame\n Table of outlier data\n outname: str\n Base name of outlier TSV file. Final file name will have \".low\" or \".high\" appended to it.\n outlier_type: str\n \"low\" or \"high\".\n \"\"\"\n # write outliers to tsv. Add \"#\" in front of header\n with open(outname + \".\" + outlier_type, 'w') as f_out:\n f_out.write(\"#\") # add '#' in front of header\n outlier_wanted = outliers[\"Outlier_Cate\"] == outlier_type\n outliers.loc[outlier_wanted].to_csv(f_out, sep='\\t', index=False)\n\n\ndef calc_num_svs_pick_outlier(\n statfile: str,\n outname: str,\n zero_svs_are_outliers: bool = _zero_svs_are_outliers,\n outlier_std_threshold: float = _outlier_std_threshold\n):\n \"\"\"\n Find samples that have outlier number of SVs when broken down by contig and SV type.\n Write two tables of outliers, along with statistics about SV count: one for those with above-median counts (\"high\")\n and one for those at median or below (\"low\").\n Args:\n statfile: str\n TSV file with table with SV counts\n outname: str\n Base name for saving outlier files. 
Low file will have \".low\" appended to the name, and high file will have\n \".high\"\n zero_svs_are_outliers: bool\n Whether to treat samples with no counts as automatic outliers, or explicitly code as zero counts\n outlier_std_threshold: float\n Threshold for outlier status as multiple of standard deviation of SV counts\n \"\"\"\n stats_data = read_statfile(statfile)\n outliers = pick_outliers(stats_data, zero_svs_are_outliers=zero_svs_are_outliers,\n outlier_std_threshold=outlier_std_threshold)\n write_outliers_file(outliers, outname, \"low\")\n write_outliers_file(outliers, outname, \"high\")\n\n\ndef _parse_arguments(argv: Sequence[str]) -> argparse.Namespace:\n # noinspection PyTypeChecker\n parser = argparse.ArgumentParser(\n description=\"Find outliers in SV counts broken down by contig and SV type\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\"statfile\", type=str,\n help=\"name of stats concatinated from all samples\")\n parser.add_argument(\"outname\", type=str, help=\"name of output file\")\n parser.add_argument(\"-z\", \"--zero-counts-are-not-outliers\", action=\"store_true\",\n help=\"don't make zero SV counts an automatic outlier, check deviation from median as usual\")\n parser.add_argument(\"-t\", \"--outlier-std-threshold\", type=float, default=_outlier_std_threshold,\n help=\"threshold multiple of std of counts for outliers\")\n return parser.parse_args(argv[1:])\n\n\nif __name__ == \"__main__\":\n args = _parse_arguments(sys.argv)\n calc_num_svs_pick_outlier(statfile=args.statfile, outname=args.outname,\n zero_svs_are_outliers=not args.zero_counts_are_not_outliers,\n outlier_std_threshold=args.outlier_std_threshold)\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"numpy.abs",
"pandas.concat",
"numpy.sqrt",
"numpy.where"
]
] |
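The core rule in `pick_outliers_by_group` above flags a sample when its SV count deviates from the group median by more than `outlier_std_threshold` standard deviations (5.0 by default). A toy, self-contained illustration of that rule on synthetic counts; the Poisson data below is invented for demonstration:

```python
# Toy illustration of the outlier rule from pick_outliers_by_group above:
# |count - median| > threshold * std, using the pipeline default of 5.0.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
counts = pd.Series(np.r_[rng.poisson(10, size=100), 100])  # one extreme sample

threshold = 5.0
is_outlier = np.abs(counts - counts.median()) > threshold * counts.std()
print(counts[is_outlier])  # only the injected count of 100 is flagged
```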
savelov/nowcast | [
"9c1168b1ba642f15bc4ffb000bdbca6db27c29b1"
] | [
"pysteps/io/exporters.py"
] | [
"\"\"\"\npysteps.io.exporter\n===================\n\nMethods for exporting forecasts of 2d precipitation fields into various file\nformats.\n\nEach exporter method in this module has its own initialization function that\nimplements the following interface::\n\n initialize_forecast_exporter_xxx(filename, startdate, timestep,\n num_timesteps, shape, num_ens_members,\n metadata, incremental=None)\n\nwhere xxx is the name (or abbreviation) of the file format.\n\nThis function creates the file and writes the metadata. The datasets are written\nby calling :py:func:`pysteps.io.exporters.export_forecast_dataset`, and\nthe file is closed by calling :py:func:`pysteps.io.exporters.close_forecast_file`.\n\nThe arguments in the above are defined as follows:\n\n.. tabularcolumns:: |p{2cm}|p{2cm}|L|\n\n+---------------+-------------------+-----------------------------------------+\n| Argument | Type/values | Description |\n+===============+===================+=========================================+\n| filename | str | name of the output file |\n+---------------+-------------------+-----------------------------------------+\n| startdate | datetime.datetime | start date of the forecast |\n+---------------+-------------------+-----------------------------------------+\n| timestep | int | time step of the forecast (minutes) |\n+---------------+-------------------+-----------------------------------------+\n| n_timesteps | int | number of time steps in the forecast |\n| | | this argument is ignored if |\n| | | incremental is set to 'timestep'. |\n+---------------+-------------------+-----------------------------------------+\n| shape | tuple | two-element tuple defining the shape |\n| | | (height,width) of the forecast grids |\n+---------------+-------------------+-----------------------------------------+\n| n_ens_members | int | number of ensemble members in the |\n| | | forecast. This argument is ignored if |\n| | | incremental is set to 'member' |\n+---------------+-------------------+-----------------------------------------+\n| metadata | dict | metadata dictionary containing the |\n| | | projection,x1,x2,y1,y2 and unit |\n| | | attributes described in the |\n| | | documentation of pysteps.io.importers |\n+---------------+-------------------+-----------------------------------------+\n| incremental | {None, 'timestep',| Allow incremental writing of datasets |\n| | 'member'} | into the netCDF file |\n| | | the available options are: |\n| | | 'timestep' = write a forecast or a |\n| | | forecast ensemble for a given |\n| | | time step |\n| | | 'member' = write a forecast sequence |\n| | | for a given ensemble member |\n+---------------+-------------------+-----------------------------------------+\n\nThe return value is a dictionary containing an exporter object. This can be\nused with :py:func:`pysteps.io.exporters.export_forecast_dataset` to write \ndatasets into the given file format.\n\nAvailable Exporters\n-------------------\n\n.. autosummary::\n :toctree: ../generated/\n\n initialize_forecast_exporter_kineros\n initialize_forecast_exporter_netcdf\n\nGeneric functions\n-----------------\n\n.. 
autosummary::\n :toctree: ../generated/\n\n export_forecast_dataset\n close_forecast_file\n\"\"\"\n\nfrom datetime import datetime\nimport numpy as np\nimport os\nfrom pysteps.exceptions import MissingOptionalDependency\n\ntry:\n import netCDF4\n netcdf4_imported = True\nexcept ImportError:\n netcdf4_imported = False\ntry:\n import pyproj\n pyproj_imported = True\nexcept ImportError:\n pyproj_imported = False\n\n# TODO(exporters): This is a draft version of the kineros exporter.\n# Revise the variable names and\n# the structure of the file if necessary.\n\ndef initialize_forecast_exporter_kineros(filename, startdate, timestep,\n n_timesteps, shape, n_ens_members,\n metadata, incremental=None):\n \"\"\"Initialize a KINEROS2 Rainfall .pre file as specified\n in https://www.tucson.ars.ag.gov/kineros/.\n\n Grid points are treated as individual rain gauges and a separate file is\n produced for each ensemble member.\n \n Parameters\n ----------\n filename : str\n Name of the output file.\n \n startdate : datetime.datetime\n Start date of the forecast as datetime object.\n \n timestep : int\n Time step of the forecast (minutes).\n \n n_timesteps : int\n Number of time steps in the forecast this argument is ignored if \n incremental is set to 'timestep'.\n \n shape : tuple of int\n Two-element tuple defining the shape (height,width) of the forecast \n grids.\n \n n_ens_members : int\n Number of ensemble members in the forecast. This argument is ignored if\n incremental is set to 'member'.\n \n metadata: dict\n Metadata dictionary containing the projection,x1,x2,y1,y2 and unit \n attributes described in the documentation of \n :py:mod:`pysteps.io.importers`.\n \n incremental : {None}, optional\n Currently not implemented for this method.\n\n Returns\n -------\n exporter : dict\n The return value is a dictionary containing an exporter object. This c\n an be used with :py:func:`pysteps.io.exporters.export_forecast_dataset` \n to write datasets into the given file format.\n \n \"\"\"\n\n if incremental is not None:\n raise ValueError(\"unknown option %s: incremental writing is not supported\" % incremental)\n\n exporter = {}\n\n basefn, extfn = os.path.splitext(filename)\n if extfn == \"\":\n extfn = \".pre\"\n\n # one file for each member\n n_ens_members = np.min((99, n_ens_members))\n fns = []\n for i in range(n_ens_members):\n fn = \"%s_N%02d%s\" % (basefn, i, extfn)\n with open(fn, \"w\") as fd:\n # write header\n fd.writelines(\"! pysteps-generated nowcast.\\n\")\n fd.writelines(\"! created the %s.\\n\" % datetime.now().strftime(\"%c\"))\n # TODO(exporters): Add pySTEPS version here\n fd.writelines(\"! Member = %02d.\\n\" % i)\n fd.writelines(\"! 
Startdate = %s.\\n\" % startdate.strftime(\"%c\"))\n fns.append(fn)\n fd.close()\n\n h, w = shape\n\n if metadata[\"unit\"] == \"mm/h\":\n var_name = \"Intensity\"\n var_long_name = \"Intensity in mm/hr\"\n var_unit = \"mm/hr\"\n elif metadata[\"unit\"] == \"mm\":\n var_name = \"Depth\"\n var_long_name = \"Accumulated depth in mm\"\n var_unit = \"mm\"\n else:\n raise ValueError(\"unsupported unit %s\" % metadata[\"unit\"])\n\n xr = np.linspace(metadata[\"x1\"], metadata[\"x2\"], w+1)[:-1]\n xr += 0.5 * (xr[1] - xr[0])\n yr = np.linspace(metadata[\"y1\"], metadata[\"y2\"], h+1)[:-1]\n yr += 0.5 * (yr[1] - yr[0])\n X, Y = np.meshgrid(xr, yr)\n XY_coords = np.stack([X, Y])\n\n exporter[\"method\"] = \"kineros\"\n exporter[\"ncfile\"] = fns\n exporter[\"XY_coords\"] = XY_coords\n exporter[\"var_name\"] = var_name\n exporter[\"var_long_name\"] = var_long_name\n exporter[\"var_unit\"] = var_unit\n exporter[\"startdate\"] = startdate\n exporter[\"timestep\"] = timestep\n exporter[\"metadata\"] = metadata\n exporter[\"incremental\"] = incremental\n exporter[\"num_timesteps\"] = n_timesteps\n exporter[\"num_ens_members\"] = n_ens_members\n exporter[\"shape\"] = shape\n\n return exporter\n\n\n# TODO(exporters): This is a draft version of the netcdf exporter.\n# Revise the variable names and\n# the structure of the file if necessary.\n\ndef initialize_forecast_exporter_netcdf(filename, startdate, timestep,\n n_timesteps, shape, n_ens_members,\n metadata, product='precip_intensity',\n incremental=None):\n \"\"\"Initialize a netCDF forecast exporter.\n \n Parameters\n ----------\n filename : str\n Name of the output file.\n \n startdate : datetime.datetime\n Start date of the forecast as datetime object.\n \n timestep : int\n Time step of the forecast (minutes).\n \n n_timesteps : int\n Number of time steps in the forecast this argument is ignored if \n incremental is set to 'timestep'.\n \n shape : tuple of int\n Two-element tuple defining the shape (height,width) of the forecast \n grids.\n \n n_ens_members : int\n Number of ensemble members in the forecast. This argument is ignored if\n incremental is set to 'member'.\n \n metadata: dict\n Metadata dictionary containing the projection,x1,x2,y1,y2 and unit \n attributes described in the documentation of \n :py:mod:`pysteps.io.importers`.\n\n product: str\n product name can be 'precip_intensity' for intensity export,\n 'precip_probability' for probability export.\n\n incremental : {None,'timestep','member'}, optional\n Allow incremental writing of datasets into the netCDF file.\\n\n The available options are: 'timestep' = write a forecast or a forecast \n ensemble for a given time step; 'member' = write a forecast sequence \n for a given ensemble member. If set to None, incremental writing is \n disabled.\n\n Returns\n -------\n exporter : dict\n The return value is a dictionary containing an exporter object. 
This c\n an be used with :py:func:`pysteps.io.exporters.export_forecast_dataset` \n to write datasets into the given file format.\n \n \"\"\"\n if not netcdf4_imported:\n raise MissingOptionalDependency(\n \"netCDF4 package is required for netcdf \"\n \"exporters but it is not installed\")\n\n if not pyproj_imported:\n raise MissingOptionalDependency(\n \"pyproj package is required for netcdf \"\n \"exporters but it is not installed\")\n\n if incremental not in [None, \"timestep\", \"member\"]:\n raise ValueError(\"unknown option %s: incremental must be 'timestep' or 'member'\" % incremental)\n\n if incremental == \"timestep\":\n n_timesteps = None\n elif incremental == \"member\":\n n_ens_members = None\n elif incremental is not None:\n raise ValueError(\"unknown argument value incremental='%s': must be 'timestep' or 'member'\" % str(incremental))\n\n exporter = {}\n\n filename = os.path.realpath(filename)\n if not os.path.exists(os.path.dirname(filename)):\n os.mkdir(os.path.dirname(filename))\n ncf = netCDF4.Dataset(filename, 'w', format=\"NETCDF4\")\n\n ncf.Conventions = \"CF-1.7\"\n ncf.title = \"pysteps-generated nowcast\"\n ncf.institution = \"the pySTEPS community (https://pysteps.github.io)\"\n ncf.source = \"pysteps\" # TODO(exporters): Add pySTEPS version here\n ncf.history = \"\"\n ncf.references = \"\"\n ncf.comment = \"\"\n\n h, w = shape\n\n # if product != 'precip_probability':\n # ncf.createDimension(\"ens_number\", size=n_ens_members)\n ncf.createDimension(\"time\", size=n_timesteps)\n ncf.createDimension(\"y\", size=h)\n ncf.createDimension(\"x\", size=w)\n\n # necessary settings for probability nowcasting\n ncf.datetime = str(startdate)\n if product == 'precip_probability':\n #TODO: Add this metadata unit percent in the source\n metadata[\"unit\"] = \"percent\"\n\n if metadata[\"unit\"] == \"mm/h\":\n var_name = \"precip_intensity\"\n var_standard_name = None\n var_long_name = \"instantaneous precipitation rate\"\n var_unit = \"mm h-1\"\n elif metadata[\"unit\"] == \"percent\":\n var_name = \"precip_probability\"\n var_standard_name = None\n var_long_name = \"probablistic precipitation\"\n var_unit = \"percent\"\n elif metadata[\"unit\"] == \"mm\":\n var_name = \"precip_accum\"\n var_standard_name = None\n var_long_name = \"accumulated precipitation\"\n var_unit = \"mm\"\n elif metadata[\"unit\"] == \"dBZ\":\n var_name = \"reflectivity\"\n var_long_name = \"equivalent reflectivity factor\"\n var_standard_name = \"equivalent_reflectivity_factor\"\n var_unit = \"dBZ\"\n else:\n raise ValueError(\"unknown unit %s\" % metadata[\"unit\"])\n\n xr = np.linspace(metadata[\"x1\"], metadata[\"x2\"], w+1)[:-1]\n xr += 0.5 * (xr[1] - xr[0])\n yr = np.linspace(metadata[\"y1\"], metadata[\"y2\"], h+1)[:-1]\n yr += 0.5 * (yr[1] - yr[0])\n\n var_xc = ncf.createVariable(\"xc\", np.float32, dimensions=(\"x\",))\n var_xc[:] = xr\n var_xc.axis = 'X'\n var_xc.standard_name = \"projection_x_coordinate\"\n var_xc.long_name = \"x-coordinate in Cartesian system\"\n # TODO(exporters): Don't hard-code the unit.\n var_xc.units = 'm'\n\n var_yc = ncf.createVariable(\"yc\", np.float32, dimensions=(\"y\",))\n var_yc[:] = yr\n var_yc.axis = 'Y'\n var_yc.standard_name = \"projection_y_coordinate\"\n var_yc.long_name = \"y-coordinate in Cartesian system\"\n # TODO(exporters): Don't hard-code the unit.\n var_yc.units = 'm'\n\n X, Y = np.meshgrid(xr, yr)\n pr = pyproj.Proj(metadata[\"projection\"])\n lon,lat = pr(X.flatten(), Y.flatten(), inverse=True)\n\n lon, lat = pr(X.flatten(), Y.flatten(), 
new_long = np.asarray(lon).reshape(h, w)\n new_lat = np.asarray(lat).reshape(h, w)\n\n var_lon = ncf.createVariable(\"lon\", np.float32, dimensions=(\"y\", \"x\"))\n var_lon[:] = new_long\n var_lon.standard_name = \"longitude\"\n var_lon.long_name = \"longitude coordinate\"\n # TODO(exporters): Don't hard-code the unit.\n var_lon.units = \"degrees_east\"\n\n var_lat = ncf.createVariable(\"lat\", np.float32, dimensions=(\"y\", \"x\"))\n var_lat[:] = new_lat\n var_lat.standard_name = \"latitude\"\n var_lat.long_name = \"latitude coordinate\"\n # TODO(exporters): Don't hard-code the unit.\n var_lat.units = \"degrees_north\"\n\n ncf.projection = metadata[\"projection\"]\n\n grid_mapping_var_name, grid_mapping_name, grid_mapping_params = \\\n _convert_proj4_to_grid_mapping(metadata[\"projection\"])\n # skip writing the grid mapping if a matching name was not found\n if grid_mapping_var_name is not None:\n var_gm = ncf.createVariable(grid_mapping_var_name, np.int32,\n dimensions=())\n var_gm.grid_mapping_name = grid_mapping_name\n for key, value in grid_mapping_params.items():\n var_gm.setncattr(key, value)\n\n # if product != 'precip_probability':\n # var_ens_num = ncf.createVariable(\"ens_number\", np.int,\n # dimensions=(\"ens_number\",))\n # if incremental != \"member\":\n # var_ens_num[:] = list(range(1, n_ens_members+1))\n # var_ens_num.long_name = \"ensemble member\"\n # var_ens_num.units = \"\"\n\n var_time = ncf.createVariable(\"time\", np.int32, dimensions=(\"time\",))\n if incremental != \"timestep\":\n if product == 'precip_probability':\n var_time[:] = [i*timestep for i in range(1, n_timesteps+1)]\n else:\n var_time[:] = [i*timestep*60 for i in range(1, n_timesteps+1)]\n\n var_time.long_name = \"forecast time\"\n startdate_str = datetime.strftime(startdate, \"%Y-%m-%d %H:%M:%S\")\n var_time.units = \"minutes since %s\" % startdate_str if product == 'precip_probability' \\\n else \"seconds since %s\" % startdate_str\n\n dimensions = (\"time\", \"y\", \"x\")\n\n var_F = ncf.createVariable(var_name, np.float32,\n dimensions=dimensions,\n zlib=True, complevel=9)\n\n if var_standard_name is not None:\n var_F.standard_name = var_standard_name\n var_F.long_name = var_long_name\n var_F.coordinates = \"y x\"\n var_F.units = var_unit\n\n exporter[\"method\"] = \"netcdf\"\n exporter[\"ncfile\"] = ncf\n exporter[\"var_F\"] = var_F\n # if product != 'precip_probability':\n # exporter[\"var_ens_num\"] = var_ens_num\n exporter[\"var_time\"] = var_time\n exporter[\"var_name\"] = var_name\n exporter[\"startdate\"] = startdate\n exporter[\"timestep\"] = timestep\n exporter[\"metadata\"] = metadata\n exporter[\"incremental\"] = incremental\n exporter[\"num_timesteps\"] = n_timesteps\n exporter[\"num_ens_members\"] = n_ens_members\n exporter[\"shape\"] = shape\n\n return exporter\n\n\ndef export_forecast_dataset(F, exporter, mask=None):\n \"\"\"Write a forecast array into a file.\n\n The written dataset has dimensions\n (num_timesteps,shape[0],shape[1]), where shape refers to\n the shape of the two-dimensional forecast grids.
If the exporter was\n initialized with incremental!=None, the array is appended to the existing\n dataset along the time axis.\n\n Parameters\n ----------\n F : array_like\n The array to write. The required shape depends on the choice of the\n 'incremental' parameter the exporter was initialized with:\n\n :TODO: Update this table incorporating 'precip_probability'\n +-----------------+---------------------------------------------------+\n | incremental | required shape |\n +=================+===================================================+\n | None | (num_timesteps,shape[0],shape[1]) |\n +-----------------+---------------------------------------------------+\n | 'timestep' | (num_ens_members,shape[0],shape[1]) |\n +-----------------+---------------------------------------------------+\n | 'member' | (num_timesteps,shape[0],shape[1]) |\n +-----------------+---------------------------------------------------+\n\n exporter : dict\n An exporter object created with any initialization method implemented\n in :py:mod:`pysteps.io.exporters`.\n mask : array_like, optional\n Optional mask; accepted for API compatibility and currently unused by\n the writers.\n\n \"\"\"\n if exporter[\"method\"] == \"netcdf\" and not netcdf4_imported:\n raise MissingOptionalDependency(\n \"netCDF4 package is required for netcdf \"\n \"exporters but it is not installed\")\n\n if exporter[\"incremental\"] is None:\n shp = (exporter[\"num_timesteps\"], exporter[\"shape\"][0], exporter[\"shape\"][1])\n if F.shape != shp:\n raise ValueError(\"F has invalid shape: %s != %s\" % (str(F.shape),str(shp)))\n elif exporter[\"incremental\"] == \"timestep\":\n shp = (exporter[\"num_ens_members\"], exporter[\"shape\"][0],\n exporter[\"shape\"][1])\n if F.shape != shp:\n raise ValueError(\"F has invalid shape: %s != %s\" % (str(F.shape),str(shp)))\n elif exporter[\"incremental\"] == \"member\":\n shp = (exporter[\"num_timesteps\"], exporter[\"shape\"][0],\n exporter[\"shape\"][1])\n if F.shape != shp:\n raise ValueError(\"F has invalid shape: %s != %s\" % (str(F.shape),str(shp)))\n\n if exporter[\"method\"] == \"netcdf\":\n _export_netcdf(F, exporter, mask)\n elif exporter[\"method\"] == \"kineros\":\n _export_kineros(F, exporter)\n else:\n raise ValueError(\"unknown exporter method %s\" % exporter[\"method\"])\n\n\ndef close_forecast_file(exporter):\n \"\"\"Close the file associated with a forecast exporter.\n\n Finish writing forecasts and close the file associated with a forecast\n exporter.\n\n Parameters\n ----------\n exporter : dict\n An exporter object created with any initialization method implemented\n in :py:mod:`pysteps.io.exporters`.\n\n \"\"\"\n if exporter[\"method\"] == \"kineros\":\n pass # no need to close the file\n else:\n exporter[\"ncfile\"].close()\n\n\ndef _export_kineros(F, exporter):\n\n num_timesteps = exporter[\"num_timesteps\"]\n num_ens_members = exporter[\"num_ens_members\"]\n startdate = exporter[\"startdate\"]\n timestep = exporter[\"timestep\"]\n xgrid = exporter[\"XY_coords\"][0, :, :].flatten()\n ygrid = exporter[\"XY_coords\"][1, :, :].flatten()\n\n timemin = [(t + 1)*timestep for t in range(num_timesteps)]\n\n for n in range(num_ens_members):\n fn = exporter[\"ncfile\"][n]\n F_ = F[n, :, :, :].reshape((num_timesteps, -1))\n if exporter[\"var_name\"] == \"Depth\":\n F_ = np.cumsum(F_, axis=0)\n with open(fn, \"a\") as fd:\n for m in range(F_.shape[1]):\n fd.writelines(\"BEGIN RG%03d\\n\" % (m + 1))\n fd.writelines(\" X = %.2f, Y = %.2f\\n\" % (xgrid[m], ygrid[m]))\n fd.writelines(\" N = %i\\n\" % num_timesteps)\n fd.writelines(\" TIME %s\\n\" %
exporter[\"var_name\"].upper())\n fd.writelines(\"! (min) (%s)\\n\" % exporter[\"var_unit\"])\n for t in range(num_timesteps):\n line_new = \"{:6.1f} {:11.2f}\\n\".format(timemin[t], F_[t, m])\n fd.writelines(line_new)\n fd.writelines(\"END\\n\\n\")\n\n\ndef _export_netcdf(F, exporter, mask=None):\n var_F = exporter[\"var_F\"]\n\n if exporter[\"incremental\"] is None:\n var_F[:] = F[:,::-1,:]\n elif exporter[\"incremental\"] == \"timestep\":\n var_F[:, var_F.shape[1], :, :] = F\n var_time = exporter[\"var_time\"]\n var_time[len(var_time)-1] = len(var_time) * exporter[\"timestep\"] * 60\n else:\n var_F[var_F.shape[0], :, :, :] = F\n var_ens_num = exporter[\"var_time\"]\n var_ens_num[len(var_ens_num)-1] = len(var_ens_num)\n\n\n# TODO(exporters): Write methods for converting Proj.4 projection definitions\n# into CF grid mapping attributes. Currently this has been implemented for\n# the stereographic projection.\n# The conversions implemented here are take from:\n# https://github.com/cf-convention/cf-convention.github.io/blob/master/wkt-proj-4.md\n\ndef _convert_proj4_to_grid_mapping(proj4str):\n tokens = proj4str.split('+')\n\n d = {}\n for t in tokens[1:]:\n t = t.split('=')\n if len(t) > 1:\n d[t[0]] = t[1].strip()\n\n params = {}\n # TODO(exporters): implement more projection types here\n if d[\"proj\"] == \"stere\":\n grid_mapping_var_name = \"polar_stereographic\"\n grid_mapping_name = \"polar_stereographic\"\n v = d[\"lon_0\"] if d[\"lon_0\"][-1] not in [\"E\", \"W\"] else d[\"lon_0\"][:-1]\n params[\"straight_vertical_longitude_from_pole\"] = float(v)\n v = d[\"lat_0\"] if d[\"lat_0\"][-1] not in [\"N\", \"S\"] else d[\"lat_0\"][:-1]\n params[\"latitude_of_projection_origin\"] = float(v)\n if \"lat_ts\" in list(d.keys()):\n params[\"standard_parallel\"] = float(d[\"lat_ts\"])\n elif \"k_0\" in list(d.keys()):\n params[\"scale_factor_at_projection_origin\"] = float(d[\"k_0\"])\n params[\"false_easting\"] = float(d[\"x_0\"])\n params[\"false_northing\"] = float(d[\"y_0\"])\n elif d[\"proj\"] == \"sterea\":\n grid_mapping_var_name = \"oblique_stereographic\"\n grid_mapping_name = \"oblique_stereographic\"\n v = d[\"lon_0\"] if d[\"lon_0\"][-1] not in [\"E\", \"W\"] else d[\"lon_0\"][:-1]\n params[\"longitude_of_projection_origin\"] = float(v)\n v = d[\"lat_0\"] if d[\"lat_0\"][-1] not in [\"N\", \"S\"] else d[\"lat_0\"][:-1]\n params[\"latitude_of_projection_origin\"] = float(v)\n if \"lat_ts\" in list(d.keys()):\n params[\"standard_parallel\"] = float(d[\"lat_ts\"])\n elif \"k_0\" in list(d.keys()):\n params[\"scale_factor_at_projection_origin\"] = float(d[\"k_0\"])\n params[\"false_easting\"] = float(d[\"x_0\"])\n params[\"false_northing\"] = float(d[\"y_0\"])\n elif d[\"proj\"] == \"aea\": # Albers Conical Equal Area\n grid_mapping_var_name = \"proj\"\n grid_mapping_name = \"albers_conical_equal_area\"\n params[\"false_easting\"] = float(d[\"x_0\"]) if \"x_0\" in d else float(0)\n params[\"false_northing\"] = float(d[\"y_0\"]) if \"y_0\" in d else float(0)\n v = d[\"lon_0\"] if \"lon_0\" in d else float(0)\n params[\"longitude_of_central_meridian\"] = float(v)\n v = d[\"lat_0\"] if \"lat_0\" in d else float(0)\n params[\"latitude_of_projection_origin\"] = float(v)\n v1 = d[\"lat_1\"] if \"lat_1\" in d else float(0)\n v2 = d[\"lat_2\"] if \"lat_2\" in d else float(0)\n params[\"standard_parallel\"] = (float(v1), float(v2))\n else:\n print('unknown projection', d[\"proj\"])\n return None, None, None\n\n return grid_mapping_var_name, grid_mapping_name, params\n"
] | [
[
"numpy.cumsum",
"numpy.zeros",
"numpy.min",
"numpy.stack",
"numpy.meshgrid",
"numpy.linspace"
]
] |
StewSchrieff/riddlerHoopGame | [
"3d63f494aa803c7571ace83f87a40ce5d6b0dfc1"
] | [
"venv/Lib/site-packages/matplotlib/backends/backend_qt5.py"
] | [
"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport six\n\nimport functools\nimport os\nimport re\nimport signal\nimport sys\nfrom six import unichr\nimport traceback\n\nimport matplotlib\n\nfrom matplotlib._pylab_helpers import Gcf\nfrom matplotlib.backend_bases import (\n _Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,\n TimerBase, cursors, ToolContainerBase, StatusbarBase)\nimport matplotlib.backends.qt_editor.figureoptions as figureoptions\nfrom matplotlib.backends.qt_editor.formsubplottool import UiSubplotTool\nfrom matplotlib.figure import Figure\nfrom matplotlib.backend_managers import ToolManager\nfrom matplotlib import backend_tools\n\nfrom .qt_compat import (\n QtCore, QtGui, QtWidgets, _getSaveFileName, is_pyqt5, __version__, QT_API)\n\nbackend_version = __version__\n\n# SPECIAL_KEYS are keys that do *not* return their unicode name\n# instead they have manually specified names\nSPECIAL_KEYS = {QtCore.Qt.Key_Control: 'control',\n QtCore.Qt.Key_Shift: 'shift',\n QtCore.Qt.Key_Alt: 'alt',\n QtCore.Qt.Key_Meta: 'super',\n QtCore.Qt.Key_Return: 'enter',\n QtCore.Qt.Key_Left: 'left',\n QtCore.Qt.Key_Up: 'up',\n QtCore.Qt.Key_Right: 'right',\n QtCore.Qt.Key_Down: 'down',\n QtCore.Qt.Key_Escape: 'escape',\n QtCore.Qt.Key_F1: 'f1',\n QtCore.Qt.Key_F2: 'f2',\n QtCore.Qt.Key_F3: 'f3',\n QtCore.Qt.Key_F4: 'f4',\n QtCore.Qt.Key_F5: 'f5',\n QtCore.Qt.Key_F6: 'f6',\n QtCore.Qt.Key_F7: 'f7',\n QtCore.Qt.Key_F8: 'f8',\n QtCore.Qt.Key_F9: 'f9',\n QtCore.Qt.Key_F10: 'f10',\n QtCore.Qt.Key_F11: 'f11',\n QtCore.Qt.Key_F12: 'f12',\n QtCore.Qt.Key_Home: 'home',\n QtCore.Qt.Key_End: 'end',\n QtCore.Qt.Key_PageUp: 'pageup',\n QtCore.Qt.Key_PageDown: 'pagedown',\n QtCore.Qt.Key_Tab: 'tab',\n QtCore.Qt.Key_Backspace: 'backspace',\n QtCore.Qt.Key_Enter: 'enter',\n QtCore.Qt.Key_Insert: 'insert',\n QtCore.Qt.Key_Delete: 'delete',\n QtCore.Qt.Key_Pause: 'pause',\n QtCore.Qt.Key_SysReq: 'sysreq',\n QtCore.Qt.Key_Clear: 'clear', }\n\n# define which modifier keys are collected on keyboard events.\n# elements are (mpl names, Modifier Flag, Qt Key) tuples\nSUPER = 0\nALT = 1\nCTRL = 2\nSHIFT = 3\nMODIFIER_KEYS = [('super', QtCore.Qt.MetaModifier, QtCore.Qt.Key_Meta),\n ('alt', QtCore.Qt.AltModifier, QtCore.Qt.Key_Alt),\n ('ctrl', QtCore.Qt.ControlModifier, QtCore.Qt.Key_Control),\n ('shift', QtCore.Qt.ShiftModifier, QtCore.Qt.Key_Shift),\n ]\n\nif sys.platform == 'darwin':\n # in OSX, the control and super (aka cmd/apple) keys are switched, so\n # switch them back.\n SPECIAL_KEYS.update({QtCore.Qt.Key_Control: 'cmd', # cmd/apple key\n QtCore.Qt.Key_Meta: 'control',\n })\n MODIFIER_KEYS[0] = ('cmd', QtCore.Qt.ControlModifier,\n QtCore.Qt.Key_Control)\n MODIFIER_KEYS[2] = ('ctrl', QtCore.Qt.MetaModifier,\n QtCore.Qt.Key_Meta)\n\n\ncursord = {\n cursors.MOVE: QtCore.Qt.SizeAllCursor,\n cursors.HAND: QtCore.Qt.PointingHandCursor,\n cursors.POINTER: QtCore.Qt.ArrowCursor,\n cursors.SELECT_REGION: QtCore.Qt.CrossCursor,\n cursors.WAIT: QtCore.Qt.WaitCursor,\n }\n\n\n# make place holder\nqApp = None\n\n\ndef _create_qApp():\n \"\"\"\n Only one qApp can exist at a time, so check before creating one.\n \"\"\"\n global qApp\n\n if qApp is None:\n app = QtWidgets.QApplication.instance()\n if app is None:\n # check for DISPLAY env variable on X11 build of Qt\n if is_pyqt5():\n try:\n from PyQt5 import QtX11Extras\n is_x11_build = True\n except ImportError:\n is_x11_build = False\n else:\n is_x11_build = hasattr(QtGui, \"QX11Info\")\n if is_x11_build:\n display = 
os.environ.get('DISPLAY')\n if display is None or not re.search(r':\\d', display):\n raise RuntimeError('Invalid DISPLAY variable')\n\n qApp = QtWidgets.QApplication([b\"matplotlib\"])\n qApp.lastWindowClosed.connect(qApp.quit)\n else:\n qApp = app\n\n if is_pyqt5():\n try:\n qApp.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)\n qApp.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)\n except AttributeError:\n pass\n\n\ndef _allow_super_init(__init__):\n \"\"\"\n Decorator for ``__init__`` to allow ``super().__init__`` on PyQt4/PySide2.\n \"\"\"\n\n if QT_API == \"PyQt5\":\n\n return __init__\n\n else:\n # To work around lack of cooperative inheritance in PyQt4, PySide,\n # and PySide2, when calling FigureCanvasQT.__init__, we temporarily\n # patch QWidget.__init__ by a cooperative version, that first calls\n # QWidget.__init__ with no additional arguments, and then finds the\n # next class in the MRO with an __init__ that does support cooperative\n # inheritance (i.e., not defined by the PyQt4, PySide, PySide2, sip\n # or Shiboken packages), and manually call its `__init__`, once again\n # passing the additional arguments.\n\n qwidget_init = QtWidgets.QWidget.__init__\n\n def cooperative_qwidget_init(self, *args, **kwargs):\n qwidget_init(self)\n mro = type(self).__mro__\n next_coop_init = next(\n cls for cls in mro[mro.index(QtWidgets.QWidget) + 1:]\n if cls.__module__.split(\".\")[0] not in [\n \"PyQt4\", \"sip\", \"PySide\", \"PySide2\", \"Shiboken\"])\n next_coop_init.__init__(self, *args, **kwargs)\n\n @functools.wraps(__init__)\n def wrapper(self, **kwargs):\n try:\n QtWidgets.QWidget.__init__ = cooperative_qwidget_init\n __init__(self, **kwargs)\n finally:\n # Restore __init__\n QtWidgets.QWidget.__init__ = qwidget_init\n\n return wrapper\n\n\nclass TimerQT(TimerBase):\n '''\n Subclass of :class:`backend_bases.TimerBase` that uses Qt timer events.\n\n Attributes\n ----------\n interval : int\n The time between timer events in milliseconds. Default is 1000 ms.\n single_shot : bool\n Boolean flag indicating whether this timer should\n operate as single shot (run once and then stop). Defaults to False.\n callbacks : list\n Stores list of (func, args) tuples that will be called upon timer\n events. 
This list can be manipulated directly, or the functions\n `add_callback` and `remove_callback` can be used.\n\n '''\n\n def __init__(self, *args, **kwargs):\n TimerBase.__init__(self, *args, **kwargs)\n\n # Create a new timer and connect the timeout() signal to the\n # _on_timer method.\n self._timer = QtCore.QTimer()\n self._timer.timeout.connect(self._on_timer)\n self._timer_set_interval()\n\n def _timer_set_single_shot(self):\n self._timer.setSingleShot(self._single)\n\n def _timer_set_interval(self):\n self._timer.setInterval(self._interval)\n\n def _timer_start(self):\n self._timer.start()\n\n def _timer_stop(self):\n self._timer.stop()\n\n\nclass FigureCanvasQT(QtWidgets.QWidget, FigureCanvasBase):\n\n # map Qt button codes to MouseEvent's ones:\n buttond = {QtCore.Qt.LeftButton: 1,\n QtCore.Qt.MidButton: 2,\n QtCore.Qt.RightButton: 3,\n # QtCore.Qt.XButton1: None,\n # QtCore.Qt.XButton2: None,\n }\n\n @_allow_super_init\n def __init__(self, figure):\n _create_qApp()\n super(FigureCanvasQT, self).__init__(figure=figure)\n\n self.figure = figure\n # We don't want to scale up the figure DPI more than once.\n # Note, we don't handle a signal for changing DPI yet.\n figure._original_dpi = figure.dpi\n self._update_figure_dpi()\n # In cases with mixed resolution displays, we need to be careful if the\n # dpi_ratio changes - in this case we need to resize the canvas\n # accordingly. We could watch for screenChanged events from Qt, but\n # the issue is that we can't guarantee this will be emitted *before*\n # the first paintEvent for the canvas, so instead we keep track of the\n # dpi_ratio value here and in paintEvent we resize the canvas if\n # needed.\n self._dpi_ratio_prev = None\n\n self._draw_pending = False\n self._is_drawing = False\n self._draw_rect_callback = lambda painter: None\n\n self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)\n self.setMouseTracking(True)\n self.resize(*self.get_width_height())\n # Key auto-repeat enabled by default\n self._keyautorepeat = True\n\n palette = QtGui.QPalette(QtCore.Qt.white)\n self.setPalette(palette)\n\n def _update_figure_dpi(self):\n dpi = self._dpi_ratio * self.figure._original_dpi\n self.figure._set_dpi(dpi, forward=False)\n\n @property\n def _dpi_ratio(self):\n # Not available on Qt4 or some older Qt5.\n try:\n # self.devicePixelRatio() returns 0 in rare cases\n return self.devicePixelRatio() or 1\n except AttributeError:\n return 1\n\n def _update_dpi(self):\n # As described in __init__ above, we need to be careful in cases with\n # mixed resolution displays if dpi_ratio is changing between painting\n # events.\n # Return whether we triggered a resizeEvent (and thus a paintEvent)\n # from within this function.\n if self._dpi_ratio != self._dpi_ratio_prev:\n # We need to update the figure DPI.\n self._update_figure_dpi()\n self._dpi_ratio_prev = self._dpi_ratio\n # The easiest way to resize the canvas is to emit a resizeEvent\n # since we implement all the logic for resizing the canvas for\n # that event.\n event = QtGui.QResizeEvent(self.size(), self.size())\n self.resizeEvent(event)\n # resizeEvent triggers a paintEvent itself, so we exit this one\n # (after making sure that the event is immediately handled).\n return True\n return False\n\n def get_width_height(self):\n w, h = FigureCanvasBase.get_width_height(self)\n return int(w / self._dpi_ratio), int(h / self._dpi_ratio)\n\n def enterEvent(self, event):\n FigureCanvasBase.enter_notify_event(self, guiEvent=event)\n\n def leaveEvent(self, event):\n 
QtWidgets.QApplication.restoreOverrideCursor()\n FigureCanvasBase.leave_notify_event(self, guiEvent=event)\n\n def mouseEventCoords(self, pos):\n \"\"\"Calculate mouse coordinates in physical pixels\n\n Qt5 use logical pixels, but the figure is scaled to physical\n pixels for rendering. Transform to physical pixels so that\n all of the down-stream transforms work as expected.\n\n Also, the origin is different and needs to be corrected.\n\n \"\"\"\n dpi_ratio = self._dpi_ratio\n x = pos.x()\n # flip y so y=0 is bottom of canvas\n y = self.figure.bbox.height / dpi_ratio - pos.y()\n return x * dpi_ratio, y * dpi_ratio\n\n def mousePressEvent(self, event):\n x, y = self.mouseEventCoords(event.pos())\n button = self.buttond.get(event.button())\n if button is not None:\n FigureCanvasBase.button_press_event(self, x, y, button,\n guiEvent=event)\n\n def mouseDoubleClickEvent(self, event):\n x, y = self.mouseEventCoords(event.pos())\n button = self.buttond.get(event.button())\n if button is not None:\n FigureCanvasBase.button_press_event(self, x, y,\n button, dblclick=True,\n guiEvent=event)\n\n def mouseMoveEvent(self, event):\n x, y = self.mouseEventCoords(event)\n FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)\n\n def mouseReleaseEvent(self, event):\n x, y = self.mouseEventCoords(event)\n button = self.buttond.get(event.button())\n if button is not None:\n FigureCanvasBase.button_release_event(self, x, y, button,\n guiEvent=event)\n\n if is_pyqt5():\n def wheelEvent(self, event):\n x, y = self.mouseEventCoords(event)\n # from QWheelEvent::delta doc\n if event.pixelDelta().x() == 0 and event.pixelDelta().y() == 0:\n steps = event.angleDelta().y() / 120\n else:\n steps = event.pixelDelta().y()\n if steps:\n FigureCanvasBase.scroll_event(\n self, x, y, steps, guiEvent=event)\n else:\n def wheelEvent(self, event):\n x = event.x()\n # flipy so y=0 is bottom of canvas\n y = self.figure.bbox.height - event.y()\n # from QWheelEvent::delta doc\n steps = event.delta() / 120\n if event.orientation() == QtCore.Qt.Vertical:\n FigureCanvasBase.scroll_event(\n self, x, y, steps, guiEvent=event)\n\n def keyPressEvent(self, event):\n key = self._get_key(event)\n if key is not None:\n FigureCanvasBase.key_press_event(self, key, guiEvent=event)\n\n def keyReleaseEvent(self, event):\n key = self._get_key(event)\n if key is not None:\n FigureCanvasBase.key_release_event(self, key, guiEvent=event)\n\n @property\n def keyAutoRepeat(self):\n \"\"\"\n If True, enable auto-repeat for key events.\n \"\"\"\n return self._keyautorepeat\n\n @keyAutoRepeat.setter\n def keyAutoRepeat(self, val):\n self._keyautorepeat = bool(val)\n\n def resizeEvent(self, event):\n # _dpi_ratio_prev will be set the first time the canvas is painted, and\n # the rendered buffer is useless before anyways.\n if self._dpi_ratio_prev is None:\n return\n w = event.size().width() * self._dpi_ratio\n h = event.size().height() * self._dpi_ratio\n dpival = self.figure.dpi\n winch = w / dpival\n hinch = h / dpival\n self.figure.set_size_inches(winch, hinch, forward=False)\n # pass back into Qt to let it finish\n QtWidgets.QWidget.resizeEvent(self, event)\n # emit our resize events\n FigureCanvasBase.resize_event(self)\n\n def sizeHint(self):\n w, h = self.get_width_height()\n return QtCore.QSize(w, h)\n\n def minumumSizeHint(self):\n return QtCore.QSize(10, 10)\n\n def _get_key(self, event):\n if not self._keyautorepeat and event.isAutoRepeat():\n return None\n\n event_key = event.key()\n event_mods = int(event.modifiers()) # actually a 
bitmask\n\n # get names of the pressed modifier keys\n # bit twiddling to pick out modifier keys from event_mods bitmask,\n # if event_key is a MODIFIER, it should not be duplicated in mods\n mods = [name for name, mod_key, qt_key in MODIFIER_KEYS\n if event_key != qt_key and (event_mods & mod_key) == mod_key]\n try:\n # for certain keys (enter, left, backspace, etc) use a word for the\n # key, rather than unicode\n key = SPECIAL_KEYS[event_key]\n except KeyError:\n # unicode defines code points up to 0x0010ffff\n # QT will use Key_Codes larger than that for keyboard keys that are\n # are not unicode characters (like multimedia keys)\n # skip these\n # if you really want them, you should add them to SPECIAL_KEYS\n MAX_UNICODE = 0x10ffff\n if event_key > MAX_UNICODE:\n return None\n\n key = unichr(event_key)\n # qt delivers capitalized letters. fix capitalization\n # note that capslock is ignored\n if 'shift' in mods:\n mods.remove('shift')\n else:\n key = key.lower()\n\n mods.reverse()\n return '+'.join(mods + [key])\n\n def new_timer(self, *args, **kwargs):\n \"\"\"\n Creates a new backend-specific subclass of\n :class:`backend_bases.Timer`. This is useful for getting\n periodic events through the backend's native event\n loop. Implemented only for backends with GUIs.\n\n Other Parameters\n ----------------\n interval : scalar\n Timer interval in milliseconds\n\n callbacks : list\n Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``\n will be executed by the timer every *interval*.\n\n \"\"\"\n return TimerQT(*args, **kwargs)\n\n def flush_events(self):\n qApp.processEvents()\n\n def start_event_loop(self, timeout=0):\n if hasattr(self, \"_event_loop\") and self._event_loop.isRunning():\n raise RuntimeError(\"Event loop already running\")\n self._event_loop = event_loop = QtCore.QEventLoop()\n if timeout:\n timer = QtCore.QTimer.singleShot(timeout * 1000, event_loop.quit)\n event_loop.exec_()\n\n def stop_event_loop(self, event=None):\n if hasattr(self, \"_event_loop\"):\n self._event_loop.quit()\n\n def draw(self):\n \"\"\"Render the figure, and queue a request for a Qt draw.\n \"\"\"\n # The renderer draw is done here; delaying causes problems with code\n # that uses the result of the draw() to update plot elements.\n if self._is_drawing:\n return\n self._is_drawing = True\n try:\n super(FigureCanvasQT, self).draw()\n finally:\n self._is_drawing = False\n self.update()\n\n def draw_idle(self):\n \"\"\"Queue redraw of the Agg buffer and request Qt paintEvent.\n \"\"\"\n # The Agg draw needs to be handled by the same thread matplotlib\n # modifies the scene graph from. Post Agg draw request to the\n # current event loop in order to ensure thread affinity and to\n # accumulate multiple draw requests from event handling.\n # TODO: queued signal connection might be safer than singleShot\n if not (self._draw_pending or self._is_drawing):\n self._draw_pending = True\n QtCore.QTimer.singleShot(0, self._draw_idle)\n\n def _draw_idle(self):\n if self.height() < 0 or self.width() < 0:\n self._draw_pending = False\n if not self._draw_pending:\n return\n try:\n self.draw()\n except Exception:\n # Uncaught exceptions are fatal for PyQt5, so catch them instead.\n traceback.print_exc()\n finally:\n self._draw_pending = False\n\n def drawRectangle(self, rect):\n # Draw the zoom rectangle to the QPainter. 
_draw_rect_callback needs\n # to be called at the end of paintEvent.\n if rect is not None:\n def _draw_rect_callback(painter):\n pen = QtGui.QPen(QtCore.Qt.black, 1 / self._dpi_ratio,\n QtCore.Qt.DotLine)\n painter.setPen(pen)\n painter.drawRect(*(pt / self._dpi_ratio for pt in rect))\n else:\n def _draw_rect_callback(painter):\n return\n self._draw_rect_callback = _draw_rect_callback\n self.update()\n\n\nclass MainWindow(QtWidgets.QMainWindow):\n closing = QtCore.Signal()\n\n def closeEvent(self, event):\n self.closing.emit()\n QtWidgets.QMainWindow.closeEvent(self, event)\n\n\nclass FigureManagerQT(FigureManagerBase):\n \"\"\"\n Attributes\n ----------\n canvas : `FigureCanvas`\n The FigureCanvas instance\n num : int or str\n The Figure number\n toolbar : qt.QToolBar\n The qt.QToolBar\n window : qt.QMainWindow\n The qt.QMainWindow\n\n \"\"\"\n\n def __init__(self, canvas, num):\n FigureManagerBase.__init__(self, canvas, num)\n self.canvas = canvas\n self.window = MainWindow()\n self.window.closing.connect(canvas.close_event)\n self.window.closing.connect(self._widgetclosed)\n\n self.window.setWindowTitle(\"Figure %d\" % num)\n image = os.path.join(matplotlib.rcParams['datapath'],\n 'images', 'matplotlib.svg')\n self.window.setWindowIcon(QtGui.QIcon(image))\n\n # Give the keyboard focus to the figure instead of the\n # manager; StrongFocus accepts both tab and click to focus and\n # will enable the canvas to process event w/o clicking.\n # ClickFocus only takes the focus is the window has been\n # clicked\n # on. http://qt-project.org/doc/qt-4.8/qt.html#FocusPolicy-enum or\n # http://doc.qt.digia.com/qt/qt.html#FocusPolicy-enum\n self.canvas.setFocusPolicy(QtCore.Qt.StrongFocus)\n self.canvas.setFocus()\n\n self.window._destroying = False\n\n self.toolmanager = self._get_toolmanager()\n self.toolbar = self._get_toolbar(self.canvas, self.window)\n self.statusbar = None\n\n if self.toolmanager:\n backend_tools.add_tools_to_manager(self.toolmanager)\n if self.toolbar:\n backend_tools.add_tools_to_container(self.toolbar)\n self.statusbar = StatusbarQt(self.window, self.toolmanager)\n\n if self.toolbar is not None:\n self.window.addToolBar(self.toolbar)\n if not self.toolmanager:\n # add text label to status bar\n statusbar_label = QtWidgets.QLabel()\n self.window.statusBar().addWidget(statusbar_label)\n self.toolbar.message.connect(statusbar_label.setText)\n tbs_height = self.toolbar.sizeHint().height()\n else:\n tbs_height = 0\n\n # resize the main window so it will display the canvas with the\n # requested size:\n cs = canvas.sizeHint()\n sbs = self.window.statusBar().sizeHint()\n self._status_and_tool_height = tbs_height + sbs.height()\n height = cs.height() + self._status_and_tool_height\n self.window.resize(cs.width(), height)\n\n self.window.setCentralWidget(self.canvas)\n\n if matplotlib.is_interactive():\n self.window.show()\n self.canvas.draw_idle()\n\n def notify_axes_change(fig):\n # This will be called whenever the current axes is changed\n if self.toolbar is not None:\n self.toolbar.update()\n self.canvas.figure.add_axobserver(notify_axes_change)\n self.window.raise_()\n\n def full_screen_toggle(self):\n if self.window.isFullScreen():\n self.window.showNormal()\n else:\n self.window.showFullScreen()\n\n def _widgetclosed(self):\n if self.window._destroying:\n return\n self.window._destroying = True\n try:\n Gcf.destroy(self.num)\n except AttributeError:\n pass\n # It seems that when the python session is killed,\n # Gcf can get destroyed before the Gcf.destroy\n # line is run, 
leading to a useless AttributeError.\n\n def _get_toolbar(self, canvas, parent):\n # must be inited after the window, drawingArea and figure\n # attrs are set\n if matplotlib.rcParams['toolbar'] == 'toolbar2':\n toolbar = NavigationToolbar2QT(canvas, parent, False)\n elif matplotlib.rcParams['toolbar'] == 'toolmanager':\n toolbar = ToolbarQt(self.toolmanager, self.window)\n else:\n toolbar = None\n return toolbar\n\n def _get_toolmanager(self):\n if matplotlib.rcParams['toolbar'] == 'toolmanager':\n toolmanager = ToolManager(self.canvas.figure)\n else:\n toolmanager = None\n return toolmanager\n\n def resize(self, width, height):\n 'set the canvas size in pixels'\n self.window.resize(width, height + self._status_and_tool_height)\n\n def show(self):\n self.window.show()\n self.window.activateWindow()\n self.window.raise_()\n\n def destroy(self, *args):\n # check for qApp first, as PySide deletes it in its atexit handler\n if QtWidgets.QApplication.instance() is None:\n return\n if self.window._destroying:\n return\n self.window._destroying = True\n if self.toolbar:\n self.toolbar.destroy()\n self.window.close()\n\n def get_window_title(self):\n return six.text_type(self.window.windowTitle())\n\n def set_window_title(self, title):\n self.window.setWindowTitle(title)\n\n\nclass NavigationToolbar2QT(NavigationToolbar2, QtWidgets.QToolBar):\n message = QtCore.Signal(str)\n\n def __init__(self, canvas, parent, coordinates=True):\n \"\"\" coordinates: should we show the coordinates on the right? \"\"\"\n self.canvas = canvas\n self.parent = parent\n self.coordinates = coordinates\n self._actions = {}\n \"\"\"A mapping of toolitem method names to their QActions\"\"\"\n\n QtWidgets.QToolBar.__init__(self, parent)\n NavigationToolbar2.__init__(self, canvas)\n\n def _icon(self, name):\n if is_pyqt5():\n name = name.replace('.png', '_large.png')\n pm = QtGui.QPixmap(os.path.join(self.basedir, name))\n if hasattr(pm, 'setDevicePixelRatio'):\n pm.setDevicePixelRatio(self.canvas._dpi_ratio)\n return QtGui.QIcon(pm)\n\n def _init_toolbar(self):\n self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')\n\n for text, tooltip_text, image_file, callback in self.toolitems:\n if text is None:\n self.addSeparator()\n else:\n a = self.addAction(self._icon(image_file + '.png'),\n text, getattr(self, callback))\n self._actions[callback] = a\n if callback in ['zoom', 'pan']:\n a.setCheckable(True)\n if tooltip_text is not None:\n a.setToolTip(tooltip_text)\n if text == 'Subplots':\n a = self.addAction(self._icon(\"qt4_editor_options.png\"),\n 'Customize', self.edit_parameters)\n a.setToolTip('Edit axis, curve and image parameters')\n\n self.buttons = {}\n\n # Add the x,y location widget at the right side of the toolbar\n # The stretch factor is 1 which means any resizing of the toolbar\n # will resize this label instead of the buttons.\n if self.coordinates:\n self.locLabel = QtWidgets.QLabel(\"\", self)\n self.locLabel.setAlignment(\n QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)\n self.locLabel.setSizePolicy(\n QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,\n QtWidgets.QSizePolicy.Ignored))\n labelAction = self.addWidget(self.locLabel)\n labelAction.setVisible(True)\n\n # reference holder for subplots_adjust window\n self.adj_window = None\n\n # Esthetic adjustments - we need to set these explicitly in PyQt5\n # otherwise the layout looks different - but we don't want to set it if\n # not using HiDPI icons otherwise they look worse than before.\n if is_pyqt5():\n 
self.setIconSize(QtCore.QSize(24, 24))\n self.layout().setSpacing(12)\n\n if is_pyqt5():\n # For some reason, self.setMinimumHeight doesn't seem to carry over to\n # the actual sizeHint, so override it instead in order to make the\n # aesthetic adjustments noted above.\n def sizeHint(self):\n size = super(NavigationToolbar2QT, self).sizeHint()\n size.setHeight(max(48, size.height()))\n return size\n\n def edit_parameters(self):\n allaxes = self.canvas.figure.get_axes()\n if not allaxes:\n QtWidgets.QMessageBox.warning(\n self.parent, \"Error\", \"There are no axes to edit.\")\n return\n elif len(allaxes) == 1:\n axes, = allaxes\n else:\n titles = []\n for axes in allaxes:\n name = (axes.get_title() or\n \" - \".join(filter(None, [axes.get_xlabel(),\n axes.get_ylabel()])) or\n \"<anonymous {} (id: {:#x})>\".format(\n type(axes).__name__, id(axes)))\n titles.append(name)\n item, ok = QtWidgets.QInputDialog.getItem(\n self.parent, 'Customize', 'Select axes:', titles, 0, False)\n if ok:\n axes = allaxes[titles.index(six.text_type(item))]\n else:\n return\n\n figureoptions.figure_edit(axes, self)\n\n def _update_buttons_checked(self):\n # sync button checkstates to match active mode\n self._actions['pan'].setChecked(self._active == 'PAN')\n self._actions['zoom'].setChecked(self._active == 'ZOOM')\n\n def pan(self, *args):\n super(NavigationToolbar2QT, self).pan(*args)\n self._update_buttons_checked()\n\n def zoom(self, *args):\n super(NavigationToolbar2QT, self).zoom(*args)\n self._update_buttons_checked()\n\n def set_message(self, s):\n self.message.emit(s)\n if self.coordinates:\n self.locLabel.setText(s)\n\n def set_cursor(self, cursor):\n self.canvas.setCursor(cursord[cursor])\n\n def draw_rubberband(self, event, x0, y0, x1, y1):\n height = self.canvas.figure.bbox.height\n y1 = height - y1\n y0 = height - y0\n rect = [int(val) for val in (x0, y0, x1 - x0, y1 - y0)]\n self.canvas.drawRectangle(rect)\n\n def remove_rubberband(self):\n self.canvas.drawRectangle(None)\n\n def configure_subplots(self):\n image = os.path.join(matplotlib.rcParams['datapath'],\n 'images', 'matplotlib.png')\n dia = SubplotToolQt(self.canvas.figure, self.parent)\n dia.setWindowIcon(QtGui.QIcon(image))\n dia.exec_()\n\n def save_figure(self, *args):\n filetypes = self.canvas.get_supported_filetypes_grouped()\n sorted_filetypes = sorted(six.iteritems(filetypes))\n default_filetype = self.canvas.get_default_filetype()\n\n startpath = os.path.expanduser(\n matplotlib.rcParams['savefig.directory'])\n start = os.path.join(startpath, self.canvas.get_default_filename())\n filters = []\n selectedFilter = None\n for name, exts in sorted_filetypes:\n exts_list = \" \".join(['*.%s' % ext for ext in exts])\n filter = '%s (%s)' % (name, exts_list)\n if default_filetype in exts:\n selectedFilter = filter\n filters.append(filter)\n filters = ';;'.join(filters)\n\n fname, filter = _getSaveFileName(self.parent,\n \"Choose a filename to save to\",\n start, filters, selectedFilter)\n if fname:\n # Save dir for next time, unless empty str (i.e., use cwd).\n if startpath != \"\":\n matplotlib.rcParams['savefig.directory'] = (\n os.path.dirname(six.text_type(fname)))\n try:\n self.canvas.figure.savefig(six.text_type(fname))\n except Exception as e:\n QtWidgets.QMessageBox.critical(\n self, \"Error saving file\", six.text_type(e),\n QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)\n\n\nclass SubplotToolQt(UiSubplotTool):\n def __init__(self, targetfig, parent):\n UiSubplotTool.__init__(self, None)\n\n self._figure = targetfig\n\n 
for lower, higher in [(\"bottom\", \"top\"), (\"left\", \"right\")]:\n self._widgets[lower].valueChanged.connect(\n lambda val: self._widgets[higher].setMinimum(val + .001))\n self._widgets[higher].valueChanged.connect(\n lambda val: self._widgets[lower].setMaximum(val - .001))\n\n self._attrs = [\"top\", \"bottom\", \"left\", \"right\", \"hspace\", \"wspace\"]\n self._defaults = {attr: vars(self._figure.subplotpars)[attr]\n for attr in self._attrs}\n\n # Set values after setting the range callbacks, but before setting up\n # the redraw callbacks.\n self._reset()\n\n for attr in self._attrs:\n self._widgets[attr].valueChanged.connect(self._on_value_changed)\n for action, method in [(\"Export values\", self._export_values),\n (\"Tight layout\", self._tight_layout),\n (\"Reset\", self._reset),\n (\"Close\", self.close)]:\n self._widgets[action].clicked.connect(method)\n\n def _export_values(self):\n # Explicitly round to 3 decimals (which is also the spinbox precision)\n # to avoid numbers of the form 0.100...001.\n dialog = QtWidgets.QDialog()\n layout = QtWidgets.QVBoxLayout()\n dialog.setLayout(layout)\n text = QtWidgets.QPlainTextEdit()\n text.setReadOnly(True)\n layout.addWidget(text)\n text.setPlainText(\n \",\\n\".join(\"{}={:.3}\".format(attr, self._widgets[attr].value())\n for attr in self._attrs))\n # Adjust the height of the text widget to fit the whole text, plus\n # some padding.\n size = text.maximumSize()\n size.setHeight(\n QtGui.QFontMetrics(text.document().defaultFont())\n .size(0, text.toPlainText()).height() + 20)\n text.setMaximumSize(size)\n dialog.exec_()\n\n def _on_value_changed(self):\n self._figure.subplots_adjust(**{attr: self._widgets[attr].value()\n for attr in self._attrs})\n self._figure.canvas.draw_idle()\n\n def _tight_layout(self):\n self._figure.tight_layout()\n for attr in self._attrs:\n widget = self._widgets[attr]\n widget.blockSignals(True)\n widget.setValue(vars(self._figure.subplotpars)[attr])\n widget.blockSignals(False)\n self._figure.canvas.draw_idle()\n\n def _reset(self):\n for attr, value in self._defaults.items():\n self._widgets[attr].setValue(value)\n\n\nclass ToolbarQt(ToolContainerBase, QtWidgets.QToolBar):\n def __init__(self, toolmanager, parent):\n ToolContainerBase.__init__(self, toolmanager)\n QtWidgets.QToolBar.__init__(self, parent)\n self._toolitems = {}\n self._groups = {}\n self._last = None\n\n @property\n def _icon_extension(self):\n if is_pyqt5():\n return '_large.png'\n return '.png'\n\n def add_toolitem(\n self, name, group, position, image_file, description, toggle):\n\n button = QtWidgets.QToolButton(self)\n button.setIcon(self._icon(image_file))\n button.setText(name)\n if description:\n button.setToolTip(description)\n\n def handler():\n self.trigger_tool(name)\n if toggle:\n button.setCheckable(True)\n button.toggled.connect(handler)\n else:\n button.clicked.connect(handler)\n\n self._last = button\n self._toolitems.setdefault(name, [])\n self._add_to_group(group, name, button, position)\n self._toolitems[name].append((button, handler))\n\n def _add_to_group(self, group, name, button, position):\n gr = self._groups.get(group, [])\n if not gr:\n sep = self.addSeparator()\n gr.append(sep)\n before = gr[position]\n widget = self.insertWidget(before, button)\n gr.insert(position, widget)\n self._groups[group] = gr\n\n def _icon(self, name):\n pm = QtGui.QPixmap(name)\n if hasattr(pm, 'setDevicePixelRatio'):\n pm.setDevicePixelRatio(self.toolmanager.canvas._dpi_ratio)\n return QtGui.QIcon(pm)\n\n def toggle_toolitem(self, 
name, toggled):\n if name not in self._toolitems:\n return\n for button, handler in self._toolitems[name]:\n button.toggled.disconnect(handler)\n button.setChecked(toggled)\n button.toggled.connect(handler)\n\n def remove_toolitem(self, name):\n for button, handler in self._toolitems[name]:\n button.setParent(None)\n del self._toolitems[name]\n\n\nclass StatusbarQt(StatusbarBase, QtWidgets.QLabel):\n def __init__(self, window, *args, **kwargs):\n StatusbarBase.__init__(self, *args, **kwargs)\n QtWidgets.QLabel.__init__(self)\n window.statusBar().addWidget(self)\n\n def set_message(self, s):\n self.setText(s)\n\n\nclass ConfigureSubplotsQt(backend_tools.ConfigureSubplotsBase):\n def trigger(self, *args):\n image = os.path.join(matplotlib.rcParams['datapath'],\n 'images', 'matplotlib.png')\n parent = self.canvas.manager.window\n dia = SubplotToolQt(self.figure, parent)\n dia.setWindowIcon(QtGui.QIcon(image))\n dia.exec_()\n\n\nclass SaveFigureQt(backend_tools.SaveFigureBase):\n def trigger(self, *args):\n filetypes = self.canvas.get_supported_filetypes_grouped()\n sorted_filetypes = sorted(six.iteritems(filetypes))\n default_filetype = self.canvas.get_default_filetype()\n\n startpath = os.path.expanduser(\n matplotlib.rcParams['savefig.directory'])\n start = os.path.join(startpath, self.canvas.get_default_filename())\n filters = []\n selectedFilter = None\n for name, exts in sorted_filetypes:\n exts_list = \" \".join(['*.%s' % ext for ext in exts])\n filter = '%s (%s)' % (name, exts_list)\n if default_filetype in exts:\n selectedFilter = filter\n filters.append(filter)\n filters = ';;'.join(filters)\n\n parent = self.canvas.manager.window\n fname, filter = _getSaveFileName(parent,\n \"Choose a filename to save to\",\n start, filters, selectedFilter)\n if fname:\n # Save dir for next time, unless empty str (i.e., use cwd).\n if startpath != \"\":\n matplotlib.rcParams['savefig.directory'] = (\n os.path.dirname(six.text_type(fname)))\n try:\n self.canvas.figure.savefig(six.text_type(fname))\n except Exception as e:\n QtWidgets.QMessageBox.critical(\n self, \"Error saving file\", six.text_type(e),\n QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)\n\n\nclass SetCursorQt(backend_tools.SetCursorBase):\n def set_cursor(self, cursor):\n self.canvas.setCursor(cursord[cursor])\n\n\nclass RubberbandQt(backend_tools.RubberbandBase):\n def draw_rubberband(self, x0, y0, x1, y1):\n height = self.canvas.figure.bbox.height\n y1 = height - y1\n y0 = height - y0\n rect = [int(val) for val in (x0, y0, x1 - x0, y1 - y0)]\n self.canvas.drawRectangle(rect)\n\n def remove_rubberband(self):\n self.canvas.drawRectangle(None)\n\n\nbackend_tools.ToolSaveFigure = SaveFigureQt\nbackend_tools.ToolConfigureSubplots = ConfigureSubplotsQt\nbackend_tools.ToolSetCursor = SetCursorQt\nbackend_tools.ToolRubberband = RubberbandQt\n\n\ndef error_msg_qt(msg, parent=None):\n if not isinstance(msg, six.string_types):\n msg = ','.join(map(str, msg))\n\n QtWidgets.QMessageBox.warning(None, \"Matplotlib\",\n msg, QtGui.QMessageBox.Ok)\n\n\ndef exception_handler(type, value, tb):\n \"\"\"Handle uncaught exceptions\n It does not catch SystemExit\n \"\"\"\n msg = ''\n # get the filename attribute if available (for IOError)\n if hasattr(value, 'filename') and value.filename is not None:\n msg = value.filename + ': '\n if hasattr(value, 'strerror') and value.strerror is not None:\n msg += value.strerror\n else:\n msg += six.text_type(value)\n\n if len(msg):\n error_msg_qt(msg)\n\n\n@_Backend.export\nclass _BackendQT5(_Backend):\n 
FigureCanvas = FigureCanvasQT\n FigureManager = FigureManagerQT\n\n @staticmethod\n def trigger_manager_draw(manager):\n manager.canvas.draw_idle()\n\n @staticmethod\n def mainloop():\n # allow KeyboardInterrupt exceptions to close the plot window.\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n qApp.exec_()\n"
] | [
[
"matplotlib.backend_bases.FigureCanvasBase.enter_notify_event",
"matplotlib.backend_tools.add_tools_to_manager",
"matplotlib.backend_bases.FigureCanvasBase.scroll_event",
"matplotlib.backend_bases.NavigationToolbar2.__init__",
"matplotlib.backend_bases.FigureCanvasBase.key_release_event",
"matplotlib.backend_bases.FigureCanvasBase.button_release_event",
"matplotlib.backend_bases.FigureCanvasBase.leave_notify_event",
"matplotlib.backend_bases.StatusbarBase.__init__",
"matplotlib.backends.qt_editor.formsubplottool.UiSubplotTool.__init__",
"matplotlib.backend_managers.ToolManager",
"matplotlib._pylab_helpers.Gcf.destroy",
"matplotlib.backend_bases.FigureManagerBase.__init__",
"matplotlib.backends.qt_editor.figureoptions.figure_edit",
"matplotlib.backend_bases.FigureCanvasBase.get_width_height",
"matplotlib.backend_bases.TimerBase.__init__",
"matplotlib.backend_bases.FigureCanvasBase.resize_event",
"matplotlib.backend_tools.add_tools_to_container",
"matplotlib.backend_bases.ToolContainerBase.__init__",
"matplotlib.backend_bases.FigureCanvasBase.motion_notify_event",
"matplotlib.backend_bases.FigureCanvasBase.button_press_event",
"matplotlib.backend_bases.FigureCanvasBase.key_press_event",
"matplotlib.is_interactive"
]
] |
LueyEscargot/pyGuiTest | [
"c072fe29a7c94dc60ec54344a5d4a91253d25f3f"
] | [
"pyside/lesson_08_main.py"
] | [
"\nimport sys\nimport argparse\nimport pandas as pd\n\nfrom PySide2.QtCore import QDateTime, QTimeZone\nfrom PySide2.QtWidgets import QApplication\nfrom lesson_08_main_window import MainWindow\nfrom lesson_08_mainWidget import Widget\n\n\ndef transform_date(utc, timezone=None):\n utc_fmt = \"yyyy-MM-ddTHH:mm:ss.zzzZ\"\n new_date = QDateTime().fromString(utc, utc_fmt)\n if timezone:\n new_date.setTimeZone(timezone)\n return new_date\n\n\ndef read_data(fname):\n # Read the CSV content\n df = pd.read_csv(fname)\n\n # Remove wrong magnitudes\n df = df.drop(df[df.mag < 0].index)\n magnitudes = df[\"mag\"]\n\n # My local timezone\n timezone = QTimeZone(b\"Aisa/ShangHai\")\n\n # Get timestamp transformed to our timezone\n times = df[\"time\"].apply(lambda x: transform_date(x, timezone))\n\n return times, magnitudes\n\n\nif __name__ == \"__main__\":\n options = argparse.ArgumentParser()\n options.add_argument(\"-f\", \"--file\", type=str, required=True)\n args = options.parse_args()\n data = read_data(args.file)\n\n # Qt Application\n app = QApplication(sys.argv)\n\n widget = Widget(data)\n window = MainWindow(widget)\n window.show()\n\n sys.exit(app.exec_())"
] | [
[
"pandas.read_csv"
]
] |
wjsi/mars | [
"a69fb19edfe748d4393b90ff2c4941a76c084596",
"a69fb19edfe748d4393b90ff2c4941a76c084596"
] | [
"mars/tensor/fft/ifftn.py",
"mars/tensor/fft/irfft.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom ..datasource import tensor as astensor\nfrom .core import TensorComplexFFTNMixin, validate_fftn, TensorStandardFFTN\n\n\nclass TensorIFFTN(TensorStandardFFTN, TensorComplexFFTNMixin):\n _op_type_ = OperandDef.IFFTN\n\n def __init__(self, shape=None, axes=None, norm=None, **kw):\n super().__init__(_shape=shape, _axes=axes, _norm=norm, **kw)\n\n\ndef ifftn(a, s=None, axes=None, norm=None):\n \"\"\"\n Compute the N-dimensional inverse discrete Fourier Transform.\n\n This function computes the inverse of the N-dimensional discrete\n Fourier Transform over any number of axes in an M-dimensional tensor by\n means of the Fast Fourier Transform (FFT). In other words,\n ``ifftn(fftn(a)) == a`` to within numerical accuracy.\n For a description of the definitions and conventions used, see `mt.fft`.\n\n The input, analogously to `ifft`, should be ordered in the same way as is\n returned by `fftn`, i.e. it should have the term for zero frequency\n in all axes in the low-order corner, the positive frequency terms in the\n first half of all axes, the term for the Nyquist frequency in the middle\n of all axes and the negative frequency terms in the second half of all\n axes, in order of decreasingly negative frequency.\n\n Parameters\n ----------\n a : array_like\n Input tensor, can be complex.\n s : sequence of ints, optional\n Shape (length of each transformed axis) of the output\n (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).\n This corresponds to ``n`` for ``ifft(x, n)``.\n Along any axis, if the given shape is smaller than that of the input,\n the input is cropped. If it is larger, the input is padded with zeros.\n if `s` is not given, the shape of the input along the axes specified\n by `axes` is used. See notes for issue on `ifft` zero padding.\n axes : sequence of ints, optional\n Axes over which to compute the IFFT. If not given, the last ``len(s)``\n axes are used, or all axes if `s` is also not specified.\n Repeated indices in `axes` means that the inverse transform over that\n axis is performed multiple times.\n norm : {None, \"ortho\"}, optional\n Normalization mode (see `mt.fft`). 
Default is None.\n\n Returns\n -------\n out : complex Tensor\n The truncated or zero-padded input, transformed along the axes\n indicated by `axes`, or by a combination of `s` or `a`,\n as explained in the parameters section above.\n\n Raises\n ------\n ValueError\n If `s` and `axes` have different length.\n IndexError\n If an element of `axes` is larger than the number of axes of `a`.\n\n See Also\n --------\n mt.fft : Overall view of discrete Fourier transforms, with definitions\n and conventions used.\n fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.\n ifft : The one-dimensional inverse FFT.\n ifft2 : The two-dimensional inverse FFT.\n ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning\n of tensor.\n\n Notes\n -----\n See `mt.fft` for definitions and conventions used.\n\n Zero-padding, analogously with `ifft`, is performed by appending zeros to\n the input along the specified dimension. Although this is the common\n approach, it might lead to surprising results. If another form of zero\n padding is desired, it must be performed before `ifftn` is called.\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> a = mt.eye(4)\n >>> mt.fft.ifftn(mt.fft.fftn(a, axes=(0,)), axes=(1,)).execute()\n array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],\n [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],\n [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],\n [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])\n\n\n Create and plot an image with band-limited frequency content:\n\n >>> import matplotlib.pyplot as plt\n >>> n = mt.zeros((200,200), dtype=complex)\n >>> n[60:80, 20:40] = mt.exp(1j*mt.random.uniform(0, 2*mt.pi, (20, 20)))\n >>> im = mt.fft.ifftn(n).real\n >>> plt.imshow(im.execute())\n <matplotlib.image.AxesImage object at 0x...>\n >>> plt.show()\n\n \"\"\"\n a = astensor(a)\n axes = validate_fftn(a, s=s, axes=axes, norm=norm)\n op = TensorIFFTN(shape=s, axes=axes, norm=norm, dtype=np.dtype(np.complex_))\n return op(a)\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom ..datasource import tensor as astensor\nfrom .core import TensorFFTMixin, validate_fft, TensorRealFFT\n\n\nclass TensorIRFFT(TensorRealFFT, TensorFFTMixin):\n _op_type_ = OperandDef.IRFFT\n\n def __init__(self, n=None, axis=-1, norm=None, **kw):\n super().__init__(_n=n, _axis=axis, _norm=norm, **kw)\n\n @classmethod\n def _get_shape(cls, op, shape):\n new_shape = list(shape)\n if op.n is not None:\n new_shape[op.axis] = op.n\n else:\n new_shape[op.axis] = 2 * (new_shape[op.axis] - 1)\n return tuple(new_shape)\n\n\ndef irfft(a, n=None, axis=-1, norm=None):\n \"\"\"\n Compute the inverse of the n-point DFT for real input.\n\n This function computes the inverse of the one-dimensional *n*-point\n discrete Fourier Transform of real input computed by `rfft`.\n In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical\n accuracy. (See Notes below for why ``len(a)`` is necessary here.)\n\n The input is expected to be in the form returned by `rfft`, i.e. the\n real zero-frequency term followed by the complex positive frequency terms\n in order of increasing frequency. Since the discrete Fourier Transform of\n real input is Hermitian-symmetric, the negative frequency terms are taken\n to be the complex conjugates of the corresponding positive frequency terms.\n\n Parameters\n ----------\n a : array_like\n The input tensor.\n n : int, optional\n Length of the transformed axis of the output.\n For `n` output points, ``n//2+1`` input points are necessary. If the\n input is longer than this, it is cropped. If it is shorter than this,\n it is padded with zeros. If `n` is not given, it is determined from\n the length of the input along the axis specified by `axis`.\n axis : int, optional\n Axis over which to compute the inverse FFT. If not given, the last\n axis is used.\n norm : {None, \"ortho\"}, optional\n Normalization mode (see `mt.fft`). Default is None.\n\n Returns\n -------\n out : Tensor\n The truncated or zero-padded input, transformed along the axis\n indicated by `axis`, or the last one if `axis` is not specified.\n The length of the transformed axis is `n`, or, if `n` is not given,\n ``2*(m-1)`` where ``m`` is the length of the transformed axis of the\n input. 
To get an odd number of output points, `n` must be specified.\n\n Raises\n ------\n IndexError\n If `axis` is larger than the last axis of `a`.\n\n See Also\n --------\n mt.fft : For definition of the DFT and conventions used.\n rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.\n fft : The one-dimensional FFT.\n irfft2 : The inverse of the two-dimensional FFT of real input.\n irfftn : The inverse of the *n*-dimensional FFT of real input.\n\n Notes\n -----\n Returns the real valued `n`-point inverse discrete Fourier transform\n of `a`, where `a` contains the non-negative frequency terms of a\n Hermitian-symmetric sequence. `n` is the length of the result, not the\n input.\n\n If you specify an `n` such that `a` must be zero-padded or truncated, the\n extra/removed values will be added/removed at high frequencies. One can\n thus resample a series to `m` points via Fourier interpolation by:\n ``a_resamp = irfft(rfft(a), m)``.\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> mt.fft.ifft([1, -1j, -1, 1j]).execute()\n array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])\n >>> mt.fft.irfft([1, -1j, -1]).execute()\n array([ 0., 1., 0., 0.])\n\n Notice how the last term in the input to the ordinary `ifft` is the\n complex conjugate of the second term, and the output has zero imaginary\n part everywhere. When calling `irfft`, the negative frequencies are not\n specified, and the output array is purely real.\n\n \"\"\"\n a = astensor(a)\n validate_fft(a, axis=axis, norm=norm)\n op = TensorIRFFT(n=n, axis=axis, norm=norm, dtype=np.dtype(np.float_))\n return op(a)\n"
] | [
[
"numpy.dtype"
],
[
"numpy.dtype"
]
] |
leelige/mindspore | [
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"
] | [
"research/cv/Neighbor2Neighbor/src/dataset.py",
"official/recommend/ncf/src/dataset.py",
"official/cv/ssd/src/box_utils.py",
"research/cv/Pix2Pix/postprocess.py",
"official/cv/crnn_seq2seq_ocr/src/dataset.py",
"research/cv/mobilenetV3_small_x1_0/src/monitor.py",
"official/cv/unet3d/eval.py"
] | [
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n'''dataloader'''\nimport os\nimport glob\nimport numpy as np\nimport PIL.Image as Image\nimport mindspore.dataset as ds\nimport mindspore.dataset.vision.c_transforms as CV\n\nclass DataLoader_Imagenet_val:\n '''DataLoader_Imagenet_val'''\n def __init__(self, data_dir, patch=256, noise_style=\"gauss25\", batch_size=4):\n super(DataLoader_Imagenet_val, self).__init__()\n self.data_dir = data_dir\n self.patch = patch\n self.train_fns = glob.glob(os.path.join(self.data_dir, \"*\"))\n self.train_fns.sort()\n print('fetch {} samples for training'.format(len(self.train_fns)))\n self.noise_generator = AugmentNoise(noise_style)\n self.batch_size = batch_size\n self.test = 1\n def __getitem__(self, index):\n # fetch image\n fn = self.train_fns[index]\n im = Image.open(fn)\n im = np.array(im, dtype=np.float32)\n # random crop\n H = im.shape[0]\n W = im.shape[1]\n if H - self.patch > 0:\n xx = np.random.randint(0, H - self.patch)\n im = im[xx:xx + self.patch, :, :]\n if W - self.patch > 0:\n yy = np.random.randint(0, W - self.patch)\n im = im[:, yy:yy + self.patch, :]\n im /= 255.0 #clean image\n noisy = self.noise_generator.add_noise(im)\n\n return im, noisy\n\n def __len__(self):\n return len(self.train_fns)\n\nclass AugmentNoise():\n '''AugmentNoise'''\n def __init__(self, style):\n if style.startswith('gauss'):\n self.params = [\n float(p) / 255.0 for p in style.replace('gauss', '').split('_')\n ]\n if len(self.params) == 1:\n self.style = \"gauss_fix\"\n elif len(self.params) == 2:\n self.style = \"gauss_range\"\n elif style.startswith('poisson'):\n self.params = [\n float(p) for p in style.replace('poisson', '').split('_')\n ]\n if len(self.params) == 1:\n self.style = \"poisson_fix\"\n elif len(self.params) == 2:\n self.style = \"poisson_range\"\n\n def add_noise(self, x):\n '''add_noise'''\n shape = x.shape\n if self.style == \"gauss_fix\":\n std = self.params[0]\n return np.array(x + np.random.normal(size=shape) * std,\n dtype=np.float32)\n if self.style == \"gauss_range\":\n min_std, max_std = self.params\n std = np.random.uniform(low=min_std, high=max_std, size=(1, 1, 1))\n return np.array(x + np.random.normal(size=shape) * std,\n dtype=np.float32)\n if self.style == \"poisson_fix\":\n lam = self.params[0]\n return np.array(np.random.poisson(lam * x) / lam, dtype=np.float32)\n assert self.style == \"poisson_range\"\n min_lam, max_lam = self.params\n lam = np.random.uniform(low=min_lam, high=max_lam, size=(1, 1, 1))\n return np.array(np.random.poisson(lam * x) / lam, dtype=np.float32)\n\n\ndef create_Dataset(data_dir, patch, noise_style, batch_size, device_num, rank, shuffle):\n\n dataset = DataLoader_Imagenet_val(data_dir, patch, noise_style, batch_size)\n hwc_to_chw = CV.HWC2CHW()\n data_set = ds.GeneratorDataset(dataset, column_names=[\"image\", \"noisy\"], \\\n num_parallel_workers=8, shuffle=shuffle, 
num_shards=device_num, shard_id=rank)\n data_set = data_set.map(input_columns=[\"image\"], operations=hwc_to_chw, num_parallel_workers=8)\n data_set = data_set.map(input_columns=[\"noisy\"], operations=hwc_to_chw, num_parallel_workers=8)\n data_set = data_set.batch(batch_size, drop_remainder=True)\n return data_set, data_set.get_dataset_size()\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Dataset loading, creation and processing\"\"\"\nimport logging\nimport math\nimport os\nimport time\nimport timeit\nimport pickle\nimport numpy as np\nimport pandas as pd\n\nfrom mindspore.dataset import GeneratorDataset, Sampler\n\nimport src.constants as rconst\nimport src.movielens as movielens\nimport src.stat_utils as stat_utils\n\nDATASET_TO_NUM_USERS_AND_ITEMS = {\n \"ml-1m\": (6040, 3706),\n \"ml-20m\": (138493, 26744)\n}\n\n_EXPECTED_CACHE_KEYS = (\n rconst.TRAIN_USER_KEY, rconst.TRAIN_ITEM_KEY, rconst.EVAL_USER_KEY,\n rconst.EVAL_ITEM_KEY, rconst.USER_MAP, rconst.ITEM_MAP)\n\n\ndef load_data(data_dir, dataset):\n \"\"\"\n Load data in .csv format and output structured data.\n\n This function reads in the raw CSV of positive items, and performs three\n preprocessing transformations:\n\n 1) Filter out all users who have not rated at least a certain number\n of items. (Typically 20 items)\n\n 2) Zero index the users and items such that the largest user_id is\n `num_users - 1` and the largest item_id is `num_items - 1`\n\n 3) Sort the dataframe by user_id, with timestamp as a secondary sort key.\n This allows the dataframe to be sliced by user in-place, and for the last\n item to be selected simply by calling the `-1` index of a user's slice.\n\n While all of these transformations are performed by Pandas (and are therefore\n single-threaded), they only take ~2 minutes, and the overhead to apply a\n MapReduce pattern to parallel process the dataset adds significant complexity\n for no computational gain. For a larger dataset parallelizing this\n preprocessing could yield speedups. 
(Also, this preprocessing step is only\n performed once for an entire run.\n \"\"\"\n logging.info(\"Beginning loading data...\")\n\n raw_rating_path = os.path.join(data_dir, dataset, movielens.RATINGS_FILE)\n cache_path = os.path.join(data_dir, dataset, rconst.RAW_CACHE_FILE)\n\n valid_cache = os.path.exists(cache_path)\n if valid_cache:\n with open(cache_path, 'rb') as f:\n cached_data = pickle.load(f)\n\n for key in _EXPECTED_CACHE_KEYS:\n if key not in cached_data:\n valid_cache = False\n\n if not valid_cache:\n logging.info(\"Removing stale raw data cache file.\")\n os.remove(cache_path)\n\n if valid_cache:\n data = cached_data\n else:\n # process data and save to .csv\n with open(raw_rating_path) as f:\n df = pd.read_csv(f)\n\n # Get the info of users who have more than 20 ratings on items\n grouped = df.groupby(movielens.USER_COLUMN)\n df = grouped.filter(lambda x: len(x) >= rconst.MIN_NUM_RATINGS)\n\n original_users = df[movielens.USER_COLUMN].unique()\n original_items = df[movielens.ITEM_COLUMN].unique()\n\n # Map the ids of user and item to 0 based index for following processing\n logging.info(\"Generating user_map and item_map...\")\n user_map = {user: index for index, user in enumerate(original_users)}\n item_map = {item: index for index, item in enumerate(original_items)}\n\n df[movielens.USER_COLUMN] = df[movielens.USER_COLUMN].apply(\n lambda user: user_map[user])\n df[movielens.ITEM_COLUMN] = df[movielens.ITEM_COLUMN].apply(\n lambda item: item_map[item])\n\n num_users = len(original_users)\n num_items = len(original_items)\n\n assert num_users <= np.iinfo(rconst.USER_DTYPE).max\n assert num_items <= np.iinfo(rconst.ITEM_DTYPE).max\n assert df[movielens.USER_COLUMN].max() == num_users - 1\n assert df[movielens.ITEM_COLUMN].max() == num_items - 1\n\n # This sort is used to shard the dataframe by user, and later to select\n # the last item for a user to be used in validation.\n logging.info(\"Sorting by user, timestamp...\")\n\n # This sort is equivalent to\n # df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],\n # inplace=True)\n # except that the order of items with the same user and timestamp are\n # sometimes different. 
For some reason, this sort results in a better\n # hit-rate during evaluation, matching the performance of the MLPerf\n # reference implementation.\n df.sort_values(by=movielens.TIMESTAMP_COLUMN, inplace=True)\n df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],\n inplace=True, kind=\"mergesort\")\n\n # The dataframe does not reconstruct indices in the sort or filter steps.\n df = df.reset_index()\n\n grouped = df.groupby(movielens.USER_COLUMN, group_keys=False)\n eval_df, train_df = grouped.tail(1), grouped.apply(lambda x: x.iloc[:-1])\n\n data = {\n rconst.TRAIN_USER_KEY:\n train_df[movielens.USER_COLUMN].values.astype(rconst.USER_DTYPE),\n rconst.TRAIN_ITEM_KEY:\n train_df[movielens.ITEM_COLUMN].values.astype(rconst.ITEM_DTYPE),\n rconst.EVAL_USER_KEY:\n eval_df[movielens.USER_COLUMN].values.astype(rconst.USER_DTYPE),\n rconst.EVAL_ITEM_KEY:\n eval_df[movielens.ITEM_COLUMN].values.astype(rconst.ITEM_DTYPE),\n rconst.USER_MAP: user_map,\n rconst.ITEM_MAP: item_map,\n \"create_time\": time.time(),\n }\n\n logging.info(\"Writing raw data cache.\")\n with open(cache_path, \"wb\") as f:\n pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n num_users, num_items = DATASET_TO_NUM_USERS_AND_ITEMS[dataset]\n if num_users != len(data[rconst.USER_MAP]):\n raise ValueError(\"Expected to find {} users, but found {}\".format(\n num_users, len(data[rconst.USER_MAP])))\n if num_items != len(data[rconst.ITEM_MAP]):\n raise ValueError(\"Expected to find {} items, but found {}\".format(\n num_items, len(data[rconst.ITEM_MAP])))\n\n return data, num_users, num_items\n\n\ndef construct_lookup_variables(train_pos_users, train_pos_items, num_users):\n \"\"\"Lookup variables\"\"\"\n index_bounds = None\n sorted_train_pos_items = None\n\n def index_segment(user):\n lower, upper = index_bounds[user:user + 2]\n items = sorted_train_pos_items[lower:upper]\n\n negatives_since_last_positive = np.concatenate(\n [items[0][np.newaxis], items[1:] - items[:-1] - 1])\n\n return np.cumsum(negatives_since_last_positive)\n\n start_time = timeit.default_timer()\n inner_bounds = np.argwhere(train_pos_users[1:] -\n train_pos_users[:-1])[:, 0] + 1\n (upper_bound,) = train_pos_users.shape\n index_bounds = np.array([0] + inner_bounds.tolist() + [upper_bound])\n\n # Later logic will assume that the users are in sequential ascending order.\n assert np.array_equal(train_pos_users[index_bounds[:-1]], np.arange(num_users))\n\n sorted_train_pos_items = train_pos_items.copy()\n\n for i in range(num_users):\n lower, upper = index_bounds[i:i + 2]\n sorted_train_pos_items[lower:upper].sort()\n\n total_negatives = np.concatenate([\n index_segment(i) for i in range(num_users)])\n\n logging.info(\"Negative total vector built. 
Time: {:.1f} seconds\".format(\n timeit.default_timer() - start_time))\n\n return total_negatives, index_bounds, sorted_train_pos_items\n\n\nclass NCFDataset:\n \"\"\"\n A dataset for NCF network.\n \"\"\"\n\n def __init__(self,\n pos_users,\n pos_items,\n num_users,\n num_items,\n batch_size,\n total_negatives,\n index_bounds,\n sorted_train_pos_items,\n num_neg,\n is_training=True):\n self._pos_users = pos_users\n self._pos_items = pos_items\n self._num_users = num_users\n self._num_items = num_items\n\n self._batch_size = batch_size\n\n self._total_negatives = total_negatives\n self._index_bounds = index_bounds\n self._sorted_train_pos_items = sorted_train_pos_items\n\n self._is_training = is_training\n\n if self._is_training:\n self._train_pos_count = self._pos_users.shape[0]\n else:\n self._eval_users_per_batch = int(\n batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))\n\n _pos_count = pos_users.shape[0]\n _num_samples = (1 + num_neg) * _pos_count\n self.dataset_len = math.ceil(_num_samples / batch_size)\n\n def lookup_negative_items(self, negative_users):\n \"\"\"Lookup negative items\"\"\"\n output = np.zeros(shape=negative_users.shape, dtype=rconst.ITEM_DTYPE) - 1\n\n left_index = self._index_bounds[negative_users]\n right_index = self._index_bounds[negative_users + 1] - 1\n\n num_positives = right_index - left_index + 1\n num_negatives = self._num_items - num_positives\n neg_item_choice = stat_utils.very_slightly_biased_randint(num_negatives)\n\n # Shortcuts:\n # For points where the negative is greater than or equal to the tally before\n # the last positive point there is no need to bisect. Instead the item id\n # corresponding to the negative item choice is simply:\n # last_postive_index + 1 + (neg_choice - last_negative_tally)\n # Similarly, if the selection is less than the tally at the first positive\n # then the item_id is simply the selection.\n #\n # Because MovieLens organizes popular movies into low integers (which is\n # preserved through the preprocessing), the first shortcut is very\n # efficient, allowing ~60% of samples to bypass the bisection. 
For the same\n # reason, the second shortcut is rarely triggered (<0.02%) and is therefore\n # not worth implementing.\n use_shortcut = neg_item_choice >= self._total_negatives[right_index]\n output[use_shortcut] = (\n self._sorted_train_pos_items[right_index] + 1 +\n (neg_item_choice - self._total_negatives[right_index])\n )[use_shortcut]\n\n if np.all(use_shortcut):\n # The bisection code is ill-posed when there are no elements.\n return output\n\n not_use_shortcut = np.logical_not(use_shortcut)\n left_index = left_index[not_use_shortcut]\n right_index = right_index[not_use_shortcut]\n neg_item_choice = neg_item_choice[not_use_shortcut]\n\n num_loops = np.max(\n np.ceil(np.log2(num_positives[not_use_shortcut])).astype(np.int32))\n\n for _ in range(num_loops):\n mid_index = (left_index + right_index) // 2\n right_criteria = self._total_negatives[mid_index] > neg_item_choice\n left_criteria = np.logical_not(right_criteria)\n\n right_index[right_criteria] = mid_index[right_criteria]\n left_index[left_criteria] = mid_index[left_criteria]\n\n # Expected state after bisection pass:\n # The right index is the smallest index whose tally is greater than the\n # negative item choice index.\n\n assert np.all((right_index - left_index) <= 1)\n\n output[not_use_shortcut] = (\n self._sorted_train_pos_items[right_index] - (self._total_negatives[right_index] - neg_item_choice)\n )\n\n assert np.all(output >= 0)\n\n return output\n\n def _get_train_item(self, index):\n \"\"\"Get train item\"\"\"\n (mask_start_index,) = index.shape\n index_mod = np.mod(index, self._train_pos_count)\n\n # get batch of users\n users = self._pos_users[index_mod]\n\n # get batch of items\n negative_indices = np.greater_equal(index, self._train_pos_count)\n negative_users = users[negative_indices]\n negative_items = self.lookup_negative_items(negative_users=negative_users)\n items = self._pos_items[index_mod]\n items[negative_indices] = negative_items\n\n # get batch of labels\n labels = np.logical_not(negative_indices)\n\n # pad last partial batch\n pad_length = self._batch_size - index.shape[0]\n if pad_length:\n user_pad = np.arange(pad_length, dtype=users.dtype) % self._num_users\n item_pad = np.arange(pad_length, dtype=items.dtype) % self._num_items\n label_pad = np.zeros(shape=(pad_length,), dtype=labels.dtype)\n users = np.concatenate([users, user_pad])\n items = np.concatenate([items, item_pad])\n labels = np.concatenate([labels, label_pad])\n\n users = np.reshape(users, (self._batch_size, 1)) # (_batch_size, 1), int32\n items = np.reshape(items, (self._batch_size, 1)) # (_batch_size, 1), int32\n mask_start_index = np.array(mask_start_index, dtype=np.int32) # (_batch_size, 1), int32\n valid_pt_mask = np.expand_dims(\n np.less(np.arange(self._batch_size), mask_start_index), -1).astype(np.float32) # (_batch_size, 1), bool\n labels = np.reshape(labels, (self._batch_size, 1)).astype(np.int32) # (_batch_size, 1), bool\n\n return users, items, labels, valid_pt_mask\n\n @staticmethod\n def _assemble_eval_batch(users, positive_items, negative_items,\n users_per_batch):\n \"\"\"Construct duplicate_mask and structure data accordingly.\n\n The positive items should be last so that they lose ties. However, they\n should not be masked out if the true eval positive happens to be\n selected as a negative. So instead, the positive is placed in the first\n position, and then switched with the last element after the duplicate\n mask has been computed.\n\n Args:\n users: An array of users in a batch. 
(should be identical along axis 1)\n positive_items: An array (batch_size x 1) of positive item indices.\n negative_items: An array of negative item indices.\n users_per_batch: How many users should be in the batch. This is passed\n as an argument so that ncf_test.py can use this method.\n\n Returns:\n User, item, and duplicate_mask arrays.\n \"\"\"\n items = np.concatenate([positive_items, negative_items], axis=1)\n\n # We pad the users and items here so that the duplicate mask calculation\n # will include padding. The metric function relies on all padded elements\n # except the positive being marked as duplicate to mask out padded points.\n if users.shape[0] < users_per_batch:\n pad_rows = users_per_batch - users.shape[0]\n padding = np.zeros(shape=(pad_rows, users.shape[1]), dtype=np.int32)\n users = np.concatenate([users, padding.astype(users.dtype)], axis=0)\n items = np.concatenate([items, padding.astype(items.dtype)], axis=0)\n\n duplicate_mask = stat_utils.mask_duplicates(items, axis=1).astype(np.float32)\n\n items[:, (0, -1)] = items[:, (-1, 0)]\n duplicate_mask[:, (0, -1)] = duplicate_mask[:, (-1, 0)]\n\n assert users.shape == items.shape == duplicate_mask.shape\n return users, items, duplicate_mask\n\n def _get_eval_item(self, index):\n \"\"\"Get eval item\"\"\"\n low_index, high_index = index\n users = np.repeat(self._pos_users[low_index:high_index, np.newaxis],\n 1 + rconst.NUM_EVAL_NEGATIVES, axis=1)\n positive_items = self._pos_items[low_index:high_index, np.newaxis]\n negative_items = (self.lookup_negative_items(negative_users=users[:, :-1])\n .reshape(-1, rconst.NUM_EVAL_NEGATIVES))\n\n users, items, duplicate_mask = self._assemble_eval_batch(\n users, positive_items, negative_items, self._eval_users_per_batch)\n\n users = np.reshape(users.flatten(), (self._batch_size, 1)) # (self._batch_size, 1), int32\n items = np.reshape(items.flatten(), (self._batch_size, 1)) # (self._batch_size, 1), int32\n duplicate_mask = np.reshape(duplicate_mask.flatten(), (self._batch_size, 1)) # (self._batch_size, 1), bool\n\n return users, items, duplicate_mask\n\n def __getitem__(self, index):\n \"\"\"\n Get a batch of samples.\n \"\"\"\n if self._is_training:\n return self._get_train_item(index)\n\n return self._get_eval_item(index)\n\n def __len__(self):\n \"\"\"\n Return length of the dataset, i.e., the number of batches for an epoch\n \"\"\"\n return self.dataset_len\n\n\nclass RandomSampler(Sampler):\n \"\"\"\n A random sampler for dataset.\n \"\"\"\n\n def __init__(self, pos_count, num_train_negatives, batch_size):\n self.pos_count = pos_count\n self._num_samples = (1 + num_train_negatives) * self.pos_count\n self._batch_size = batch_size\n self._num_batches = math.ceil(self._num_samples / self._batch_size)\n super().__init__(self._num_batches)\n\n def __iter__(self):\n \"\"\"\n Return indices of all batches within an epoch.\n \"\"\"\n indices = stat_utils.permutation((self._num_samples, stat_utils.random_int32()))\n\n batch_indices = [indices[x * self._batch_size:(x + 1) * self._batch_size] for x in range(self._num_batches)]\n\n # padding last batch indices if necessary\n if len(batch_indices) > 2 and len(batch_indices[-2]) != len(batch_indices[-1]):\n pad_nums = len(batch_indices[-2]) - len(batch_indices[-1])\n pad_indices = np.random.randint(0, self._num_samples, pad_nums)\n batch_indices[-1] = np.hstack((batch_indices[-1], pad_indices))\n\n return iter(batch_indices)\n\n\nclass DistributedSamplerOfTrain:\n \"\"\"\n A distributed sampler for dataset.\n \"\"\"\n\n def __init__(self, 
pos_count, num_train_negatives, batch_size, rank_id, rank_size):\n \"\"\"\n Distributed sampler of training dataset.\n \"\"\"\n self._num_samples = (1 + num_train_negatives) * pos_count\n self._rank_id = rank_id\n self._rank_size = rank_size\n self._batch_size = batch_size\n\n self._batchs_per_rank = int(math.ceil(self._num_samples / self._batch_size / rank_size))\n self._samples_per_rank = int(math.ceil(self._batchs_per_rank * self._batch_size))\n self._total_num_samples = self._samples_per_rank * self._rank_size\n\n def __iter__(self):\n \"\"\"\n Returns the data after each sampling.\n \"\"\"\n indices = stat_utils.permutation((self._num_samples, stat_utils.random_int32()))\n indices = indices.tolist()\n indices.extend(indices[:self._total_num_samples - len(indices)])\n indices = indices[self._rank_id:self._total_num_samples:self._rank_size]\n batch_indices = [indices[x * self._batch_size:(x + 1) * self._batch_size] for x in range(self._batchs_per_rank)]\n\n return iter(np.array(batch_indices))\n\n def __len__(self):\n \"\"\"\n Returns the length after each sampling.\n \"\"\"\n return self._batchs_per_rank\n\n\nclass SequenceSampler(Sampler):\n \"\"\"\n A sequence sampler for dataset.\n \"\"\"\n\n def __init__(self, eval_batch_size, num_users):\n self._eval_users_per_batch = int(\n eval_batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))\n self._eval_elements_in_epoch = num_users * (1 + rconst.NUM_EVAL_NEGATIVES)\n self._eval_batches_per_epoch = self.count_batches(\n self._eval_elements_in_epoch, eval_batch_size)\n super().__init__(self._eval_batches_per_epoch)\n\n def __iter__(self):\n indices = [(x * self._eval_users_per_batch, (x + 1) * self._eval_users_per_batch)\n for x in range(self._eval_batches_per_epoch)]\n\n # padding last batch indices if necessary\n if len(indices) > 2 and len(indices[-2]) != len(indices[-1]):\n pad_nums = len(indices[-2]) - len(indices[-1])\n pad_indices = np.random.randint(0, self._eval_elements_in_epoch, pad_nums)\n indices[-1] = np.hstack((indices[-1], pad_indices))\n\n return iter(indices)\n\n @staticmethod\n def count_batches(example_count, batch_size, batches_per_step=1):\n \"\"\"Determine the number of batches, rounding up to fill all devices.\"\"\"\n x = (example_count + batch_size - 1) // batch_size\n return (x + batches_per_step - 1) // batches_per_step * batches_per_step\n\n\nclass DistributedSamplerOfEval:\n \"\"\"\n A distributed sampler for eval dataset.\n \"\"\"\n\n def __init__(self, eval_batch_size, num_users, rank_id, rank_size):\n self._eval_users_per_batch = int(\n eval_batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))\n self._eval_elements_in_epoch = num_users * (1 + rconst.NUM_EVAL_NEGATIVES)\n self._eval_batches_per_epoch = self.count_batches(\n self._eval_elements_in_epoch, eval_batch_size)\n\n self._rank_id = rank_id\n self._rank_size = rank_size\n self._eval_batch_size = eval_batch_size\n\n self._batchs_per_rank = int(math.ceil(self._eval_batches_per_epoch / rank_size))\n\n def __iter__(self):\n indices = [(x * self._eval_users_per_batch, (x + self._rank_id + 1) * self._eval_users_per_batch)\n for x in range(self._batchs_per_rank)]\n\n return iter(np.array(indices))\n\n @staticmethod\n def count_batches(example_count, batch_size, batches_per_step=1):\n \"\"\"Determine the number of batches, rounding up to fill all devices.\"\"\"\n x = (example_count + batch_size - 1) // batch_size\n return (x + batches_per_step - 1) // batches_per_step * batches_per_step\n\n def __len__(self):\n return self._batchs_per_rank\n\n\ndef 
parse_eval_batch_size(eval_batch_size):\n \"\"\"\n Parse eval batch size.\n \"\"\"\n if eval_batch_size % (1 + rconst.NUM_EVAL_NEGATIVES):\n raise ValueError(\"Eval batch size {} is not divisible by {}\".format(\n eval_batch_size, 1 + rconst.NUM_EVAL_NEGATIVES))\n return eval_batch_size\n\n\ndef create_dataset(test_train=True, data_dir='./dataset/', dataset='ml-1m', train_epochs=14, batch_size=256,\n eval_batch_size=160000, num_neg=4, rank_id=None, rank_size=None):\n \"\"\"\n Create NCF dataset.\n \"\"\"\n data, num_users, num_items = load_data(data_dir, dataset)\n\n train_pos_users = data[rconst.TRAIN_USER_KEY]\n train_pos_items = data[rconst.TRAIN_ITEM_KEY]\n eval_pos_users = data[rconst.EVAL_USER_KEY]\n eval_pos_items = data[rconst.EVAL_ITEM_KEY]\n\n total_negatives, index_bounds, sorted_train_pos_items = \\\n construct_lookup_variables(train_pos_users, train_pos_items, num_users)\n\n if test_train:\n print(train_pos_users, train_pos_items, num_users, num_items, batch_size, total_negatives, index_bounds,\n sorted_train_pos_items)\n dataset = NCFDataset(train_pos_users, train_pos_items, num_users, num_items, batch_size, total_negatives,\n index_bounds, sorted_train_pos_items, num_neg)\n sampler = RandomSampler(train_pos_users.shape[0], num_neg, batch_size)\n if rank_id is not None and rank_size is not None:\n sampler = DistributedSamplerOfTrain(train_pos_users.shape[0], num_neg, batch_size, rank_id, rank_size)\n\n ds = GeneratorDataset(dataset,\n column_names=[movielens.USER_COLUMN,\n movielens.ITEM_COLUMN,\n \"labels\",\n rconst.VALID_POINT_MASK],\n sampler=sampler)\n\n else:\n eval_batch_size = parse_eval_batch_size(eval_batch_size=eval_batch_size)\n dataset = NCFDataset(eval_pos_users, eval_pos_items, num_users, num_items,\n eval_batch_size, total_negatives, index_bounds,\n sorted_train_pos_items, num_neg, is_training=False)\n sampler = SequenceSampler(eval_batch_size, num_users)\n\n ds = GeneratorDataset(dataset,\n column_names=[movielens.USER_COLUMN,\n movielens.ITEM_COLUMN,\n rconst.DUPLICATE_MASK],\n sampler=sampler)\n\n repeat_count = train_epochs if test_train else train_epochs + 1\n ds = ds.repeat(repeat_count)\n\n return ds, num_users, num_items\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Bbox utils\"\"\"\n\nimport math\nimport itertools as it\nimport numpy as np\nfrom src.model_utils.config import config\nfrom .anchor_generator import GridAnchorGenerator\n\n\nclass GeneratDefaultBoxes():\n \"\"\"\n Generate Default boxes for SSD, follows the order of (W, H, archor_sizes).\n `self.default_boxes` has a shape of [archor_sizes, H, W, 4], the last dimension is [y, x, h, w].\n `self.default_boxes_tlbr` has a shape as `self.default_boxes`, the last dimension is [y1, x1, y2, x2].\n \"\"\"\n def __init__(self):\n fk = config.img_shape[0] / np.array(config.steps)\n scale_rate = (config.max_scale - config.min_scale) / (len(config.num_default) - 1)\n scales = [config.min_scale + scale_rate * i for i in range(len(config.num_default))] + [1.0]\n self.default_boxes = []\n for idex, feature_size in enumerate(config.feature_size):\n sk1 = scales[idex]\n sk2 = scales[idex + 1]\n sk3 = math.sqrt(sk1 * sk2)\n if idex == 0 and not config.aspect_ratios[idex]:\n w, h = sk1 * math.sqrt(2), sk1 / math.sqrt(2)\n all_sizes = [(0.1, 0.1), (w, h), (h, w)]\n else:\n all_sizes = [(sk1, sk1)]\n for aspect_ratio in config.aspect_ratios[idex]:\n w, h = sk1 * math.sqrt(aspect_ratio), sk1 / math.sqrt(aspect_ratio)\n all_sizes.append((w, h))\n all_sizes.append((h, w))\n all_sizes.append((sk3, sk3))\n\n assert len(all_sizes) == config.num_default[idex]\n\n for i, j in it.product(range(feature_size), repeat=2):\n for w, h in all_sizes:\n cx, cy = (j + 0.5) / fk[idex], (i + 0.5) / fk[idex]\n self.default_boxes.append([cy, cx, h, w])\n\n def to_tlbr(cy, cx, h, w):\n return cy - h / 2, cx - w / 2, cy + h / 2, cx + w / 2\n\n # For IoU calculation\n self.default_boxes_tlbr = np.array(tuple(to_tlbr(*i) for i in self.default_boxes), dtype='float32')\n self.default_boxes = np.array(self.default_boxes, dtype='float32')\n\nif hasattr(config, 'use_anchor_generator') and config.use_anchor_generator:\n generator = GridAnchorGenerator(config.img_shape, 4, 2, [1.0, 2.0, 0.5])\n default_boxes, default_boxes_tlbr = generator.generate_multi_levels(config.steps)\nelse:\n default_boxes_tlbr = GeneratDefaultBoxes().default_boxes_tlbr\n default_boxes = GeneratDefaultBoxes().default_boxes\ny1, x1, y2, x2 = np.split(default_boxes_tlbr[:, :4], 4, axis=-1)\nvol_anchors = (x2 - x1) * (y2 - y1)\nmatching_threshold = config.match_threshold\n\n\ndef ssd_bboxes_encode(boxes):\n \"\"\"\n Labels anchors with ground truth inputs.\n\n Args:\n boxex: ground truth with shape [N, 5], for each row, it stores [y, x, h, w, cls].\n\n Returns:\n gt_loc: location ground truth with shape [num_anchors, 4].\n gt_label: class ground truth with shape [num_anchors, 1].\n num_matched_boxes: number of positives in an image.\n \"\"\"\n\n def jaccard_with_anchors(bbox):\n \"\"\"Compute jaccard score a box and the anchors.\"\"\"\n # Intersection bbox and volume.\n ymin = 
np.maximum(y1, bbox[0])\n xmin = np.maximum(x1, bbox[1])\n ymax = np.minimum(y2, bbox[2])\n xmax = np.minimum(x2, bbox[3])\n w = np.maximum(xmax - xmin, 0.)\n h = np.maximum(ymax - ymin, 0.)\n\n # Volumes.\n inter_vol = h * w\n union_vol = vol_anchors + (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) - inter_vol\n jaccard = inter_vol / union_vol\n return np.squeeze(jaccard)\n\n pre_scores = np.zeros((config.num_ssd_boxes), dtype=np.float32)\n t_boxes = np.zeros((config.num_ssd_boxes, 4), dtype=np.float32)\n t_label = np.zeros((config.num_ssd_boxes), dtype=np.int64)\n for bbox in boxes:\n label = int(bbox[4])\n scores = jaccard_with_anchors(bbox)\n idx = np.argmax(scores)\n scores[idx] = 2.0\n mask = (scores > matching_threshold)\n mask = mask & (scores > pre_scores)\n pre_scores = np.maximum(pre_scores, scores * mask)\n t_label = mask * label + (1 - mask) * t_label\n for i in range(4):\n t_boxes[:, i] = mask * bbox[i] + (1 - mask) * t_boxes[:, i]\n\n index = np.nonzero(t_label)\n\n # Transform to tlbr.\n bboxes = np.zeros((config.num_ssd_boxes, 4), dtype=np.float32)\n bboxes[:, [0, 1]] = (t_boxes[:, [0, 1]] + t_boxes[:, [2, 3]]) / 2\n bboxes[:, [2, 3]] = t_boxes[:, [2, 3]] - t_boxes[:, [0, 1]]\n\n # Encode features.\n bboxes_t = bboxes[index]\n default_boxes_t = default_boxes[index]\n bboxes_t[:, :2] = (bboxes_t[:, :2] - default_boxes_t[:, :2]) / (default_boxes_t[:, 2:] * config.prior_scaling[0])\n tmp = np.maximum(bboxes_t[:, 2:4] / default_boxes_t[:, 2:4], 0.000001)\n bboxes_t[:, 2:4] = np.log(tmp) / config.prior_scaling[1]\n bboxes[index] = bboxes_t\n\n num_match = np.array([len(np.nonzero(t_label)[0])], dtype=np.int32)\n return bboxes, t_label.astype(np.int32), num_match\n\n\ndef ssd_bboxes_decode(boxes):\n \"\"\"Decode predict boxes to [y, x, h, w]\"\"\"\n boxes_t = boxes.copy()\n default_boxes_t = default_boxes.copy()\n boxes_t[:, :2] = boxes_t[:, :2] * config.prior_scaling[0] * default_boxes_t[:, 2:] + default_boxes_t[:, :2]\n boxes_t[:, 2:4] = np.exp(boxes_t[:, 2:4] * config.prior_scaling[1]) * default_boxes_t[:, 2:4]\n\n bboxes = np.zeros((len(boxes_t), 4), dtype=np.float32)\n\n bboxes[:, [0, 1]] = boxes_t[:, [0, 1]] - boxes_t[:, [2, 3]] / 2\n bboxes[:, [2, 3]] = boxes_t[:, [0, 1]] + boxes_t[:, [2, 3]] / 2\n\n return np.clip(bboxes, 0, 1)\n\n\ndef intersect(box_a, box_b):\n \"\"\"Compute the intersect of two sets of boxes.\"\"\"\n max_yx = np.minimum(box_a[:, 2:4], box_b[2:4])\n min_yx = np.maximum(box_a[:, :2], box_b[:2])\n inter = np.clip((max_yx - min_yx), a_min=0, a_max=np.inf)\n return inter[:, 0] * inter[:, 1]\n\n\ndef jaccard_numpy(box_a, box_b):\n \"\"\"Compute the jaccard overlap of two sets of boxes.\"\"\"\n inter = intersect(box_a, box_b)\n area_a = ((box_a[:, 2] - box_a[:, 0]) *\n (box_a[:, 3] - box_a[:, 1]))\n area_b = ((box_b[2] - box_b[0]) *\n (box_b[3] - box_b[1]))\n union = area_a + area_b - inter\n return inter / union\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\n postprocess\n\"\"\"\nimport os\nimport numpy as np\nfrom PIL import Image\nfrom src.utils.config import get_args\nfrom mindspore import Tensor\n\ndef save_image(img, img_path):\n \"\"\"Save a numpy image to the disk\n\n Parameters:\n img (numpy array / Tensor): image to save.\n image_path (str): the path of the image.\n \"\"\"\n if isinstance(img, Tensor):\n img = img.asnumpy()\n elif not isinstance(img, np.ndarray):\n raise ValueError(\"img should be Tensor or numpy array, but get {}\".format(type(img)))\n img = decode_image(img)\n\n img_pil = Image.fromarray(img)\n img_pil.save(img_path + \".jpg\")\n\ndef decode_image(img):\n \"\"\"Decode a [1, C, H, W] Tensor to image numpy array.\"\"\"\n mean = 0.5 * 255\n std = 0.5 * 255\n\n return (img * std + mean).astype(np.uint8).transpose((1, 2, 0))\n\nif __name__ == '__main__':\n args = get_args()\n\n result_dir = \"./result_Files\"\n object_imageSize = 256\n rst_path = result_dir\n\n for i in range(len(os.listdir(rst_path))):\n file_name = os.path.join(rst_path, \"Pix2Pix_data_bs\" + str(args.batch_size) + '_' + str(i) + '_0.bin')\n output = np.fromfile(file_name, np.float32).reshape(3, object_imageSize, object_imageSize)\n print(output.shape)\n save_image(output, './310_infer_img' + str(i + 1))\n print(\"=======image\", i + 1, \"saved success=======\")\n print(\"Generate images success!\")\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"FSNS dataset\"\"\"\n\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\nimport mindspore.dataset as de\nimport mindspore.dataset.vision.c_transforms as C\nimport mindspore.dataset.vision.py_transforms as P\nimport mindspore.dataset.transforms.c_transforms as ops\nimport mindspore.common.dtype as mstype\n\nfrom src.model_utils.config import config\n\n\nclass AugmentationOps():\n def __init__(self, min_area_ratio=0.8, aspect_ratio_range=(0.8, 1.2), brightness=32./255.,\n contrast=0.5, saturation=0.5, hue=0.2, img_tile_shape=(150, 150)):\n self.min_area_ratio = min_area_ratio\n self.aspect_ratio_range = aspect_ratio_range\n self.img_tile_shape = img_tile_shape\n self.random_image_distortion_ops = P.RandomColorAdjust(brightness=brightness,\n contrast=contrast,\n saturation=saturation,\n hue=hue)\n\n def __call__(self, img):\n img_h = self.img_tile_shape[0]\n img_w = self.img_tile_shape[1]\n img_new = np.zeros([128, 512, 3], dtype=np.float32)\n\n for i in range(4):\n img_tile = img[:, (i*150):((i+1)*150), :]\n # Random crop cut from the street sign image, resized to the same size.\n # Assures that the crop covers at least 0.8 area of the input image.\n # Aspect ratio of cropped image is within [0.8,1.2] range.\n h = img_h + 1\n w = img_w + 1\n\n while (w >= img_w or h >= img_h):\n aspect_ratio = np.random.uniform(self.aspect_ratio_range[0],\n self.aspect_ratio_range[1])\n h_low = np.ceil(np.sqrt(self.min_area_ratio * img_h * img_w / aspect_ratio))\n h_high = np.floor(np.sqrt(img_h * img_w / aspect_ratio))\n h = np.random.randint(h_low, h_high)\n w = int(h * aspect_ratio)\n\n y = np.random.randint(img_w - w)\n x = np.random.randint(img_h - h)\n img_tile = img_tile[x:(x+h), y:(y+w), :]\n # Randomly chooses one of the 4 interpolation resize methods.\n interpolation = np.random.choice([cv2.INTER_LINEAR,\n cv2.INTER_CUBIC,\n cv2.INTER_AREA,\n cv2.INTER_NEAREST])\n img_tile = cv2.resize(img_tile, (128, 128), interpolation=interpolation)\n # Random color distortion ops.\n img_tile_pil = Image.fromarray(img_tile)\n img_tile_pil = self.random_image_distortion_ops(img_tile_pil)\n img_tile = np.array(img_tile_pil)\n img_new[:, (i*128):((i+1)*128), :] = img_tile\n\n img_new = 2 * (img_new / 255.) - 1\n return img_new\n\n\nclass ImageResizeWithRescale():\n def __init__(self, standard_img_height, standard_img_width, channel_size=3):\n self.standard_img_height = standard_img_height\n self.standard_img_width = standard_img_width\n self.channel_size = channel_size\n\n def __call__(self, img):\n img = cv2.resize(img, (self.standard_img_width, self.standard_img_height))\n img = 2 * (img / 255.) 
- 1\n return img\n\n\ndef random_teacher_force(images, source_ids, target_ids):\n teacher_force = np.random.random() < config.teacher_force_ratio\n teacher_force_array = np.array([teacher_force], dtype=bool)\n return images, source_ids, target_ids, teacher_force_array\n\n\ndef create_ocr_train_dataset(mindrecord_file, batch_size=32, rank_size=1, rank_id=0,\n is_training=True, num_parallel_workers=4, use_multiprocessing=True):\n ds = de.MindDataset(mindrecord_file,\n columns_list=[\"image\", \"decoder_input\", \"decoder_target\"],\n num_shards=rank_size,\n shard_id=rank_id,\n num_parallel_workers=num_parallel_workers,\n shuffle=is_training)\n aug_ops = AugmentationOps()\n transforms = [C.Decode(),\n aug_ops,\n C.HWC2CHW()]\n ds = ds.map(operations=transforms, input_columns=[\"image\"], python_multiprocessing=use_multiprocessing,\n num_parallel_workers=num_parallel_workers)\n ds = ds.map(operations=ops.PadEnd([config.max_length], 0), input_columns=[\"decoder_target\"])\n ds = ds.map(operations=random_teacher_force, input_columns=[\"image\", \"decoder_input\", \"decoder_target\"],\n output_columns=[\"image\", \"decoder_input\", \"decoder_target\", \"teacher_force\"],\n column_order=[\"image\", \"decoder_input\", \"decoder_target\", \"teacher_force\"])\n type_cast_op_bool = ops.TypeCast(mstype.bool_)\n ds = ds.map(operations=type_cast_op_bool, input_columns=\"teacher_force\")\n print(\"Train dataset size= %s\" % (int(ds.get_dataset_size())))\n ds = ds.batch(batch_size, drop_remainder=True)\n return ds\n\n\ndef create_ocr_val_dataset(mindrecord_file, batch_size=32, rank_size=1, rank_id=0,\n num_parallel_workers=4, use_multiprocessing=True):\n ds = de.MindDataset(mindrecord_file,\n columns_list=[\"image\", \"annotation\", \"decoder_input\", \"decoder_target\"],\n num_shards=rank_size,\n shard_id=rank_id,\n num_parallel_workers=num_parallel_workers,\n shuffle=False)\n resize_rescale_op = ImageResizeWithRescale(standard_img_height=128, standard_img_width=512)\n transforms = [C.Decode(),\n resize_rescale_op,\n C.HWC2CHW()]\n ds = ds.map(operations=transforms, input_columns=[\"image\"], python_multiprocessing=use_multiprocessing,\n num_parallel_workers=num_parallel_workers)\n ds = ds.map(operations=ops.PadEnd([config.max_length], 0), input_columns=[\"decoder_target\"],\n python_multiprocessing=use_multiprocessing, num_parallel_workers=8)\n ds = ds.map(operations=ops.PadEnd([config.max_length], 0), input_columns=[\"decoder_input\"],\n python_multiprocessing=use_multiprocessing, num_parallel_workers=8)\n ds = ds.batch(batch_size, drop_remainder=True)\n print(\"Val dataset size= %s\" % (str(int(ds.get_dataset_size())*batch_size)))\n return ds\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Monitor loss and time\"\"\"\nimport time\nimport numpy as np\nfrom mindspore import Tensor\nfrom mindspore.train.callback import Callback\nclass Monitor(Callback):\n \"\"\"\n Monitor loss and time.\n\n Args:\n lr_init (numpy array): train lr\n\n Returns:\n None\n\n Examples:\n >>> Monitor(100,lr_init=Tensor([0.05]*100).asnumpy())\n \"\"\"\n\n def __init__(self, lr_init=None):\n super(Monitor, self).__init__()\n self.lr_init = lr_init\n self.lr_init_len = len(lr_init)\n\n def epoch_begin(self, run_context):\n self.losses = []\n self.epoch_time = time.time()\n\n def epoch_end(self, run_context):\n cb_params = run_context.original_args()\n\n epoch_mseconds = (time.time() - self.epoch_time) * 1000\n per_step_mseconds = epoch_mseconds / cb_params.batch_num\n print(\"epoch time: {:5.3f}, per step time: {:5.3f}, avg loss: {:5.3f}\".format(epoch_mseconds,\n per_step_mseconds,\n np.mean(self.losses)))\n\n def step_begin(self, run_context):\n self.step_time = time.time()\n\n def step_end(self, run_context):\n \"\"\"step end\"\"\"\n cb_params = run_context.original_args()\n step_mseconds = (time.time() - self.step_time) * 1000\n step_loss = cb_params.net_outputs\n\n if isinstance(step_loss, (tuple, list)) and isinstance(step_loss[0], Tensor):\n step_loss = step_loss[0]\n if isinstance(step_loss, Tensor):\n step_loss = np.mean(step_loss.asnumpy())\n\n self.losses.append(step_loss)\n cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num\n\n print(\"epoch: [{:3d}/{:3d}], step:[{:5d}/{:5d}], loss:[{:5.3f}/{:5.3f}], time:[{:5.3f}], lr:[{:5.3f}]\".format(\n cb_params.cur_epoch_num -\n 1, cb_params.epoch_num, cur_step_in_epoch, cb_params.batch_num, step_loss,\n np.mean(self.losses), step_mseconds, self.lr_init[cb_params.cur_step_num - 1]))\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport os\nimport numpy as np\nfrom mindspore import dtype as mstype\nfrom mindspore import Model, context, Tensor\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net\nfrom src.dataset import create_dataset\nfrom src.unet3d_model import UNet3d, UNet3d_\nfrom src.utils import create_sliding_window, CalculateDice\nfrom src.model_utils.config import config\nfrom src.model_utils.moxing_adapter import moxing_wrapper\n\ndevice_id = int(os.getenv('DEVICE_ID'))\ncontext.set_context(mode=context.GRAPH_MODE, device_target=config.device_target, save_graphs=False, device_id=device_id)\n\n@moxing_wrapper()\ndef test_net(data_path, ckpt_path):\n data_dir = data_path + \"/image/\"\n seg_dir = data_path + \"/seg/\"\n eval_dataset = create_dataset(data_path=data_dir, seg_path=seg_dir, is_training=False)\n eval_data_size = eval_dataset.get_dataset_size()\n print(\"train dataset length is:\", eval_data_size)\n\n if config.device_target == 'Ascend':\n network = UNet3d()\n else:\n network = UNet3d_()\n network.set_train(False)\n param_dict = load_checkpoint(ckpt_path)\n load_param_into_net(network, param_dict)\n model = Model(network)\n index = 0\n total_dice = 0\n for batch in eval_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):\n image = batch[\"image\"]\n seg = batch[\"seg\"]\n print(\"current image shape is {}\".format(image.shape), flush=True)\n sliding_window_list, slice_list = create_sliding_window(image, config.roi_size, config.overlap)\n image_size = (config.batch_size, config.num_classes) + image.shape[2:]\n output_image = np.zeros(image_size, np.float32)\n count_map = np.zeros(image_size, np.float32)\n importance_map = np.ones(config.roi_size, np.float32)\n for window, slice_ in zip(sliding_window_list, slice_list):\n window_image = Tensor(window, mstype.float32)\n pred_probs = model.predict(window_image)\n output_image[slice_] += pred_probs.asnumpy()\n count_map[slice_] += importance_map\n output_image = output_image / count_map\n dice, _ = CalculateDice(output_image, seg)\n print(\"The {} batch dice is {}\".format(index, dice), flush=True)\n total_dice += dice\n index = index + 1\n avg_dice = total_dice / eval_data_size\n print(\"**********************End Eval***************************************\")\n print(\"eval average dice is {}\".format(avg_dice))\n\nif __name__ == '__main__':\n test_net(data_path=config.data_path,\n ckpt_path=config.checkpoint_file_path)\n"
] | [
[
"numpy.random.uniform",
"numpy.random.normal",
"numpy.random.poisson",
"numpy.array",
"numpy.random.randint"
],
[
"numpy.log2",
"numpy.cumsum",
"numpy.argwhere",
"numpy.zeros",
"pandas.read_csv",
"numpy.reshape",
"numpy.repeat",
"numpy.mod",
"numpy.arange",
"numpy.logical_not",
"numpy.greater_equal",
"numpy.all",
"numpy.hstack",
"numpy.iinfo",
"numpy.array",
"numpy.concatenate",
"numpy.random.randint"
],
[
"numpy.array",
"numpy.zeros",
"numpy.squeeze",
"numpy.argmax",
"numpy.exp",
"numpy.clip",
"numpy.log",
"numpy.maximum",
"numpy.minimum",
"numpy.nonzero",
"numpy.split"
],
[
"numpy.fromfile"
],
[
"numpy.random.uniform",
"numpy.sqrt",
"numpy.zeros",
"numpy.random.choice",
"numpy.random.random",
"numpy.array",
"numpy.random.randint"
],
[
"numpy.mean"
],
[
"numpy.ones",
"numpy.zeros"
]
] |
iust-projects/Data-Mining-IUST | [
"88f7a5541278f1fe907ca9b70c990a27f60900b2"
] | [
"Project/EnhancedDeepPath/scripts/sl_policy.py"
] | [
"from __future__ import division\nfrom __future__ import print_function\nimport tensorflow as tf \nimport numpy as np\nfrom itertools import count\nimport sys\n\nfrom networks import policy_nn\nfrom utils import *\nfrom env import Env\nfrom BFS.KB import KB\nfrom BFS.BFS import BFS\nimport time\n\nrelation = sys.argv[1]\n# episodes = int(sys.argv[2])\ngraphpath = dataPath + 'tasks/' + relation + '/' + 'graph.txt'\nrelationPath = dataPath + 'tasks/' + relation + '/' + 'train_pos'\n\nclass SupervisedPolicy(object):\n\t\"\"\"docstring for SupervisedPolicy\"\"\"\n\tdef __init__(self, learning_rate = 0.001):\n\t\tself.initializer = tf.contrib.layers.xavier_initializer()\n\t\twith tf.variable_scope('supervised_policy'):\n\t\t\tself.state = tf.placeholder(tf.float32, [None, state_dim], name = 'state')\n\t\t\tself.action = tf.placeholder(tf.int32, [None], name = 'action')\n\t\t\tself.action_prob = policy_nn(self.state, state_dim, action_space, self.initializer)\n\n\t\t\taction_mask = tf.cast(tf.one_hot(self.action, depth = action_space), tf.bool)\n\t\t\tself.picked_action_prob = tf.boolean_mask(self.action_prob, action_mask)\n\n\t\t\tself.loss = tf.reduce_sum(-tf.log(self.picked_action_prob)) + sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope = 'supervised_policy'))\n\t\t\tself.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)\n\t\t\tself.train_op = self.optimizer.minimize(self.loss)\n\n\tdef predict(self, state, sess = None):\n\t\tsess = sess or tf.get_default_session()\n\t\treturn sess.run(self.action_prob, {self.state: state})\n\n\tdef update(self, state, action, sess = None):\n\t\tsess = sess or tf.get_default_session()\n\t\t_, loss = sess.run([self.train_op, self.loss], {self.state: state, self.action: action})\n\t\treturn loss\n\ndef train():\n\ttf.reset_default_graph()\n\tpolicy_nn = SupervisedPolicy()\n\n\tf = open(relationPath)\n\ttrain_data = f.readlines()\n\tf.close()\n\n\tnum_samples = len(train_data)\n\n\tsaver = tf.train.Saver()\n\twith tf.Session() as sess:\n\t\tsess.run(tf.global_variables_initializer())\n\t\tif num_samples > 500:\n\t\t\tnum_samples = 500\n\t\telse:\n\t\t\tnum_episodes = num_samples\n\n\t\tfor episode in range(num_samples):\n\t\t\tprint(\"Episode %d\" % episode)\n\t\t\tprint('Training Sample:', train_data[episode%num_samples][:-1])\n\n\t\t\tenv = Env(dataPath, train_data[episode%num_samples])\n\t\t\tsample = train_data[episode%num_samples].split()\n\n\t\t\ttry:\n\t\t\t\tgood_episodes = teacher(sample[0], sample[1], 5, env, graphpath)\n\t\t\texcept Exception as e:\n\t\t\t\tprint('Cannot find a path')\n\t\t\t\tcontinue\n\n\t\t\tfor item in good_episodes:\n\t\t\t\tstate_batch = []\n\t\t\t\taction_batch = []\n\t\t\t\tfor t, transition in enumerate(item):\n\t\t\t\t\tstate_batch.append(transition.state)\n\t\t\t\t\taction_batch.append(transition.action)\n\t\t\t\tstate_batch = np.squeeze(state_batch)\n\t\t\t\tstate_batch = np.reshape(state_batch, [-1, state_dim])\n\t\t\t\tpolicy_nn.update(state_batch, action_batch)\n\n\t\tsaver.save(sess, 'models/policy_supervised_' + relation)\n\t\tprint('Model saved')\n\n\ndef test(test_episodes):\n\ttf.reset_default_graph()\n\tpolicy_nn = SupervisedPolicy()\n\n\tf = open(relationPath)\n\ttest_data = f.readlines()\n\tf.close()\n\n\ttest_num = len(test_data)\n\n\ttest_data = test_data[-test_episodes:]\n\tprint(len(test_data))\n\t\n\tsuccess = 0\n\n\tsaver = tf.train.Saver()\n\twith tf.Session() as sess:\n\t\tsaver.restore(sess, 'models/policy_supervised_'+ relation)\n\t\tprint('Model reloaded')\n\t\tfor 
episode in range(len(test_data)):\n\t\t\tprint('Test sample %d: %s' % (episode,test_data[episode][:-1]))\n\t\t\tenv = Env(dataPath, test_data[episode])\n\t\t\tsample = test_data[episode].split()\n\t\t\tstate_idx = [env.entity2id_[sample[0]], env.entity2id_[sample[1]], 0]\n\t\t\tfor t in count():\n\t\t\t\tstate_vec = env.idx_state(state_idx)\n\t\t\t\taction_probs = policy_nn.predict(state_vec)\n\t\t\t\taction_chosen = np.random.choice(np.arange(action_space), p = np.squeeze(action_probs))\n\t\t\t\treward, new_state, done = env.interact(state_idx, action_chosen)\n\t\t\t\tif done or t == max_steps_test:\n\t\t\t\t\tif done:\n\t\t\t\t\t\tprint('Success')\n\t\t\t\t\t\tsuccess += 1\n\t\t\t\t\tprint('Episode ends\\n')\n\t\t\t\t\tbreak\n\t\t\t\tstate_idx = new_state\n\n\tprint('Success percentage:', success/test_episodes)\n\nif __name__ == \"__main__\":\n\ttrain()\n\t# test(50)\n\n"
] | [
[
"tensorflow.placeholder",
"numpy.squeeze",
"tensorflow.global_variables_initializer",
"tensorflow.get_collection",
"tensorflow.train.AdamOptimizer",
"numpy.reshape",
"tensorflow.variable_scope",
"tensorflow.get_default_session",
"numpy.arange",
"tensorflow.one_hot",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.log",
"tensorflow.boolean_mask",
"tensorflow.reset_default_graph"
]
] |
AndreasKaratzas/stonne | [
"2915fcc46cc94196303d81abbd1d79a56d6dd4a9",
"2915fcc46cc94196303d81abbd1d79a56d6dd4a9"
] | [
"pytorch-frontend/caffe2/python/operator_test/glu_op_test.py",
"pytorch-frontend/caffe2/python/workspace.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom caffe2.python import core\nimport caffe2.python.hypothesis_test_util as hu\nimport caffe2.python.serialized_test.serialized_test_util as serial\nfrom hypothesis import assume, given, settings, HealthCheck\nimport hypothesis.strategies as st\nimport numpy as np\n\nimport unittest\n\n\[email protected]\ndef _glu_old_input(draw):\n dims = draw(st.lists(st.integers(min_value=1, max_value=5), min_size=1, max_size=3))\n axis = draw(st.integers(min_value=0, max_value=len(dims)))\n # The axis dimension must be divisible by two\n axis_dim = 2 * draw(st.integers(min_value=1, max_value=2))\n dims.insert(axis, axis_dim)\n X = draw(hu.arrays(dims, np.float32, None))\n return (X, axis)\n\n\nclass TestGlu(serial.SerializedTestCase):\n @given(\n X_axis=_glu_old_input(),\n **hu.gcs\n )\n @settings(deadline=10000)\n def test_glu_old(self, X_axis, gc, dc):\n X, axis = X_axis\n\n def glu_ref(X):\n x1, x2 = np.split(X, [X.shape[axis] // 2], axis=axis)\n Y = x1 * (1. / (1. + np.exp(-x2)))\n return [Y]\n\n op = core.CreateOperator(\"Glu\", [\"X\"], [\"Y\"], dim=axis)\n self.assertReferenceChecks(gc, op, [X], glu_ref)\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"## @package workspace\n# Module caffe2.python.workspace\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport collections\nimport contextlib\nfrom google.protobuf.message import Message\nfrom multiprocessing import Process\nimport os\nfrom collections import defaultdict\nimport logging\nimport numpy as np\nfrom past.builtins import basestring\nimport shutil\nimport socket\nimport tempfile\n\nfrom caffe2.proto import caffe2_pb2\nfrom caffe2.python import scope, utils\nfrom caffe2.python.lazy import TriggerLazyImport\n\nimport caffe2.python._import_c_extension as C\n\nlogger = logging.getLogger(__name__)\n\nBlobs = C.blobs\nResetBlob = C.reset_blob\nCreateBlob = C.create_blob\nCurrentWorkspace = C.current_workspace\nDeserializeBlob = C.deserialize_blob\nGlobalInit = C.global_init\nHasBlob = C.has_blob\nRegisteredOperators = C.registered_operators\nSerializeBlob = C.serialize_blob\nSwitchWorkspace = C.switch_workspace\nRootFolder = C.root_folder\nWorkspaces = C.workspaces\nBenchmarkNet = C.benchmark_net\nBenchmarkNetOnce = C.benchmark_net_once\nGetStats = C.get_stats\nCreateOfflineTensor = C.create_offline_tensor\n\noperator_tracebacks = defaultdict(dict)\n\nis_asan = C.is_asan\nhas_cuda_support = C.has_cuda_support\nhas_hip_support = C.has_hip_support\nhas_gpu_support = C.has_gpu_support\nif has_cuda_support:\n GpuDeviceType = caffe2_pb2.CUDA\n NumCudaDevices = C.num_cuda_devices\n # This is a duplicate of NumCudaDevices. Remove\n # NumCudaDevices once replaced everywhere in the code\n NumGpuDevices = C.num_cuda_devices\n GetCUDAVersion = C.get_cuda_version\n GetCuDNNVersion = C.get_cudnn_version\n\n def GetGpuPeerAccessPattern():\n return np.asarray(C.get_cuda_peer_access_pattern())\n\n GetDeviceProperties = C.get_device_properties\n GetGPUMemoryInfo = C.get_gpu_memory_info\nelse:\n NumCudaDevices = lambda: 0 # noqa\n GetCUDAVersion = lambda: 0 # noqa\n GetCuDNNVersion = lambda: 0 # noqa\n\nif has_hip_support:\n GpuDeviceType = caffe2_pb2.HIP\n NumGpuDevices = C.num_hip_devices\n GetHIPVersion = C.get_hip_version\n\n def GetGpuPeerAccessPattern():\n return np.asarray(C.get_hip_peer_access_pattern())\n GetDeviceProperties = C.get_device_properties\n GetGPUMemoryInfo = C.get_gpu_memory_info\n\nif not has_gpu_support:\n # setting cuda as the default GpuDeviceType as some tests\n # like core, scope tests use GpuDeviceType even without gpu support\n GpuDeviceType = caffe2_pb2.CUDA\n NumGpuDevices = lambda: 0 # noqa\n GetDeviceProperties = lambda x: None # noqa\n GetGpuPeerAccessPattern = lambda: np.array([]) # noqa\n GetGPUMemoryInfo = lambda: None # noqa\n\nIsNUMAEnabled = C.is_numa_enabled\nGetNumNUMANodes = C.get_num_numa_nodes\nGetBlobNUMANode = C.get_blob_numa_node\nGetBlobSizeBytes = C.get_blob_size_bytes\n\n\ndef FillRandomNetworkInputs(net, input_dims, input_types):\n C.fill_random_network_inputs(net.Proto().SerializeToString(), input_dims, input_types)\n\n\ndef _GetFreeFlaskPort():\n \"\"\"Get a free flask port.\"\"\"\n # We will prefer to use 5000. If not, we will then pick a random port.\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex(('127.0.0.1', 5000))\n if result == 0:\n return 5000\n else:\n s = socket.socket()\n s.bind(('', 0))\n port = s.getsockname()[1]\n s.close()\n # Race condition: between the interval we close the socket and actually\n # start a mint process, another process might have occupied the port. 
We\n # don't do much here as this is mostly for convenience in research\n # rather than 24x7 service.\n return port\n\ndef StartMint(root_folder=None, port=None):\n \"\"\"Start a mint instance.\n\n TODO(Yangqing): this does not work well under ipython yet. According to\n https://github.com/ipython/ipython/issues/5862\n writing up some fix is a todo item.\n \"\"\"\n from caffe2.python.mint import app\n if root_folder is None:\n # Get the root folder from the current workspace\n root_folder = C.root_folder()\n if port is None:\n port = _GetFreeFlaskPort()\n process = Process(\n target=app.main,\n args=(\n ['-p', str(port), '-r', root_folder],\n )\n )\n process.start()\n print('Mint running at http://{}:{}'.format(socket.getfqdn(), port))\n return process\n\n\ndef StringifyProto(obj):\n \"\"\"Stringify a protocol buffer object.\n\n Inputs:\n obj: a protocol buffer object, or a Pycaffe2 object that has a Proto()\n function.\n Outputs:\n string: the output protobuf string.\n Raises:\n AttributeError: if the passed in object does not have the right attribute.\n \"\"\"\n if isinstance(obj, basestring):\n return obj\n else:\n if isinstance(obj, Message):\n # First, see if this object is a protocol buffer, which we can\n # simply serialize with the SerializeToString() call.\n return obj.SerializeToString()\n elif hasattr(obj, 'Proto'):\n return obj.Proto().SerializeToString()\n else:\n raise ValueError(\"Unexpected argument to StringifyProto of type \" +\n type(obj).__name__)\n\n\ndef ResetWorkspace(root_folder=None):\n if root_folder is None:\n # Reset the workspace, but keep the current root folder setting.\n return C.reset_workspace(C.root_folder())\n else:\n if not os.path.exists(root_folder):\n os.makedirs(root_folder)\n return C.reset_workspace(root_folder)\n\n\ndef CreateNet(net, overwrite=False, input_blobs=None):\n TriggerLazyImport()\n if input_blobs is None:\n input_blobs = []\n for input_blob in input_blobs:\n C.create_blob(input_blob)\n return CallWithExceptionIntercept(\n C.create_net,\n C.Workspace.current._last_failed_op_net_position,\n GetNetName(net),\n StringifyProto(net), overwrite,\n )\n\n\ndef Predictor(init_net, predict_net):\n return C.Predictor(StringifyProto(init_net), StringifyProto(predict_net))\n\n\ndef GetOperatorCost(operator, blobs):\n return C.get_operator_cost(StringifyProto(operator), blobs)\n\n\ndef RunOperatorOnce(operator):\n return C.run_operator_once(StringifyProto(operator))\n\n\ndef RunOperatorMultiple(operator, num_runs):\n return C.run_operator_multiple(StringifyProto(operator), num_runs)\n\n\ndef RunOperatorsOnce(operators):\n for op in operators:\n success = RunOperatorOnce(op)\n if not success:\n return False\n return True\n\n\ndef ClearGlobalNetObserver():\n return C.clear_global_net_observer()\n\n\ndef CallWithExceptionIntercept(func, op_id_fetcher, net_name, *args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception:\n op_id = op_id_fetcher()\n net_tracebacks = operator_tracebacks.get(net_name, None)\n logger.warning(\n 'Original python traceback for operator `{}` in network '\n '`{}` in exception above (most recent call last):'.format(\n op_id, net_name))\n if net_tracebacks and op_id in net_tracebacks:\n tb = net_tracebacks[op_id]\n for line in reversed(tb):\n logger.warning(' File \"{}\", line {}, in {}'.format(\n line[0], line[1], line[2]))\n raise\n\n\ndef RunNetOnce(net):\n return CallWithExceptionIntercept(\n C.run_net_once,\n C.Workspace.current._last_failed_op_net_position,\n GetNetName(net),\n StringifyProto(net),\n 
)\n\n\ndef RunNet(name, num_iter=1, allow_fail=False):\n \"\"\"Runs a given net.\n\n Inputs:\n name: the name of the net, or a reference to the net.\n num_iter: number of iterations to run\n allow_fail: if True, does not assert on net exec failure but returns False\n Returns:\n True or an exception.\n \"\"\"\n return CallWithExceptionIntercept(\n C.run_net,\n C.Workspace.current._last_failed_op_net_position,\n GetNetName(name),\n StringifyNetName(name), num_iter, allow_fail,\n )\n\n\ndef RunPlan(plan_or_step):\n # TODO(jiayq): refactor core.py/workspace.py to avoid circular deps\n import caffe2.python.core as core\n if isinstance(plan_or_step, core.ExecutionStep):\n plan_or_step = core.Plan(plan_or_step)\n return C.run_plan(StringifyProto(plan_or_step))\n\n\ndef RunPlanInBackground(plan_or_step):\n # TODO(jiayq): refactor core.py/workspace.py to avoid circular deps\n import caffe2.python.core as core\n if isinstance(plan_or_step, core.ExecutionStep):\n plan_or_step = core.Plan(plan_or_step)\n return C.run_plan_in_background(StringifyProto(plan_or_step))\n\n\ndef InferShapesAndTypes(nets, blob_dimensions=None, nets_proto=False,\n blob_types=None):\n \"\"\"Infers the shapes and types for the specified nets.\n\n Inputs:\n nets: the list of nets\n blob_dimensions (optional): a dictionary of blobs and their dimensions.\n If not specified, the workspace blobs are used.\n nets_proto (optional): a boolean flag indicating whether the protobuffer\n representation is passed to the routine.\n Returns:\n A tuple of (shapes, types) dictionaries keyed by blob name.\n \"\"\"\n if nets_proto:\n net_protos = [StringifyProto(n) for n in nets]\n else:\n net_protos = [StringifyProto(n.Proto()) for n in nets]\n if blob_dimensions is None:\n assert blob_types is None\n blobdesc_prototxt = C.infer_shapes_and_types_from_workspace(net_protos)\n elif blob_types is None:\n blobdesc_prototxt = C.infer_shapes_and_types_from_map(\n net_protos, blob_dimensions\n )\n else:\n blobdesc_prototxt = C.infer_shapes_and_types_from_map(\n net_protos, blob_dimensions, blob_types\n )\n blobdesc_proto = caffe2_pb2.TensorShapes()\n blobdesc_proto.ParseFromString(blobdesc_prototxt)\n shapes = {}\n types = {}\n for ts in blobdesc_proto.shapes:\n if not ts.unknown_shape:\n shapes[ts.name] = list(ts.dims)\n types[ts.name] = ts.data_type\n\n return (shapes, types)\n\n\ndef _StringifyName(name, expected_type):\n if isinstance(name, basestring):\n return name\n assert type(name).__name__ == expected_type, \\\n \"Expected a string or %s\" % expected_type\n return str(name)\n\n\ndef StringifyBlobName(name):\n return _StringifyName(name, \"BlobReference\")\n\n\ndef StringifyNetName(name):\n return _StringifyName(name, \"Net\")\n\n\ndef GetNetName(net):\n if isinstance(net, basestring):\n return net\n if type(net).__name__ == \"Net\":\n return net.Name()\n if isinstance(net, caffe2_pb2.NetDef):\n return net.name\n raise Exception(\"Not a Net object: {}\".format(str(net)))\n\n\ndef FeedBlob(name, arr, device_option=None):\n \"\"\"Feeds a blob into the workspace.\n\n Inputs:\n name: the name of the blob.\n arr: either a TensorProto object or a numpy array object to be fed into\n the workspace.\n device_option (optional): the device option to feed the data with.\n Returns:\n True or False, stating whether the feed is successful.\n \"\"\"\n ws = C.Workspace.current\n return _Workspace_feed_blob(ws, name, arr, device_option)\n\n\ndef FetchBlobs(names):\n \"\"\"Fetches a list of blobs from the workspace.\n\n Inputs:\n names: list of names of blobs - 
strings or BlobReferences\n Returns:\n list of fetched blobs\n \"\"\"\n return [FetchBlob(name) for name in names]\n\n\ndef FetchBlob(name):\n \"\"\"Fetches a blob from the workspace.\n\n Inputs:\n name: the name of the blob - a string or a BlobReference\n Returns:\n Fetched blob (numpy array or string) if successful\n \"\"\"\n result = C.fetch_blob(StringifyBlobName(name))\n if isinstance(result, tuple):\n raise TypeError(\n \"Use FetchInt8Blob to fetch Int8 Blob {}\".format(\n StringifyBlobName(name)\n )\n )\n return result\n\n\ndef FetchTorch(name):\n ws = C.Workspace.current\n return ws.blobs[name].to_torch()\n\n\nInt8Tensor = collections.namedtuple(\n 'Int8Tensor', ['data', 'scale', 'zero_point']\n)\n\n\ndef FetchInt8Blob(name):\n \"\"\"Fetches an Int8 blob from the workspace. It shared backend implementation\n with FetchBlob but it is recommended when fetching Int8 Blobs\n\n Inputs:\n name: the name of the Int8 blob - a string or a BlobReference\n Returns:\n data: int8 numpy array, data\n scale: float, fake quantization scale\n zero_point: int, fake quantization offset\n \"\"\"\n result = C.fetch_blob(StringifyBlobName(name))\n assert isinstance(result, tuple), \\\n 'You are not fetching an Int8Blob {}. Please use FetchBlob'.format(\n StringifyBlobName(name))\n return Int8Tensor(*result)\n\n\ndef FetchInt8BlobRealVal(name):\n \"\"\"Fetches an Int8 blob from the workspace and return its real value representation.\n\n Inputs:\n name: the name of the Int8 blob - a string or a BlobReference\n Returns:\n real value representation of int8 numpy array\n \"\"\"\n result = C.fetch_blob(StringifyBlobName(name))\n assert isinstance(result, tuple), \\\n 'You are not fetching an Int8Blob {}. Please use FetchBlob'.format(\n StringifyBlobName(name))\n int8_blob = Int8Tensor(*result)\n return (int8_blob.data.astype(np.int32) - int(int8_blob.zero_point)).astype(\n np.float32) * int8_blob.scale\n\n\ndef _Workspace_fetch_int8_blob(ws, name):\n \"\"\"Fetches an Int8 blob from the workspace. It shared backend implementation\n with FetchBlob but it is recommended when fetching Int8 Blobs\n\n Inputs:\n name: the name of the Int8 blob - a string or a BlobReference\n Returns:\n data: int8 numpy array, data\n scale: float, fake quantization scale\n zero_point: int, fake quantization offset\n \"\"\"\n result = ws.fetch_blob(name)\n assert isinstance(result, tuple), \\\n 'You are not fetching an Int8Blob {}. 
Please use fetch_blob'.format(\n StringifyBlobName(name))\n return Int8Tensor(*result)\n\n\nC.Workspace.fetch_int8_blob = _Workspace_fetch_int8_blob\n\n\ndef ApplyTransform(transform_key, net):\n \"\"\"Apply a Transform to a NetDef protobuf object, and returns the new\n transformed NetDef.\n\n Inputs:\n transform_key: the name of the transform, as it is stored in the registry\n net: a NetDef protobuf object\n Returns:\n Transformed NetDef protobuf object.\n \"\"\"\n transformed_net = caffe2_pb2.NetDef()\n transformed_str = C.apply_transform(\n str(transform_key).encode('utf-8'),\n net.SerializeToString(),\n )\n transformed_net.ParseFromString(transformed_str)\n return transformed_net\n\n\ndef ApplyTransformIfFaster(transform_key, net, init_net, **kwargs):\n \"\"\"Apply a Transform to a NetDef protobuf object, and returns the new\n transformed NetDef, only if it runs faster than the original.\n\n The runs are performed on the current active workspace (gWorkspace).\n You should initialize that workspace before making a call to this function.\n\n Inputs:\n transform_key: the name of the transform, as it is stored in the registry\n net: a NetDef protobuf object\n init_net: The net to initialize the workspace.\n warmup_runs (optional):\n Determines how many times the net is run before testing.\n Will be 5 by default.\n main_runs (optional):\n Determines how many times the net is run during testing.\n Will be 10 by default.\n improvement_threshold (optional):\n Determines the factor which the new net needs to be faster\n in order to replace the old. Will be 1.01 by default.\n\n Returns:\n Either a Transformed NetDef protobuf object, or the original netdef.\n \"\"\"\n\n warmup_runs = kwargs['warmup_runs'] if 'warmup_runs' in kwargs else 5\n main_runs = kwargs['main_runs'] if 'main_runs' in kwargs else 10\n improvement_threshold = kwargs['improvement_threshold'] \\\n if 'improvement_threshold' in kwargs else 1.01\n\n transformed_net = caffe2_pb2.NetDef()\n transformed_str = C.apply_transform_if_faster(\n str(transform_key).encode('utf-8'),\n net.SerializeToString(),\n init_net.SerializeToString(),\n warmup_runs,\n main_runs,\n float(improvement_threshold),\n )\n transformed_net.ParseFromString(transformed_str)\n return transformed_net\n\n\ndef GetNameScope():\n \"\"\"Return the current namescope string. To be used to fetch blobs\"\"\"\n return scope.CurrentNameScope()\n\n\nclass _BlobDict(object):\n \"\"\"Provides python dict compatible way to do fetching and feeding\"\"\"\n\n def __getitem__(self, key):\n return FetchBlob(key)\n\n def __setitem__(self, key, value):\n return FeedBlob(key, value)\n\n def __len__(self):\n return len(C.blobs())\n\n def __iter__(self):\n return C.blobs().__iter__()\n\n def __contains__(self, item):\n return C.has_blob(item)\n\n\nblobs = _BlobDict()\n\n\n################################################################################\n# Utilities for immediate mode\n#\n# Caffe2's immediate mode implements the following behavior: between the two\n# function calls StartImmediate() and StopImmediate(), for any operator that is\n# called through CreateOperator(), we will also run that operator in a workspace\n# that is specific to the immediate mode. The user is explicitly expected to\n# make sure that these ops have proper inputs and outputs, i.e. 
one should not\n# run an op where an external input is not created or fed.\n#\n# Users can use FeedImmediate() and FetchImmediate() to interact with blobs\n# in the immediate workspace.\n#\n# Once StopImmediate() is called, all contents in the immediate workspace is\n# freed up so one can continue using normal runs.\n#\n# The immediate mode is solely for debugging purposes and support will be very\n# sparse.\n################################################################################\n\n_immediate_mode = False\n_immediate_workspace_name = \"_CAFFE2_IMMEDIATE\"\n_immediate_root_folder = ''\n\n\ndef IsImmediate():\n return _immediate_mode\n\n\n@contextlib.contextmanager\ndef WorkspaceGuard(workspace_name):\n current = CurrentWorkspace()\n SwitchWorkspace(workspace_name, True)\n yield\n SwitchWorkspace(current)\n\n\ndef StartImmediate(i_know=False):\n global _immediate_mode\n global _immediate_root_folder\n if IsImmediate():\n # already in immediate mode. We will kill the previous one\n # and start from fresh.\n StopImmediate()\n _immediate_mode = True\n with WorkspaceGuard(_immediate_workspace_name):\n _immediate_root_folder = tempfile.mkdtemp()\n ResetWorkspace(_immediate_root_folder)\n if i_know:\n # if the user doesn't want to see the warning message, sure...\n return\n print(\"\"\"\n Enabling immediate mode in caffe2 python is an EXTREMELY EXPERIMENTAL\n feature and may very easily go wrong. This is because Caffe2 uses a\n declarative way of defining operators and models, which is essentially\n not meant to run things in an interactive way. Read the following carefully\n to make sure that you understand the caveats.\n\n (1) You need to make sure that the sequences of operators you create are\n actually runnable sequentially. For example, if you create an op that takes\n an input X, somewhere earlier you should have already created X.\n\n (2) Caffe2 immediate uses one single workspace, so if the set of operators\n you run are intended to be under different workspaces, they will not run.\n To create boundaries between such use cases, you can call FinishImmediate()\n and StartImmediate() manually to flush out everything no longer needed.\n\n (3) Underlying objects held by the immediate mode may interfere with your\n normal run. For example, if there is a leveldb that you opened in immediate\n mode and did not close, your main run will fail because leveldb does not\n support double opening. Immediate mode may also occupy a lot of memory esp.\n on GPUs. Call FinishImmediate() as soon as possible when you no longer\n need it.\n\n (4) Immediate is designed to be slow. Every immediate call implicitly\n creates a temp operator object, runs it, and destroys the operator. This\n slow-speed run is by design to discourage abuse. For most use cases other\n than debugging, do NOT turn on immediate mode.\n\n (5) If there is anything FATAL happening in the underlying C++ code, the\n immediate mode will immediately (pun intended) cause the runtime to crash.\n\n Thus you should use immediate mode with extra care. 
If you still would\n like to, have fun [https://xkcd.com/149/].\n \"\"\")\n\n\ndef StopImmediate():\n \"\"\"Stops an immediate mode run.\"\"\"\n # Phew, that was a dangerous ride.\n global _immediate_mode\n global _immediate_root_folder\n if not IsImmediate():\n return\n with WorkspaceGuard(_immediate_workspace_name):\n ResetWorkspace()\n shutil.rmtree(_immediate_root_folder)\n _immediate_root_folder = ''\n _immediate_mode = False\n\n\ndef ImmediateBlobs():\n with WorkspaceGuard(_immediate_workspace_name):\n return Blobs()\n\n\ndef RunOperatorImmediate(op):\n with WorkspaceGuard(_immediate_workspace_name):\n RunOperatorOnce(op)\n\n\ndef FetchImmediate(*args, **kwargs):\n with WorkspaceGuard(_immediate_workspace_name):\n return FetchBlob(*args, **kwargs)\n\n\ndef FeedImmediate(*args, **kwargs):\n with WorkspaceGuard(_immediate_workspace_name):\n return FeedBlob(*args, **kwargs)\n\n\n# C.Workspace methods.\n\ndef _Workspace_create_net_with_exception_intercept(ws, net, overwrite=False):\n return CallWithExceptionIntercept(\n ws._create_net,\n ws._last_failed_op_net_position,\n GetNetName(net),\n StringifyProto(net), overwrite,\n )\n\n\ndef _Workspace_run(ws, obj):\n if hasattr(obj, 'Proto'):\n obj = obj.Proto()\n if isinstance(obj, caffe2_pb2.PlanDef):\n return ws._run_plan(obj.SerializeToString())\n if isinstance(obj, caffe2_pb2.NetDef):\n return CallWithExceptionIntercept(\n ws._run_net,\n ws._last_failed_op_net_position,\n GetNetName(obj),\n obj.SerializeToString(),\n )\n # return ws._run_net(obj.SerializeToString())\n if isinstance(obj, caffe2_pb2.OperatorDef):\n return ws._run_operator(obj.SerializeToString())\n raise ValueError(\n \"Don't know how to do Workspace.run() on {}\".format(type(obj)))\n\n\ndef _Workspace_feed_blob(ws, name, arr, device_option=None):\n if type(arr) is caffe2_pb2.TensorProto:\n arr = utils.Caffe2TensorToNumpyArray(arr)\n if type(arr) is np.ndarray and arr.dtype.kind in 'SU':\n # Plain NumPy strings are weird, let's use objects instead\n arr = arr.astype(np.object)\n\n if device_option is None:\n device_option = scope.CurrentDeviceScope()\n\n if device_option and device_option.device_type == caffe2_pb2.CUDA:\n if arr.dtype == np.dtype('float64'):\n logger.warning(\n \"CUDA operators do not support 64-bit doubles, \" +\n \"please use arr.astype(np.float32) or np.int32 for ints.\" +\n \" Blob: {}\".format(name) +\n \" type: {}\".format(str(arr.dtype))\n )\n\n name = StringifyBlobName(name)\n if device_option is not None:\n return ws.create_blob(name).feed(arr, device_option)\n else:\n return ws.create_blob(name).feed(arr)\n\n\ndef _Workspace_remove_blob(ws, blob):\n ws._remove_blob(str(blob))\n\n\nWorkspace = C.Workspace\nWorkspace.create_net = _Workspace_create_net_with_exception_intercept\nWorkspace.run = _Workspace_run\nWorkspace.feed_blob = _Workspace_feed_blob\nWorkspace.remove_blob = _Workspace_remove_blob\n\n# C.Blob methods.\n\n\ndef _Blob_feed(blob, arg, device_option=None):\n # conservative type check to avoid unnecessary import\n if type(arg).__name__ == 'Tensor' and type(arg).__module__ == 'torch':\n import torch\n if isinstance(arg, torch.Tensor):\n assert device_option is None, \\\n \"device_option doesn't make sense with PyTorch tensors\"\n handle = torch._C._tensor_impl_raw_handle(arg)\n blob._wrap_tensor_impl(handle)\n return True # _feed() returns True for some reason\n if device_option is not None:\n device_option = StringifyProto(device_option)\n return blob._feed(arg, device_option)\n\n\nC.Blob.feed = _Blob_feed\n\n\ndef 
_Tensor_to_torch(tensor):\n \"\"\"\n PyTorch tensor interop (TensorCPU methods)\n\n Can be accessed as:\n workspace.Workspace.current.blobs['foo'].tensor().to_torch()\n \"\"\"\n # avoiding circular dependency\n import torch\n handle = tensor._tensor_impl_raw_handle()\n return torch._C._wrap_tensor_impl(handle)\n\nC.TensorCPU.to_torch = _Tensor_to_torch\n\n\ndef _Blob_to_torch(blob):\n if not blob.is_tensor():\n raise RuntimeError(\"Blob has to be a tensor\")\n return blob.as_tensor().to_torch()\n\nC.Blob.to_torch = _Blob_to_torch\n"
] | [
[
"numpy.exp",
"numpy.split"
],
[
"numpy.array",
"numpy.dtype",
"torch._C._wrap_tensor_impl",
"torch._C._tensor_impl_raw_handle"
]
] |
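For orientation, the two caffe2 files in this entry fit together: the test drives the `Glu` operator through hypothesis-generated inputs, while `workspace.py` supplies the blob plumbing (`FeedBlob`, `RunOperatorOnce`, `FetchBlob`, and the dict-style `blobs` wrapper). A minimal round trip using only calls quoted above might look like the sketch below; the input shape and `dim=1` are illustrative assumptions, and a working caffe2 build is assumed.

```python
# Hedged sketch: feed a blob, run the Glu op from the test file above,
# and fetch the result. All workspace calls are the ones defined in the
# quoted workspace.py.
import numpy as np
from caffe2.python import core, workspace

workspace.FeedBlob("X", np.random.rand(4, 6).astype(np.float32))

# Glu halves the chosen axis and gates: Y = X1 * sigmoid(X2), so the
# axis size must be even (6 -> 3 here).
op = core.CreateOperator("Glu", ["X"], ["Y"], dim=1)
assert workspace.RunOperatorOnce(op)

Y = workspace.FetchBlob("Y")
print(Y.shape)                  # (4, 3)
print("Y" in workspace.blobs)   # dict-style access defined above
```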
adshieh/cvxpy | [
"73b696b71dbb2ceb66a805798c922461e33afc6b"
] | [
"cvxpy/problems/problem.py"
] | [
"\"\"\"\nCopyright 2013 Steven Diamond, 2017 Akshay Agrawal\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom cvxpy import settings as s\nfrom cvxpy import error\nfrom cvxpy.problems.objective import Minimize, Maximize\nfrom cvxpy.reductions.chain import Chain\nfrom cvxpy.reductions.dgp2dcp.dgp2dcp import Dgp2Dcp\nfrom cvxpy.reductions.dqcp2dcp import dqcp2dcp\nfrom cvxpy.reductions.eval_params import EvalParams\nfrom cvxpy.reductions.flip_objective import FlipObjective\nfrom cvxpy.reductions.solvers.solving_chain import construct_solving_chain\nfrom cvxpy.interface.matrix_utilities import scalar_value\nfrom cvxpy.reductions.solvers import bisection\nfrom cvxpy.reductions.solvers import defines as slv_def\nfrom cvxpy.utilities.deterministic import unique_list\nimport cvxpy.utilities.performance_utils as perf\nfrom cvxpy.constraints import Equality, Inequality, NonPos, Zero, NonNeg\nimport cvxpy.utilities as u\n\nfrom collections import namedtuple\nimport numpy as np\nimport time\n\n\nSolveResult = namedtuple(\n 'SolveResult',\n ['opt_value', 'status', 'primal_values', 'dual_values'])\n\n\nclass Cache(object):\n def __init__(self):\n self.key = None\n self.solving_chain = None\n self.param_prog = None\n self.inverse_data = None\n\n def invalidate(self):\n self.key = None\n self.solving_chain = None\n self.param_prog = None\n self.inverse_data = None\n\n def make_key(self, solver, gp):\n return (solver, gp)\n\n def gp(self):\n return self.key is not None and self.key[1]\n\n\nclass Problem(u.Canonical):\n \"\"\"A convex optimization problem.\n\n Problems are immutable, save for modification through the specification\n of :class:`~cvxpy.expressions.constants.parameters.Parameter`\n\n Arguments\n ---------\n objective : Minimize or Maximize\n The problem's objective.\n constraints : list\n The constraints on the problem variables.\n \"\"\"\n\n # The solve methods available.\n REGISTERED_SOLVE_METHODS = {}\n\n def __init__(self, objective, constraints=None):\n if constraints is None:\n constraints = []\n # Check that objective is Minimize or Maximize.\n if not isinstance(objective, (Minimize, Maximize)):\n raise error.DCPError(\"Problem objective must be Minimize or Maximize.\")\n # Constraints and objective are immutable.\n self._objective = objective\n self._constraints = [c for c in constraints]\n self._value = None\n self._status = None\n self._solution = None\n self._cache = Cache()\n self._solver_cache = {}\n # Information about the shape of the problem and its constituent parts\n self._size_metrics = None\n # Benchmarks reported by the solver:\n self._solver_stats = None\n self.args = [self._objective, self._constraints]\n\n @property\n def value(self):\n \"\"\"float : The value from the last time the problem was solved\n (or None if not solved).\n \"\"\"\n if self._value is None:\n return None\n else:\n return scalar_value(self._value)\n\n @property\n def status(self):\n \"\"\"str : The status from the last time the problem was solved; one\n of optimal, infeasible, or unbounded (with or 
without\n suffix inaccurate).\n \"\"\"\n return self._status\n\n @property\n def solution(self):\n \"\"\"Solution : The solution from the last time the problem was solved.\n \"\"\"\n return self._solution\n\n @property\n def objective(self):\n \"\"\"Minimize or Maximize : The problem's objective.\n\n Note that the objective cannot be reassigned after creation,\n and modifying the objective after creation will result in\n undefined behavior.\n \"\"\"\n return self._objective\n\n @property\n def constraints(self):\n \"\"\"A shallow copy of the problem's constraints.\n\n Note that constraints cannot be reassigned, appended to, or otherwise\n modified after creation, except through parameters.\n \"\"\"\n return self._constraints[:]\n\n @perf.compute_once\n def is_dcp(self, dpp=False):\n \"\"\"Does the problem satisfy DCP rules?\n\n Arguments\n ---------\n dpp : bool, optional\n If True, enforce the disciplined parametrized programming (DPP)\n ruleset; only relevant when the problem involves Parameters.\n DPP is a mild restriction of DCP. When a problem involving\n Parameters is DPP, subsequent solves can be much faster than\n the first one. For more information, consult the documentation at\n\n https://www.cvxpy.org/tutorial/advanced/index.html#disciplined-parametrized-programming\n\n Returns\n -------\n bool\n True if the Expression is DCP, False otherwise.\n \"\"\"\n return all(\n expr.is_dcp(dpp) for expr in self.constraints + [self.objective])\n\n @perf.compute_once\n def is_dgp(self, dpp=False):\n \"\"\"Does the problem satisfy DGP rules?\n\n Arguments\n ---------\n dpp : bool, optional\n If True, enforce the disciplined parametrized programming (DPP)\n ruleset; only relevant when the problem involves Parameters.\n DPP is a mild restriction of DGP. When a problem involving\n Parameters is DPP, subsequent solves can be much faster than\n the first one. For more information, consult the documentation at\n\n https://www.cvxpy.org/tutorial/advanced/index.html#disciplined-parametrized-programming\n\n Returns\n -------\n bool\n True if the Expression is DGP, False otherwise.\n \"\"\"\n return all(\n expr.is_dgp(dpp) for expr in self.constraints + [self.objective])\n\n @perf.compute_once\n def is_dqcp(self):\n \"\"\"Does the problem satisfy the DQCP rules?\n \"\"\"\n return all(\n expr.is_dqcp() for expr in self.constraints + [self.objective])\n\n @perf.compute_once\n def is_dpp(self, context='dcp'):\n \"\"\"Does the problem satisfy DPP rules?\n\n DPP is a mild restriction of DGP. When a problem involving\n Parameters is DPP, subsequent solves can be much faster than\n the first one. For more information, consult the documentation at\n\n https://www.cvxpy.org/tutorial/advanced/index.html#disciplined-parametrized-programming\n\n Arguments\n ---------\n context : str\n Whether to check DPP-compliance for DCP or DGP; ``context`` should\n be either ``'dcp'`` or ``'dgp'``. 
Calling ``problem.is_dpp('dcp')``\n is equivalent to ``problem.is_dcp(dpp=True)``, and\n `problem.is_dpp('dgp')`` is equivalent to\n `problem.is_dgp(dpp=True)`.\n\n Returns\n -------\n bool\n Whether the problem satisfies the DPP rules.\n \"\"\"\n if context.lower() == 'dcp':\n return self.is_dcp(dpp=True)\n elif context.lower() == 'dgp':\n return self.is_dgp(dpp=True)\n else:\n raise ValueError(\"Unsupported context \", context)\n\n @perf.compute_once\n def is_qp(self):\n \"\"\"Is problem a quadratic program?\n \"\"\"\n for c in self.constraints:\n if not (isinstance(c, (Equality, Zero)) or c.args[0].is_pwl()):\n return False\n for var in self.variables():\n if var.is_psd() or var.is_nsd():\n return False\n return (self.is_dcp() and self.objective.args[0].is_qpwa())\n\n @perf.compute_once\n def is_mixed_integer(self):\n return any(v.attributes['boolean'] or v.attributes['integer']\n for v in self.variables())\n\n @perf.compute_once\n def variables(self):\n \"\"\"Accessor method for variables.\n\n Returns\n -------\n list of :class:`~cvxpy.expressions.variable.Variable`\n A list of the variables in the problem.\n \"\"\"\n vars_ = self.objective.variables()\n for constr in self.constraints:\n vars_ += constr.variables()\n return unique_list(vars_)\n\n @perf.compute_once\n def parameters(self):\n \"\"\"Accessor method for parameters.\n\n Returns\n -------\n list of :class:`~cvxpy.expressions.constants.parameter.Parameter`\n A list of the parameters in the problem.\n \"\"\"\n params = self.objective.parameters()\n for constr in self.constraints:\n params += constr.parameters()\n return unique_list(params)\n\n @perf.compute_once\n def constants(self):\n \"\"\"Accessor method for constants.\n\n Returns\n -------\n list of :class:`~cvxpy.expressions.constants.constant.Constant`\n A list of the constants in the problem.\n \"\"\"\n const_dict = {}\n constants_ = self.objective.constants()\n for constr in self.constraints:\n constants_ += constr.constants()\n # Note that numpy matrices are not hashable, so we use the built-in\n # function \"id\"\n const_dict = {id(constant): constant for constant in constants_}\n return list(const_dict.values())\n\n def atoms(self):\n \"\"\"Accessor method for atoms.\n\n Returns\n -------\n list of :class:`~cvxpy.atoms.Atom`\n A list of the atom types in the problem; note that this list\n contains classes, not instances.\n \"\"\"\n atoms = self.objective.atoms()\n for constr in self.constraints:\n atoms += constr.atoms()\n return unique_list(atoms)\n\n @property\n def size_metrics(self):\n \"\"\":class:`~cvxpy.problems.problem.SizeMetrics` : Information about the problem's size.\n \"\"\"\n if self._size_metrics is None:\n self._size_metrics = SizeMetrics(self)\n return self._size_metrics\n\n @property\n def solver_stats(self):\n \"\"\":class:`~cvxpy.problems.problem.SolverStats` : Information returned by the solver.\n \"\"\"\n return self._solver_stats\n\n def solve(self, *args, **kwargs):\n \"\"\"Solves the problem using the specified method.\n\n Populates the :code:`status` and :code:`value` attributes on the\n problem object as a side-effect.\n\n Arguments\n ---------\n solver : str, optional\n The solver to use. 
For example, 'ECOS', 'SCS', or 'OSQP'.\n verbose : bool, optional\n Overrides the default of hiding solver output.\n gp : bool, optional\n If True, parses the problem as a disciplined geometric program\n instead of a disciplined convex program.\n qcp : bool, optional\n If True, parses the problem as a disciplined quasiconvex program\n instead of a disciplined convex program.\n requires_grad : bool, optional\n Makes it possible to compute gradients of a solution with respect to\n Parameters by calling ``problem.backward()`` after solving, or to\n compute perturbations to the variables given perturbations to Parameters by\n calling ``problem.derivative()``.\n\n Gradients are only supported for DCP and DGP problems, not\n quasiconvex problems. When computing gradients (i.e., when\n this argument is True), the problem must satisfy the DPP rules.\n enforce_dpp : bool, optional\n When True, a DPPError will be thrown when trying to solve a non-DPP\n problem (instead of just a warning). Only relevant for problems\n involving Parameters. Defaults to False.\n method : function, optional\n A custom solve method to use.\n kwargs : keywords, optional\n Additional solver specific arguments. See Notes below.\n\n Notes\n ------\n CVXPY interfaces with a wide range of solvers; the algorithms used by these solvers\n have arguments relating to stopping criteria, and strategies to improve solution quality.\n\n There is no one choice of arguments which is perfect for every problem. If you are not\n getting satisfactory results from a solver, you can try changing its arguments. The\n exact way this is done depends on the specific solver. Here are some examples:\n\n prob.solve(solver='ECOS', abstol=1e-6)\n prob.solve(solver='OSQP', max_iter=10000).\n mydict = {\"MSK_DPAR_INTPNT_CO_TOL_NEAR_REL\": 10}\n prob.solve(solver='MOSEK', mosek_params=mydict).\n\n You should refer to CVXPY's web documentation for details on how to pass solver\n solver arguments, available at\n\n https://www.cvxpy.org/tutorial/advanced/index.html#setting-solver-options\n\n Returns\n -------\n float\n The optimal value for the problem, or a string indicating\n why the problem could not be solved.\n\n Raises\n ------\n cvxpy.error.DCPError\n Raised if the problem is not DCP and `gp` is False.\n cvxpy.error.DGPError\n Raised if the problem is not DGP and `gp` is True.\n cvxpy.error.SolverError\n Raised if no suitable solver exists among the installed solvers,\n or if an unanticipated error is encountered.\n \"\"\"\n func_name = kwargs.pop(\"method\", None)\n if func_name is not None:\n solve_func = Problem.REGISTERED_SOLVE_METHODS[func_name]\n else:\n solve_func = Problem._solve\n return solve_func(self, *args, **kwargs)\n\n @classmethod\n def register_solve(cls, name, func):\n \"\"\"Adds a solve method to the Problem class.\n\n Arguments\n ---------\n name : str\n The keyword for the method.\n func : function\n The function that executes the solve method. This function must\n take as its first argument the problem instance to solve.\n \"\"\"\n cls.REGISTERED_SOLVE_METHODS[name] = func\n\n def get_problem_data(self, solver, gp=False, enforce_dpp=False):\n \"\"\"Returns the problem data used in the call to the solver.\n\n When a problem is solved, CVXPY creates a chain of reductions enclosed\n in a :class:`~cvxpy.reductions.solvers.solving_chain.SolvingChain`,\n and compiles it to some low-level representation that is\n compatible with the targeted solver. 
This method returns that low-level\n representation.\n\n For some solving chains, this low-level representation is a dictionary\n that contains exactly those arguments that were supplied to the solver;\n however, for other solving chains, the data is an intermediate\n representation that is compiled even further by the solver interfaces.\n\n A solution to the equivalent low-level problem can be obtained via the\n data by invoking the `solve_via_data` method of the returned solving\n chain, a thin wrapper around the code external to CVXPY that further\n processes and solves the problem. Invoke the unpack_results method\n to recover a solution to the original problem.\n\n For example:\n\n ::\n\n objective = ...\n constraints = ...\n problem = cp.Problem(objective, constraints)\n data, chain, inverse_data = problem.get_problem_data(cp.SCS)\n # calls SCS using `data`\n soln = chain.solve_via_data(problem, data)\n # unpacks the solution returned by SCS into `problem`\n problem.unpack_results(soln, chain, inverse_data)\n\n Alternatively, the `data` dictionary returned by this method\n contains enough information to bypass CVXPY and call the solver\n directly.\n\n For example:\n\n ::\n\n problem = cp.Problem(objective, constraints)\n data, _, _ = problem.get_problem_data(cp.SCS)\n\n import scs\n probdata = {\n 'A': data['A'],\n 'b': data['b'],\n 'c': data['c'],\n }\n cone_dims = data['dims']\n cones = {\n \"f\": cone_dims.zero,\n \"l\": cone_dims.nonpos,\n \"q\": cone_dims.soc,\n \"ep\": cone_dims.exp,\n \"s\": cone_dims.psd,\n }\n soln = scs.solve(data, cones)\n\n The structure of the data dict that CVXPY returns depends on the\n solver. For details, consult the solver interfaces in\n `cvxpy/reductions/solvers`.\n\n Arguments\n ---------\n solver : str\n The solver the problem data is for.\n gp : bool, optional\n If True, then parses the problem as a disciplined geometric program\n instead of a disciplined convex program.\n enforce_dpp : bool, optional\n When True, a DPPError will be thrown when trying to parse a non-DPP\n problem (instead of just a warning). 
Defaults to False.\n\n Returns\n -------\n dict or object\n lowest level representation of problem\n SolvingChain\n The solving chain that created the data.\n list\n The inverse data generated by the chain.\n \"\"\"\n key = self._cache.make_key(solver, gp)\n if key != self._cache.key:\n self._cache.invalidate()\n solving_chain = self._construct_chain(\n solver=solver, gp=gp, enforce_dpp=enforce_dpp)\n self._cache.key = key\n self._cache.solving_chain = solving_chain\n self._solver_cache = {}\n else:\n solving_chain = self._cache.solving_chain\n\n if self._cache.param_prog is not None:\n # fast path, bypasses application of reductions\n if gp:\n dgp2dcp = self._cache.solving_chain.get(Dgp2Dcp)\n # Parameters in the param cone prog are the logs\n # of parameters in the original problem (with one exception:\n # parameters appearing as exponents (in power and gmatmul\n # atoms) are unchanged.\n old_params_to_new_params = dgp2dcp.canon_methods._parameters\n for param in self.parameters():\n if param in old_params_to_new_params:\n old_params_to_new_params[param].value = np.log(\n param.value)\n\n data, solver_inverse_data = solving_chain.solver.apply(\n self._cache.param_prog)\n inverse_data = self._cache.inverse_data + [solver_inverse_data]\n else:\n data, inverse_data = solving_chain.apply(self)\n safe_to_cache = (\n isinstance(data, dict)\n and s.PARAM_PROB in data\n and not any(isinstance(reduction, EvalParams)\n for reduction in solving_chain.reductions)\n )\n if safe_to_cache:\n self._cache.param_prog = data[s.PARAM_PROB]\n # the last datum in inverse_data corresponds to the solver,\n # so we shouldn't cache it\n self._cache.inverse_data = inverse_data[:-1]\n return data, solving_chain, inverse_data\n\n def _find_candidate_solvers(self,\n solver=None,\n gp=False):\n \"\"\"\n Find candiate solvers for the current problem. If solver\n is not None, it checks if the specified solver is compatible\n with the problem passed.\n\n Arguments\n ---------\n solver : string\n The name of the solver with which to solve the problem. If no\n solver is supplied (i.e., if solver is None), then the targeted\n solver may be any of those that are installed. 
If the problem\n is variable-free, then this parameter is ignored.\n gp : bool\n If True, the problem is parsed as a Disciplined Geometric Program\n instead of as a Disciplined Convex Program.\n\n Returns\n -------\n dict\n A dictionary of compatible solvers divided in `qp_solvers`\n and `conic_solvers`.\n\n Raises\n ------\n cvxpy.error.SolverError\n Raised if the problem is not DCP and `gp` is False.\n cvxpy.error.DGPError\n Raised if the problem is not DGP and `gp` is True.\n \"\"\"\n candidates = {'qp_solvers': [],\n 'conic_solvers': []}\n\n if solver is not None:\n if solver not in slv_def.INSTALLED_SOLVERS:\n raise error.SolverError(\"The solver %s is not installed.\" % solver)\n if solver in slv_def.CONIC_SOLVERS:\n candidates['conic_solvers'] += [solver]\n if solver in slv_def.QP_SOLVERS:\n candidates['qp_solvers'] += [solver]\n else:\n candidates['qp_solvers'] = [s for s in slv_def.INSTALLED_SOLVERS\n if s in slv_def.QP_SOLVERS]\n candidates['conic_solvers'] = [s for s in slv_def.INSTALLED_SOLVERS\n if s in slv_def.CONIC_SOLVERS]\n\n # If gp we must have only conic solvers\n if gp:\n if solver is not None and solver not in slv_def.CONIC_SOLVERS:\n raise error.SolverError(\n \"When `gp=True`, `solver` must be a conic solver \"\n \"(received '%s'); try calling \" % solver +\n \" `solve()` with `solver=cvxpy.ECOS`.\"\n )\n elif solver is None:\n candidates['qp_solvers'] = [] # No QP solvers allowed\n\n if self.is_mixed_integer():\n if len(slv_def.INSTALLED_MI_SOLVERS) == 0:\n msg = \"\"\"\n\n CVXPY needs additional software (a `mixed-integer solver`) to handle this model.\n The web documentation\n https://www.cvxpy.org/tutorial/advanced/index.html#mixed-integer-programs\n reviews open-source and commercial options for mixed-integer solvers.\n\n Quick fix: if you install the python package CVXOPT (pip install cvxopt),\n then CVXPY can use the open-source mixed-integer solver `GLPK`.\n \"\"\"\n raise error.SolverError(msg)\n candidates['qp_solvers'] = [\n s for s in candidates['qp_solvers']\n if slv_def.SOLVER_MAP_QP[s].MIP_CAPABLE]\n candidates['conic_solvers'] = [\n s for s in candidates['conic_solvers']\n if slv_def.SOLVER_MAP_CONIC[s].MIP_CAPABLE]\n if not candidates['conic_solvers'] and \\\n not candidates['qp_solvers']:\n raise error.SolverError(\n \"Problem is mixed-integer, but candidate \"\n \"QP/Conic solvers (%s) are not MIP-capable.\" %\n (candidates['qp_solvers'] +\n candidates['conic_solvers']))\n\n return candidates\n\n def _construct_chain(self, solver=None, gp=False, enforce_dpp=False):\n \"\"\"\n Construct the chains required to reformulate and solve the problem.\n\n In particular, this function\n\n # finds the candidate solvers\n # constructs the solving chain that performs the\n numeric reductions and solves the problem.\n\n Arguments\n ---------\n solver : str, optional\n The solver to use. 
Defaults to ECOS.\n gp : bool, optional\n If True, the problem is parsed as a Disciplined Geometric Program\n instead of as a Disciplined Convex Program.\n enforce_dpp : bool, optional\n Whether to error on DPP violations.\n\n Returns\n -------\n A solving chain\n \"\"\"\n candidate_solvers = self._find_candidate_solvers(solver=solver, gp=gp)\n return construct_solving_chain(self, candidate_solvers, gp=gp,\n enforce_dpp=enforce_dpp)\n\n def _invalidate_cache(self):\n self._cache_key = None\n self._solving_chain = None\n self._param_prog = None\n self._inverse_data = None\n\n def _solve(self,\n solver=None,\n warm_start=True,\n verbose=False,\n gp=False, qcp=False, requires_grad=False, enforce_dpp=False, **kwargs):\n \"\"\"Solves a DCP compliant optimization problem.\n\n Saves the values of primal and dual variables in the variable\n and constraint objects, respectively.\n\n Arguments\n ---------\n solver : str, optional\n The solver to use. Defaults to ECOS.\n warm_start : bool, optional\n Should the previous solver result be used to warm start?\n verbose : bool, optional\n Overrides the default of hiding solver output.\n gp : bool, optional\n If True, parses the problem as a disciplined geometric program.\n qcp : bool, optional\n If True, parses the problem as a disciplined quasiconvex program.\n requires_grad : bool, optional\n Makes it possible to compute gradients with respect to\n parameters by calling `backward()` after solving, or to compute\n perturbations to the variables by calling `derivative()`. When\n True, the solver must be SCS, and dqcp must be False.\n A DPPError is thrown when problem is not DPP.\n enforce_dpp : bool, optional\n When True, a DPPError will be thrown when trying to solve a non-DPP\n problem (instead of just a warning). Defaults to False.\n kwargs : dict, optional\n A dict of options that will be passed to the specific solver.\n In general, these options will override any default settings\n imposed by cvxpy.\n\n Returns\n -------\n float\n The optimal value for the problem, or a string indicating\n why the problem could not be solved.\n \"\"\"\n for parameter in self.parameters():\n if parameter.value is None:\n raise error.ParameterError(\n \"A Parameter (whose name is '%s') does not have a value \"\n \"associated with it; all Parameter objects must have \"\n \"values before solving a problem.\" % parameter.name())\n\n if requires_grad:\n dpp_context = 'dgp' if gp else 'dcp'\n if qcp:\n raise ValueError(\"Cannot compute gradients of DQCP problems.\")\n elif not self.is_dpp(dpp_context):\n raise error.DPPError(\"Problem is not DPP (when requires_grad \"\n \"is True, problem must be DPP).\")\n elif solver is not None and solver not in [s.SCS, s.DIFFCP]:\n raise ValueError(\"When requires_grad is True, the only \"\n \"supported solver is SCS \"\n \"(received %s).\" % solver)\n elif s.DIFFCP not in slv_def.INSTALLED_SOLVERS:\n raise ImportError(\n \"The Python package diffcp must be installed to \"\n \"differentiate through problems. 
Please follow the \"\n \"installation instructions at \"\n \"https://github.com/cvxgrp/diffcp\")\n else:\n solver = s.DIFFCP\n else:\n if gp and qcp:\n raise ValueError(\"At most one of `gp` and `qcp` can be True.\")\n if qcp and not self.is_dcp():\n if not self.is_dqcp():\n raise error.DQCPError(\"The problem is not DQCP.\")\n reductions = [dqcp2dcp.Dqcp2Dcp()]\n if type(self.objective) == Maximize:\n reductions = [FlipObjective()] + reductions\n chain = Chain(problem=self, reductions=reductions)\n soln = bisection.bisect(\n chain.reduce(), solver=solver, verbose=verbose, **kwargs)\n self.unpack(chain.retrieve(soln))\n return self.value\n\n data, solving_chain, inverse_data = self.get_problem_data(\n solver, gp, enforce_dpp)\n solution = solving_chain.solve_via_data(\n self, data, warm_start, verbose, kwargs)\n self.unpack_results(solution, solving_chain, inverse_data)\n return self.value\n\n def backward(self):\n \"\"\"Compute the gradient of a solution with respect to Parameters.\n\n This method differentiates through the solution map of the problem,\n obtaining the gradient of a solution with respect to the Parameters.\n In other words, it calculates the sensitivities of the Parameters\n with respect to perturbations in the optimal Variable values. This\n can be useful for integrating CVXPY into automatic differentation\n toolkits.\n\n ``backward()`` populates the ``gradient`` attribute of each Parameter\n in the problem as a side-effect. It can only be called after calling\n ``solve()`` with ``requires_grad=True``.\n\n Below is a simple example:\n\n ::\n\n import cvxpy as cp\n import numpy as np\n\n p = cp.Parameter()\n x = cp.Variable()\n quadratic = cp.square(x - 2 * p)\n problem = cp.Problem(cp.Minimize(quadratic), [x >= 0])\n p.value = 3.0\n problem.solve(requires_grad=True, eps=1e-10)\n # backward() populates the gradient attribute of the parameters\n problem.backward()\n # Because x* = 2 * p, dx*/dp = 2\n np.testing.assert_allclose(p.gradient, 2.0)\n\n In the above example, the gradient could easily be computed by hand.\n The ``backward()`` is useful because for almost all problems, the\n gradient cannot be computed analytically.\n\n This method can be used to differentiate through any DCP or DGP\n problem, as long as the problem is DPP compliant (i.e.,\n ``problem.is_dcp(dpp=True)`` or ``problem.is_dgp(dpp=True)`` evaluates to\n ``True``).\n\n This method uses the chain rule to evaluate the gradients of a\n scalar-valued function of the Variables with respect to the Parameters.\n For example, let x be a variable and p a Parameter; x and p might be\n scalars, vectors, or matrices. Let f be a scalar-valued function, with\n z = f(x). Then this method computes dz/dp = (dz/dx) (dx/p). dz/dx\n is chosen as the all-ones vector by default, corresponding to\n choosing f to be the sum function. You can specify a custom value for\n dz/dx by setting the ``gradient`` attribute on your variables. For example,\n\n ::\n\n import cvxpy as cp\n import numpy as np\n\n\n b = cp.Parameter()\n x = cp.Variable()\n quadratic = cp.square(x - 2 * b)\n problem = cp.Problem(cp.Minimize(quadratic), [x >= 0])\n b.value = 3.\n problem.solve(requires_grad=True, eps=1e-10)\n x.gradient = 4.\n problem.backward()\n # dz/dp = dz/dx dx/dp = 4. * 2. 
== 8.\n np.testing.assert_allclose(b.gradient, 8.)\n\n The ``gradient`` attribute on a variable can also be interpreted as a\n perturbation to its optimal value.\n\n Raises\n ------\n ValueError\n if solve was not called with ``requires_grad=True``\n SolverError\n if the problem is infeasible or unbounded\n \"\"\"\n if s.DIFFCP not in self._solver_cache:\n raise ValueError(\"backward can only be called after calling \"\n \"solve with `requires_grad=True`\")\n elif self.status not in s.SOLUTION_PRESENT:\n raise error.SolverError(\"Backpropagating through \"\n \"infeasible/unbounded problems is not \"\n \"yet supported. Please file an issue on \"\n \"Github if you need this feature.\")\n\n # TODO(akshayka): Backpropagate through dual variables as well.\n backward_cache = self._solver_cache[s.DIFFCP]\n DT = backward_cache[\"DT\"]\n zeros = np.zeros(backward_cache[\"s\"].shape)\n del_vars = {}\n\n gp = self._cache.gp()\n for variable in self.variables():\n if variable.gradient is None:\n del_vars[variable.id] = np.ones(variable.shape)\n else:\n del_vars[variable.id] = np.asarray(variable.gradient,\n dtype=np.float64)\n if gp:\n # x_gp = exp(x_cone_program),\n # dx_gp/d x_cone_program = exp(x_cone_program) = x_gp\n del_vars[variable.id] *= variable.value\n\n dx = self._cache.param_prog.split_adjoint(del_vars)\n start = time.time()\n dA, db, dc = DT(dx, zeros, zeros)\n end = time.time()\n backward_cache['DT_TIME'] = end - start\n dparams = self._cache.param_prog.apply_param_jac(dc, -dA, db)\n\n if not gp:\n for param in self.parameters():\n param.gradient = dparams[param.id]\n else:\n dgp2dcp = self._cache.solving_chain.get(Dgp2Dcp)\n old_params_to_new_params = dgp2dcp.canon_methods._parameters\n for param in self.parameters():\n # Note: if param is an exponent in a power or gmatmul atom,\n # then the parameter passes through unchanged to the DCP\n # program; if the param is also used elsewhere (not as an\n # exponent), then param will also be in\n # old_params_to_new_params. Therefore, param.gradient =\n # dparams[param.id] (or 0) + 1/param*dparams[new_param.id]\n #\n # Note that param.id is in dparams if and only if\n # param was used as an exponent (because this means that\n # the parameter entered the DCP problem unchanged.)\n grad = 0.0 if param.id not in dparams else dparams[param.id]\n if param in old_params_to_new_params:\n new_param = old_params_to_new_params[param]\n # new_param.value == log(param), apply chain rule\n grad += (1.0 / param.value) * dparams[new_param.id]\n param.gradient = grad\n\n def derivative(self):\n \"\"\"Apply the derivative of the solution map to perturbations in the Parameters\n\n This method applies the derivative of the solution map to perturbations\n in the Parameters to obtain perturbations in the optimal values of the\n Variables. In other words, it tells you how the optimal values of the\n Variables would be changed by small changes to the Parameters.\n\n You can specify perturbations in a Parameter by setting its ``delta``\n attribute (if unspecified, the perturbation defaults to 0).\n\n This method populates the ``delta`` attribute of the Variables as a\n side-effect.\n\n This method can only be called after calling ``solve()`` with\n ``requires_grad=True``. 
It is compatible with both DCP and DGP\n problems (that are also DPP-compliant).\n\n Below is a simple example:\n\n ::\n\n import cvxpy as cp\n import numpy as np\n\n p = cp.Parameter()\n x = cp.Variable()\n quadratic = cp.square(x - 2 * p)\n problem = cp.Problem(cp.Minimize(quadratic), [x >= 0])\n p.value = 3.0\n problem.solve(requires_grad=True, eps=1e-10)\n # derivative() populates the delta attribute of the variables\n problem.derivative()\n p.delta = 1e-3\n # Because x* = 2 * p, dx*/dp = 2, so (dx*/dp)(p.delta) == 2e-3\n np.testing.assert_allclose(x.delta, 2e-3)\n\n Raises\n ------\n ValueError\n if solve was not called with ``requires_grad=True``\n SolverError\n if the problem is infeasible or unbounded\n \"\"\"\n if s.DIFFCP not in self._solver_cache:\n raise ValueError(\"derivative can only be called after calling \"\n \"solve with `requires_grad=True`\")\n elif self.status not in s.SOLUTION_PRESENT:\n raise ValueError(\"Differentiating through infeasible/unbounded \"\n \"problems is not yet supported. Please file an \"\n \"issue on Github if you need this feature.\")\n # TODO(akshayka): Forward differentiate dual variables as well\n backward_cache = self._solver_cache[s.DIFFCP]\n param_prog = self._cache.param_prog\n D = backward_cache[\"D\"]\n param_deltas = {}\n\n gp = self._cache.gp()\n if gp:\n dgp2dcp = self._cache.solving_chain.get(Dgp2Dcp)\n\n if not self.parameters():\n for variable in self.variables():\n variable.delta = np.zeros(variable.shape)\n return\n\n for param in self.parameters():\n delta = param.delta if param.delta is not None else np.zeros(param.shape)\n if gp:\n if param in dgp2dcp.canon_methods._parameters:\n new_param_id = dgp2dcp.canon_methods._parameters[param].id\n else:\n new_param_id = param.id\n param_deltas[new_param_id] = (\n 1.0/param.value * np.asarray(delta, dtype=np.float64))\n if param.id in param_prog.param_id_to_col:\n # here, param generated a new parameter and also\n # passed through to the param cone prog unchanged\n # (because it was an exponent of a power)\n param_deltas[param.id] = np.asarray(delta,\n dtype=np.float64)\n else:\n param_deltas[param.id] = np.asarray(delta, dtype=np.float64)\n dc, _, dA, db = param_prog.apply_parameters(param_deltas,\n zero_offset=True)\n start = time.time()\n dx, _, _ = D(-dA, db, dc)\n end = time.time()\n backward_cache['D_TIME'] = end - start\n dvars = param_prog.split_solution(\n dx, [v.id for v in self.variables()])\n for variable in self.variables():\n variable.delta = dvars[variable.id]\n if gp:\n # x_gp = exp(x_cone_program),\n # dx_gp/d x_cone_program = exp(x_cone_program) = x_gp\n variable.delta *= variable.value\n\n def _clear_solution(self):\n for v in self.variables():\n v.save_value(None)\n for c in self.constraints:\n for dv in c.dual_variables:\n dv.save_value(None)\n self._value = None\n self._status = None\n self._solution = None\n\n def unpack(self, solution):\n \"\"\"Updates the problem state given a Solution.\n\n Updates problem.status, problem.value and value of primal and dual\n variables. 
If solution.status is in cvxpy.settins.ERROR, this method\n is a no-op.\n\n Arguments\n _________\n solution : cvxpy.Solution\n A Solution object.\n\n Raises\n ------\n ValueError\n If the solution object has an invalid status\n \"\"\"\n if solution.status in s.SOLUTION_PRESENT:\n for v in self.variables():\n v.save_value(solution.primal_vars[v.id])\n for c in self.constraints:\n if c.id in solution.dual_vars:\n c.save_dual_value(solution.dual_vars[c.id])\n elif solution.status in s.INF_OR_UNB:\n for v in self.variables():\n v.save_value(None)\n for constr in self.constraints:\n for dv in constr.dual_variables:\n dv.save_value(None)\n else:\n raise ValueError(\"Cannot unpack invalid solution: %s\" % solution)\n\n self._value = solution.opt_val\n self._status = solution.status\n self._solution = solution\n\n def unpack_results(self, solution, chain, inverse_data):\n \"\"\"Updates the problem state given the solver results.\n\n Updates problem.status, problem.value and value of\n primal and dual variables.\n\n Arguments\n _________\n solution : object\n The solution returned by applying the chain to the problem\n and invoking the solver on the resulting data.\n chain : SolvingChain\n A solving chain that was used to solve the problem.\n inverse_data : list\n The inverse data returned by applying the chain to the problem.\n\n Raises\n ------\n cvxpy.error.SolverError\n If the solver failed\n \"\"\"\n\n solution = chain.invert(solution, inverse_data)\n if solution.status in s.ERROR:\n raise error.SolverError(\n \"Solver '%s' failed. \" % chain.solver.name() +\n \"Try another solver, or solve with verbose=True for more \"\n \"information.\")\n self.unpack(solution)\n self._solver_stats = SolverStats(self._solution.attr,\n chain.solver.name())\n\n def __str__(self):\n if len(self.constraints) == 0:\n return str(self.objective)\n else:\n subject_to = \"subject to \"\n lines = [str(self.objective),\n subject_to + str(self.constraints[0])]\n for constr in self.constraints[1:]:\n lines += [len(subject_to) * \" \" + str(constr)]\n return '\\n'.join(lines)\n\n def __repr__(self):\n return \"Problem(%s, %s)\" % (repr(self.objective),\n repr(self.constraints))\n\n def __neg__(self):\n return Problem(-self.objective, self.constraints)\n\n def __add__(self, other):\n if other == 0:\n return self\n elif not isinstance(other, Problem):\n return NotImplemented\n return Problem(self.objective + other.objective,\n unique_list(self.constraints + other.constraints))\n\n def __radd__(self, other):\n if other == 0:\n return self\n else:\n return NotImplemented\n\n def __sub__(self, other):\n if not isinstance(other, Problem):\n return NotImplemented\n return Problem(self.objective - other.objective,\n unique_list(self.constraints + other.constraints))\n\n def __rsub__(self, other):\n if other == 0:\n return -self\n else:\n return NotImplemented\n\n def __mul__(self, other):\n if not isinstance(other, (int, float)):\n return NotImplemented\n return Problem(self.objective * other, self.constraints)\n\n __rmul__ = __mul__\n\n def __div__(self, other):\n if not isinstance(other, (int, float)):\n return NotImplemented\n return Problem(self.objective * (1.0 / other), self.constraints)\n\n def is_constant(self):\n return False\n\n __truediv__ = __div__\n\n\nclass SolverStats(object):\n \"\"\"Reports some of the miscellaneous information that is returned\n by the solver after solving but that is not captured directly by\n the Problem instance.\n\n Attributes\n ----------\n solve_time : double\n The time (in seconds) it 
took for the solver to solve the problem.\n setup_time : double\n The time (in seconds) it took for the solver to setup the problem.\n num_iters : int\n The number of iterations the solver had to go through to find a solution.\n \"\"\"\n def __init__(self, results_dict, solver_name):\n self.solver_name = solver_name\n self.solve_time = None\n self.setup_time = None\n self.num_iters = None\n\n if s.SOLVE_TIME in results_dict:\n self.solve_time = results_dict[s.SOLVE_TIME]\n if s.SETUP_TIME in results_dict:\n self.setup_time = results_dict[s.SETUP_TIME]\n if s.NUM_ITERS in results_dict:\n self.num_iters = results_dict[s.NUM_ITERS]\n\n\nclass SizeMetrics(object):\n \"\"\"Reports various metrics regarding the problem.\n\n Attributes\n ----------\n\n num_scalar_variables : integer\n The number of scalar variables in the problem.\n num_scalar_data : integer\n The number of scalar constants and parameters in the problem. The number of\n constants used across all matrices, vectors, in the problem.\n Some constants are not apparent when the problem is constructed: for example,\n The sum_squares expression is a wrapper for a quad_over_lin expression with a\n constant 1 in the denominator.\n num_scalar_eq_constr : integer\n The number of scalar equality constraints in the problem.\n num_scalar_leq_constr : integer\n The number of scalar inequality constraints in the problem.\n\n max_data_dimension : integer\n The longest dimension of any data block constraint or parameter.\n max_big_small_squared : integer\n The maximum value of (big)(small)^2 over all data blocks of the problem, where\n (big) is the larger dimension and (small) is the smaller dimension\n for each data block.\n \"\"\"\n\n def __init__(self, problem):\n # num_scalar_variables\n self.num_scalar_variables = 0\n for var in problem.variables():\n self.num_scalar_variables += var.size\n\n # num_scalar_data, max_data_dimension, and max_big_small_squared\n self.max_data_dimension = 0\n self.num_scalar_data = 0\n self.max_big_small_squared = 0\n for const in problem.constants()+problem.parameters():\n big = 0\n # Compute number of data\n self.num_scalar_data += const.size\n big = 1 if len(const.shape) == 0 else max(const.shape)\n small = 1 if len(const.shape) == 0 else min(const.shape)\n\n # Get max data dimension:\n if self.max_data_dimension < big:\n self.max_data_dimension = big\n\n max_big_small_squared = float(big)*(float(small)**2)\n if self.max_big_small_squared < max_big_small_squared:\n self.max_big_small_squared = max_big_small_squared\n\n # num_scalar_eq_constr\n self.num_scalar_eq_constr = 0\n for constraint in problem.constraints:\n if isinstance(constraint, (Equality, Zero)):\n self.num_scalar_eq_constr += constraint.expr.size\n\n # num_scalar_leq_constr\n self.num_scalar_leq_constr = 0\n for constraint in problem.constraints:\n if isinstance(constraint, (Inequality, NonPos, NonNeg)):\n self.num_scalar_leq_constr += constraint.expr.size\n"
] | [
[
"numpy.ones",
"numpy.log",
"numpy.asarray",
"numpy.zeros"
]
] |
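The `__add__`, `__sub__`, and `__mul__` overloads in the `Problem` class above compose problems by combining objectives and merging constraint lists through `unique_list`. A minimal usage sketch of that arithmetic, assuming cvxpy is installed (the variables are illustrative only):

import cvxpy as cp

x = cp.Variable()
p1 = cp.Problem(cp.Minimize(cp.square(x)), [x >= 1])
p2 = cp.Problem(cp.Minimize(cp.abs(x - 3)), [x <= 5])

combined = p1 + p2  # objectives add; constraints are merged with duplicates dropped
print(combined.solve())  # minimizes x**2 + |x - 3| subject to 1 <= x <= 5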
Danielznn16/RoboticHand-in-KG | [
"27e4eee97ea4ecab40fbd13b24a97e1f94c10258"
] | [
"models/pointnet2_part_seg_msg.py"
] | [
"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom models.pointnet_util import PointNetSetAbstractionMsg,PointNetSetAbstraction,PointNetFeaturePropagation\n\n\nclass get_model(nn.Module):\n def __init__(self, num_classes, normal_channel=False):\n super(get_model, self).__init__()\n if normal_channel:\n additional_channel = 3\n else:\n additional_channel = 0\n self.normal_channel = normal_channel\n self.sa1 = PointNetSetAbstractionMsg(512, [0.1, 0.2, 0.4], [32, 64, 128], 3+additional_channel, [[32, 32, 64], [64, 64, 128], [64, 96, 128]])\n self.sa2 = PointNetSetAbstractionMsg(128, [0.4,0.8], [64, 128], 128+128+64, [[128, 128, 256], [128, 196, 256]])\n self.sa3 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[256, 512, 1024], group_all=True)\n self.fp3 = PointNetFeaturePropagation(in_channel=1536, mlp=[256, 256])\n self.fp2 = PointNetFeaturePropagation(in_channel=576, mlp=[256, 128])\n self.fp1 = PointNetFeaturePropagation(in_channel=150+additional_channel, mlp=[128, 128])\n self.conv1 = nn.Conv1d(128, 128, 1)\n self.bn1 = nn.BatchNorm1d(128)\n self.drop1 = nn.Dropout(0.5)\n self.conv2 = nn.Conv1d(128, num_classes, 1)\n\n def forward(self, xyz, cls_label):\n # Set Abstraction layers\n B,C,N = xyz.shape\n if self.normal_channel:\n l0_points = xyz\n l0_xyz = xyz[:,:3,:]\n else:\n l0_points = xyz\n l0_xyz = xyz\n l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)\n l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)\n l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)\n # Feature Propagation layers\n l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)\n l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)\n cls_label_one_hot = cls_label.view(B,16,1).repeat(1,1,N)\n # print(cls_label_one_hot)\n l0_points = self.fp1(l0_xyz, l1_xyz, torch.cat([cls_label_one_hot,l0_xyz,l0_points],1), l1_points)\n # FC layers\n feat = F.relu(self.bn1(self.conv1(l0_points)))\n x = self.drop1(feat)\n x = self.conv2(x)\n x = F.log_softmax(x, dim=1)\n x = x.permute(0, 2, 1)\n return x, l3_points\n\n\nclass get_loss(nn.Module):\n def __init__(self):\n super(get_loss, self).__init__()\n\n def forward(self, pred, target, trans_feat):\n total_loss = F.nll_loss(pred, target)\n\n return total_loss"
] | [
[
"torch.nn.functional.log_softmax",
"torch.nn.BatchNorm1d",
"torch.nn.functional.nll_loss",
"torch.nn.Conv1d",
"torch.cat",
"torch.nn.Dropout"
]
] |
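In the row above, `get_model.forward` ends with `F.log_softmax` and `get_loss` applies `F.nll_loss`; that pairing is exactly cross-entropy. A self-contained sketch of the identity (shapes are made up for illustration):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)          # batch of 4, 10 classes
target = torch.randint(0, 10, (4,))

loss_a = F.nll_loss(F.log_softmax(logits, dim=1), target)
loss_b = F.cross_entropy(logits, target)
assert torch.allclose(loss_a, loss_b)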
ZvonimirBandic/QuCumber | [
"81f0291951e89346fd8ab5c35cc90341fd8acf35"
] | [
"qucumber/nn_states/density_matrix.py"
] | [
"# Copyright 2019 PIQuIL - All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport warnings\n\nimport torch\nfrom torch.nn import functional as F\n\nfrom qucumber import _warn_on_missing_gpu\nfrom qucumber.utils import cplx, unitaries\nfrom qucumber.rbm import PurificationRBM\nfrom .neural_state import NeuralStateBase\n\n\nclass DensityMatrix(NeuralStateBase):\n r\"\"\"\n :param num_visible: The number of visible units, i.e. the size of the system\n :type num_visible: int\n :param num_hidden: The number of units in the hidden layer\n :type num_hidden: int\n :param num_aux: The number of units in the purification layer\n :type num_aux: int\n :param unitary_dict: A dictionary associating bases with their unitary rotations\n :type unitary_dict: dict[str, torch.Tensor]\n :param gpu: Whether to perform computations on the default gpu.\n :type gpu: bool\n \"\"\"\n\n _rbm_am = None\n _rbm_ph = None\n _device = None\n\n def __init__(\n self,\n num_visible,\n num_hidden=None,\n num_aux=None,\n unitary_dict=None,\n gpu=False,\n module=None,\n ):\n if gpu and torch.cuda.is_available():\n warnings.warn(\n \"Using DensityMatrix on GPU is not recommended due to poor performance compared to CPU.\",\n ResourceWarning,\n 2,\n )\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n\n if module is None:\n self.rbm_am = PurificationRBM(num_visible, num_hidden, num_aux, gpu=gpu)\n self.rbm_ph = PurificationRBM(num_visible, num_hidden, num_aux, gpu=gpu)\n else:\n _warn_on_missing_gpu(gpu)\n self.rbm_am = module.to(self.device)\n self.rbm_am.device = self.device\n self.rbm_ph = module.to(self.device).clone()\n self.rbm_ph.device = self.device\n\n self.num_visible = self.rbm_am.num_visible\n self.num_hidden = self.rbm_am.num_hidden\n self.num_aux = self.rbm_am.num_aux\n self.device = self.rbm_am.device\n\n self.unitary_dict = unitary_dict if unitary_dict else unitaries.create_dict()\n self.unitary_dict = {\n k: v.to(device=self.device) for k, v in self.unitary_dict.items()\n }\n\n @property\n def networks(self):\n return [\"rbm_am\", \"rbm_ph\"]\n\n @property\n def rbm_am(self):\n return self._rbm_am\n\n @rbm_am.setter\n def rbm_am(self, new_val):\n self._rbm_am = new_val\n\n @property\n def rbm_ph(self):\n \"\"\"RBM used to learn the wavefunction phase.\"\"\"\n return self._rbm_ph\n\n @rbm_ph.setter\n def rbm_ph(self, new_val):\n self._rbm_ph = new_val\n\n @property\n def device(self):\n return self._device\n\n @device.setter\n def device(self, new_val):\n self._device = new_val\n\n def pi(self, v, vp, expand=True):\n r\"\"\"Calculates elements of the :math:`\\Pi` matrix.\n If `expand` is `True`, will return a complex matrix\n :math:`A_{ij} = \\langle\\sigma_i|\\Pi|\\sigma'_j\\rangle`.\n Otherwise will return a complex vector\n :math:`A_{i} = \\langle\\sigma_i|\\Pi|\\sigma'_i\\rangle`.\n\n :param v: A batch of visible states, :math:`\\sigma`.\n :type v: torch.Tensor\n :param vp: The other batch of visible state, :math:`\\sigma'`.\n :type vp: torch.Tensor\n 
:param expand: Whether to return a matrix (`True`) or a vector (`False`).\n :type expand: bool\n\n :returns: The matrix elements given by :math:`\\langle\\sigma|\\Pi|\\sigma'\\rangle`\n :rtype: torch.Tensor\n \"\"\"\n m_am = F.linear(v, self.rbm_am.weights_U, self.rbm_am.aux_bias)\n mp_am = F.linear(vp, self.rbm_am.weights_U, self.rbm_am.aux_bias)\n\n m_ph = F.linear(v, self.rbm_ph.weights_U)\n mp_ph = F.linear(vp, self.rbm_ph.weights_U)\n\n if expand and v.dim() >= 2:\n m_am = m_am.unsqueeze_(1)\n m_ph = m_ph.unsqueeze_(1)\n if expand and vp.dim() >= 2:\n mp_am = mp_am.unsqueeze_(0)\n mp_ph = mp_ph.unsqueeze_(0)\n\n exp_arg = (m_am + mp_am) / 2\n phase = (m_ph - mp_ph) / 2\n\n real = (\n (1 + 2 * exp_arg.exp() * phase.cos() + (2 * exp_arg).exp())\n .sqrt()\n .log()\n .sum(-1)\n )\n\n imag = torch.atan2(\n (exp_arg.exp() * phase.sin()), (1 + exp_arg.exp() * phase.cos())\n ).sum(-1)\n\n return cplx.make_complex(real, imag)\n\n def pi_grad(self, v, vp, phase=False, expand=False):\n r\"\"\"Calculates the gradient of the :math:`\\Pi` matrix with\n respect to the amplitude RBM parameters for two input states\n\n :param v: One of the visible states, :math:`\\sigma`\n :type v: torch.Tensor\n :param vp: The other visible state, :math`\\sigma'`\n :type vp: torch.Tensor\n :param phase: Whether to compute the gradients for the phase RBM (`True`)\n or the amplitude RBM (`False`)\n :type phase: bool\n\n :returns: The matrix element of the gradient given by\n :math:`\\langle\\sigma|\\nabla_\\lambda\\Pi|\\sigma'\\rangle`\n :rtype: torch.Tensor\n \"\"\"\n unsqueezed = v.dim() < 2 or vp.dim() < 2\n v = (v.unsqueeze(0) if v.dim() < 2 else v).to(self.rbm_am.weights_W)\n vp = (vp.unsqueeze(0) if vp.dim() < 2 else vp).to(self.rbm_am.weights_W)\n\n if expand:\n arg_real = 0.5 * (\n F.linear(v, self.rbm_am.weights_U, self.rbm_am.aux_bias).unsqueeze_(1)\n + F.linear(vp, self.rbm_am.weights_U, self.rbm_am.aux_bias).unsqueeze_(\n 0\n )\n )\n arg_imag = 0.5 * (\n F.linear(v, self.rbm_ph.weights_U).unsqueeze_(1)\n - F.linear(vp, self.rbm_ph.weights_U).unsqueeze_(0)\n )\n else:\n arg_real = self.rbm_am.mixing_term(v + vp)\n arg_imag = self.rbm_ph.mixing_term(v - vp)\n\n sig = cplx.sigmoid(arg_real, arg_imag)\n\n batch_sizes = (\n (v.shape[0], vp.shape[0], *v.shape[1:-1]) if expand else (*v.shape[:-1],)\n )\n\n W_grad = torch.zeros_like(self.rbm_am.weights_W).expand(*batch_sizes, -1, -1)\n vb_grad = torch.zeros_like(self.rbm_am.visible_bias).expand(*batch_sizes, -1)\n hb_grad = torch.zeros_like(self.rbm_am.hidden_bias).expand(*batch_sizes, -1)\n\n if phase:\n temp = (v.unsqueeze(1) - vp.unsqueeze(0)) if expand else (v - vp)\n sig = cplx.scalar_mult(sig, cplx.I)\n\n ab_grad_real = torch.zeros_like(self.rbm_ph.aux_bias).expand(\n *batch_sizes, -1\n )\n ab_grad_imag = ab_grad_real.clone()\n else:\n temp = (v.unsqueeze(1) + vp.unsqueeze(0)) if expand else (v + vp)\n\n ab_grad_real = cplx.real(sig)\n ab_grad_imag = cplx.imag(sig)\n\n U_grad = 0.5 * torch.einsum(\"c...j,...k->c...jk\", sig, temp)\n U_grad_real = cplx.real(U_grad)\n U_grad_imag = cplx.imag(U_grad)\n\n vec_real = [\n W_grad.view(*batch_sizes, -1),\n U_grad_real.view(*batch_sizes, -1),\n vb_grad,\n hb_grad,\n ab_grad_real,\n ]\n vec_imag = [\n W_grad.view(*batch_sizes, -1).clone(),\n U_grad_imag.view(*batch_sizes, -1),\n vb_grad.clone(),\n hb_grad.clone(),\n ab_grad_imag,\n ]\n\n if unsqueezed and not expand:\n vec_real = [grad.squeeze_(0) for grad in vec_real]\n vec_imag = [grad.squeeze_(0) for grad in vec_imag]\n\n return cplx.make_complex(\n 
torch.cat(vec_real, dim=-1), torch.cat(vec_imag, dim=-1)\n )\n\n def rho(self, v, vp=None, expand=True):\n r\"\"\"Computes the matrix elements of the (unnormalized) density matrix.\n If `expand` is `True`, will return a complex matrix\n :math:`A_{ij} = \\langle\\sigma_i|\\widetilde{\\rho}|\\sigma'_j\\rangle`.\n Otherwise will return a complex vector\n :math:`A_{i} = \\langle\\sigma_i|\\widetilde{\\rho}|\\sigma'_i\\rangle`.\n\n :param v: One of the visible states, :math:`\\sigma`.\n :type v: torch.Tensor\n :param vp: The other visible state, :math:`\\sigma'`.\n If `None`, will be set to `v`.\n :type vp: torch.Tensor\n :param expand: Whether to return a matrix (`True`) or a vector (`False`).\n :type expand: bool\n\n :returns: The elements of the current density matrix\n :math:`\\langle\\sigma|\\widetilde{\\rho}|\\sigma'\\rangle`\n :rtype: torch.Tensor\n \"\"\"\n if expand is False and vp is None:\n return cplx.make_complex(self.probability(v))\n elif vp is None:\n vp = v\n\n pi_ = self.pi(v, vp, expand=expand)\n amp = (self.rbm_am.gamma(v, vp, eta=+1, expand=expand) + cplx.real(pi_)).exp()\n phase = self.rbm_ph.gamma(v, vp, eta=-1, expand=expand) + cplx.imag(pi_)\n\n return cplx.make_complex(amp * phase.cos(), amp * phase.sin())\n\n def importance_sampling_numerator(self, vp, v):\n return self.rho(vp, v, expand=False)\n\n def importance_sampling_denominator(self, v):\n return cplx.make_complex(self.probability(v))\n\n def rotated_gradient(self, basis, sample):\n r\"\"\"Computes the gradients rotated into the measurement basis\n\n :param basis: The bases in which the measurement is made\n :type basis: numpy.ndarray\n :param sample: The measurement (either 0 or 1)\n :type sample: torch.Tensor\n\n :returns: A list of two tensors, representing the rotated gradients\n of the amplitude and phase RBMs\n :rtype: list[torch.Tensor, torch.Tensor]\n \"\"\"\n UrhoU, UrhoU_v, v = unitaries.rotate_rho_probs(\n self, basis, sample, include_extras=True\n )\n inv_UrhoU = 1 / (UrhoU + 1e-8) # avoid dividing by zero\n\n raw_grads = [self.am_grads(v), self.ph_grads(v)]\n\n rotated_grad = [\n -cplx.einsum(\"ijb,ijbg->bg\", UrhoU_v, g, imag_part=False) for g in raw_grads\n ]\n\n return [torch.einsum(\"b,bg->g\", inv_UrhoU, g) for g in rotated_grad]\n\n def am_grads(self, v):\n r\"\"\"Computes the gradients of the amplitude RBM for given input states\n\n :param v: The first input state, :math:`\\sigma`\n :type v: torch.Tensor\n\n :returns: The gradients of all amplitude RBM parameters\n :rtype: torch.Tensor\n \"\"\"\n return self.rbm_am.gamma_grad(v, v, eta=+1, expand=True) + self.pi_grad(\n v, v, phase=False, expand=True\n )\n\n def ph_grads(self, v):\n r\"\"\"Computes the gradients of the phase RBM for given input states\n\n :param v: The first input state, :math:`\\sigma`\n :type v: torch.Tensor\n\n :returns: The gradients of all phase RBM parameters\n :rtype: torch.Tensor\n \"\"\"\n return cplx.scalar_mult( # need to multiply Gamma- by i\n self.rbm_ph.gamma_grad(v, v, eta=-1, expand=True), cplx.I\n ) + self.pi_grad(v, v, phase=True, expand=True)\n\n def fit(\n self,\n data,\n epochs=100,\n pos_batch_size=100,\n neg_batch_size=None,\n k=1,\n lr=1,\n input_bases=None,\n progbar=False,\n starting_epoch=1,\n time=False,\n callbacks=None,\n optimizer=torch.optim.SGD,\n optimizer_args=None,\n scheduler=None,\n scheduler_args=None,\n **kwargs,\n ):\n if input_bases is None:\n raise ValueError(\"input_bases must be provided to train a DensityMatrix!\")\n else:\n super().fit(\n data=data,\n epochs=epochs,\n 
pos_batch_size=pos_batch_size,\n neg_batch_size=neg_batch_size,\n k=k,\n lr=lr,\n input_bases=input_bases,\n progbar=progbar,\n starting_epoch=starting_epoch,\n time=time,\n callbacks=callbacks,\n optimizer=optimizer,\n optimizer_args=optimizer_args,\n scheduler=scheduler,\n scheduler_args=scheduler_args,\n **kwargs,\n )\n\n @staticmethod\n def autoload(location, gpu=False):\n state_dict = torch.load(location)\n nn_state = DensityMatrix(\n unitary_dict=state_dict[\"unitary_dict\"],\n num_visible=len(state_dict[\"rbm_am\"][\"visible_bias\"]),\n num_hidden=len(state_dict[\"rbm_am\"][\"hidden_bias\"]),\n num_aux=len(state_dict[\"rbm_am\"][\"aux_bias\"]),\n gpu=gpu,\n )\n nn_state.load(location)\n return nn_state\n"
] | [
[
"torch.load",
"torch.nn.functional.linear",
"torch.zeros_like",
"torch.cuda.is_available",
"torch.einsum",
"torch.device",
"torch.cat"
]
] |
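`pi_grad` in the row above builds its gradient blocks with `torch.einsum("c...j,...k->c...jk", sig, temp)`, a batched outer product that carries a leading real/imaginary axis through broadcasting. A shape-level sketch with made-up sizes:

import torch

sig = torch.randn(2, 5, 3)   # (real/imag axis, batch, aux units)
temp = torch.randn(5, 4)     # (batch, visible units)

out = torch.einsum("c...j,...k->c...jk", sig, temp)
print(out.shape)             # torch.Size([2, 5, 3, 4])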
gsc2001/ConvexNet | [
"a17609bd5bca0a02b6330b1ad8035f2b280109f0"
] | [
"src/models/densenet/model.py"
] | [
"\"\"\"\nVanilla DenseNet implementation\nPaper: https://arxiv.org/abs/1608.06993\nImplementation taken from: https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py\n\"\"\"\nimport re\nfrom collections import OrderedDict\nfrom functools import partial\nfrom typing import Any, List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as cp\nfrom torch import Tensor\n\nclass _DenseLayer(nn.Module):\n def __init__(\n self, num_input_features: int, growth_rate: int, bn_size: int, drop_rate: float, memory_efficient: bool = False\n ) -> None:\n super().__init__()\n self.norm1: nn.BatchNorm2d\n self.add_module(\"norm1\", nn.BatchNorm2d(num_input_features))\n self.relu1: nn.ReLU\n self.add_module(\"relu1\", nn.ReLU(inplace=True))\n self.conv1: nn.Conv2d\n self.add_module(\n \"conv1\", nn.Conv2d(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)\n )\n self.norm2: nn.BatchNorm2d\n self.add_module(\"norm2\", nn.BatchNorm2d(bn_size * growth_rate))\n self.relu2: nn.ReLU\n self.add_module(\"relu2\", nn.ReLU(inplace=True))\n self.conv2: nn.Conv2d\n self.add_module(\n \"conv2\", nn.Conv2d(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)\n )\n self.drop_rate = float(drop_rate)\n self.memory_efficient = memory_efficient\n\n def bn_function(self, inputs: List[Tensor]) -> Tensor:\n concated_features = torch.cat(inputs, 1)\n bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features))) # noqa: T484\n return bottleneck_output\n\n # todo: rewrite when torchscript supports any\n def any_requires_grad(self, input: List[Tensor]) -> bool:\n for tensor in input:\n if tensor.requires_grad:\n return True\n return False\n\n @torch.jit.unused # noqa: T484\n def call_checkpoint_bottleneck(self, input: List[Tensor]) -> Tensor:\n def closure(*inputs):\n return self.bn_function(inputs)\n\n return cp.checkpoint(closure, *input)\n\n @torch.jit._overload_method # noqa: F811\n def forward(self, input: List[Tensor]) -> Tensor: # noqa: F811\n pass\n\n @torch.jit._overload_method # noqa: F811\n def forward(self, input: Tensor) -> Tensor: # noqa: F811\n pass\n\n # torchscript does not yet support *args, so we overload method\n # allowing it to take either a List[Tensor] or single Tensor\n def forward(self, input: Tensor) -> Tensor: # noqa: F811\n if isinstance(input, Tensor):\n prev_features = [input]\n else:\n prev_features = input\n\n if self.memory_efficient and self.any_requires_grad(prev_features):\n if torch.jit.is_scripting():\n raise Exception(\"Memory Efficient not supported in JIT\")\n\n bottleneck_output = self.call_checkpoint_bottleneck(prev_features)\n else:\n bottleneck_output = self.bn_function(prev_features)\n\n new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))\n if self.drop_rate > 0:\n new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)\n return new_features\n\n\nclass _DenseBlock(nn.ModuleDict):\n _version = 2\n\n def __init__(\n self,\n num_layers: int,\n num_input_features: int,\n bn_size: int,\n growth_rate: int,\n drop_rate: float,\n memory_efficient: bool = False,\n ) -> None:\n super().__init__()\n for i in range(num_layers):\n layer = _DenseLayer(\n num_input_features + i * growth_rate,\n growth_rate=growth_rate,\n bn_size=bn_size,\n drop_rate=drop_rate,\n memory_efficient=memory_efficient,\n )\n self.add_module(\"denselayer%d\" % (i + 1), layer)\n\n def forward(self, init_features: Tensor) -> 
Tensor:\n features = [init_features]\n for name, layer in self.items():\n new_features = layer(features)\n features.append(new_features)\n return torch.cat(features, 1)\n\n\nclass _Transition(nn.Sequential):\n def __init__(self, num_input_features: int, num_output_features: int) -> None:\n super().__init__()\n self.add_module(\"norm\", nn.BatchNorm2d(num_input_features))\n self.add_module(\"relu\", nn.ReLU(inplace=True))\n self.add_module(\"conv\", nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))\n self.add_module(\"pool\", nn.AvgPool2d(kernel_size=2, stride=2))\n\n\nclass DenseNet(nn.Module):\n r\"\"\"Densenet-BC model class, based on\n `\"Densely Connected Convolutional Networks\" <https://arxiv.org/pdf/1608.06993.pdf>`_.\n Args:\n growth_rate (int) - how many filters to add each layer (`k` in paper)\n block_config (list of 4 ints) - how many layers in each pooling block\n num_init_features (int) - the number of filters to learn in the first convolution layer\n bn_size (int) - multiplicative factor for number of bottle neck layers\n (i.e. bn_size * k features in the bottleneck layer)\n drop_rate (float) - dropout rate after each dense layer\n num_classes (int) - number of classification classes\n memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,\n but slower. Default: *False*. See `\"paper\" <https://arxiv.org/pdf/1707.06990.pdf>`_.\n \"\"\"\n\n def __init__(\n self,\n growth_rate: int = 32,\n block_config: Tuple[int, int, int, int] = (6, 12, 24, 16),\n num_init_features: int = 64,\n bn_size: int = 4,\n drop_rate: float = 0,\n num_classes: int = 1000,\n memory_efficient: bool = False,\n ) -> None:\n\n super().__init__()\n\n # First convolution\n self.features = nn.Sequential(\n OrderedDict(\n [\n (\"conv0\", nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),\n (\"norm0\", nn.BatchNorm2d(num_init_features)),\n (\"relu0\", nn.ReLU(inplace=True)),\n (\"pool0\", nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),\n ]\n )\n )\n\n # Each denseblock\n num_features = num_init_features\n for i, num_layers in enumerate(block_config):\n block = _DenseBlock(\n num_layers=num_layers,\n num_input_features=num_features,\n bn_size=bn_size,\n growth_rate=growth_rate,\n drop_rate=drop_rate,\n memory_efficient=memory_efficient,\n )\n self.features.add_module(\"denseblock%d\" % (i + 1), block)\n num_features = num_features + num_layers * growth_rate\n if i != len(block_config) - 1:\n trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)\n self.features.add_module(\"transition%d\" % (i + 1), trans)\n num_features = num_features // 2\n\n # Final batch norm\n self.features.add_module(\"norm5\", nn.BatchNorm2d(num_features))\n\n # Linear layer\n self.classifier = nn.Linear(num_features, num_classes)\n\n # Official init from torch repo.\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x: Tensor) -> Tensor:\n features = self.features(x)\n out = F.relu(features, inplace=True)\n out = F.adaptive_avg_pool2d(out, (1, 1))\n out = torch.flatten(out, 1)\n out = self.classifier(out)\n return out\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_",
"torch.utils.checkpoint.checkpoint",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.functional.dropout",
"torch.nn.init.constant_",
"torch.flatten",
"torch.nn.functional.relu",
"torch.nn.ReLU",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.Conv2d",
"torch.jit.is_scripting",
"torch.nn.AvgPool2d",
"torch.cat"
]
] |
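The `memory_efficient` path in the DenseNet row above trades compute for memory via `torch.utils.checkpoint`: bottleneck activations are not stored during the forward pass, they are recomputed during backward. The same mechanism in isolation:

import torch
import torch.utils.checkpoint as cp

def bottleneck(x):
    return torch.relu(x) ** 2

x = torch.randn(4, 8, requires_grad=True)
y = cp.checkpoint(bottleneck, x)  # intermediates inside bottleneck are recomputed on backward
y.sum().backward()
print(x.grad.shape)               # torch.Size([4, 8])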
gladcolor/seamseg | [
"9e6c7e2828f32b311a7b0c16b279ac194e8aaf94"
] | [
"seamseg/utils/coco_ap.py"
] | [
"import json\nimport tempfile\nimport time\nfrom collections import defaultdict\nfrom os import path, remove\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nfrom PIL import Image\nfrom pycocotools.coco import COCO as _COCO\nfrom pycocotools.cocoeval import COCOeval\nfrom pycocotools.mask import encode as mask_encode\n\nfrom .bbx import invert_roi_bbx, extract_boxes\nfrom .parallel import PackedSequence\nfrom .roi_sampling import roi_sampling\n\n\ndef process_prediction(bbx_pred, cls_pred, obj_pred, msk_pred, img_size, idx, original_size):\n # Move everything to CPU\n bbx_pred, cls_pred, obj_pred = (t.cpu() for t in (bbx_pred, cls_pred, obj_pred))\n msk_pred = msk_pred.cpu() if msk_pred is not None else None\n\n if msk_pred is not None:\n if isinstance(msk_pred, torch.Tensor):\n # ROI-stile prediction\n bbx_inv = invert_roi_bbx(bbx_pred, list(msk_pred.shape[-2:]), list(img_size))\n bbx_idx = torch.arange(0, msk_pred.size(0), dtype=torch.long)\n msk_pred = roi_sampling(msk_pred.unsqueeze(1).sigmoid(), bbx_inv, bbx_idx, list(img_size), padding=\"zero\")\n msk_pred = msk_pred.squeeze(1) > 0.5\n elif isinstance(msk_pred, PackedSequence):\n # Seeds-style prediction\n msk_pred.data = msk_pred.data > 0.5\n msk_pred_exp = msk_pred.data.new_zeros(len(msk_pred), img_size[0], img_size[1])\n\n for it, (msk_pred_i, bbx_pred_i) in enumerate(zip(msk_pred, bbx_pred)):\n i, j = int(bbx_pred_i[0].item()), int(bbx_pred_i[1].item())\n msk_pred_exp[it, i:i + msk_pred_i.size(0), j:j + msk_pred_i.size(1)] = msk_pred_i\n\n msk_pred = msk_pred_exp\n\n # Convert bbx and redo clamping\n bbx_pred[:, [0, 2]] = (bbx_pred[:, [0, 2]] / img_size[0] * original_size[0]).clamp(min=0, max=original_size[0])\n bbx_pred[:, [1, 3]] = (bbx_pred[:, [1, 3]] / img_size[1] * original_size[1]).clamp(min=0, max=original_size[1])\n bbx_pred_size = bbx_pred[:, 2:] - bbx_pred[:, :2]\n\n outs = []\n for i, (bbx_pred_i, bbx_pred_size_i, cls_pred_i, obj_pred_i) in \\\n enumerate(zip(bbx_pred, bbx_pred_size, cls_pred, obj_pred)):\n out = dict(image_id=idx, category_id=int(cls_pred_i.item()), score=float(obj_pred_i.item()))\n\n out[\"bbox\"] = [\n float(bbx_pred_i[1].item()),\n float(bbx_pred_i[0].item()),\n float(bbx_pred_size_i[1].item()),\n float(bbx_pred_size_i[0].item()),\n ]\n\n # Expand and convert mask if present\n if msk_pred is not None:\n segmentation = Image.fromarray(msk_pred[i].numpy()).resize(original_size[::-1], Image.NEAREST)\n\n out[\"segmentation\"] = mask_encode(np.asfortranarray(np.array(segmentation)))\n out[\"segmentation\"][\"counts\"] = str(out[\"segmentation\"][\"counts\"], \"utf-8\")\n\n outs.append(out)\n\n return outs\n\n\ndef process_panoptic_prediction(panoptic_pred, num_stuff, idx, img_size, original_size):\n # Extract panoptic prediction\n msk_pred, cat_pred, obj_pred, iscrowd_pred = panoptic_pred\n\n bbx_pred = extract_boxes(msk_pred, cat_pred.numel())\n\n # Convert bbx and redo clamping\n bbx_pred[:, [0, 2]] = (bbx_pred[:, [0, 2]] / img_size[0] * original_size[0]).clamp(min=0, max=original_size[0])\n bbx_pred[:, [1, 3]] = (bbx_pred[:, [1, 3]] / img_size[1] * original_size[1]).clamp(min=0, max=original_size[1])\n bbx_pred_size = bbx_pred[:, 2:] - bbx_pred[:, :2]\n\n outs = []\n for i, (obj_i, cat_i, bbx_i, iscrowd_i, bbx_size_i) in enumerate(zip(\n obj_pred, cat_pred, bbx_pred, iscrowd_pred, bbx_pred_size)):\n if iscrowd_i.item() == 1 or cat_i.item() < num_stuff or cat_i.item() == 255:\n continue\n out = dict(image_id=idx, category_id=int(cat_i.item()), score=float(obj_i.item()))\n\n 
out[\"bbox\"] = [\n float(bbx_i[1].item()),\n float(bbx_i[0].item()),\n float(bbx_size_i[1].item()),\n float(bbx_size_i[0].item()),\n ]\n\n segmentation = msk_pred == i\n segmentation = Image.fromarray(segmentation.numpy()).resize(original_size[::-1], Image.NEAREST)\n out[\"segmentation\"] = mask_encode(np.asfortranarray(np.array(segmentation)))\n out[\"segmentation\"][\"counts\"] = str(out[\"segmentation\"][\"counts\"], \"utf-8\")\n\n outs.append(out)\n\n return outs\n\n\ndef summarize(predictions, annotations_file, img_list, mask=False):\n msk_map = 0\n with tempfile.NamedTemporaryFile(\"w\") as fid:\n json.dump(predictions, fid)\n fid.flush()\n\n # Detection\n gt = COCO(annotations_file, img_list)\n pred = gt.loadRes(fid.name)\n pred_eval = COCOeval(gt, pred, \"bbox\")\n pred_eval.evaluate()\n pred_eval.accumulate()\n pred_eval.summarize()\n det_map = pred_eval.stats[0]\n\n if mask:\n pred_eval = COCOeval(gt, pred, \"segm\")\n pred_eval.evaluate()\n pred_eval.accumulate()\n pred_eval.summarize()\n msk_map = pred_eval.stats[0]\n\n return det_map, msk_map\n\n\ndef summarize_mp(predictions, annotations_file, img_list, log_dir, mask=False):\n # Write partial results to file (all workers)\n rank = dist.get_rank()\n with open(path.join(log_dir, \"coco_ap_{:02d}.json\".format(rank)), \"w\") as fid:\n json.dump(predictions, fid)\n with open(path.join(log_dir, \"img_list_{:02d}.json\".format(rank)), \"w\") as fid:\n json.dump(img_list, fid)\n\n dist.barrier()\n\n # Merge results from all workers and run evaluation (only rank 0)\n if rank == 0:\n predictions = []\n img_list = []\n\n for i in range(dist.get_world_size()):\n coco_ap_file = path.join(log_dir, \"coco_ap_{:02d}.json\".format(i))\n with open(coco_ap_file) as fid:\n predictions += json.load(fid)\n remove(coco_ap_file)\n\n img_list_file = path.join(log_dir, \"img_list_{:02d}.json\".format(i))\n with open(img_list_file) as fid:\n img_list += json.load(fid)\n remove(img_list_file)\n\n det_map, msk_map = summarize(predictions, annotations_file, img_list, mask)\n else:\n det_map, msk_map = 0, 0\n\n dist.barrier()\n\n return det_map, msk_map\n\n\nclass COCO(_COCO):\n \"\"\"Modified COCO class that loads only a subset of\"\"\"\n\n def __init__(self, annotation_file, img_list):\n # load dataset\n self.dataset, self.anns, self.cats, self.imgs = dict(), dict(), dict(), dict()\n self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)\n\n print('loading annotations into memory...')\n tic = time.time()\n dataset = json.load(open(annotation_file, 'r'))\n assert type(dataset) == dict, 'annotation file format {} not supported'.format(type(dataset))\n print('Done (t={:0.2f}s)'.format(time.time() - tic))\n\n # Clean-up dataset, removing all images and annotations that are not in img_list\n img_list = set(img_list)\n dataset[\"images\"] = [img for img in dataset[\"images\"] if img[\"id\"] in img_list]\n dataset[\"annotations\"] = [ann for ann in dataset[\"annotations\"] if ann[\"image_id\"] in img_list]\n\n self.dataset = dataset\n self.createIndex()\n"
] | [
[
"numpy.array",
"torch.distributed.barrier",
"torch.distributed.get_world_size",
"torch.distributed.get_rank"
]
] |
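`process_prediction` above encodes binary masks with `pycocotools.mask.encode`, which expects a Fortran-ordered uint8 array and returns an RLE dict whose `counts` field is bytes, hence the `str(..., "utf-8")` conversion in the row. A small sketch, assuming pycocotools is installed:

import numpy as np
from pycocotools.mask import encode as mask_encode

mask = np.zeros((32, 32), dtype=np.uint8)
mask[8:16, 8:16] = 1

rle = mask_encode(np.asfortranarray(mask))
rle["counts"] = str(rle["counts"], "utf-8")  # bytes -> str, as in the row above
print(rle["size"])                           # [32, 32]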
GillesVandewiele/pyShapelets | [
"d7e91150c17bf0f5fed55dc36d0c4d2d447e80c9"
] | [
"pyshapelets/lts_smaller_shap_dicts.py"
] | [
"import time\nfrom collections import Counter, defaultdict\nimport warnings; warnings.filterwarnings('ignore')\nimport glob\nimport re\nimport ast\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom algorithms import ShapeletTransformer\nfrom extractors.extractor import MultiGeneticExtractor\nfrom data.load_all_datasets import load_data_train_test\n\nfrom sklearn.metrics import accuracy_score, log_loss\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import GridSearchCV\n\nfrom tslearn.shapelets import ShapeletModel\n\n\ndef parse_shapelets(shapelets):\n shapelets = shapelets.replace(']', '],')[:-2]\n shapelets = re.sub(r'\\s+', ', ', shapelets)\n shapelets = re.sub(r',+', ',', shapelets)\n shapelets = shapelets.replace('],[', '], [')\n shapelets = shapelets.replace('[,', '[')\n shapelets = '[' + shapelets + ']'\n shapelets = re.sub(r',\\s+]', ']', shapelets)\n return ast.literal_eval(shapelets)\n\ndef fit_rf(X_distances_train, y_train, X_distances_test, y_test, out_path):\n rf = GridSearchCV(RandomForestClassifier(), {'n_estimators': [10, 25, 50, 100, 500], 'max_depth': [None, 3, 7, 15]})\n rf.fit(X_distances_train, y_train)\n \n hard_preds = rf.predict(X_distances_test)\n proba_preds = rf.predict_proba(X_distances_test)\n\n print(\"[RF] Accuracy = {}\".format(accuracy_score(y_test, hard_preds)))\n print(\"[RF] Logloss = {}\".format(log_loss(y_test, proba_preds)))\n\n hard_preds = pd.DataFrame(hard_preds, columns=['prediction'])\n proba_preds = pd.DataFrame(proba_preds, columns=['proba_{}'.format(x) for x in set(list(y_train) + list(y_test))])\n\n hard_preds.to_csv(out_path.split('.')[0]+'_rf_hard.csv')\n proba_preds.to_csv(out_path.split('.')[0]+'_rf_proba.csv')\n\ndef fit_lr(X_distances_train, y_train, X_distances_test, y_test, out_path):\n lr = GridSearchCV(LogisticRegression(), {'penalty': ['l1', 'l2'], 'C': [0.001, 0.01, 0.1, 1.0, 10.0]})\n lr.fit(X_distances_train, y_train)\n \n hard_preds = lr.predict(X_distances_test)\n proba_preds = lr.predict_proba(X_distances_test)\n\n print(\"[LR] Accuracy = {}\".format(accuracy_score(y_test, hard_preds)))\n print(\"[LR] Logloss = {}\".format(log_loss(y_test, proba_preds)))\n\n hard_preds = pd.DataFrame(hard_preds, columns=['prediction'])\n proba_preds = pd.DataFrame(proba_preds, columns=['proba_{}'.format(x) for x in set(list(y_train) + list(y_test))])\n\n hard_preds.to_csv(out_path.split('.')[0]+'_lr_hard.csv')\n proba_preds.to_csv(out_path.split('.')[0]+'_lr_proba.csv')\n\ndef fit_svm(X_distances_train, y_train, X_distances_test, y_test, out_path):\n svc = GridSearchCV(SVC(kernel='linear', probability=True), {'C': [0.001, 0.01, 0.1, 1.0, 10.0]})\n svc.fit(X_distances_train, y_train)\n \n hard_preds = svc.predict(X_distances_test)\n proba_preds = svc.predict_proba(X_distances_test)\n\n print(\"[SVM] Accuracy = {}\".format(accuracy_score(y_test, hard_preds)))\n print(\"[SVM] Logloss = {}\".format(log_loss(y_test, proba_preds)))\n\n hard_preds = pd.DataFrame(hard_preds, columns=['prediction'])\n proba_preds = pd.DataFrame(proba_preds, columns=['proba_{}'.format(x) for x in set(list(y_train) + list(y_test))])\n\n hard_preds.to_csv(out_path.split('.')[0]+'_svm_hard.csv')\n proba_preds.to_csv(out_path.split('.')[0]+'_svm_proba.csv')\n\ndef fit_lts(X_train, y_train, X_test, y_test, shap_dict, reg, max_it, shap_out_path, pred_out_path, timing_out_path):\n # Fit LTS model, print metrics on test-set, 
write away predictions and shapelets\n clf = ShapeletModel(n_shapelets_per_size=shap_dict, \n max_iter=max_it, verbose_level=0, batch_size=1,\n optimizer='sgd', weight_regularizer=reg)\n\n start = time.time()\n clf.fit(\n np.reshape(\n X_train, \n (X_train.shape[0], X_train.shape[1], 1)\n ), \n y_train\n )\n learning_time = time.time() - start\n\n print('Learning shapelets took {}s'.format(learning_time))\n\n with open(shap_out_path, 'w+') as ofp:\n for shap in clf.shapelets_:\n ofp.write(str(np.reshape(shap, (-1))) + '\\n')\n\n with open(timing_out_path, 'w+') as ofp:\n ofp.write(str(learning_time))\n\n X_distances_train = clf.transform(X_train)\n X_distances_test = clf.transform(X_test)\n\n print('Max distance value = {}'.format(np.max(X_distances_train)))\n\n fit_rf(X_distances_train, y_train, X_distances_test, y_test, pred_out_path)\n fit_lr(X_distances_train, y_train, X_distances_test, y_test, pred_out_path)\n fit_svm(X_distances_train, y_train, X_distances_test, y_test, pred_out_path)\n\nhyper_parameters_lts = {\n\t'Adiac': \t\t\t\t\t[0.3, 0.2, 3, 0.01, 10000],\n\t'Beef': \t\t\t\t\t[0.15, 0.125, 3, 0.01, 10000],\n\t'BeetleFly': \t\t\t\t[0.15, 0.125, 1, 0.01, 5000],\n\t'BirdChicken': \t\t\t\t[0.3, 0.075, 1, 0.1, 10000],\n\t'ChlorineConcentration': [0.3, 0.2, 3, 0.01, 10000],\n\t'Coffee': \t\t\t\t\t[0.05, 0.075, 2, 0.01, 5000],\n\t'DiatomSizeReduction': \t\t[0.3, 0.175, 2, 0.01, 10000],\n\t'ECGFiveDays': \t\t\t\t[0.05, 0.125, 2, 0.01, 10000],\n\t'FaceFour': \t\t\t\t[0.3, 0.175, 3, 1.0, 5000],\n\t'GunPoint': \t\t\t\t[0.15, 0.2, 3, 0.1, 10000],\n\t'ItalyPowerDemand':\t\t\t[0.3, 0.2, 3, 0.01, 5000],\n\t'Lightning7': \t\t\t\t[0.05, 0.075, 3, 1, 5000],\n\t'MedicalImages': \t\t\t[0.3, 0.2, 2, 1, 10000],\n\t'MoteStrain': \t\t\t\t[0.3, 0.2, 3, 1, 10000],\n\t#NOT AVAILABLE#'Otoliths': \t\t\t\t[0.15, 0.125, 3, 0.01, 2000],\n\t'SonyAIBORobotSurface1': \t[0.3, 0.125, 2, 0.01, 10000],\n\t'SonyAIBORobotSurface2': \t[0.3, 0.125, 2, 0.01, 10000],\n\t'Symbols': \t\t\t\t\t[0.05, 0.175, 1, 0.1, 5000],\n\t'SyntheticControl': \t\t[0.15, 0.125, 3, 0.01, 5000],\n\t'Trace': \t\t\t\t\t[0.15, 0.125, 2, 0.1, 10000],\n\t'TwoLeadECG': \t\t\t\t[0.3, 0.075, 1, 0.1, 10000]\n}\n\ndatasets = [\n 'Adiac',\n 'Beef',\n 'BeetleFly',\n 'BirdChicken',\n 'ChlorineConcentration',\n 'Coffee',\n 'ECGFiveDays',\n 'FaceFour',\n 'GunPoint',\n 'ItalyPowerDemand',\n 'Lightning7',\n 'MedicalImages',\n 'MoteStrain',\n 'SonyAIBORobotSurface1',\n 'SonyAIBORobotSurface2',\n 'Symbols',\n 'SyntheticControl',\n 'Trace',\n 'TwoLeadECG',\n 'DiatomSizeReduction'\n]\n\nlearning_sizes = defaultdict(list)\ngenetic_sizes = defaultdict(list)\n\nmetadata = sorted(load_data_train_test(), key=lambda x: x['train']['n_samples']**2*x['train']['n_features']**3)\n\nfor dataset in metadata:\n\n train_df = pd.read_csv(dataset['train']['data_path'])\n test_df = pd.read_csv(dataset['test']['data_path'])\n X_train = train_df.drop('target', axis=1).values\n y_train = train_df['target']\n X_test = test_df.drop('target', axis=1).values\n y_test = test_df['target']\n\n map_dict = {}\n for j, c in enumerate(np.unique(y_train)):\n map_dict[c] = j\n y_train = y_train.map(map_dict) \n y_test = y_test.map(map_dict)\n\n y_train = y_train.values\n y_test = y_test.values\n\n nr_shap, l, r, reg, max_it = hyper_parameters_lts[dataset['train']['name']]\n \n files = glob.glob('results/lts_vs_genetic/{}_genetic_shapelets*.txt'.format(dataset['train']['name']))\n if len(files):\n\t sizes = []\n\t for f in files:\n\t shaps = parse_shapelets(open(f, 'r').read())\n\t 
genetic_sizes[dataset['train']['name']].append(len(shaps))\n\t for s in shaps:\n\t \tsizes.append(len(s))\n\t \n\t shap_dict_cntr = Counter(np.random.choice(sizes, size=int(np.mean(genetic_sizes[dataset['train']['name']]))))\n\t shap_dict = {}\n\t for c in shap_dict_cntr:\n\t \tshap_dict[int(c)] = int(shap_dict_cntr[c])\n\n\t fit_lts(X_train, y_train, X_test, y_test, dict(shap_dict), reg, max_it,\n\t 'results/lts_smaller/{}_learned_shapelets_{}.txt'.format(dataset['train']['name'], int(time.time())), \n\t 'results/lts_smaller/{}_learned_shapelets_predictions_{}.csv'.format(dataset['train']['name'], int(time.time())), \n\t 'results/lts_smaller/{}_learned_runtime_{}.csv'.format(dataset['train']['name'], int(time.time()))\n\t )"
] | [
[
"sklearn.svm.SVC",
"pandas.read_csv",
"pandas.DataFrame",
"numpy.reshape",
"sklearn.metrics.accuracy_score",
"numpy.max",
"sklearn.metrics.log_loss",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.linear_model.LogisticRegression",
"numpy.unique",
"numpy.mean"
]
] |
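The `fit_rf`/`fit_lr`/`fit_svm` helpers above share one scikit-learn pattern: wrap an estimator in `GridSearchCV`, fit on shapelet-distance features, then emit hard and probabilistic predictions. A stripped-down sketch of that pattern with toy data:

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

X = np.random.rand(60, 5)            # stand-in for shapelet-distance features
y = np.random.randint(0, 2, 60)

clf = GridSearchCV(RandomForestClassifier(), {"n_estimators": [10, 50]})
clf.fit(X, y)
print(clf.predict(X[:3]), clf.predict_proba(X[:3]).shape)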
ngiambla/nnflex | [
"7c8bf46218ea70c6dad1efedf9e2069e41c4c3fa"
] | [
"operators/clip.py"
] | [
"''' clip.py:\n\nImplement's the clip ONNX node as a flexnode (for use with any accelerator)\n\n'''\nimport uuid\n\nimport numpy as np\n\nfrom operators.flexnode import FlexNode\nfrom core.defines import Operator\nfrom core.messaging import Message\n \nclass Clip(FlexNode):\n\n def __init__(self, onnx_node, inputs, outputs):\n FlexNode.__init__(self, onnx_node, inputs, outputs)\n self._min = -3.402823466e+38\n self._max = 3.402823466e+38\n\n if len(inputs) != 1 and len(inputs) != 3:\n raise ValueError(\"Clip can only have 1 or 3 inputs.\")\n\n self._input = inputs[0]\n\n if len(inputs) == 3:\n self._min = inputs[1]\n self._max = inputs[2] \n\n def map(self, memory_mapper):\n pass\n\n def unmap(self, memory_mapper):\n pass\n\n def _inputs2mem(self, memory_xfer_engine):\n pass\n\n def _mem2output(self, memory_xfer_engine):\n pass\n\n def compile(self, source, destinations):\n\n tile_commands = list()\n\n # Here, we are NOT generating tile_commands, (although, this is not difficult.)\n np.copyto(self._outputs[0], np.clip(self._input, self._min, self._max))\n\n return tile_commands\n"
] | [
[
"numpy.clip"
]
] |
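`Clip.compile` above implements the ONNX Clip semantics in a single NumPy call, writing the clamped input into a preallocated output buffer. The same two calls in isolation:

import numpy as np

inp = np.array([-5.0, 0.5, 7.0])
out = np.empty_like(inp)

np.copyto(out, np.clip(inp, -1.0, 1.0))  # clamp, then write into the output buffer
print(out)                               # [-1.   0.5  1. ]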
aroig/nnutil2 | [
"1fc77df351d4eee1166688e25a94287a5cfa27c4"
] | [
"nnutil2/layers/segment.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# nnutil2 - Tensorflow utilities for training neural networks\n# Copyright (c) 2019, Abdó Roig-Maranges <[email protected]>\n#\n# This file is part of 'nnutil2'.\n#\n# This file may be modified and distributed under the terms of the 3-clause BSD\n# license. See the LICENSE file for details.\n\nfrom typing import List\n\nimport tensorflow as tf\n\nfrom ..util import kwargs_for\nfrom .layer import Layer\n\nclass Segment(Layer):\n \"\"\"A sequential collection of layers\"\"\"\n def __init__(self, layers: List[Layer] = [], activation=None, **kwargs):\n super(Segment, self).__init__(**kwargs)\n\n self._segment_layers = layers\n self._segment_activation = tf.keras.activations.get(activation)\n self._segment_states = []\n\n def get_config(self):\n config = {\n 'layers': [ly.get_config() for ly in self._layers],\n 'activation': self._segment_activation\n }\n\n base_config = super(Segment, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def call(self, inputs, **kwargs):\n x = inputs\n self._segment_states.append(x)\n\n for l in self._segment_layers:\n layer_kwargs = kwargs_for(kwargs, l.call)\n x = l(x, **layer_kwargs)\n self._segment_states.append(x)\n\n if self._segment_activation is not None:\n x = self._segment_activation(x)\n self._segment_states.append(x)\n\n return x\n\n def compute_output_shape(self, input_shape):\n shape = input_shape\n for l in self._segment_layers:\n shape = l.compute_output_shape(shape)\n return shape\n\n @property\n def flat_layers(self):\n layers = []\n\n def add_layers(ly):\n if isinstance(ly, Segment):\n for ly2 in ly.layers:\n add_layers(ly2)\n else:\n layers.append(ly)\n\n add_layers(self)\n return layers\n\n @property\n def layers(self):\n return self._segment_layers\n\n @property\n def states(self):\n return self._segment_states\n"
] | [
[
"tensorflow.keras.activations.get"
]
] |
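`Segment.call` above is a fold over the child layers followed by an optional activation resolved with `tf.keras.activations.get`, which maps a string or callable identifier to a callable. A minimal standalone equivalent of that loop:

import tensorflow as tf

layers = [tf.keras.layers.Dense(16), tf.keras.layers.Dense(4)]
activation = tf.keras.activations.get("relu")  # resolves the name to a callable

x = tf.zeros([8, 32])
for layer in layers:
    x = layer(x)
if activation is not None:
    x = activation(x)
print(x.shape)  # (8, 4)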
briancylui/ALOCC-CVPR2018 | [
"78b6a1e8f3fcde8a46a88294926074a65ff0726a"
] | [
"train.py"
] | [
"import os\nimport numpy as np\nfrom models import ALOCC_Model\nfrom utils import pp, visualize, to_json, show_all_variables\nimport tensorflow as tf\n\nflags = tf.app.flags\nflags.DEFINE_integer(\"epoch\", 40, \"Epoch to train [25]\")\nflags.DEFINE_float(\"learning_rate\", 0.002, \"Learning rate of for adam [0.0002]\")\nflags.DEFINE_float(\"beta1\", 0.5, \"Momentum term of adam [0.5]\")\nflags.DEFINE_integer(\"attention_label\", 1, \"Conditioned label that growth attention of training label [1]\")\nflags.DEFINE_float(\"r_alpha\", 0.2, \"Refinement parameter [0.2]\")\nflags.DEFINE_integer(\"train_size\", np.inf, \"The size of train images [np.inf]\")\nflags.DEFINE_integer(\"batch_size\",128, \"The size of batch images [64]\")\nflags.DEFINE_integer(\"input_height\", 45, \"The size of image to use. [45]\")\nflags.DEFINE_integer(\"input_width\", None, \"The size of image to use. If None, same value as input_height [None]\")\nflags.DEFINE_integer(\"output_height\", 45, \"The size of the output images to produce [45]\")\nflags.DEFINE_integer(\"output_width\", None, \"The size of the output images to produce. If None, same value as output_height [None]\")\nflags.DEFINE_string(\"dataset\", \"UCSD\", \"The name of dataset [UCSD, mnist]\")\nflags.DEFINE_string(\"dataset_address\", \"./dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Train\", \"The path of dataset\")\nflags.DEFINE_string(\"input_fname_pattern\", \"*\", \"Glob pattern of filename of input images [*]\")\nflags.DEFINE_string(\"checkpoint_dir\", \"checkpoint\", \"Directory name to save the checkpoints [checkpoint]\")\nflags.DEFINE_string(\"log_dir\", \"log\", \"Directory name to save the log [log]\")\nflags.DEFINE_string(\"sample_dir\", \"samples\", \"Directory name to save the image samples [samples]\")\nflags.DEFINE_boolean(\"train\", True, \"True for training, False for testing [False]\")\nFLAGS = flags.FLAGS\n\n\ndef check_some_assertions():\n \"\"\"\n to check some assertions in inputs and also check sth else.\n \"\"\"\n if FLAGS.input_width is None:\n FLAGS.input_width = FLAGS.input_height\n if FLAGS.output_width is None:\n FLAGS.output_width = FLAGS.output_height\n\n if not os.path.exists(FLAGS.checkpoint_dir):\n os.makedirs(FLAGS.checkpoint_dir)\n if not os.path.exists(FLAGS.log_dir):\n os.makedirs(FLAGS.log_dir)\n if not os.path.exists(FLAGS.sample_dir):\n os.makedirs(FLAGS.sample_dir)\n\ndef main(_):\n \"\"\"\n The main function for training steps \n \"\"\"\n pp.pprint(flags.FLAGS.__flags)\n n_per_itr_print_results = 100\n kb_work_on_patch = True\n\n # ---------------------------------------------------------------------------------------------\n # ---------------------------------------------------------------------------------------------\n # Manual Switchs ------------------------------------------------------------------------------\n # ---------------------------------------------------------------------------------------------\n # DATASET PARAMETER : UCSD\n #FLAGS.dataset = 'UCSD'\n #FLAGS.dataset_address = './dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Train'\n\n nd_input_frame_size = (240, 360)\n nd_slice_size = (45, 45)\n n_stride = 25\n n_fetch_data = 600\n # ---------------------------------------------------------------------------------------------\n # # DATASET PARAMETER : MNIST\n # FLAGS.dataset = 'mnist'\n # FLAGS.dataset_address = './dataset/mnist'\n # nd_input_frame_size = (28, 28)\n # nd_slice_size = (28, 28)\n\n FLAGS.train = True\n\n FLAGS.input_width = nd_slice_size[0]\n FLAGS.input_height = nd_slice_size[1]\n 
FLAGS.output_width = nd_slice_size[0]\n FLAGS.output_height = nd_slice_size[1]\n\n FLAGS.sample_dir = 'export/'+FLAGS.dataset +'_%d.%d'%(nd_slice_size[0],nd_slice_size[1])\n FLAGS.input_fname_pattern = '*'\n\n check_some_assertions()\n\n # manual handling of GPU\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)\n run_config = tf.ConfigProto(gpu_options=gpu_options)\n run_config.gpu_options.allow_growth=True\n\n with tf.Session(config=run_config) as sess:\n tmp_model = ALOCC_Model(\n sess,\n input_width=FLAGS.input_width,\n input_height=FLAGS.input_height,\n output_width=FLAGS.output_width,\n output_height=FLAGS.output_height,\n batch_size=FLAGS.batch_size,\n sample_num=FLAGS.batch_size,\n attention_label=FLAGS.attention_label,\n r_alpha=FLAGS.r_alpha,\n dataset_name=FLAGS.dataset,\n dataset_address=FLAGS.dataset_address,\n input_fname_pattern=FLAGS.input_fname_pattern,\n checkpoint_dir=FLAGS.checkpoint_dir,\n is_training = FLAGS.train,\n log_dir=FLAGS.log_dir,\n sample_dir=FLAGS.sample_dir,\n nd_patch_size=nd_slice_size,\n n_stride=n_stride,\n n_per_itr_print_results=n_per_itr_print_results,\n kb_work_on_patch=kb_work_on_patch,\n nd_input_frame_size = nd_input_frame_size,\n n_fetch_data=n_fetch_data)\n\n #show_all_variables()\n\n if FLAGS.train:\n print('Program is on Train Mode')\n tmp_model.train(FLAGS)\n else:\n if not tmp_model.load(FLAGS.checkpoint_dir)[0]:\n print('Program is on Test Mode')\n raise Exception(\"[!] Train a model first, then run test mode from file test.py\")\n\nif __name__ == '__main__':\n tf.app.run()\n"
] | [
[
"tensorflow.GPUOptions",
"tensorflow.ConfigProto",
"tensorflow.Session",
"tensorflow.app.run"
]
] |
grahamwhiteuk/neutralizing-bias | [
"a6ef764046fcc68ac0daa612c160ec23a79d3e73"
] | [
"src/tagging/train.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\ntrain bert \n\npython tagging/train.py --train ../../data/v6/corpus.wordbiased.tag.train --test ../../data/v6/corpus.wordbiased.tag.test --working_dir TEST --train_batch_size 3 --test_batch_size 10 --hidden_size 32 --debug_skip\n\"\"\"\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\nfrom pytorch_pretrained_bert.optimization import BertAdam\n\nfrom collections import defaultdict\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler\nfrom tqdm import tqdm\nimport torch\nimport torch.nn as nn\nimport pickle\nimport sys\nimport os\nimport numpy as np\nfrom pytorch_pretrained_bert.modeling import BertForTokenClassification\nfrom torch.nn import CrossEntropyLoss\nfrom tensorboardX import SummaryWriter\nimport argparse\nimport sklearn.metrics as metrics\n\nimport model as tagging_model\nimport utils as tagging_utils\n\nimport sys; sys.path.append('.')\nfrom shared.data import get_dataloader\nfrom shared.args import ARGS\nfrom shared.constants import CUDA\n\n\n\n\nif not os.path.exists(ARGS.working_dir):\n os.makedirs(ARGS.working_dir)\n\nwith open(ARGS.working_dir + '/command.sh', 'w') as f:\n f.write('python' + ' '.join(sys.argv) + '\\n')\n\n\n\n\n# # # # # # # # ## # # # ## # # DATA # # # # # # # # ## # # # ## # #\n\n\n\nprint('LOADING DATA...')\ntokenizer = BertTokenizer.from_pretrained(ARGS.bert_model, cache_dir=ARGS.working_dir + '/cache')\ntok2id = tokenizer.vocab\ntok2id['<del>'] = len(tok2id)\n\ntrain_dataloader, num_train_examples = get_dataloader(\n ARGS.train, \n tok2id, ARGS.train_batch_size, \n ARGS.working_dir + '/train_data.pkl', \n categories_path=ARGS.categories_file)\neval_dataloader, num_eval_examples = get_dataloader(\n ARGS.test,\n tok2id, ARGS.test_batch_size, ARGS.working_dir + '/test_data.pkl',\n test=True, categories_path=ARGS.categories_file)\n\n# # # # # # # # ## # # # ## # # MODEL # # # # # # # # ## # # # ## # #\n\n\nprint('BUILDING MODEL...')\nif ARGS.tagger_from_debiaser:\n model = tagging_model.TaggerFromDebiaser(\n cls_num_labels=ARGS.num_categories, tok_num_labels=ARGS.num_tok_labels,\n tok2id=tok2id)\nelif ARGS.extra_features_top:\n model = tagging_model.BertForMultitaskWithFeaturesOnTop.from_pretrained(\n ARGS.bert_model,\n cls_num_labels=ARGS.num_categories,\n tok_num_labels=ARGS.num_tok_labels,\n cache_dir=ARGS.working_dir + '/cache',\n tok2id=tok2id)\nelif ARGS.extra_features_bottom:\n model = tagging_model.BertForMultitaskWithFeaturesOnBottom.from_pretrained(\n ARGS.bert_model,\n cls_num_labels=ARGS.num_categories,\n tok_num_labels=ARGS.num_tok_labels,\n cache_dir=ARGS.working_dir + '/cache',\n tok2id=tok2id)\nelse:\n model = tagging_model.BertForMultitask.from_pretrained(\n ARGS.bert_model,\n cls_num_labels=ARGS.num_categories,\n tok_num_labels=ARGS.num_tok_labels,\n cache_dir=ARGS.working_dir + '/cache',\n tok2id=tok2id)\nif CUDA:\n model = model.cuda()\n\nprint('PREPPING RUN...')\n\n# # # # # # # # ## # # # ## # # OPTIMIZER, LOSS # # # # # # # # ## # # # ## # #\n\n\noptimizer = tagging_utils.build_optimizer(\n model, int((num_train_examples * ARGS.epochs) / ARGS.train_batch_size),\n ARGS.learning_rate)\n\nloss_fn = tagging_utils.build_loss_fn()\n\n# # # # # # # # ## # # # ## # # TRAIN # # # # # # # # ## # # # ## # #\n\nwriter = SummaryWriter(ARGS.working_dir)\n\n\nprint('INITIAL EVAL...')\nmodel.eval()\nresults = tagging_utils.run_inference(model, eval_dataloader, loss_fn, tokenizer)\nwriter.add_scalar('eval/tok_loss', np.mean(results['tok_loss']), 0)\nwriter.add_scalar('eval/tok_acc', 
np.mean(results['labeling_hits']), 0)\n\nprint('TRAINING...')\nmodel.train()\nfor epoch in range(ARGS.epochs):\n print('STARTING EPOCH ', epoch)\n losses = tagging_utils.train_for_epoch(model, train_dataloader, loss_fn, optimizer)\n writer.add_scalar('train/loss', np.mean(losses), epoch + 1)\n\n # eval\n print('EVAL...')\n model.eval()\n results = tagging_utils.run_inference(model, eval_dataloader, loss_fn, tokenizer)\n writer.add_scalar('eval/tok_loss', np.mean(results['tok_loss']), epoch + 1)\n writer.add_scalar('eval/tok_acc', np.mean(results['labeling_hits']), epoch + 1)\n\n model.train()\n\n print('SAVING...')\n torch.save(model.state_dict(), ARGS.working_dir + '/model_%d.ckpt' % epoch) \n \n"
] | [
[
"numpy.mean"
]
] |
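The training loop above checkpoints each epoch with `torch.save(model.state_dict(), ...)`. The matching restore side, which the row does not show, is the standard state-dict round trip:

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
torch.save(model.state_dict(), "model_0.ckpt")

restored = nn.Linear(4, 2)  # must be constructed with the same architecture
restored.load_state_dict(torch.load("model_0.ckpt"))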
philippe-heitzmann/python-apps | [
"1cc6e5e9b9ac81c81a3d4f0e420ff488fe6b2f0a"
] | [
"sagemaker-dash/tutorials/app15.py"
] | [
"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport plotly.express as px\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\ndf = pd.read_csv('https://plotly.github.io/datasets/country_indicators.csv')\n\navailable_indicators = df['Indicator Name'].unique()\n\napp.layout = html.Div([\n html.Div([\n html.Div([\n dcc.Dropdown(id='crossfilter-xaxis-column',\n options=[{\n 'label': i,\n 'value': i\n } for i in available_indicators],\n value='Fertility rate, total (births per woman)'),\n dcc.RadioItems(id='crossfilter-xaxis-type',\n options=[{\n 'label': i,\n 'value': i\n } for i in ['Linear', 'Log']],\n value='Linear',\n labelStyle={\n 'display': 'inline-block',\n 'marginTop': '5px'\n })\n ],\n style={\n 'width': '49%',\n 'isplay': 'inline-block'\n }),\n html.Div([\n dcc.Dropdown(id='crossfilter-yaxis-column',\n options=[{\n 'label': i,\n 'value': i\n } for i in available_indicators],\n value='Life expectancy at birth, total (years)'),\n dcc.RadioItems(id='crossfilter-yaxis-type',\n options=[{\n 'label': i,\n 'value': i\n } for i in ['Linear', 'Log']],\n value='Linear',\n labelStyle={\n 'display': 'inline-block',\n 'marginTop': '5px'\n })\n ],\n style={\n 'width': '49%',\n 'float': 'right',\n 'display': 'inline-block'\n })\n ],\n style={'padding': '10px 5px'}),\n html.Div([\n dcc.Graph(id='crossfilter-indicator-scatter',\n hoverData={'points': [{\n 'customdata': 'Japan'\n }]})\n ],\n style={\n 'width': '49%',\n 'display': 'inline-block',\n 'padding': '0 20'\n }),\n html.Div([\n dcc.Graph(id='x-time-series'),\n dcc.Graph(id='y-time-series'),\n ],\n style={\n 'display': 'inline-block',\n 'width': '49%'\n }),\n html.Div(dcc.Slider(\n id='crossfilter-year--slider',\n min=df['Year'].min(),\n max=df['Year'].max(),\n value=df['Year'].max(),\n marks={str(year): str(year)\n for year in df['Year'].unique()},\n step=None),\n style={\n 'width': '49%',\n 'padding': '0px 20px 20px 20px'\n })\n])\n\n\[email protected](\n dash.dependencies.Output('crossfilter-indicator-scatter', 'figure'), [\n dash.dependencies.Input('crossfilter-xaxis-column', 'value'),\n dash.dependencies.Input('crossfilter-yaxis-column', 'value'),\n dash.dependencies.Input('crossfilter-xaxis-type', 'value'),\n dash.dependencies.Input('crossfilter-yaxis-type', 'value'),\n dash.dependencies.Input('crossfilter-year--slider', 'value')\n ])\ndef update_graph(xaxis_column_name, yaxis_column_name, xaxis_type, yaxis_type,\n year_value):\n dff = df[df['Year'] == year_value]\n\n fig = px.scatter(\n x=dff[dff['Indicator Name'] == xaxis_column_name]['Value'],\n y=dff[dff['Indicator Name'] == yaxis_column_name]['Value'],\n hover_name=dff[dff['Indicator Name'] ==\n yaxis_column_name]['Country Name'])\n\n fig.update_traces(customdata=dff[dff['Indicator Name'] ==\n yaxis_column_name]['Country Name'])\n\n fig.update_xaxes(title=xaxis_column_name,\n type='linear' if xaxis_type == 'Linear' else 'log')\n\n fig.update_yaxes(title=yaxis_column_name,\n type='linear' if yaxis_type == 'Linear' else 'log')\n\n fig.update_layout(margin={\n 'l': 40,\n 'b': 40,\n 't': 10,\n 'r': 0\n },\n hovermode='closest')\n\n return fig\n\n\ndef create_time_series(dff, axis_type, title):\n\n fig = px.scatter(dff, x='Year', y='Value')\n\n fig.update_traces(mode='lines+markers')\n\n fig.update_xaxes(showgrid=False)\n\n fig.update_yaxes(type='linear' if axis_type == 'Linear' else 'log')\n\n fig.add_annotation(x=0,\n y=0.85,\n 
xanchor='left',\n yanchor='bottom',\n xref='paper',\n yref='paper',\n showarrow=False,\n align='left',\n text=title)\n\n fig.update_layout(height=225, margin={'l': 20, 'b': 30, 'r': 10, 't': 10})\n\n return fig\n\n\[email protected](dash.dependencies.Output('x-time-series', 'figure'), [\n dash.dependencies.Input('crossfilter-indicator-scatter', 'hoverData'),\n dash.dependencies.Input('crossfilter-xaxis-column', 'value'),\n dash.dependencies.Input('crossfilter-xaxis-type', 'value')\n])\ndef update_y_timeseries(hoverData, xaxis_column_name, axis_type):\n country_name = hoverData['points'][0]['customdata']\n dff = df[df['Country Name'] == country_name]\n dff = dff[dff['Indicator Name'] == xaxis_column_name]\n title = '<b>{}</b><br>{}'.format(country_name, xaxis_column_name)\n return create_time_series(dff, axis_type, title)\n\n\[email protected](dash.dependencies.Output('y-time-series', 'figure'), [\n dash.dependencies.Input('crossfilter-indicator-scatter', 'hoverData'),\n dash.dependencies.Input('crossfilter-yaxis-column', 'value'),\n dash.dependencies.Input('crossfilter-yaxis-type', 'value')\n])\ndef update_x_timeseries(hoverData, yaxis_column_name, axis_type):\n dff = df[df['Country Name'] == hoverData['points'][0]['customdata']]\n dff = dff[dff['Indicator Name'] == yaxis_column_name]\n return create_time_series(dff, axis_type, yaxis_column_name)\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n"
] | [
[
"pandas.read_csv"
]
] |
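The app above wires every figure to its controls through `@app.callback` with `dash.dependencies.Output` and `Input`; a callback re-runs whenever any declared input changes. A minimal self-contained callback using the same Dash 1.x imports as the row:

import dash
import dash_core_components as dcc
import dash_html_components as html

app = dash.Dash(__name__)
app.layout = html.Div([dcc.Input(id="name", value="world"), html.Div(id="greeting")])

@app.callback(dash.dependencies.Output("greeting", "children"),
              [dash.dependencies.Input("name", "value")])
def greet(value):
    return "Hello, {}!".format(value)

if __name__ == "__main__":
    app.run_server(debug=True)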
hunterluepke/Learn-Python-for-Stats-and-Econ | [
"d580a8e27ba937fc8401ac6d0714b6488ac8bbb6"
] | [
"Projects/Sugarscape/Model.py"
] | [
"import numpy as np \nimport pandas as pd\nfrom scipy.stats.mstats import gmean\nimport random\nimport math\nfrom randomdict import RandomDict\n# from chest import *\nimport shelve\nfrom Patch import *\nfrom AgentBranch import *\nimport gc\nfrom memory_profiler import memory_usage\n#Model.py\nclass Model():\n def __init__(self, gui, num_agents, mutate, genetic, live_visual, agent_attributes,\n model_attributes):\n if live_visual:\n self.GUI = gui\n self.live_visual = live_visual\n self.name = gui.name\n self.run = gui.run\n self.initial_population = num_agents\n self.mutate = mutate\n self.genetic = genetic\n self.agent_attributes = agent_attributes\n self.model_attributes = model_attributes\n self.attributes = agent_attributes + model_attributes\n # attributes that are not copied during mutation or herding\n self.drop_attr = [\"col\", \"row\", \"dx\", \"dy\", \"id\", \"wealth\", \"top_wealth\",\n \"sugar\", \"water\",\"target\", \"not_target\",\n \"exchange_target\", \"not_exchange_target\", \"parent\", \"image\"]\n # if self.GUI.live_visual:\n # self.drop_attr.append(\"image\")\n if self.mutate:\n self.max_mutate_rate = 0.5 if mutate else 0 #.5\n if self.genetic:\n self.cross_over_rate = .5\n ############ set model parameters ############\n self.total_agents_created = 0 \n self.goods = [\"sugar\", \"water\"]\n self.goods_params = {good:{\"min\":5,\n \"max\":25} for good in self.goods}\n \n self.max_init_demand_vals = {\"price\":{\"min\": 1/2,\n \"max\": 2},\n \"quantity\":{\"min\":10,\n \"max\":25}}\n self.consumption_rate = {\"sugar\":.5,\n \"water\":.5}\n self.primary_breeds = [\"basic\", \"switcher\", \"arbitrageur\"]\n self.secondary_breeds = [\"herder\"]\n \n self.breeds = self.primary_breeds + self.secondary_breeds\n # all agents start as basic, only mutation can create other agents\n basic = 1\n self.breed_probabilities = {\"basic\":basic, # if you are not a basic, you are a switcher\n \"herder\":0,\n \"arbitrageur\":0}\n self.max_vision = 1\n # record price of every transaction\n # then take average at end of period\n self.transaction_prices = []\n self.average_price = np.nan\n self.total_exchanges = 0\n ############ import map and build nav_dict ############\n # hash table that identifies possible moves relative to agent position \n self.nav_dict = {\n v:{\n i:{\n j: True for j in range(-v, v + 1) if 0 < (i ** 2 + j ** 2) <= (v ** 2)}\n for i in range(-v, v + 1)}\n for v in range(1, self.max_vision + 1)}\n #sugarMap.shape calls the a tuple with dimensions\n #of the dataframe\n self.sugarMap = pd.read_csv('sugar-map.txt', header = None, sep = ' ')\n # add 1 to each max_Val\n for key in self.sugarMap:\n self.sugarMap[key] = self.sugarMap[key].add(1)\n self.rows, self.cols = self.sugarMap.shape\n \n ############ Initialization ############ \n self.initializePatches()\n self.initializeAgents()\n self.data_dict = shelve.open(\"shelves\\\\masterShelve\", writeback = True)\n for attribute in self.attributes:\n self.data_dict[attribute] = shelve.open(\"shelves\\\\subshelve-\"+attribute, writeback = True) \n \n def initializePatches(self):\n #Instantiate Patches\n #Create a dictionary to hold the patches, organize as grid. 
\n #We first fill these with zeros as placeh holders\n self.patch_dict = {row:{col:0}\n for row in range(self.rows) for col in range(self.cols)}\n for row in range(self.rows):\n for col in range(self.cols):\n # replace zeros with actual Patch objects\n good = \"sugar\" if row + col < self.cols else \"water\"\n self.patch_dict[row][col] = Patch(self, row , col, \n self.sugarMap[row][col], good)\n # use RandomDict - O(n) time complexity - for choosing random empty patch\n self.empty_patches = RandomDict({\n (row,col):self.patch_dict[row][col]\n for row in range(self.rows) for col in range(self.cols)})\n \n def initializeAgents(self):\n # agents stored in a dict by ID\n self.agent_dict = {} #if self.live_visual else Chest(path = data_aggregator.folder) #shelve.open(\"agent_dict\") \n # dead agents will be removed from agent_dict\n for i in range(self.initial_population):\n self.total_agents_created += 1\n ID = self.total_agents_created\n row, col = self.chooseRandomEmptyPatch() \n self.agent_dict[ID] = Agent(self, row, col, ID)\n self.patch_dict[row][col].agent = self.agent_dict[ID]\n self.population = self.total_agents_created\n# def recordAgentLocationInDict(self, agent):\n# patchIndex = self.convert2dTo1d(agent.row, agent.col)\n# self.agentLocationDict[patchIndex] = agent\n\n def chooseRandomEmptyPatch(self):\n row, col = self.empty_patches.random_key() \n del self.empty_patches[row, col]\n\n return row, col\n\n def runModel(self, periods):\n def updateModelVariables():\n self.population = len(agent_list)\n self.average_price = gmean(self.transaction_prices)\n self.transaction_prices = []\n \n for period in range(1, periods + 1):\n self.growPatches()\n agent_list = list(self.agent_dict.values())\n random.shuffle(agent_list)\n for agent in agent_list:\n agent.move()\n agent.harvest()\n agent.trade()\n agent.consume()\n agent.checkAlive()\n agent.reproduce()\n agent.updateParams()\n \n # data_aggregator.collectData(self, self.name, \n # self.run, period)\n updateModelVariables()\n self.collectData(str(period))\n \n if self.live_visual:\n if period % self.GUI.every_t_frames == 0:\n print(\"period\", period, \"population\", self.population, sep = \"\\t\")\n self.GUI.parent.title(\"Sugarscape: \" + str(period))\n self.GUI.updatePatches()\n self.GUI.moveAgents()\n self.GUI.canvas.update()\n\n if period == periods:\n mem_usage = memory_usage(-1, interval=1)#, timeout=1)\n print(period, \"end memory usage before sync//collect:\", mem_usage[0], sep = \"\\t\")\n self.data_dict.sync()\n gc.collect()\n mem_usage = memory_usage(-1, interval=1)#, timeout=1)\n print(period, \"end memory usage after sync//collect:\", mem_usage[0], sep = \"\\t\")\n\n def growPatches(self):\n for i in self.patch_dict:\n for patch in self.patch_dict[i].values():\n if patch.Q < patch.maxQ:\n patch.Q += 1\n\n\n def collectData(self, period):\n \n def collectAgentAttributes():\n temp_dict={}\n for attribute in self.agent_attributes:\n temp_dict[attribute] = []\n for ID, agent in self.agent_dict.items():\n for attribute in self.agent_attributes:\n temp_dict[attribute].append(getattr(agent, attribute)) \n \n for attribute, val in temp_dict.items():\n self.data_dict[attribute][period] = np.mean(val)\n\n def collectModelAttributes():\n for attribute in self.model_attributes:\n self.data_dict[attribute][period] = getattr(self, attribute)\n \n collectAgentAttributes()\n collectModelAttributes()\n"
] | [
[
"pandas.read_csv",
"scipy.stats.mstats.gmean",
"numpy.mean"
]
] |
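The Sugarscape model above relies on randomdict.RandomDict so that chooseRandomEmptyPatch can pick and delete a random empty patch in O(1). A minimal sketch of such a structure (a hypothetical stand-in, not the randomdict package itself): keys are mirrored in a dense list for O(1) random indexing, and deletion swaps the last key into the vacated slot.

import random

class RandomKeyDict:
    """Dict with O(1) insert, delete, and uniform random_key()."""
    def __init__(self, items=()):
        self._data = {}   # key -> value
        self._keys = []   # dense list of keys for random.choice
        self._pos = {}    # key -> index into self._keys
        for k, v in dict(items).items():
            self[k] = v

    def __setitem__(self, key, value):
        if key not in self._data:
            self._pos[key] = len(self._keys)
            self._keys.append(key)
        self._data[key] = value

    def __delitem__(self, key):
        i = self._pos.pop(key)
        last = self._keys.pop()
        if last != key:        # move the last key into the hole left behind
            self._keys[i] = last
            self._pos[last] = i
        del self._data[key]

    def random_key(self):
        return random.choice(self._keys)   # O(1)

# usage mirroring chooseRandomEmptyPatch:
patches = RandomKeyDict({(r, c): 'patch' for r in range(3) for c in range(3)})
row, col = patches.random_key()
del patches[row, col]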
eashdown/onnx-mlir | [
"2662d5530a01ddb11056ae7958118e82487a9eb8"
] | [
"utils/gen_onnx_mlir.py"
] | [
"#!/usr/bin/env python\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict, OrderedDict\nfrom io import StringIO\nimport io\nimport os\nimport sys\nimport datetime\nimport argparse\n\nimport numpy as np # type: ignore\n\nfrom onnx import defs, FunctionProto, helper, OperatorStatus\nfrom onnx.defs import OpSchema, ONNX_DOMAIN, ONNX_ML_DOMAIN\nfrom onnx.backend.test.case import collect_snippets\nfrom onnx.backend.sample.ops import collect_sample_implementations\nfrom typing import Any, Text, Sequence, Dict, List, Type, Set, Tuple\n\nimport pprint\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--dry-run-onnx-ops\",\n help=\"Output ONNXOps.td.inc content to stdout.\",\n action=\"store_true\",\n default=False)\nparser.add_argument(\"--dry-run-op-build-table\",\n help=\"Output OpBuildTable.inc content to stdout.\",\n action=\"store_true\",\n default=False)\nparser.add_argument(\"--check-operation-version\",\n help=\"check whether the imported onnx package has new operation or \"\n \" newer version of operation compared with version stored in version_dicts\",\n action=\"store_true\",\n default=False)\n\nargs = parser.parse_args()\n\ncheck_operation_version = args.check_operation_version\n\n\n# Record the version of each operation that is treated as the current version.\n# To check whether the onnx package being used has newer version operation,\n# run this script with --check-operation-version flag.\n# Update this dictionary when a newer version is implemented\n# TODO: how to keep the old version\nversion_dict = {'Abs': 13,\n 'Acos': 7,\n 'Acosh': 9,\n 'Adagrad': 1,\n 'Adam': 1,\n 'Add': 13,\n 'And': 7,\n 'ArgMax': 13,\n 'ArgMin': 13,\n 'ArrayFeatureExtractor': 1,\n 'Asin': 7,\n 'Asinh': 9,\n 'Atan': 7,\n 'Atanh': 9,\n 'AveragePool': 11,\n 'BatchNormalization': 9,\n 'Binarizer': 1,\n 'BitShift': 11,\n 'Cast': 13,\n 'CastMap': 1,\n 'CategoryMapper': 1,\n 'Ceil': 13,\n 'Celu': 12,\n 'Clip': 13,\n 'Compress': 11,\n 'Concat': 13,\n 'ConcatFromSequence': 11,\n 'Constant': 13,\n 'ConstantOfShape': 9,\n 'Conv': 11,\n 'ConvInteger': 10,\n 'ConvTranspose': 11,\n 'Cos': 7,\n 'Cosh': 9,\n 'CumSum': 11,\n 'DepthToSpace': 13,\n 'DequantizeLinear': 13,\n 'Det': 11,\n 'DictVectorizer': 1,\n 'Div': 13,\n 'Dropout': 13,\n 'DynamicQuantizeLinear': 11,\n 'Einsum': 12,\n 'Elu': 6,\n 'Equal': 13,\n 'Erf': 13,\n 'Exp': 13,\n 'Expand': 13,\n 'EyeLike': 9,\n 'FeatureVectorizer': 1,\n 'Flatten': 13,\n 'Floor': 13,\n 'GRU': 7,\n 'Gather': 13,\n 'GatherElements': 13,\n 'GatherND': 13,\n 'Gemm': 13,\n 'GlobalAveragePool': 1,\n 'GlobalLpPool': 2,\n 'GlobalMaxPool': 1,\n 'Gradient': 1,\n 'Greater': 13,\n 'GreaterOrEqual': 12,\n 'HardSigmoid': 6,\n 'Hardmax': 13,\n 'Identity': 13,\n 'If': 13,\n 'Imputer': 1,\n 'InstanceNormalization': 6,\n 'IsInf': 10,\n 'IsNaN': 13,\n 'LRN': 13,\n 'LSTM': 7,\n 'LabelEncoder': 2,\n 'LeakyRelu': 6,\n 'Less': 13,\n 'LessOrEqual': 12,\n 'LinearClassifier': 1,\n 'LinearRegressor': 1,\n 'Log': 13,\n 'LogSoftmax': 13,\n 'Loop': 13,\n 'LpNormalization': 1,\n 'LpPool': 11,\n 'MatMul': 13,\n 'MatMulInteger': 10,\n 'Max': 13,\n 'MaxPool': 12,\n 'MaxRoiPool': 1,\n 'MaxUnpool': 11,\n 'Mean': 13,\n 'MeanVarianceNormalization': 13,\n 'Min': 13,\n 'Mod': 13,\n 'Momentum': 1,\n 'Mul': 13,\n 'Multinomial': 7,\n 'Neg': 13,\n 'NegativeLogLikelihoodLoss': 13,\n 'NonMaxSuppression': 11,\n 'NonZero': 13,\n 'Normalizer': 1,\n 'Not': 1,\n 'OneHot': 11,\n 
'OneHotEncoder': 1,\n 'Or': 7,\n 'PRelu': 9,\n 'Pad': 13,\n 'Pow': 13,\n 'QLinearConv': 10,\n 'QLinearMatMul': 10,\n 'QuantizeLinear': 13,\n 'RNN': 7,\n 'RandomNormal': 1,\n 'RandomNormalLike': 1,\n 'RandomUniform': 1,\n 'RandomUniformLike': 1,\n 'Range': 11,\n 'Reciprocal': 13,\n 'ReduceL1': 13,\n 'ReduceL2': 13,\n 'ReduceLogSum': 13,\n 'ReduceLogSumExp': 13,\n 'ReduceMax': 13,\n 'ReduceMean': 13,\n 'ReduceMin': 13,\n 'ReduceProd': 13,\n 'ReduceSum': 13,\n 'ReduceSumSquare': 13,\n 'Relu': 13,\n 'Reshape': 13,\n 'Resize': 13,\n 'ReverseSequence': 10,\n 'RoiAlign': 10,\n 'Round': 11,\n 'SVMClassifier': 1,\n 'SVMRegressor': 1,\n 'Scaler': 1,\n 'Scan': 11,\n 'Scatter': 11,\n 'ScatterElements': 13,\n 'ScatterND': 13,\n 'Selu': 6,\n 'SequenceAt': 11,\n 'SequenceConstruct': 11,\n 'SequenceEmpty': 11,\n 'SequenceErase': 11,\n 'SequenceInsert': 11,\n 'SequenceLength': 11,\n 'Shape': 13,\n 'Shrink': 9,\n 'Sigmoid': 13,\n 'Sign': 13,\n 'Sin': 7,\n 'Sinh': 9,\n 'Size': 13,\n 'Slice': 13,\n 'Softmax': 13,\n 'SoftmaxCrossEntropyLoss': 13,\n 'Softplus': 1,\n 'Softsign': 1,\n 'SpaceToDepth': 13,\n #'Split': 13,\n 'Split': 11,\n 'SplitToSequence': 11,\n 'Sqrt': 13,\n #'Squeeze': 13,\n 'Squeeze': 11,\n 'StringNormalizer': 10,\n 'Sub': 13,\n 'Sum': 13,\n 'Tan': 7,\n 'Tanh': 13,\n 'TfIdfVectorizer': 9,\n 'ThresholdedRelu': 10,\n 'Tile': 13,\n 'TopK': 11,\n 'Transpose': 13,\n 'TreeEnsembleClassifier': 1,\n 'TreeEnsembleRegressor': 1,\n 'Unique': 11,\n #'Unsqueeze': 13,\n 'Unsqueeze': 11,\n 'Upsample': 10,\n 'Where': 9,\n 'Xor': 7,\n 'ZipMap': 1}\n# Manual specification of attribute defaults.\nspecial_attr_defaults = dict([\n # (\"AveragePool.kernel_shape\", ('ints', '{}')),\n # (\"MaxPool.kernel_shape\", ('ints', '{}')),\n # (\"Cast.to\", ('int', '0')),\n # (\"Concat.axis\", ('int', '0')),\n # (\"Conv.group\", ('int', '1')),\n # (\"Unsqueeze.axes\", ('ints', '{}')),\n # (\"RNN.activation_alpha\", ('floats', '{}')),\n # (\"RNN.activation_beta\", ('floats', '{}')),\n])\n# Manual specification of attribute type.\nspecial_attr_types = dict([(\"Cast.to\", 'type')])\n\n# Special operation importing handlers.\nspecial_op_handler = dict([\n (\"BatchNormalization\", \"ImportNodeBatchNormalization\"),\n (\"Dropout\", \"ImportNodeDropout\"),\n (\"Cast\", \"ImportNodeCast\"),\n (\"MaxPool\", \"ImportNodeMaxPool\"),\n (\"Pad\", \"ImportNodePad\"),\n (\"Slice\", \"ImportNodeSlice\"),\n #(\"Transpose\", \"ImportNodeTranspose\")\n])\n\n# Operations supporting shape inference.\nOpsWithShapeInference=[\n 'Abs',\n 'Add',\n 'And',\n 'Atan',\n 'AveragePool',\n 'Cast',\n 'Concat',\n 'Constant',\n 'ConstantOfShape',\n 'Conv',\n 'ConvInteger',\n 'ConvTranspose',\n 'Cos',\n 'Cosh',\n 'DequantizeLinear',\n 'Div',\n 'Dropout',\n 'DynamicQuantizeLinear',\n 'Elu',\n 'Erf',\n 'Exp',\n 'Expand',\n 'Flatten',\n 'GRU',\n 'Gather',\n 'Gemm',\n 'GlobalAveragePool',\n 'GlobalLpPool',\n 'GlobalMaxPool',\n 'HardSigmoid',\n 'Identity',\n 'LSTM',\n 'LeakyRelu',\n 'Less',\n 'Log',\n 'MatMul',\n 'Max',\n 'Min',\n 'Mul',\n 'Neg',\n 'OneHotEncoder',\n 'Or',\n 'Pad',\n 'Pow',\n 'PRelu',\n 'QLinearConv',\n 'QuantizeLinear',\n 'QLinearMatMul',\n 'RNN',\n 'Reciprocal',\n 'ReduceMax',\n 'ReduceMean',\n 'ReduceMin',\n 'ReduceProd',\n 'ReduceSum',\n 'Relu',\n 'Reshape',\n 'Scaler',\n 'Selu',\n 'Shape',\n 'Sigmoid',\n 'Sign',\n 'Sin',\n 'Sinh',\n 'Size',\n 'Slice',\n 'Softmax',\n 'Softplus',\n 'Softsign',\n 'Split',\n 'Sqrt',\n 'Squeeze',\n 'Sub',\n 'Sum',\n 'Tan',\n 'Tanh',\n 'Tile',\n 'Transpose',\n 'Unsqueeze',\n 'Xor',\n 'Loop',\n]\n\n# Operations 
supporting canonicalization.\nOpsWithCanonicalizer = ['Add', 'Constant', 'Identity', 'Gemm', 'Cast', 'Transpose',\n                        'Dropout', 'Shape', 'Size', 'GlobalAveragePool',\n                        'GlobalMaxPool', 'Squeeze', 'Unsqueeze']\n\nOpsWithHelpers = {\n  \"Loop\": \"\"\"\n    mlir::Operation::result_range v_final();\n    mlir::Operation::result_range scan_outputs();\n  \"\"\",\n  \"Scan\": \"\"\"\n    mlir::Operation::operand_range v_initial();\n    mlir::Operation::result_range v_final();\n    mlir::Operation::operand_range scan_inputs();\n    mlir::Operation::result_range scan_outputs();\n  \"\"\"\n}\n# Interface for special handling of type inference\n# The common code is put into get_type_inference_func\nOpsWithResultTypeInference = {\n  \"Constant\":\n  '''if (auto attr = valueAttr()) {\n        resultTypes.push_back(attr.getType());\n      } else if (auto attr = sparse_valueAttr()) {\n        resultTypes.push_back(attr.getType());\n      }''',\n  \"Cast\":\n    '''auto builder = mlir::OpBuilder(getContext());\n      resultTypes.push_back(mlir::UnrankedTensorType::get(to()));''',\n  \"ConstantOfShape\":\n  '''if (auto attr = valueAttr()) {\n        resultTypes.push_back(mlir::UnrankedTensorType::get(\n          attr.getType().cast<ShapedType>().getElementType()));\n      } else {\n        resultTypes.push_back(mlir::UnrankedTensorType::get(\n          FloatType::getF32(getContext())));\n      }'''\n}\n\n# Add an Op in this list if the Op needs result type deduction which is required\n# when writing declarative rewriting rules. Deduced type is always\n# an UnrankedTensorType whose element type is the same as the first operand's\n# element type.\n#\n# Currently, there are only two build methods generated:\n#  - one with operands and attributes having a separate parameter, and\n#  - one with operands and attributes having aggregated parameters.\ncustom_builder_unranked_ops_list = ['Abs', 'Exp', 'ReduceSum', 'ReduceSumSquare',\n                                    'Pad', 'Sqrt', 'Neg', 'Unsqueeze', 'Softmax',\n                                    'ReduceMax', 'ReduceLogSum', 'Squeeze',\n                                    'Identity', 'Split']\n# Custom builder op list for operations with broadcast; we can deduce the right\n# output type, no need to leave it undef as in the above list.\n# Ops must have two operands, not one, not three... And there shall be two.\n# TODO: handle variadic ops omitted here: Max, Mean, Min, Sum.\ncustom_builder_broadcast_ops_list = ['Add', 'And', 'Div', 'Equal', 'Greater',\n                                     'Less', 'Mul', 'Or', 'Pow', 'Sub', 'Xor']\n# union of both\ncustom_builder_ops_list = custom_builder_unranked_ops_list + custom_builder_broadcast_ops_list\n\n#a dictionary to add any special definition for an operation\ncustom_definition_misc = dict([ ('Constant',\n '''  let builders = [\n  OpBuilder<(ins \"Attribute\":$sparse_value, \"Attribute\":$value), [{\n   if (value) {\n    auto tensorType = value.getType();\n    build($_builder, $_state, tensorType, sparse_value, value,\n      FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr());\n   } else {\n    auto tensorType = sparse_value.getType();\n    build($_builder, $_state, tensorType, sparse_value, value,\n      FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr());\n   }\n  }]>\n  ];'''),\n ('Cast',\n '''   let builders = [\n  OpBuilder<(ins \"Value\":$input, \"TypeAttr\":$to), [{\n   auto resultType = mlir::UnrankedTensorType::get(to.getValue());\n   build($_builder, $_state, resultType, input, to);\n  }] >\n  ];'''\n )])\n\nonnx_types = (\n    'bool', 'int8', 'int16', 'int32', 'int64', 'unkown', 'float16',\n    'float', 'double', 'complex64', 'complex128', 'string'\n)\ntblgen_types = ('AnyI1', 'AnyI8', 'AnyI16', 'AnyI32', 'AnyI64', 'BF16', 'F16', 'F32', 'F64',\n                'Complex<F32>', 'Complex<F64>', 'StringType'\n)\n\nMAX_NUM_TYPES=20\n\ndef should_render_domain(domain):  # type: (Text) -> bool\n    return True\n\n\ndef display_attr_type(v):  # type: (OpSchema.AttrType) -> Text\n    assert isinstance(v, OpSchema.AttrType)\n    s = Text(v)\n    s = s[s.rfind('.') + 1:].lower()\n    if s[-1] == 's':\n        s = 'list of ' + s\n    return s\n\n\ndef get_unique_output_name(schema, name):\n    for input in schema.inputs:\n        if input.name == name:\n            return 'out_' + name\n    return name\n\n\ndef onnx_attr_type_to_mlir_attr_type(t):\n    onnx_attr_type = Text(t)\n    onnx_attr_type = onnx_attr_type[onnx_attr_type.rfind('.') + 1:].lower()\n\n    if onnx_attr_type == 'int':\n        mlir_attr_type = 'SI64Attr'\n    elif onnx_attr_type == 'float':\n        mlir_attr_type = 'F32Attr'\n    elif onnx_attr_type == 'ints':\n        mlir_attr_type = 'I64ArrayAttr'\n    elif onnx_attr_type == 'floats':\n        mlir_attr_type = 'F32ArrayAttr'\n    elif onnx_attr_type == \"string\":\n        mlir_attr_type = 'StrAttr'\n    elif onnx_attr_type == \"strings\":\n        mlir_attr_type = 'StrArrayAttr'\n    elif onnx_attr_type == 'type':\n        mlir_attr_type = 'TypeAttr'\n    else:\n        mlir_attr_type = 'AnyAttr'\n    #TODO: tensor and sparse tensor\n    return mlir_attr_type\n\n\n#TODO: any better way to do this.\ndef tblgen_attr_type_to_cpp_type(t):\n    if 'I64Attr' in t:\n        cpp_type = 'IntegerAttr'\n    elif 'F32Attr' in t:\n        cpp_type = 'FloatAttr'\n    elif 'I64ArrayAttr' in t or 'F32ArrayAttr' in t:\n        cpp_type = 'ArrayAttr'\n    elif 'StrAttr' in t:\n        cpp_type = 'StringAttr'\n    elif 'strings' in t:\n        cpp_type = 'ArrayAttr'\n    else:\n        cpp_type = 'Attribute'\n    return cpp_type\n\n\ndef tblgen_operand_type_to_cpp_type(op_type):\n    if op_type.startswith('Variadic'):\n        mytype = 'ValueRange'\n    else:\n        mytype = 'Value'\n    return mytype\n\n\ndef np_type_to_tblgen_attr_type(tstr):\n    index = -1\n    for i in range(len(onnx_types)):\n        if onnx_types[i] in tstr:\n            index = i\n            break\n    if index == -1:\n        return None\n    else:\n        return tblgen_types[index]\n\ndef get_tblgen_type_index(type_str):\n    return tblgen_types.index(type_str)\n\n#the possible data structures are tensor, map and seq(tensor())\ndef get_data_structure_element(allowed_type_str):\n    structure_list = ['tensor', 'seq', 'map']\n    for structure in structure_list:\n        if allowed_type_str.startswith(structure) :\n            element = allowed_type_str.replace(\n                structure+'(', '', 1).replace(')', '', 1)\n            return (structure, element)\n    return (None, None)\n\ndef get_allowed_elem_types(schema, input):\n    #allowed_types_str = None\n    # return allowed_types_str\n    # TODO: enable type constraints.\n    if input.typeStr :\n        tstr = input.typeStr\n        structure, element = get_data_structure_element(tstr);\n        # In case the type is directly specified\n        if structure and element :\n            t = np_type_to_tblgen_attr_type(element)\n            if t == None :\n                return structure, None\n            else :\n                return structure, [t]\n        else :\n            return None, None\n    if schema.type_constraints:\n        for type_constraint in schema.type_constraints:\n            if type_constraint.type_param_str != tstr :\n                continue\n            allowed_type_list=[]\n            allowedTypes = type_constraint.allowed_type_strs\n            allowed_structure = None\n            for allowedType in allowedTypes:\n                structure, element = get_data_structure_element(allowedType);\n                if structure == None or element == None:\n                    return None, None\n\n                if allowed_structure != None and allowed_structure != structure :\n                    return None, None\n                allowed_structure = structure\n                t = np_type_to_tblgen_attr_type(element)\n                if t == None :\n                    return allowed_structure, None\n                if not t in allowed_type_list :\n                    allowed_type_list.append(t)\n\n            return allowed_structure,allowed_type_list\n\n    return None, None\n\n\ndef inc_indent(indent=None):\n    return \"\" if indent is None else indent + ' ' * 2\n\n\ndef dec_indent(indent):\n    return indent[:-2]\n\n\ndef join_args(args):\n    return \", \".join(args)\n\ndef get_operands_or_results(schema, type_str_dict, is_input):\n    value_list = schema.inputs if is_input else schema.outputs\n    if not value_list:\n        return OrderedDict()\n\n    def any_type_of(types):\n        assert isinstance(types, list)\n        if len(types) == 1:\n            return types[0]\n        else:\n            return \"AnyTypeOf<[{}]>\".format(\", \".join(types))\n\n    name_to_types = OrderedDict()\n    for i, value in enumerate(value_list):\n        types = get_onnx_mlir_types(schema, type_str_dict, value)\n\n        '''\n        structure, elem_types = get_allowed_elem_types(schema, type_str_dict, value)\n\n        if structure == 'tensor' :\n            if elem_types is None:\n                types = [\"AnyMemRef\", \"AnyTensor\"]\n            else:\n                elem_types_str = ','.join(elem_types)\n                types = [\"TensorOf<[{}]>\", \"MemRefOf<[{}]>\"]\n                types = list(map(lambda x: x.format(elem_types_str), types))\n        elif structure == 'seq' :\n            # Seq is not supported yet.\n            # Use of TensorOf<[AnyTensor]> as a placeholder for tablegen.\n            # When the Operation is used, warning/error will be generated at runtime.\n            if elem_types is None:\n                types = [\"AnyMemRef\", \"TensorOf<[AnyTensor]>\"]\n            else:\n                elem_types_str = ','.join(elem_types)\n                types = [\"TensorOf<[TensorOf<[{}]>]>\", \"MemRefOf<[{}]>\"]\n                types = list(map(lambda x: x.format(elem_types_str), types))\n        elif structure == 'map' :\n            # Map is not supported yet.\n            # Use of TupleOf as a placeholder for tablegen.\n            # When the Operation is used, warning/error will be generated at runtime.\n            if elem_types is None:\n                types = [\"AnyMemRef\", \"TupleOf<[AnyTensor]>\"]\n            else:\n                elem_types_str = ','.join(elem_types)\n                types = [\"TupleOf<[TensorOf<[{}]>]>\", \"MemRefOf<[{}]>\"]\n                types = list(map(lambda x: x.format(elem_types_str), types))\n        else:\n            types = [\"AnyMemRef\", \"AnyTensor\"]\n        '''\n\n        if OpSchema.FormalParameterOption.Optional == value.option:\n            
types.append(\"NoneType\")\n elif OpSchema.FormalParameterOption.Variadic == value.option:\n if value.isHomogeneous:\n types = [\"Variadic<{}>\".format(any_type_of(types))]\n else:\n #TODO handle(variadic, heterogeneous) \"\n types = [\"Variadic<{}>\".format(any_type_of(types))]\n sys.stderr.write(\"warning: (variadic, heterogeneous) for \" + schema.name +\n ' ' + value.name + \"\\n\")\n\n # Since output name can coincide with that of an input, we explicitly\n # append a suffix \"_out\" to such names for disambiguation.\n if is_input:\n value_name = value.name\n else:\n value_name = get_unique_output_name(schema, value.name)\n\n name_to_types[value_name] = any_type_of(types)\n return name_to_types\n\n\ndef get_attrs(schema):\n def get_attr_type_optional(attr_type):\n return 'OptionalAttr<{}>'.format(\n onnx_attr_type_to_mlir_attr_type(attr_type))\n\n def get_attr_type_with_default(attr_type, attr_default):\n return 'DefaultValuedAttr<{}, \"{}\">'.format(\n onnx_attr_type_to_mlir_attr_type(attr_type), attr_default)\n\n if not schema.attributes:\n return OrderedDict()\n\n name_to_type = OrderedDict()\n for _, attr in sorted(schema.attributes.items()):\n if attr.type == OpSchema.AttrType.GRAPH:\n continue\n\n qualified_attr_name = \"{}.{}\".format(schema.name, attr.name)\n if qualified_attr_name in special_attr_defaults:\n name_to_type[attr.name] = get_attr_type_with_default(\n *special_attr_defaults[qualified_attr_name])\n if qualified_attr_name in special_attr_types:\n name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(\n special_attr_types[qualified_attr_name])\n # option holds either required or default value\n elif attr.required:\n name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(\n attr.type)\n elif attr.default_value.name:\n\n def format_value(value): # type: (Any) -> Text\n if isinstance(value, float):\n formatted = str(np.round(value, 5))\n # use default formatting, unless too long.\n if (len(formatted) > 10):\n formatted = str(\"({:e})\".format(value))\n return formatted\n elif isinstance(\n value,\n (bytes, bytearray)) and sys.version_info[0] == 3:\n return str(value.decode('utf-8'))\n return str(value)\n\n default_value = helper.get_attribute_value(attr.default_value)\n if isinstance(default_value, list):\n default_value = [format_value(val) for val in default_value]\n default_value_str = '{}'.format(default_value)\n default_value_str = default_value_str.replace('[', '{', 1)\n default_value_str = default_value_str.replace(']', '}', 1)\n if Text(attr.type) == \"AttrType.STRINGS\":\n default_value_str = default_value_str.replace(\"'\", '\\\\\"')\n else:\n default_value_str = default_value_str.replace(\"'\", '')\n else:\n default_value = format_value(default_value)\n default_value_str = default_value\n\n name_to_type[attr.name] = get_attr_type_with_default(\n attr.type, default_value_str)\n else:\n name_to_type[attr.name] = get_attr_type_optional(attr.type)\n return name_to_type\n\ndef get_numberof_list(mylist):\n expected_num = len(mylist)\n for element in mylist :\n if OpSchema.FormalParameterOption.Variadic == element.option:\n expected_num = -1\n return expected_num\n\ndef get_output_type_mapping(schema):\n mapping=[]\n for output in schema.outputs :\n #if only one type is allowed, just set that\n structure, allowed_elem_types = get_allowed_elem_types(schema, output)\n if allowed_elem_types != None and len(allowed_elem_types) == 1 :\n mapping.append(str(get_tblgen_type_index(allowed_elem_types[0])))\n continue\n\n #map the type string\n if output.typeStr :\n 
tstr = output.typeStr\n found = False\n for i, input in enumerate(schema.inputs):\n if input.typeStr and input.typeStr == tstr:\n mapping.append(str(i+MAX_NUM_TYPES))\n found = True\n break\n if found:\n continue\n\n #unknown output type\n mapping.append(str(-1))\n\n return mapping\n\ndef get_numberof_inout(s, indent, schema):\n expected_num_operands = get_numberof_list(schema.inputs)\n indent = inc_indent(indent)\n s += indent + \"static int getNumberOfOperands() {\\n\"\n indent = inc_indent(indent)\n s += indent + \"return {};\\n\".format(expected_num_operands)\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n\n expected_num_results = get_numberof_list(schema.outputs)\n s += indent + \"static int getNumberOfResults() {\\n\"\n indent = inc_indent(indent)\n s += indent + \"return {};\\n\".format(expected_num_results)\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n\n s += indent + \"static std::vector<int> getTypeMap() {\\n\"\n mapping = get_output_type_mapping(schema)\n indent = inc_indent(indent)\n s += indent + \"return {\" + \",\".join(mapping) + \"};\\n\"\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n\n return s\n\n\ndef get_promotable_const_operands_func(s, indent, const_operands_name_to_idx):\n cpp_name_to_idx_literal = \"{\" + \", \".join([\n \"{{\\\"{}\\\", {}}}\".format(*name_to_idx)\n for name_to_idx in const_operands_name_to_idx\n ]) + \"}\"\n\n #s += indent + \"let extraClassDeclaration = [{\\n\"\n indent = inc_indent(indent)\n s += indent + \"std::map<std::string, size_t> promotableConstOperands() {\\n\"\n indent = inc_indent(indent)\n s += indent + \"return {};\\n\".format(cpp_name_to_idx_literal)\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n #indent = dec_indent(indent)\n #s += indent + \"}];\\n\"\n\n return s\n\ndef get_type_inference_func(s, indent, type_inference_code):\n indent = inc_indent(indent)\n\n s += indent + \"std::vector<mlir::Type> resultTypeInference() {\" + \"\\n\"\n indent = inc_indent(indent)\n s += indent + \"std::vector<mlir::Type> resultTypes;\" + \"\\n\"\n\n s += indent + type_inference_code + '\\n'\n\n s += indent + \"return resultTypes;\" + \"\\n\"\n indent = dec_indent(indent)\n s += indent + \"}\" + \"\\n\"\n\n indent = dec_indent(indent)\n return s\n\ndef parse_type_str(allowedType):\n # AnyI may be used for uint because the onnx_mlir is not generating uint output\n # This will be fixed later and UI will be replace AnyI\n onnx_to_mlir_type_dict = { '(': '<[',\n ')': ']>',\n 'tensor' : 'TensorOf',\n 'seq' : 'SeqOf',\n 'map' : 'TupleOf',\n 'bool': 'I1',\n #'uint8' : 'AnyI8',\n #uint16' : 'AnyI16',\n #uint32' : 'AnyI32',\n #uint64' : 'AnyI64',\n 'uint8' : 'UI8',\n 'uint16' : 'UI16',\n 'uint32' : 'UI32',\n 'uint64' : 'UI64',\n 'int8' : 'I8',\n 'int16' : 'I16',\n 'int32' : 'I32',\n 'int64' : 'I64',\n 'float16' : 'F16',\n 'bfloat16' : 'BF16',\n 'float' : 'F32',\n 'double' : 'F64',\n 'unkown' : 'BF16',\n 'complex64' : 'Complex<F32>',\n 'complex128' : 'Complex<F64>',\n 'string' : 'StringType'}\n\n # Apply substitutions in decreasing order of key-length, so that float16 is replaced\n # before float, and uint16 is replaced before int16, etc.\n mapping = list(onnx_to_mlir_type_dict.items())\n mapping.sort(key=lambda pair:len(pair[0]), reverse=True)\n for key, item in mapping:\n allowedType = allowedType.replace(key, item)\n return allowedType\n\ndef parse_a_type_constraint(constraint):\n allowedTypes = constraint.allowed_type_strs\n mlirTypes = []\n for allowedType in allowedTypes:\n mlirType = 
parse_type_str(allowedType)\n        mlirTypes.append(mlirType)\n    # Remove redundant entries and sort.\n    # However onnx keeps a consistently meaningful order\n    # There is no redundancy as long as each onnx type is mapped uniquely\n    # mlirTypes = sorted(list(set(mlirTypes)))\n\n    # MemRef is always needed\n    mlirTypes.append(\"AnyMemRef\")\n    return mlirTypes\n\ndef parse_type_constraints(schema):\n    type_str_dict = dict()\n    for type_constraint in schema.type_constraints:\n        type_str_dict[type_constraint.type_param_str] = parse_a_type_constraint(type_constraint)\n    return type_str_dict\n\ndef get_onnx_mlir_types(schema, type_str_dict, input):\n    if input.typeStr :\n        if not input.typeStr in type_str_dict :\n            # some arguments use type description directly\n            # instead of constraint\n            return [parse_type_str(input.typeStr), \"AnyMemRef\"]\n        else :\n            return type_str_dict[input.typeStr]\n    else :\n        print('No typeStr ', schema.name)\n        return []\n\ndef gen_op_def(schema):\n    indent = inc_indent()\n    s = 'def ONNX{0}Op:ONNX_Op<\"{0}\",\\n'.format(schema.name)\n\n    regions = OrderedDict()\n    for _, attr in sorted(schema.attributes.items()):\n        if attr.type == OpSchema.AttrType.GRAPH:\n            if attr.required:\n                regions[attr.name] = \"SizedRegion<1>\"\n            else:\n                regions[attr.name] = \"AnyRegion\"\n\n    # Generate decl for op traits.\n    traits = [\"NoSideEffect\"]\n    # OpsWithShapeInference:\n    # Now the ShapeInference traits are added to all operations\n    # Dummy implementations are added to ONNXOps.cpp\n    # Error will be reported if these operations are encountered at runtime\n    traits.append(\"DeclareOpInterfaceMethods<ShapeInferenceOpInterface>\")\n    if schema.name in OpsWithResultTypeInference.keys():\n        traits.append(\"OpInterface<\\\"ResultTypeInferenceOpInterface\\\">\")\n    if len(regions):\n        traits.append(\"OpInterface<\\\"HasOnnxSubgraphOpInterface\\\">\")\n    s += inc_indent(indent) + '[{}]> {{\\n'.format(join_args(traits))\n\n    # Generate decl for canonicalizer.\n    indent = inc_indent(indent)\n    if schema.name in OpsWithCanonicalizer:\n        s += indent + 'let hasCanonicalizer = 1;\\n'\n\n    # Generate decl for summary.\n    s += indent + 'let summary = \"ONNX {} operation\";\\n'.format(schema.name)\n\n    # Generate description.\n    s += indent + 'let description = [{\\n'\n    if schema.doc:\n        lines = schema.doc.lstrip().splitlines()\n        for line in lines:\n            escaped_line = line.replace('\"', '\\\\\"')\\\n                               .replace('}]', '\\\\}\\\\]')\n            s += indent + '\"{}\"\\n'.format(escaped_line)\n    s += indent + '}];\\n'\n\n    # handle the type constraint for input and output\n    # parse type constraint into onnx-mlir type string list\n    type_str_dict = parse_type_constraints(schema)\n\n    # Generate ins (consisting of operands and attributes).\n    ins = get_operands_or_results(schema, type_str_dict, is_input=True)\n    ins.update(get_attrs(schema))\n\n    ins_strs = [\"{1}:${0}\".format(*i) for i in ins.items()]\n    s += indent + 'let arguments = (ins {});\\n'.format(\n        (',\\n' + inc_indent(indent)).join(ins_strs))\n\n    # Generate outs (operation results).\n    outs = get_operands_or_results(schema, type_str_dict, is_input=False)\n    outs_strs = [\"{1}:${0}\".format(*i) for i in outs.items()]\n    s += indent + 'let results = (outs {});\\n'.format(\n        (',\\n' + inc_indent(indent)).join(outs_strs))\n\n    regions_strs = [\"{1}:${0}\".format(*i) for i in regions.items()]\n\n    if len(regions):\n        s += indent + 'let regions = (region {});\\n'.format(\n            (',\\n' + inc_indent(indent)).join(regions_strs))\n\n    # custom_builder_broadcast_ops_list\n\n    # add custom builders\n    # use element type of the first operand to 
construct an UnrankedTensorType for the output.\n if schema.name in custom_builder_ops_list:\n if len(ins) == 0:\n raise RuntimeWarning(\n \"warning: not generate custom build methods for \" +\n schema.name + \" since it does not have operands.\")\n else:\n s += indent + 'let builders = [\\n'\n # Custom builders with operands and attributes having a separate parameter.\n # E.g. OpBuilder<(ins \"Value\":$X, \"Value\":$Y, \"Attribute\":$A), [{}]>\n indent = inc_indent(indent)\n s += indent + 'OpBuilder<(ins '\n operands_dict = get_operands_or_results(schema, type_str_dict, is_input=True)\n attrs_dict = get_attrs(schema)\n s += ', '.join('\"{}\":${}'.format(tblgen_operand_type_to_cpp_type(ty),\n name) for name, ty in operands_dict.items())\n if operands_dict and attrs_dict:\n s += ', '\n s += ', '.join('\"{}\":${}'.format(tblgen_attr_type_to_cpp_type(ty),\n name) for name, ty in attrs_dict.items())\n s += '), [{\\n'\n indent = inc_indent(indent)\n\n # Get output type from first operand's type.\n first_operand_name = list(ins.items())[0][0]\n build_type_name = ''\n if schema.name in custom_builder_broadcast_ops_list:\n second_operand_name = list(ins.items())[1][0]\n s += indent + 'auto lhsTy = {}.getType();\\n'. \\\n format(first_operand_name)\n s += indent + 'auto rhsTy = {}.getType();\\n'. \\\n format(second_operand_name)\n s += indent + 'auto elementType = getBroadcastedRankedType(lhsTy, rhsTy);\\n'\n s += indent + 'auto shapedType = elementType.dyn_cast_or_null<ShapedType>();\\n';\n s += indent + 'if (!shapedType || !shapedType.hasStaticShape()) {\\n';\n s += indent + indent + 'elementType = {}'.format(first_operand_name) + \\\n '.getType().cast<ShapedType>().getElementType();\\n';\n s += indent + indent + 'elementType = UnrankedTensorType::get(elementType);\\n'\n s += indent + '}\\n';\n build_type_name = 'elementType'\n else:\n s += indent + 'auto elementType = {}'.format(first_operand_name) + \\\n '.getType().cast<ShapedType>().getElementType();\\n'\n build_type_name = 'UnrankedTensorType::get(elementType)'\n s += indent + 'build($_builder, $_state, {}'.format(build_type_name)\n for name, _ in ins.items():\n s += ', ' + name\n s += ');\\n'\n indent = dec_indent(indent)\n s += indent + '}]>,\\n'\n\n # Custom builders with all operands and attributes having aggregate parameters.\n # E.g. OpBuilder<(ins \"ValueRange operands,\n # ArrayRef<NamedAttribute> attributes\", [{}]>'\n s += indent + 'OpBuilder<(ins ' + \\\n '\"ValueRange\":$operands, \"ArrayRef<NamedAttribute>\":$attributes), [{\\n'\n indent = inc_indent(indent)\n if schema.name in custom_builder_broadcast_ops_list:\n s += indent + 'auto lhsTy = operands[0].getType();\\n'\n s += indent + 'auto rhsTy = operands[1].getType();\\n'\n s += indent + 'auto elementType = getBroadcastedRankedType(lhsTy, rhsTy);\\n'\n s += indent + 'auto shapedType = elementType.dyn_cast_or_null<ShapedType>();\\n';\n s += indent + 'if (!shapedType || !shapedType.hasStaticShape()) {\\n';\n s += indent + indent + 'elementType = operands[0]' + \\\n '.getType().cast<ShapedType>().getElementType();\\n';\n s += indent + indent + 'elementType = UnrankedTensorType::get(elementType);\\n'\n s += indent + '}\\n';\n else:\n s += indent + 'auto elementType = operands[0].getType().' 
+ \\\n 'cast<ShapedType>().getElementType();\\n'\n s += indent + 'std::vector<mlir::Type> outputTypes;\\n'\n s += indent + 'outputTypes.emplace_back({});\\n'.format(build_type_name)\n s += indent + 'build($_builder, $_state, outputTypes, operands, attributes);\\n'\n indent = dec_indent(indent)\n s += indent + '}]>'\n\n s += '\\n' + indent + '];\\n'\n\n # Generate extracClassDeclaration.\n s += indent + \"let extraClassDeclaration = [{\\n\"\n #indent = inc_indent(indent)\n\n # Generate input/output number.\n s = get_numberof_inout(s, indent, schema)\n\n if schema.name in OpsWithResultTypeInference:\n s = get_type_inference_func(\n s, indent, OpsWithResultTypeInference[schema.name])\n\n if schema.name in OpsWithHelpers:\n s += OpsWithHelpers[schema.name]\n\n if len(regions):\n s += indent + \"int64_t getSubgraphRegionIdx(const std::string& name) {\\n\"\n indent = inc_indent(indent)\n for idx, region_name in enumerate(regions.keys()):\n s += indent + \"if (name == \\\"{}\\\") return {};\\n\".format(region_name, idx)\n s += indent + \"llvm_unreachable(\\\"region with the specified name does not exist\\\");\\n\"\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n\n s += indent + '}];\\n'\n\n if ( schema.name in custom_definition_misc) :\n s += custom_definition_misc[schema.name] + '\\n'\n\n s += '}\\n\\n'\n return s\n\n\n\"\"\"\nspecial cases:\n* Split: attr split default value: sizeof(output1) namely 1\n* Conv: attr dilations default value is {num_dim of first input - 2, 1}\n* Conv: attr kernel_shape type is ints\n* Transpose: attr perm default value is {} empty int list\n\"\"\"\n\n\ndef gen_op_importer(schema, file):\n indent = inc_indent()\n s = indent + 'import_handler_map_[\"' + schema.name +'\"] = \\n '\n\n expected_num_operands = len(schema.inputs)\n expected_num_results = len(schema.outputs)\n for input in schema.inputs:\n if OpSchema.FormalParameterOption.Variadic == input.option:\n expected_num_operands = -1\n for output in schema.outputs:\n if OpSchema.FormalParameterOption.Variadic == output.option:\n expected_num_results = -1\n\n handler_func = special_op_handler.get(\n schema.name, \"buildOperation<mlir::ONNX{}Op>\".format(schema.name))\n\n # Special handlers currently require expected num operands/results to be specified.\n # TODO: remove special handlers.\n args = [\"node\"]\n \"\"\"\n if expected_num_operands != -1 or expected_num_results != -1 or \"buildOperation\" not in handler_func:\n args.append(\n \"/* expected_num_operands = */ {}\".format(expected_num_operands))\n args.append(\n '/* expected_num_results = */ {}'.format(expected_num_results))\n \"\"\"\n s += inc_indent(indent) + '&onnx_mlir::detail::FrontendGenImpl::'\n s += handler_func+';\\n'\n\n file.write(s)\n\n\ndef build_operator_schemas():\n # domain -> support level -> name -> [schema]\n index = defaultdict(lambda: defaultdict(lambda: defaultdict(\n list))) # type: Dict[Text, Dict[int, Dict[Text, List[OpSchema]]]]\n for schema in defs.get_all_schemas_with_history():\n index[schema.domain][int(\n schema.support_level)][schema.name].append(schema)\n\n # Preprocess the Operator Schemas\n # [(domain, [(support_level, [(schema name, current schema, all versions schemas)])])]\n operator_schemas = list(\n ) # type: List[Tuple[Text, List[Tuple[int, List[Tuple[Text, OpSchema, List[OpSchema]]]]]]]\n exsting_ops = set() # type: Set[Text]\n for domain, _supportmap in sorted(index.items()):\n if not should_render_domain(domain):\n continue\n processed_supportmap = list()\n for _support, _namemap in 
sorted(_supportmap.items()):\n processed_namemap = list()\n for n, unsorted_versions in sorted(_namemap.items()):\n versions = sorted(unsorted_versions,\n key=lambda s: s.since_version)\n schema = versions[-1]\n if schema.name in exsting_ops:\n continue\n\n if check_operation_version :\n # Generate operation of the latest version of your onnx.\n exsting_ops.add(schema.name)\n processed_namemap.append((n, schema, versions))\n\n # Add checks against version_dict\n if schema.name not in version_dict :\n print(\"Check-operation-version: Operation {} is new with version {}\"\n .format(schema.name, schema.since_version))\n elif schema.since_version > version_dict[schema.name]:\n print(\"Check-operation-version: Operation {}\"\n .format(schema.name)+\n \" has a newer version {} over old version {}\"\n .format(schema.since_version, version_dict[schema.name]))\n else:\n # Generate operation according to the version in version_dict.\n if schema.name not in version_dict :\n continue\n found = False\n for schema in reversed(versions):\n # Check the version number against the version_dict\n if schema.since_version == version_dict[schema.name]:\n exsting_ops.add(schema.name)\n processed_namemap.append((n, schema, versions))\n found = True\n break\n if not found:\n print(\"Your onnx installation may be too old. \"\n \"The desired version for operation {} is not found.\".format(\n schema.name))\n sys.exit()\n processed_supportmap.append((_support, processed_namemap))\n operator_schemas.append((domain, processed_supportmap))\n return operator_schemas\n\n\ndef main(args): # type: (Type[Args]) -> None\n curr_utc_time = datetime.datetime.now(\n datetime.timezone.utc).strftime(\"%m/%d/%Y, %H:%M:%S\")\n autogen_warning = (\n '//********************************************************\\n'\n '// Do not modify this file directly.\\n'\n '// This file is automatically generated via script.\\n'\n '// Details can be found in docs/ImportONNXDefs.md .\\n'\n '//********************************************************\\n\\n')\n autogen_warning = autogen_warning.format(curr_utc_time)\n\n op_def = args.op_def\n op_def.write(autogen_warning)\n\n op_importer = args.op_importer\n op_importer.write(autogen_warning)\n\n version_dict = dict()\n for domain, supportmap in build_operator_schemas():\n for _, namemap in supportmap:\n for op_type, schema, versions in namemap:\n if check_operation_version:\n version_dict[schema.name] = schema.since_version\n else:\n gen_op_importer(schema, op_importer)\n r = gen_op_def(schema)\n op_def.write(r)\n if check_operation_version :\n pprint.pprint(version_dict)\n\nif __name__ == '__main__':\n curr_dir = os.path.dirname(os.path.realpath(__file__))\n\n class Args(object):\n if args.dry_run_onnx_ops:\n op_def = StringIO()\n else:\n op_def_file_path = os.path.join(curr_dir, 'ONNXOps.td.inc')\n op_def = io.open(op_def_file_path, 'w', newline='')\n\n if args.dry_run_op_build_table:\n op_importer = StringIO()\n else:\n op_importer_file_path = os.path.join(curr_dir, 'OpBuildTable.inc')\n op_importer = io.open(op_importer_file_path, 'w', newline='')\n main(Args)\n\n if args.dry_run_onnx_ops:\n sys.stdout.write(Args.op_def.getvalue())\n if args.dry_run_op_build_table:\n sys.stdout.write(Args.op_importer.getvalue())\n"
] | [
[
"numpy.round"
]
] |
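In the gen_onnx_mlir.py entry above, parse_type_str maps ONNX type strings such as tensor(float16) to TableGen types by plain substring replacement, which is exactly why it sorts the mapping longest-key-first ('float16' before 'float', 'uint16' before 'int16'). A small sketch of why that ordering matters (a toy mapping for illustration, not the script's full table):

# toy subset of the onnx -> tablegen mapping, deliberately listed shortest-key-first
mapping = {'float': 'F32', 'float16': 'F16', 'tensor': 'TensorOf', '(': '<[', ')': ']>'}

def substitute(s, pairs):
    for key, val in pairs:
        s = s.replace(key, val)
    return s

# naive insertion order: 'float' fires inside 'float16' first,
# leaving the garbled 'TensorOf<[F3216]>'
naive = substitute('tensor(float16)', mapping.items())
# longest-key-first order produces the intended 'TensorOf<[F16]>'
longest_first = sorted(mapping.items(), key=lambda kv: len(kv[0]), reverse=True)
fixed = substitute('tensor(float16)', longest_first)
print(naive, fixed)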
t2hk/scdv_glove_elasticsearch | [
"41cd336decf1e14e77439caaa26f64edf28ce42b"
] | [
"get_similar_words_triples.py"
] | [
"from gensim.models import KeyedVectors\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport matplotlib.font_manager as fm\nimport pandas as pd\n\nglove_vector_file = \"vectors.txt\"\ngensim_glove_vector_file = \"gensim_glove_vectors.txt\"\ntop_k = 10\n\nwords_triple_file = 'similarity_words.ttl'\n\n# GloVeの単語ベクトルファイルを読み込み、単語数とベクトルサイズを付与した処理用のファイルを作成する。\nvectors = pd.read_csv(glove_vector_file, delimiter=' ', index_col=0, header=None)\n\nvocab_count = vectors.shape[0] # 単語数\nnum_features = vectors.shape[1] # 次元数\n\nprint(\"単語数:{} 次元数:{}\".format(vocab_count, num_features))\n\nglove_vectors = KeyedVectors.load_word2vec_format(gensim_glove_vector_file, binary=False)\nwords = list(glove_vectors.vocab.keys())\n\nsim_words_list = []\n\nwith open(words_triple_file, 'w') as f:\n for word in words:\n sim_words = glove_vectors.most_similar(word, [], top_k)\n \n for sim_word in sim_words:\n triple = '\"{}\" owl:equivalentClass \"{}\"'.format(word, sim_word[0])\n \n sim_words_list.append(triple)\n f.writelines(triple + '\\n')\n\n\nlen(sim_words_list)\n"
] | [
[
"pandas.read_csv"
]
] |
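The GloVe entry above loads gensim_glove_vectors.txt with KeyedVectors.load_word2vec_format, and the word2vec text format differs from raw GloVe output only in requiring a 'vocab_count vector_size' header line. A sketch of that conversion step under that assumption (file names mirror the entry but the helper is hypothetical; gensim also ships gensim.scripts.glove2word2vec for the same job):

def glove_to_word2vec(glove_path, out_path):
    # word2vec text format = GloVe text format plus a "<vocab> <dims>" header line
    with open(glove_path, encoding='utf-8') as f:
        lines = f.readlines()
    vocab_count = len(lines)
    num_features = len(lines[0].rstrip().split(' ')) - 1  # first token is the word itself
    with open(out_path, 'w', encoding='utf-8') as f:
        f.write('{} {}\n'.format(vocab_count, num_features))
        f.writelines(lines)

glove_to_word2vec('vectors.txt', 'gensim_glove_vectors.txt')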
microsetta/microsetta-admin | [
"1ba6787c0315a74d50cafd722dbbe044d507c07f"
] | [
"microsetta_admin/server.py"
] | [
"import jwt\nfrom flask import render_template, Flask, request, session, send_file\nimport secrets\nfrom datetime import datetime\nimport io\n\nfrom jwt import PyJWTError\nfrom werkzeug.exceptions import BadRequest\nfrom werkzeug.utils import redirect\nimport pandas as pd\n\nfrom microsetta_admin import metadata_util, upload_util\nfrom microsetta_admin.config_manager import SERVER_CONFIG\nfrom microsetta_admin._api import APIRequest\nimport importlib.resources as pkg_resources\n\nTOKEN_KEY_NAME = 'token'\nSEND_EMAIL_CHECKBOX_DEFAULT_NAME = 'send_email'\n\nPUB_KEY = pkg_resources.read_text(\n 'microsetta_admin',\n \"authrocket.pubkey\")\n\nDUMMY_SELECT_TEXT = '-------'\n\nRECEIVED_TYPE_DROPDOWN = \\\n [DUMMY_SELECT_TEXT, \"Blood (skin prick)\", \"Saliva\", \"Stool\",\n \"Sample Type Unclear (Swabs Included)\"]\n\nVALID_STATUS = \"sample-is-valid\"\nNO_SOURCE_STATUS = \"no-associated-source\"\nNO_ACCOUNT_STATUS = \"no-registered-account\"\nNO_COLLECTION_INFO_STATUS = \"no-collection-info\"\nINCONSISTENT_SAMPLE_STATUS = \"sample-has-inconsistencies\"\nUNKNOWN_VALIDITY_STATUS = \"received-unknown-validity\"\n\nSTATUS_OPTIONS = [DUMMY_SELECT_TEXT, VALID_STATUS, NO_SOURCE_STATUS,\n NO_ACCOUNT_STATUS, NO_COLLECTION_INFO_STATUS,\n INCONSISTENT_SAMPLE_STATUS, UNKNOWN_VALIDITY_STATUS]\n\nAPI_PROJECTS_URL = '/api/admin/projects'\n\n\ndef handle_pyjwt(pyjwt_error):\n # PyJWTError (Aka, anything wrong with token) will force user to log out\n # and log in again\n return redirect('/logout')\n\n\ndef parse_jwt(token):\n \"\"\"\n Raises\n ------\n jwt.PyJWTError\n If the token is invalid\n \"\"\"\n decoded = jwt.decode(token, PUB_KEY, algorithms=['RS256'], verify=True)\n return decoded\n\n\ndef build_login_variables():\n # Anything that renders sitebase.html must pass down these variables to\n # jinja2\n token_info = None\n if TOKEN_KEY_NAME in session:\n # If user leaves the page open, the token can expire before the\n # session, so if our token goes back we need to force them to login\n # again.\n token_info = parse_jwt(session[TOKEN_KEY_NAME])\n\n vars = {\n 'endpoint': SERVER_CONFIG[\"endpoint\"],\n 'ui_endpoint': SERVER_CONFIG[\"ui_endpoint\"],\n 'authrocket_url': SERVER_CONFIG[\"authrocket_url\"]\n }\n if token_info is not None:\n vars['email'] = token_info['email']\n return vars\n\n\ndef build_app():\n # Create the application instance\n app = Flask(__name__)\n\n flask_secret = SERVER_CONFIG[\"FLASK_SECRET_KEY\"]\n if flask_secret is None:\n print(\"WARNING: FLASK_SECRET_KEY must be set to run with gUnicorn\")\n flask_secret = secrets.token_urlsafe(16)\n app.secret_key = flask_secret\n app.config['SESSION_TYPE'] = 'memcached'\n app.config['SESSION_COOKIE_NAME'] = 'session-microsetta-admin'\n\n # Set mapping from exception type to response code\n app.register_error_handler(PyJWTError, handle_pyjwt)\n\n return app\n\n\napp = build_app()\n\n\[email protected]_processor\ndef utility_processor():\n def format_timestamp(timestamp_str):\n if not timestamp_str:\n return \"None\"\n datetime_obj = datetime.fromisoformat(timestamp_str)\n return datetime_obj.strftime(\"%Y %B %d %H:%M:%S\")\n return dict(format_timestamp=format_timestamp)\n\n\[email protected]('/')\ndef home():\n return render_template('sitebase.html', **build_login_variables())\n\n\[email protected]('/search', methods=['GET'])\ndef search():\n return _search()\n\n\[email protected]('/search/sample', methods=['GET', 'POST'])\ndef search_sample():\n return _search('samples')\n\n\[email protected]('/search/kit', methods=['GET', 'POST'])\ndef 
search_kit():\n return _search('kit')\n\n\[email protected]('/search/email', methods=['GET', 'POST'])\ndef search_email():\n return _search('account')\n\n\ndef _search(resource=None):\n if request.method == 'GET':\n return render_template('search.html', **build_login_variables())\n elif request.method == 'POST':\n query = request.form['search_%s' % resource]\n\n status, result = APIRequest.get(\n '/api/admin/search/%s/%s' % (resource, query))\n\n if status == 404:\n result = {'error_message': \"Query not found\"}\n return render_template('search_result.html',\n **build_login_variables(),\n result=result), 200\n elif status == 200:\n return render_template('search_result.html',\n **build_login_variables(),\n resource=resource,\n result=result), 200\n else:\n return result\n\n\ndef _translate_nones(a_dict, do_none_to_str):\n # Note: this ISN'T a deep copy. This function is NOT set up\n # for recursing through a multi-layer dictionary\n result = a_dict.copy()\n for k, v in result.items():\n if do_none_to_str and v is None:\n result[k] = \"\"\n elif not do_none_to_str and v == '':\n result[k] = None\n return result\n\n\ndef _get_projects(include_stats, is_active):\n projects_uri = API_PROJECTS_URL + f\"?include_stats={include_stats}\"\n if is_active is not None:\n projects_uri += f\"&is_active={is_active}\"\n status, projects_output = APIRequest.get(projects_uri)\n\n if status >= 400:\n result = {'error_message': f\"Unable to load project list: \"\n f\"{projects_uri}\"}\n else:\n cleaned_projects = [_translate_nones(x, True) for x in\n projects_output]\n # if we're not using full project stats, sort\n # alphabetically by project name\n if not include_stats:\n cleaned_projects = sorted(cleaned_projects,\n key=lambda k: k['project_name'])\n result = {'projects': cleaned_projects}\n\n return status, result\n\n\[email protected]('/manage_projects', methods=['GET', 'POST'])\ndef manage_projects():\n result = None\n is_active = request.args.get('is_active', None)\n if request.method == 'POST':\n model = {x: request.form[x] for x in request.form}\n project_id = model.pop('project_id')\n model['is_microsetta'] = model.get('is_microsetta', '') == 'true'\n model['bank_samples'] = model.get('bank_samples', '') == 'true'\n model = _translate_nones(model, False)\n\n if project_id.isdigit():\n # update (put) an existing project\n action = \"update\"\n status, api_output = APIRequest.put(\n '{}/{}'.format(API_PROJECTS_URL, project_id),\n json=model)\n else:\n # create (post) a new project\n action = \"create\"\n status, api_output = APIRequest.post(\n API_PROJECTS_URL, json=model)\n\n # if api post or put failed\n if status >= 400:\n result = {'error_message': f'Unable to {action} project.'}\n # end if post\n\n # if the above work (if any) didn't produce an error message, return\n # the projects list\n if result is None:\n _, result = _get_projects(include_stats=True, is_active=is_active)\n\n return render_template('manage_projects.html',\n **build_login_variables(),\n result=result), 200\n\n\[email protected]('/email_stats', methods=['GET', 'POST'])\ndef email_stats():\n _, result = _get_projects(include_stats=False, is_active=True)\n projects = result.get('projects')\n\n if request.method == 'GET':\n project = request.args.get('project', None)\n email = request.args.get('email')\n if email is None:\n # They want to search for emails, show them the search dialog\n return render_template(\"email_stats_pulldown.html\",\n **build_login_variables(),\n resource=None,\n search_error=None,\n projects=projects)\n 
emails = [email, ]\n elif request.method == 'POST':\n project = request.form.get('project', None)\n emails, upload_err = upload_util.parse_request_csv_col(\n request,\n 'file',\n 'email'\n )\n if upload_err is not None:\n return render_template('email_stats_pulldown.html',\n **build_login_variables(),\n resource=None,\n search_error=[{'error': upload_err}],\n projects=projects)\n else:\n raise BadRequest()\n\n if project == \"\":\n project = None\n\n # de-duplicate\n emails = list({e.lower() for e in emails})\n\n status, result = APIRequest.post(\n '/api/admin/account_email_summary',\n json={\n \"emails\": emails,\n \"project\": project\n })\n\n if status != 200:\n return render_template('email_stats_pulldown.html',\n search_error=[{'error': result}],\n resource=None,\n **build_login_variables(),\n projects=projects)\n\n # At a minimum, our table will display these columns.\n # We may show additional info depending on what comes back from the request\n base_data_template = {\n 'email': 'XXX',\n 'summary': 'XXX',\n 'account_id': 'XXX',\n 'creation_time': 'XXX',\n 'kit_name': 'XXX',\n 'project': 'XXX',\n 'unclaimed-samples-in-kit': 0,\n 'never-scanned': 0,\n 'sample-is-valid': 0,\n 'no-associated-source': 0,\n 'no-registered-account': 0,\n 'no-collection-info': 0,\n 'sample-has-inconsistencies': 0,\n 'received-unknown-validity': 0\n }\n\n df = pd.DataFrame([base_data_template] + result)\n df = df.drop(0) # remove the template row\n numeric_cols = [\n \"unclaimed-samples-in-kit\", \"never-scanned\", \"sample-is-valid\",\n \"no-associated-source\", \"no-registered-account\", \"no-collection-info\",\n \"sample-has-inconsistencies\", \"received-unknown-validity\"\n ]\n df[numeric_cols] = df[numeric_cols].apply(pd.to_numeric)\n df[numeric_cols] = df[numeric_cols].fillna(0)\n\n def urlify_account_id(id_):\n if pd.isnull(id_):\n return \"No associated account\"\n else:\n ui_endpoint = SERVER_CONFIG['ui_endpoint']\n account_url = f\"{ui_endpoint}/accounts/{id_}\"\n return f'<a target=\"_blank\" href=\"{account_url}\">{id_}</a>'\n\n # see https://stackoverflow.com/questions/20035518/insert-a-link-inside-a-pandas-table # noqa\n df['account_id'] = df[\"account_id\"].apply(urlify_account_id)\n return render_template(\"email_stats_pulldown.html\",\n search_error=None,\n resource=df,\n **build_login_variables(),\n projects=projects)\n\n\[email protected]('/per_sample_summary', methods=['GET', 'POST'])\ndef per_sample_summary():\n # get a list of all projects in the system\n _, result = _get_projects(include_stats=False, is_active=True)\n projects = result.get('projects')\n\n # filter out any projects that don't belong to Microsetta\n projects = [x for x in projects if x['is_microsetta'] is True]\n\n # build a list of dictionaries with just the project id and the project\n # name.\n projects = [{'project_name': x['project_name'],\n 'project_id': x['project_id']} for x in projects]\n\n # determine if user wants sample ids stripped\n strip_sampleid = request.form.get('strip_sampleid', 'off')\n strip_sampleid = strip_sampleid.lower() == 'on'\n\n if request.method == 'GET':\n # If user arrived via GET then they are either here w/out\n # querying and they simply need the default webpage, or they are\n # querying with either a list of barcodes, or with a project id.\n\n # look for both parameters to determine which state we are in.\n sample_barcode = request.args.get('sample_barcode')\n project_id = request.args.get('project_id')\n\n if sample_barcode is None and project_id is None:\n # user just wants the 
default page.\n return render_template('per_sample_summary.html',\n resource=None,\n projects=projects,\n **build_login_variables())\n\n if project_id is not None:\n # user wants to get summaries on all samples in a project.\n payload = {'project_id': project_id}\n status, result = APIRequest.post('/api/admin/account_barcode_summa'\n 'ry?strip_sampleid=False',\n json=payload)\n\n if status == 200:\n if result['partial_result'] is True:\n unprocessed_barcodes = result['unprocessed_barcodes']\n else:\n unprocessed_barcodes = None\n\n resource = pd.DataFrame(result['samples'])\n order = ['sampleid', 'project', 'account-email',\n 'source-email', 'source-type', 'site-sampled',\n 'sample-status', 'sample-received', 'ffq-taken',\n 'ffq-complete', 'vioscreen_username']\n order.extend(sorted(set(resource.columns) - set(order)))\n resource = resource[order]\n if unprocessed_barcodes:\n return render_template('per_sample_summary.html',\n resource=resource,\n projects=projects,\n error_message=\"Too many barcodes. S\"\n \"erver processed only\"\n \" the first 1000.\",\n **build_login_variables())\n else:\n return render_template('per_sample_summary.html',\n resource=resource,\n projects=projects,\n **build_login_variables())\n\n else:\n return render_template('per_sample_summary.html',\n resource=None,\n projects=projects,\n error_message=result,\n **build_login_variables())\n\n # if we are here then the user is querying using barcodes and we\n # simply need to set up the query below to perform.\n sample_barcodes = [sample_barcode, ]\n else:\n # assume POST, since there are only two methods defined in route.\n # if we are here, it is because the user is querying using an uploaded\n # file containing sample names.\n sample_barcodes, err = upload_util.parse_request_csv_col(request,\n 'file',\n 'sample_name')\n if err is not None:\n # there was an error. abort early.\n return render_template('per_sample_summary.html',\n resource=None,\n projects=projects,\n **build_login_variables(),\n search_error=[{'error': err}])\n\n # perform the main query.\n payload = {'sample_barcodes': sample_barcodes}\n status, result = APIRequest.post('/api/admin/account_barcode_summary?stri'\n 'p_sampleid=%s' % str(strip_sampleid),\n json=payload)\n\n if status == 200:\n if result['partial_result'] is True:\n unprocessed_barcodes = result['unprocessed_barcodes']\n else:\n unprocessed_barcodes = None\n resource = pd.DataFrame(result['samples'])\n order = ['sampleid', 'project', 'account-email', 'source-email',\n 'source-type', 'site-sampled', 'sample-status',\n 'sample-received', 'ffq-taken', 'ffq-complete',\n 'vioscreen_username']\n order.extend(sorted(set(resource.columns) - set(order)))\n resource = resource[order]\n\n if unprocessed_barcodes:\n return render_template('per_sample_summary.html',\n resource=resource,\n projects=projects,\n error_message=\"Too many barcodes. 
S\"\n \"erver processed only\"\n \" the first 1000.\",\n **build_login_variables())\n else:\n return render_template('per_sample_summary.html',\n resource=resource,\n projects=projects,\n **build_login_variables())\n else:\n return render_template('per_sample_summary.html',\n resource=None,\n projects=projects,\n error_message=result,\n **build_login_variables())\n\n\ndef _get_by_sample_barcode(sample_barcodes, strip_sampleid, projects):\n payload = {'sample_barcodes': sample_barcodes}\n status, result = APIRequest.post('/api/admin/account_barcode_summary?'\n 'strip_sampleid=%s' % str(strip_sampleid),\n json=payload)\n if status == 200:\n if result['partial_result'] is True:\n unprocessed_barcodes = result['unprocessed_barcodes']\n else:\n unprocessed_barcodes = None\n\n resource = pd.DataFrame(result['samples'])\n order = ['sampleid', 'project', 'account-email', 'source-email',\n 'source-type', 'site-sampled', 'sample-status',\n 'sample-received', 'ffq-taken', 'ffq-complete',\n 'vioscreen_username']\n order.extend(sorted(set(resource.columns) - set(order)))\n resource = resource[order]\n\n if unprocessed_barcodes:\n return render_template('per_sample_summary.html',\n resource=resource,\n projects=projects,\n error_message=\"Too many barcodes. S\"\n \"erver processed only\"\n \" the first 1000.\",\n **build_login_variables())\n else:\n return render_template('per_sample_summary.html',\n resource=resource,\n projects=projects,\n **build_login_variables())\n else:\n return render_template('per_sample_summary.html',\n resource=None,\n projects=projects,\n error_message=result,\n **build_login_variables())\n\n\[email protected]('/create_kits', methods=['GET', 'POST'])\ndef new_kits():\n _, result = _get_projects(include_stats=False, is_active=True)\n projects = result.get('projects')\n\n if request.method == 'GET':\n return render_template('create_kits.html',\n error_message=result.get('error_message'),\n projects=projects,\n **build_login_variables())\n\n elif request.method == 'POST':\n num_kits = int(request.form['num_kits'])\n num_samples = int(request.form['num_samples'])\n prefix = request.form['prefix']\n selected_project_ids = request.form.getlist('project_ids')\n payload = {'number_of_kits': num_kits,\n 'number_of_samples': num_samples,\n 'project_ids': selected_project_ids}\n if prefix:\n payload['kit_id_prefix'] = prefix\n\n status, result = APIRequest.post(\n '/api/admin/create/kits',\n json=payload)\n\n if status != 201:\n return render_template('create_kits.html',\n error_message='Failed to create kits',\n projects=projects,\n **build_login_variables())\n\n # StringIO/BytesIO based off https://stackoverflow.com/a/45111660\n buf = io.StringIO()\n payload = io.BytesIO()\n\n # explicitly expand out the barcode detail\n kits = pd.DataFrame(result['created'])\n for i in range(num_samples):\n kits['barcode_%d' % (i+1)] = [r['sample_barcodes'][i]\n for _, r in kits.iterrows()]\n kits.drop(columns='sample_barcodes', inplace=True)\n\n kits.to_csv(buf, sep=',', index=False, header=True)\n payload.write(buf.getvalue().encode('utf-8'))\n payload.seek(0)\n buf.close()\n\n stamp = datetime.now().strftime('%d%b%Y-%H%M')\n fname = f'kits-{stamp}.csv'\n\n return send_file(payload, as_attachment=True,\n attachment_filename=fname,\n mimetype='text/csv')\n\n\ndef _check_sample_status(extended_barcode_info):\n warning = None\n in_microsetta_project = any(\n [x['is_microsetta'] for x in extended_barcode_info['projects_info']])\n\n # one warning to rule them all; check in order of precendence\n if not 
in_microsetta_project:\n warning = UNKNOWN_VALIDITY_STATUS\n elif extended_barcode_info['account'] is None:\n warning = NO_ACCOUNT_STATUS\n elif extended_barcode_info['source'] is None:\n warning = NO_SOURCE_STATUS\n # collection datetime is used as the bellwether for the whole\n # set of sample collection info because it is relevant to all\n # kinds of samples (whereas previously used field, sample site, is not\n # filled when environmental samples are returned).\n elif extended_barcode_info['sample'].get('datetime_collected') is None:\n warning = NO_COLLECTION_INFO_STATUS\n\n return warning\n\n\n# Set up handlers for the cases,\n# GET to view the page,\n# POST to update info for a barcode -AND (possibly)-\n# email end user about the change in sample status,\ndef _scan_get(sample_barcode, update_error):\n # If there is no sample_barcode in the GET\n # they still need to enter one in the box, so show empty page\n if sample_barcode is None:\n return render_template('scan.html', **build_login_variables())\n\n # Assuming there is a sample barcode, grab that sample's information\n status, result = APIRequest.get(\n '/api/admin/search/samples/%s' % sample_barcode)\n\n # If we successfully grab it, show the page to the user\n if status == 200:\n # Process result in python because its easier than jinja2.\n status_warning = _check_sample_status(result)\n\n # check the latest scan to find the default sample_status for form\n latest_status = DUMMY_SELECT_TEXT\n if result['latest_scan']:\n latest_status = result['latest_scan']['sample_status']\n\n account = result.get('account')\n events = []\n if account:\n event_status, event_result = APIRequest.get(\n '/api/admin/events/accounts/%s' % account['id']\n )\n if event_status != 200:\n raise Exception(\"Couldn't pull event history\")\n\n events = event_result\n\n return render_template(\n 'scan.html',\n **build_login_variables(),\n barcode_info=result[\"barcode_info\"],\n projects_info=result['projects_info'],\n scans_info=result['scans_info'],\n latest_status=latest_status,\n dummy_status=DUMMY_SELECT_TEXT,\n status_options=STATUS_OPTIONS,\n send_email=session.get(SEND_EMAIL_CHECKBOX_DEFAULT_NAME, True),\n sample_info=result['sample'],\n extended_info=result,\n status_warning=status_warning,\n update_error=update_error,\n received_type_dropdown=RECEIVED_TYPE_DROPDOWN,\n source=result['source'],\n events=events\n )\n elif status == 401:\n # If we fail due to unauthorized, need the user to log in again\n return redirect('/logout')\n elif status == 404:\n # If we fail due to not found, need to tell the user to pick a diff\n # barcode\n return render_template(\n 'scan.html',\n **build_login_variables(),\n search_error=\"Barcode %s Not Found\" % sample_barcode,\n update_error=update_error,\n received_type_dropdown=RECEIVED_TYPE_DROPDOWN\n )\n else:\n raise BadRequest()\n\n\ndef _scan_post_update_info(sample_barcode,\n technician_notes,\n sample_status,\n action,\n issue_type,\n template,\n received_type,\n recorded_type):\n\n ###\n # Bugfix Part 1 for duplicate emails being sent. Theory is that client is\n # out of sync due to hitting back button after a scan has changed\n # state.\n # Can't test if client is up to date without ETags, so for right now,\n # we just validate whether or not they should send an email, duplicating\n # the client log. 
(This can still break with multiple admin clients,\n # but that is unlikely at the moment.)\n latest_status = None\n # TODO: Replace this with ETags!\n status, result = APIRequest.get(\n '/api/admin/search/samples/%s' % sample_barcode)\n\n if result['latest_scan']:\n latest_status = result['latest_scan']['sample_status']\n ###\n\n # Do the actual update\n status, response = APIRequest.post(\n '/api/admin/scan/%s' % sample_barcode,\n json={\n \"sample_status\": sample_status,\n \"technician_notes\": technician_notes\n }\n )\n\n # if the update failed, keep track of the error so it can be displayed\n if status != 201:\n update_error = response\n return _scan_get(sample_barcode, update_error)\n else:\n update_error = None\n\n # If we're not supposed to send an email, go back to GET\n if action != \"send_email\":\n return _scan_get(sample_barcode, update_error)\n\n ###\n # Bugfix Part 2 for duplicate emails being sent.\n if sample_status == latest_status:\n # This is what we'll hit if javascript thinks it's updating status\n # but is out of sync with the database.\n update_error = \"Ignoring Send Email, sample_status would \" \\\n \"not have been updated (Displayed page was out of \" \\\n \"sync)\"\n return _scan_get(sample_barcode, update_error)\n ###\n\n # This is what we'll hit if there are no email templates to send for\n # the new sample status (or if we screw up javascript side :D )\n if template is None:\n update_error = \"Cannot Send Email: No Issue Type Specified \" \\\n \"(or no issue types available)\"\n return _scan_get(sample_barcode, update_error)\n\n # Otherwise, send out an email to the end user\n status, response = APIRequest.post(\n '/api/admin/email',\n json={\n \"issue_type\": issue_type,\n \"template\": template,\n \"template_args\": {\n \"sample_barcode\": sample_barcode,\n \"recorded_type\": recorded_type,\n \"received_type\": received_type\n }\n }\n )\n\n # if the email failed to send, keep track of the error\n # so it can be displayed\n if status != 200:\n update_error = response\n else:\n update_error = None\n\n return _scan_get(sample_barcode, update_error)\n\n\[email protected]('/scan', methods=['GET', 'POST'])\ndef scan():\n # Now that the handlers are set up, parse the request to determine what\n # to do.\n\n # If its a get, grab the sample_barcode from the query string rather than\n # form parameters\n if request.method == 'GET':\n sample_barcode = request.args.get('sample_barcode')\n return _scan_get(sample_barcode, None)\n\n # If its a post, make the changes, then refresh the page\n if request.method == 'POST':\n # Without some extra ajax, we can't persist the send_email checkbox\n # until they actually post the form\n send_email = request.form.get('send_email', False)\n session[SEND_EMAIL_CHECKBOX_DEFAULT_NAME] = send_email\n\n sample_barcode = request.form['sample_barcode']\n technician_notes = request.form['technician_notes']\n sample_status = request.form['sample_status']\n\n action = request.form.get('action')\n issue_type = request.form.get('issue_type')\n template = request.form.get('template')\n received_type = request.form.get('received_type')\n recorded_type = request.form.get('recorded_type')\n\n return _scan_post_update_info(sample_barcode,\n technician_notes,\n sample_status,\n action,\n issue_type,\n template,\n received_type,\n recorded_type)\n\n\[email protected]('/metadata_pulldown', methods=['GET', 'POST'])\ndef metadata_pulldown():\n allow_missing = request.form.get('allow_missing_samples', False)\n\n if request.method == 'GET':\n 
sample_barcode = request.args.get('sample_barcode')\n # If there is no sample_barcode in the GET\n # they still need to enter one in the box, so show empty page\n if sample_barcode is None:\n return render_template('metadata_pulldown.html',\n **build_login_variables())\n sample_barcodes = [sample_barcode]\n elif request.method == 'POST':\n sample_barcodes, upload_err = upload_util.parse_request_csv_col(\n request,\n 'file',\n 'sample_name'\n )\n if upload_err is not None:\n return render_template('metadata_pulldown.html',\n **build_login_variables(),\n search_error=[{'error': upload_err}])\n else:\n raise BadRequest()\n\n df, errors = metadata_util.retrieve_metadata(sample_barcodes)\n\n # Strangely, these api requests are returning an html error page rather\n # than a machine parseable json error response object with message.\n # This is almost certainly due to error handling for the cohosted minimal\n # client. In future, we should just pass down whatever the api says here.\n if len(errors) == 0 or allow_missing:\n df = metadata_util.drop_private_columns(df)\n\n # TODO: Streaming direct from pandas is a pain. Need to search for\n # better ways to iterate and chunk this file as we generate it\n strstream = io.StringIO()\n df.to_csv(strstream, sep='\\t', index=True, header=True)\n\n # TODO: utf-8 or utf-16 encoding??\n bytestream = io.BytesIO()\n bytestream.write(strstream.getvalue().encode('utf-8'))\n bytestream.seek(0)\n\n strstream.close()\n return send_file(bytestream,\n mimetype=\"text/tab-separated-values\",\n as_attachment=True,\n attachment_filename=\"metadata_pulldown.tsv\",\n add_etags=False,\n cache_timeout=None,\n conditional=False,\n last_modified=None,\n )\n else:\n\n return render_template('metadata_pulldown.html',\n **build_login_variables(),\n info={'barcodes': sample_barcodes},\n search_error=errors)\n\n\[email protected]('/submit_daklapack_order', methods=['GET'])\ndef submit_daklapack_order():\n error_msg_key = \"error_message\"\n\n def return_error(msg):\n return render_template('submit_daklapack_order.html',\n **build_login_variables(),\n error_message=msg)\n\n status, dak_articles_output = APIRequest.get(\n '/api/admin/daklapack_articles')\n if status >= 400:\n return return_error(\"Unable to load daklapack articles list.\")\n\n status, projects_output = _get_projects(include_stats=False,\n is_active=True)\n if status >= 400:\n return return_error(projects_output[error_msg_key])\n\n return render_template('submit_daklapack_order.html',\n **build_login_variables(),\n error_message=None,\n dummy_status=DUMMY_SELECT_TEXT,\n dak_articles=dak_articles_output,\n contact_phone_number=SERVER_CONFIG[\n \"order_contact_phone\"],\n projects=projects_output['projects'])\n\n\[email protected]('/submit_daklapack_order', methods=['POST'])\ndef post_submit_daklapack_order():\n def return_error(msg):\n return render_template('submit_daklapack_order.html',\n **build_login_variables(),\n error_message=msg)\n\n error_message = success_submissions = failure_submissions = headers = None\n expected_headers = [\"firstName\", \"lastName\", \"address1\", \"insertion\",\n \"address2\", \"postalCode\", \"city\", \"state\",\n \"country\", \"countryCode\"]\n\n # get required fields; cast where expected by api\n phone_number = request.form['contact_phone_number']\n project_ids_list = list(map(int, request.form.getlist('projects')))\n dak_article_code = request.form['dak_article_code']\n article_quantity = int(request.form['quantity'])\n file = request.files['addresses_file']\n\n # get optional 
fields or defaults\n planned_send_str = request.form.get('planned_send_date')\n planned_send_date = planned_send_str if planned_send_str else None\n\n description = request.form.get('description')\n fedex_ref_1 = request.form.get('fedex_ref_1')\n fedex_ref_2 = request.form.get('fedex_ref_2')\n fedex_ref_3 = request.form.get('fedex_ref_3')\n\n try:\n # NB: import everything as a string so that zip codes beginning with\n # zero (e.g., 06710) don't get silently cast to numbers\n if file.filename.endswith('xls'):\n addresses_df = pd.read_excel(file, dtype=str)\n elif file.filename.endswith('xlsx'):\n addresses_df = pd.read_excel(file, engine='openpyxl', dtype=str)\n else:\n raise ValueError(f\"Unrecognized extension on putative excel \"\n f\"filename: {file.filename}\")\n\n headers = list(addresses_df.columns)\n except Exception as e: # noqa\n return return_error('Could not parse addresses file')\n\n if headers != expected_headers:\n return return_error(f\"Received column names {headers} do \"\n f\"not match expected column names\"\n f\" {expected_headers}\")\n\n # add (same) contact phone number to every address\n addresses_df['phone'] = phone_number\n\n addresses_df = addresses_df.fillna(\"\")\n temp_dict = addresses_df.to_dict(orient='index')\n addresses_list = [temp_dict[n] for n in range(len(temp_dict))]\n\n status, post_output = APIRequest.post(\n '/api/admin/daklapack_orders',\n json={\n \"project_ids\": project_ids_list,\n \"article_code\": dak_article_code,\n \"quantity\": article_quantity,\n \"addresses\": addresses_list,\n \"planned_send_date\": planned_send_date,\n \"description\": description,\n \"fedex_ref_1\": fedex_ref_1,\n \"fedex_ref_2\": fedex_ref_2,\n \"fedex_ref_3\": fedex_ref_3\n }\n )\n\n # if the post failed, keep track of the error so it can be displayed\n if status != 200:\n error_message = post_output\n else:\n order_submissions = post_output[\"order_submissions\"]\n success_submissions = [x for x in order_submissions if\n x[\"order_success\"]]\n failure_submissions = [x for x in order_submissions if not\n x[\"order_success\"]]\n\n return render_template('submit_daklapack_order.html',\n **build_login_variables(),\n error_message=error_message,\n success_submissions=success_submissions,\n failure_submissions=failure_submissions)\n\n\[email protected]('/authrocket_callback')\ndef authrocket_callback():\n token = request.args.get('token')\n session[TOKEN_KEY_NAME] = token\n return redirect(\"/\")\n\n\[email protected]('/logout')\ndef logout():\n if TOKEN_KEY_NAME in session:\n del session[TOKEN_KEY_NAME]\n return redirect(\"/\")\n\n\n# If we're running in stand alone mode, run the application\nif __name__ == '__main__':\n if SERVER_CONFIG[\"ssl_cert_path\"] and SERVER_CONFIG[\"ssl_key_path\"]:\n ssl_context = (\n SERVER_CONFIG[\"ssl_cert_path\"], SERVER_CONFIG[\"ssl_key_path\"]\n )\n else:\n ssl_context = None\n\n app.run(\n port=SERVER_CONFIG['port'],\n debug=SERVER_CONFIG['debug'],\n ssl_context=ssl_context\n )\n"
] | [
[
"pandas.isnull",
"pandas.DataFrame",
"pandas.read_excel"
]
] |
JasonWayne/deep-learning-snippets | [
"7c64e065752fcbb902494d757a41140f42facf05"
] | [
"frameworks/tensorflow/print_tensor_in_ckpt.py"
] | [
"'''\ncommon usage: \n 1. put this script in ckpt folder\n 2. python print_tensor_in_ckpt.py > tensors.txt\n'''\n# ref: https://stackoverflow.com/questions/38218174/how-do-i-find-the-variable-names-and-values-that-are-saved-in-a-checkpoint\nimport tensorflow as tf\nfrom tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file\n\n\nlatest_ckp = tf.train.latest_checkpoint('./')\nprint_tensors_in_checkpoint_file(latest_ckp, all_tensors=True, tensor_name='')\n"
] | [
[
"tensorflow.python.tools.inspect_checkpoint.print_tensors_in_checkpoint_file",
"tensorflow.train.latest_checkpoint"
]
] |
TangZhenchaoTZC/Keras-mask-detection | [
"325679d06a12a90b2552ed7d447298a23e3b9d57"
] | [
"fasterRCNNtrain/loss_and_gen.py"
] | [
"\"\"\"fasterRCNN训练的损失函数与数据生成器\"\"\"\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom keras import backend as K\nimport keras\nimport tensorflow as tf\nimport numpy as np\nfrom random import shuffle\nimport random\nfrom PIL import Image\nfrom keras.objectives import categorical_crossentropy\nfrom matplotlib.colors import rgb_to_hsv, hsv_to_rgb\n\nimport sys\nsys.path.append(\"..\")\nfrom net import RPN as RPN\n\n\ndef rand(a=0, b=1):\n return np.random.rand() * (b - a) + a\n\n\ndef cls_loss(ratio=3):\n def _cls_loss(y_true, y_pred):\n # y_true [batch_size, num_anchor, num_classes+1]\n # y_pred [batch_size, num_anchor, num_classes]\n labels = y_true\n anchor_state = y_true[:, :, -1] # -1 是需要忽略的, 0 是背景, 1 是存在目标\n classification = y_pred\n\n # 找出存在目标的先验框\n indices_for_object = tf.where(keras.backend.equal(anchor_state, 1))\n labels_for_object = tf.gather_nd(labels, indices_for_object)\n classification_for_object = tf.gather_nd(classification, indices_for_object)\n\n cls_loss_for_object = keras.backend.binary_crossentropy(labels_for_object, classification_for_object)\n\n # 找出实际上为背景的先验框\n indices_for_back = tf.where(keras.backend.equal(anchor_state, 0))\n labels_for_back = tf.gather_nd(labels, indices_for_back)\n classification_for_back = tf.gather_nd(classification, indices_for_back)\n\n # 计算每一个先验框应该有的权重\n cls_loss_for_back = keras.backend.binary_crossentropy(labels_for_back, classification_for_back)\n\n # 标准化,实际上是正样本的数量\n normalizer_pos = tf.where(keras.backend.equal(anchor_state, 1))\n normalizer_pos = keras.backend.cast(keras.backend.shape(normalizer_pos)[0], keras.backend.floatx())\n normalizer_pos = keras.backend.maximum(keras.backend.cast_to_floatx(1.0), normalizer_pos)\n\n normalizer_neg = tf.where(keras.backend.equal(anchor_state, 0))\n normalizer_neg = keras.backend.cast(keras.backend.shape(normalizer_neg)[0], keras.backend.floatx())\n normalizer_neg = keras.backend.maximum(keras.backend.cast_to_floatx(1.0), normalizer_neg)\n\n # 将所获得的loss除上正样本的数量\n cls_loss_for_object = keras.backend.sum(cls_loss_for_object) / normalizer_pos\n cls_loss_for_back = ratio * keras.backend.sum(cls_loss_for_back) / normalizer_neg\n\n # 总的loss\n loss = cls_loss_for_object + cls_loss_for_back\n\n return loss\n\n return _cls_loss\n\n\ndef smooth_l1(sigma=1.0):\n sigma_squared = sigma ** 2\n\n def _smooth_l1(y_true, y_pred):\n # y_true [batch_size, num_anchor, 4+1]\n # y_pred [batch_size, num_anchor, 4]\n regression = y_pred\n regression_target = y_true[:, :, :-1]\n anchor_state = y_true[:, :, -1]\n\n # 找到正样本\n indices = tf.where(keras.backend.equal(anchor_state, 1))\n regression = tf.gather_nd(regression, indices)\n regression_target = tf.gather_nd(regression_target, indices)\n\n # 计算 smooth L1 loss\n # f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma\n # |x| - 0.5 / sigma / sigma otherwise\n regression_diff = regression - regression_target\n regression_diff = keras.backend.abs(regression_diff)\n regression_loss = tf.where(\n keras.backend.less(regression_diff, 1.0 / sigma_squared),\n 0.5 * sigma_squared * keras.backend.pow(regression_diff, 2),\n regression_diff - 0.5 / sigma_squared\n )\n\n normalizer = keras.backend.maximum(1, keras.backend.shape(indices)[0])\n normalizer = keras.backend.cast(normalizer, dtype=keras.backend.floatx())\n loss = keras.backend.sum(regression_loss) / normalizer\n\n return loss\n\n return _smooth_l1\n\n\ndef class_loss_regr(num_classes):\n epsilon = 1e-4\n\n def class_loss_regr_fixed_num(y_true, y_pred):\n x = y_true[:, :, 4 * num_classes:] - y_pred\n 
x_abs = K.abs(x)\n        x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')\n        loss = 4 * K.sum(\n            y_true[:, :, :4 * num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(\n            epsilon + y_true[:, :, :4 * num_classes])\n        return loss\n\n    return class_loss_regr_fixed_num\n\n\ndef class_loss_cls(y_true, y_pred):\n    return K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))\n\n\ndef get_new_img_size(width, height, img_min_side=600):\n    if width <= height:\n        f = float(img_min_side) / width\n        resized_height = int(f * height)\n        resized_width = int(img_min_side)\n    else:\n        f = float(img_min_side) / height\n        resized_width = int(f * width)\n        resized_height = int(img_min_side)\n\n    return resized_width, resized_height\n\n\ndef get_img_output_length(width, height):\n    def get_output_length(input_length):\n        # input_length += 6\n        filter_sizes = [7, 3, 1, 1]\n        padding = [3, 1, 0, 0]\n        stride = 2\n        for i in range(4):\n            # input_length = (input_length - filter_size + stride) // stride\n            input_length = (input_length + 2 * padding[i] - filter_sizes[i]) // stride + 1\n        return input_length\n\n    return get_output_length(width), get_output_length(height)\n\n\nclass Generator(object):\n    def __init__(self, bbox_util, train_lines, num_classes, solid, solid_shape=[600, 600]):\n        self.bbox_util = bbox_util\n        self.train_lines = train_lines\n        self.train_batches = len(train_lines)\n        self.num_classes = num_classes\n        self.solid = solid\n        # used to fix the training image size to (600,600)\n        self.solid_shape = solid_shape\n\n    def get_random_data(self, annotation_line, jitter=.3, hue=.1, sat=1.5, val=1.5):\n        \"\"\"Data augmentation to improve model robustness\"\"\"\n        line = annotation_line.split()\n        image = Image.open(line[0])\n        iw, ih = image.size\n\n        # if solid=True, training images are forcibly resized\n        if self.solid:\n            w, h = self.solid_shape\n        else:\n            w, h = get_new_img_size(iw, ih)\n        box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])\n\n        # resize image\n        new_ar = w / h * rand(1 - jitter, 1 + jitter) / rand(1 - jitter, 1 + jitter)\n        scale = rand(.25, 2)\n        if new_ar < 1:\n            nh = int(scale * h)\n            nw = int(nh * new_ar)\n        else:\n            nw = int(scale * w)\n            nh = int(nw / new_ar)\n        image = image.resize((nw, nh), Image.BICUBIC)\n\n        # place image\n        dx = int(rand(0, w - nw))\n        dy = int(rand(0, h - nh))\n        new_image = Image.new('RGB', (w, h), (128, 128, 128))\n        new_image.paste(image, (dx, dy))\n        image = new_image\n\n        # flip image or not\n        flip = rand() < .5\n        if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)\n\n        # distort image\n        hue = rand(-hue, hue)\n        sat = rand(1, sat) if rand() < .5 else 1 / rand(1, sat)\n        val = rand(1, val) if rand() < .5 else 1 / rand(1, val)\n        x = rgb_to_hsv(np.array(image) / 255.)\n        x[..., 0] += hue\n        x[..., 0][x[..., 0] > 1] -= 1\n        x[..., 0][x[..., 0] < 0] += 1\n        x[..., 1] *= sat\n        x[..., 2] *= val\n        x[x > 1] = 1\n        x[x < 0] = 0\n        image_data = hsv_to_rgb(x) * 255  # numpy array, 0 to 255\n\n        # correct boxes\n        box_data = np.zeros((len(box), 5))\n        if len(box) > 0:\n            np.random.shuffle(box)\n            box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx\n            box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy\n            if flip: box[:, [0, 2]] = w - box[:, [2, 0]]\n            box[:, 0:2][box[:, 0:2] < 0] = 0\n            box[:, 2][box[:, 2] > w] = w\n            box[:, 3][box[:, 3] > h] = h\n            box_w = box[:, 2] - box[:, 0]\n            box_h = box[:, 3] - box[:, 1]\n            box = box[np.logical_and(box_w > 1, box_h > 1)]  # discard invalid box\n            box_data = np.zeros((len(box), 5))\n            box_data[:len(box)] = box\n        if len(box) == 0:\n            return image_data, []\n\n        if (box_data[:, :4] > 0).any():\n            return image_data, box_data\n 
else:\n            return image_data, []\n\n    def generate(self):\n        \"\"\"Data generator\"\"\"\n        while True:\n            # shuffle 2007_train.txt\n            shuffle(self.train_lines)\n            lines = self.train_lines\n            for annotation_line in lines:\n                # apply data augmentation to each line, i.e. each image: vary lighting, contrast, etc. to diversify the images and improve model robustness\n                # img is the augmented image, y is the object information\n                img, y = self.get_random_data(annotation_line)\n                height, width, _ = np.shape(img)\n\n                # skip if there is no object\n                if len(y) == 0:\n                    continue\n                # normalize the object information\n                boxes = np.array(y[:, :4], dtype=np.float32)\n                boxes[:, 0] = boxes[:, 0] / width\n                boxes[:, 1] = boxes[:, 1] / height\n                boxes[:, 2] = boxes[:, 2] / width\n                boxes[:, 3] = boxes[:, 3] / height\n\n                box_heights = boxes[:, 3] - boxes[:, 1]\n                box_widths = boxes[:, 2] - boxes[:, 0]\n                # skip boxes whose labels are erroneously negative\n                if (box_heights <= 0).any() or (box_widths <= 0).any():\n                    continue\n\n                y[:, :4] = boxes[:, :4]\n\n                # obtain the anchors, 38*38*9 of them\n                anchors = RPN.create_anchor(get_img_output_length(width, height), width, height)\n\n                # match anchors to the ground-truth boxes; returns positives: anchors matched to a ground-truth box, negatives: background\n                assignment = self.bbox_util.assign_boxes(y, anchors)\n\n                # training typically samples 128 positives and 128 negatives at random\n                num_regions = 256\n\n                classification = assignment[:, 4]\n                regression = assignment[:, :]\n\n                mask_pos = classification[:] > 0\n                num_pos = len(classification[mask_pos])\n                # if there are more than 128 positives, ignore the surplus positives\n                if num_pos > num_regions / 2:\n                    val_locs = random.sample(range(num_pos), int(num_pos - num_regions / 2))\n                    classification[mask_pos][val_locs] = -1\n                    regression[mask_pos][val_locs, -1] = -1\n\n                mask_neg = classification[:] == 0\n                num_neg = len(classification[mask_neg])\n                # likewise ignore surplus negatives, to balance the numbers of positive and negative samples\n                if len(classification[mask_neg]) + num_pos > num_regions:\n                    val_locs = random.sample(range(num_neg), int(num_neg - num_pos))\n                    classification[mask_neg][val_locs] = -1\n\n                classification = np.reshape(classification, [-1, 1])\n                regression = np.reshape(regression, [-1, 5])\n\n                tmp_inp = np.array(img)\n                tmp_targets = [np.expand_dims(np.array(classification, dtype=np.float32), 0),\n                               np.expand_dims(np.array(regression, dtype=np.float32), 0)]\n\n                # 1. preprocess the image 2. return the prediction targets used for training 3. return the ground-truth boxes\n                yield preprocess_input(np.expand_dims(tmp_inp, 0)), tmp_targets, np.expand_dims(y, 0)"
] | [
[
"numpy.random.shuffle",
"matplotlib.colors.hsv_to_rgb",
"tensorflow.gather_nd",
"numpy.logical_and",
"numpy.reshape",
"numpy.expand_dims",
"numpy.shape",
"numpy.random.rand",
"numpy.array"
]
] |
caelanhadley/NNFSIP | [
"da048af5ded549db7464b206b255104900b40ab8"
] | [
"models/intro/vertical.py"
] | [
"import matplotlib.pyplot as plt\nimport nnfs\nfrom nnfs.datasets import vertical_data\n\nnnfs.init()\n\nX, y = vertical_data(samples=100, classes=3)\n\nplt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap='brg')\nplt.show()\n\nimport numpy as np\nimport nnfs\nimport matplotlib.pyplot as plt\n\nnnfs.init()\n\nclass Layer_Dense:\n def __init__(self, n_inputs, n_neurons):\n self.weights = 0.01 * np.random.randn(n_inputs, n_neurons)\n self.biases = np.zeros((1,n_neurons))\n def forward(self, inputs):\n self.output = np.dot(inputs, self.weights) + self.biases\n\nclass Activation_ReLU:\n # Forward Pass\n def forward(self, inputs):\n self.output = np.maximum(0,inputs)\n\nclass Activation_Softmax:\n def forward(self, inputs):\n exp_values= np.exp(inputs - np.max(inputs, axis=1, keepdims=True))\n normalized = exp_values / np.sum(exp_values, axis=1, keepdims=True)\n self.output = normalized\n\nclass Loss:\n # Calculates the data and regularization losses\n # given model output and ground truth values\n def calculate(self, output, y):\n # Calculate sample losses\n sample_losses = self.forward(output, y)\n # Calculate mean loss\n data_loss = np.mean(sample_losses)\n # Return loss\n return data_loss\n\n\nclass Loss_CatagoricalCrossEntropy(Loss):\n def forward(self, y_pred, y_true):\n # Number of Samples\n samples = len(y_pred)\n # Clip Data to prevent div by 0\n # Clip Both sides to not drag the mean torwards any value\n y_pred_clipped = np.clip(y_pred, 1e-7, 1-1e-7)\n\n # Probabilities for target values -\n # Only if categorical labels\n if len(y_true.shape) == 1:\n correct_confidences = y_pred_clipped[range(samples), y_true]\n # Mask Values - only for one-hot encoded labels\n elif len(y_true.shape) == 2:\n correct_confidences = np.sum(y_pred_clipped * y_true, axis=1)\n \n negative_log_likelyhoods = -np.log(correct_confidences)\n return negative_log_likelyhoods\n\n# Model\ndense1 = Layer_Dense(2,3)\nactivation1 = Activation_ReLU()\ndense2 = Layer_Dense(3, 3)\nactivation2 = Activation_Softmax()\nloss_function = Loss_CatagoricalCrossEntropy()\n\n# Helper variables\nlowest_loss = 9999999 # some initial value\nbest_dense1_weights = dense1.weights.copy()\nbest_dense1_biases = dense1.biases.copy()\nbest_dense2_weights = dense2.weights.copy()\nbest_dense2_biases = dense2.biases.copy()\n\n\nfor iteration in range(10000):\n# Generate a new set of weights for iteration\n dense1.weights += 0.05 * np.random.randn(2, 3)\n dense1.biases += 0.05 * np.random.randn(1, 3)\n dense2.weights += 0.05 * np.random.randn(3, 3)\n dense2.biases += 0.05 * np.random.randn(1, 3)\n # Perform a forward pass of the training data through this layer\n dense1.forward(X)\n activation1.forward(dense1.output)\n dense2.forward(activation1.output)\n activation2.forward(dense2.output)\n # Perform a forward pass through activation function\n # it takes the output of second dense layer here and returns loss\n loss = loss_function.calculate(activation2.output, y)\n # Calculate accuracy from output of activation2 and targets\n # calculate values along first axis\n predictions = np.argmax(activation2.output, axis=1)\n accuracy = np.mean(predictions==y)\n # If loss is smaller - print and save weights and biases aside\n if loss < lowest_loss:\n print('New set of weights found, iteration:', iteration,\n 'loss:', loss, 'acc:', accuracy)\n best_dense1_weights = dense1.weights.copy()\n best_dense1_biases = dense1.biases.copy()\n best_dense2_weights = dense2.weights.copy()\n best_dense2_biases = dense2.biases.copy()\n lowest_loss = loss\n # Revert weights and 
biases\n else:\n dense1.weights = best_dense1_weights.copy()\n dense1.biases = best_dense1_biases.copy()\n dense2.weights = best_dense2_weights.copy()\n dense2.biases = best_dense2_biases.copy()"
] | [
[
"numpy.sum",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"numpy.random.randn",
"numpy.argmax",
"matplotlib.pyplot.show",
"numpy.clip",
"numpy.log",
"numpy.max",
"numpy.maximum",
"numpy.dot",
"numpy.mean"
]
] |
tudorcebere/jax | [
"b1d0f87648f73b06091ea3929a52b5d572391088"
] | [
"jax/experimental/jax2tf/tests/primitives_test.py"
] | [
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for JAX primitive coverage.\"\"\"\n\nimport unittest\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nfrom functools import partial\n\nimport jax\nfrom jax import dtypes\nfrom jax import lax\nfrom jax import numpy as jnp\nfrom jax import test_util as jtu\nfrom jax.config import config\nfrom jax.experimental import jax2tf\nfrom jax.experimental.jax2tf.tests import tf_test_util\nfrom jax.interpreters import xla\n\nimport numpy as np\nimport tensorflow as tf # type: ignore[import]\n\nconfig.parse_flags_with_absl()\n\n# Import after parsing flags\nfrom jax.experimental.jax2tf.tests import primitive_harness\n\nREDUCE = (\n jnp.all,\n jnp.any,\n jnp.max,\n jnp.min,\n jnp.prod,\n jnp.sum,\n)\n\nINDEX = (\n jax.ops.index_add,\n jax.ops.index_max,\n jax.ops.index_min,\n jax.ops.index_mul,\n jax.ops.index_update,\n)\n\n\nclass JaxPrimitiveTest(tf_test_util.JaxToTfTestCase):\n\n def test_primitive_coverage(self):\n \"\"\"Fail if there are JAX primitives that are not implemented.\"\"\"\n # Harvest primitives from XLA translation tables\n all_primitives = (set(xla.translations)\n | set(xla.backend_specific_translations['cpu'])\n | set(xla.backend_specific_translations['gpu'])\n | set(xla.backend_specific_translations['tpu'])\n | set(xla.initial_style_translations)\n | set(xla.parallel_translations))\n\n tf_impl = set(jax.experimental.jax2tf.jax2tf.tf_impl)\n tf_not_yet_impl = set(jax.experimental.jax2tf.jax2tf.tf_not_yet_impl)\n\n all_primitives = tuple(sorted(all_primitives, key=str))\n for p in all_primitives:\n # TODO: remove tie_in once omnistaging is on by default\n if p.name == \"axis_index\" or p.name == \"tie_in\":\n continue\n if p in tf_not_yet_impl:\n self.assertNotIn(p, tf_impl) # Should not be in both tf_impl and tf_not_yet_impl\n else:\n self.assertIn(p, tf_impl)\n\n @parameterized.named_parameters(\n dict(testcase_name=f\"_{f_jax.__name__}\",\n f_jax=f_jax)\n for f_jax in [jnp.add, jnp.subtract, jnp.multiply, jnp.divide,\n jnp.less, jnp.less_equal, jnp.equal, jnp.greater,\n jnp.greater_equal, jnp.not_equal, jnp.maximum,\n jnp.minimum])\n def test_type_promotion(self, f_jax=jnp.add):\n # We only test a few types here, as tensorflow does not support many\n # types like uint* or bool in binary ops.\n types = [dtypes.bfloat16, np.int32, np.int64, np.float32]\n for x_dtype in types:\n for y_dtype in types:\n x = np.array([1, 2], dtype=x_dtype)\n y = np.array([3, 4], dtype=y_dtype)\n self.ConvertAndCompare(f_jax, x, y)\n\n def test_concat(self):\n values = [np.array([1, 2], dtype=np.float32),\n np.array([1, 2], dtype=np.int32),\n np.array([1, 2], dtype=np.int8)]\n f_jax = jax.jit(lambda x: jnp.concatenate(x, axis=0))\n self.ConvertAndCompare(f_jax, values)\n\n @primitive_harness.parameterized(primitive_harness.lax_pad)\n def test_pad(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n 
@primitive_harness.parameterized(primitive_harness.lax_top_k)\n def test_top_k(self, harness: primitive_harness.Harness):\n if (harness.params[\"k\"] > harness.params[\"shape\"][-1] or\n harness.params[\"k\"] < 0):\n with self.assertRaisesRegex(ValueError, \"k argument to top_k must be\"):\n harness.dyn_fun(*harness.dyn_args_maker(self.rng()))\n elif harness.params[\"dtype\"] in jtu.dtypes.complex:\n # TODO(necula): fix top_k complex bug on TPU\n if jtu.device_under_test() == \"tpu\":\n raise unittest.SkipTest(\"top_k complex on TPU raises different error\")\n with self.assertRaisesRegex(RuntimeError, \"Unimplemented: complex comparison\"):\n harness.dyn_fun(*harness.dyn_args_maker(self.rng()))\n # TODO: TF and JAX sort [inf, nan] differently.\n elif harness.name.startswith(\"nan_\"):\n raise unittest.SkipTest(\"inconsistent [nan, inf] sorting\")\n else:\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_sort)\n def test_sort(self, harness: primitive_harness.Harness):\n if (jtu.device_under_test() == \"gpu\" and\n len(harness.arg_descriptors) == 4 and\n not harness.params[\"is_stable\"]):\n # TODO: fix the TF GPU test\n raise unittest.SkipTest(\"GPU tests are running TF on CPU\")\n if jtu.device_under_test() == \"tpu\" and harness.params[\"dtype\"] in jtu.dtypes.complex:\n raise unittest.SkipTest(\"JAX sort is not implemented on TPU for complex\")\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_fft)\n @jtu.skip_on_flag(\"jax_skip_slow_tests\", True)\n def test_fft(self, harness: primitive_harness.Harness):\n if len(harness.params[\"fft_lengths\"]) > 3:\n with self.assertRaisesRegex(RuntimeError, \"FFT only supports ranks 1-3\"):\n harness.dyn_fun(*harness.dyn_args_maker(self.rng()))\n elif (jtu.device_under_test() == \"tpu\" and\n len(harness.params[\"fft_lengths\"]) > 1):\n # TODO(b/140351181): FFT is mostly unimplemented on TPU, even for JAX\n with self.assertRaisesRegex(RuntimeError,\n \"only 1D FFT is currently supported.\"):\n harness.dyn_fun(*harness.dyn_args_maker(self.rng()))\n else:\n tol = None\n if jtu.device_under_test() == \"gpu\":\n if harness.params[\"dtype\"] in jtu.dtypes.boolean:\n tol = 0.01\n else:\n tol = 1e-3\n self.ConvertAndCompare(harness.dyn_fun,\n *harness.dyn_args_maker(self.rng()),\n atol=tol, rtol=tol)\n\n @primitive_harness.parameterized(primitive_harness.lax_linalg_qr)\n def test_qr(self, harness: primitive_harness.Harness):\n # See jax.lib.lapack.geqrf for the list of compatible types\n\n dtype = harness.params[\"dtype\"]\n dut = jtu.device_under_test()\n # These cases are not implemented in JAX\n if dtype in (jtu.dtypes.all_integer + [jnp.bfloat16]):\n unimplemented_jax = True\n elif dtype is np.complex64 and dut == \"tpu\":\n unimplemented_jax = True\n elif dtype is np.float16 and dut in (\"cpu\", \"gpu\"):\n unimplemented_jax = True\n else:\n unimplemented_jax = False\n\n if unimplemented_jax:\n raise unittest.SkipTest(f\"QR not implemented in JAX for {dtype} on {dut}\")\n\n # TODO: see https://github.com/google/jax/pull/3775#issuecomment-659407824.\n # - for now, the performance of the HLO QR implementation called when\n # compiling with TF is expected to have worse performance than the\n # custom calls made in JAX.\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),\n atol=1e-5, rtol=1e-5)\n\n 
@primitive_harness.parameterized(primitive_harness.lax_linalg_svd)\n @jtu.skip_on_flag(\"jax_skip_slow_tests\", True)\n def test_svd(self, harness: primitive_harness.Harness):\n if harness.params[\"dtype\"] in [np.float16, dtypes.bfloat16]:\n if jtu.device_under_test() != \"tpu\":\n # Does not work in JAX\n with self.assertRaisesRegex(NotImplementedError, \"Unsupported dtype\"):\n harness.dyn_fun(*harness.dyn_args_maker(self.rng()))\n return\n\n if harness.params[\"dtype\"] in [np.complex64, np.complex128]:\n if jtu.device_under_test() == \"tpu\":\n # TODO: on JAX on TPU there is no SVD implementation for complex\n with self.assertRaisesRegex(RuntimeError,\n \"Binary op compare with different element types\"):\n harness.dyn_fun(*harness.dyn_args_maker(self.rng()))\n return\n\n def _custom_assert(r_jax, r_tf, atol=1e-6, rtol=1e-6):\n def _reconstruct_operand(result, is_tf: bool):\n # Reconstructing operand as documented in numpy.linalg.svd (see\n # https://numpy.org/doc/stable/reference/generated/numpy.linalg.svd.html)\n s, u, v = result\n if is_tf:\n s = s.numpy()\n u = u.numpy()\n v = v.numpy()\n U = u[..., :s.shape[-1]]\n V = v[..., :s.shape[-1], :]\n S = s[..., None, :]\n return jnp.matmul(U * S, V), s.shape, u.shape, v.shape\n\n if harness.params[\"compute_uv\"]:\n r_jax_reconstructed = _reconstruct_operand(r_jax, False)\n r_tf_reconstructed = _reconstruct_operand(r_tf, True)\n self.assertAllClose(r_jax_reconstructed, r_tf_reconstructed,\n atol=atol, rtol=rtol)\n else:\n self.assertAllClose(r_jax, r_tf, atol=atol, rtol=rtol)\n\n tol = 1e-4\n custom_assert = partial(_custom_assert, atol=tol, rtol=tol)\n\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),\n atol=tol, rtol=tol,\n custom_assert=custom_assert,\n always_custom_assert=True)\n\n @primitive_harness.parameterized(primitive_harness.lax_select_and_gather_add)\n @jtu.ignore_warning(category=UserWarning,\n message=\"Using reduced precision for gradient.*\")\n def test_select_and_gather_add(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_reduce_window)\n def test_reduce_window(self, harness: primitive_harness.Harness):\n dtype = harness.params['dtype']\n\n if (jtu.device_under_test() == 'tpu' and dtype is np.complex64):\n raise unittest.SkipTest(\n 'TODO: JAX reduce_window on TPU does not handle complex64'\n )\n\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_unary_elementwise)\n def test_unary_elementwise(self, harness: primitive_harness.Harness):\n dtype = harness.params[\"dtype\"]\n lax_name = harness.params[\"lax_name\"]\n arg, = harness.dyn_args_maker(self.rng())\n custom_assert = None\n if lax_name == \"digamma\":\n # TODO(necula): fix bug with digamma/(f32|f16) on TPU\n if dtype in [np.float16, np.float32] and jtu.device_under_test() == \"tpu\":\n raise unittest.SkipTest(\"TODO: fix bug: nan vs not-nan\")\n\n # In the bfloat16 case, TF and lax both return NaN in undefined cases.\n if not dtype is dtypes.bfloat16:\n # digamma is not defined at 0 and -1\n def custom_assert(result_jax, result_tf):\n # lax.digamma returns NaN and tf.math.digamma returns inf\n special_cases = (arg == 0.) 
| (arg == -1.)\n nr_special_cases = np.count_nonzero(special_cases)\n self.assertAllClose(np.full((nr_special_cases,), dtype(np.nan)),\n result_jax[special_cases])\n self.assertAllClose(np.full((nr_special_cases,), dtype(np.inf)),\n result_tf[special_cases])\n # non-special cases are equal\n self.assertAllClose(result_jax[~ special_cases],\n result_tf[~ special_cases])\n if lax_name == \"erf_inv\":\n # TODO(necula): fix erf_inv bug on TPU\n if jtu.device_under_test() == \"tpu\":\n raise unittest.SkipTest(\"erf_inv bug on TPU: nan vs non-nan\")\n # TODO: investigate: in the (b)float16 cases, TF and lax both return the\n # same result in undefined cases.\n if not dtype in [np.float16, dtypes.bfloat16]:\n # erf_inv is not defined for arg <= -1 or arg >= 1\n def custom_assert(result_jax, result_tf): # noqa: F811\n # for arg < -1 or arg > 1\n # lax.erf_inv returns NaN; tf.math.erf_inv return +/- inf\n special_cases = (arg < -1.) | (arg > 1.)\n nr_special_cases = np.count_nonzero(special_cases)\n self.assertAllClose(np.full((nr_special_cases,), dtype(np.nan),\n dtype=dtype),\n result_jax[special_cases])\n signs = np.where(arg[special_cases] < 0., -1., 1.)\n self.assertAllClose(np.full((nr_special_cases,),\n signs * dtype(np.inf), dtype=dtype),\n result_tf[special_cases])\n # non-special cases are equal\n self.assertAllClose(result_jax[~ special_cases],\n result_tf[~ special_cases])\n atol = None\n if jtu.device_under_test() == \"gpu\":\n # TODO(necula): revisit once we fix the GPU tests\n atol = 1e-3\n self.ConvertAndCompare(harness.dyn_fun, arg, custom_assert=custom_assert,\n atol=atol)\n\n @primitive_harness.parameterized(primitive_harness.lax_bitwise_not)\n def test_bitwise_not(self, harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_population_count)\n def test_population_count(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_add_mul)\n def test_add_mul(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_min_max)\n def test_min_max(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_binary_elementwise)\n def test_binary_elementwise(self, harness):\n tol = None\n lax_name, dtype = harness.params[\"lax_name\"], harness.params[\"dtype\"]\n if lax_name in (\"igamma\", \"igammac\"):\n # TODO(necula): fix bug with igamma/f16\n if dtype in [np.float16, dtypes.bfloat16]:\n raise unittest.SkipTest(\"TODO: igamma(c) unsupported with (b)float16 in JAX\")\n # TODO(necula): fix bug with igamma/f32 on TPU\n if dtype is np.float32 and jtu.device_under_test() == \"tpu\":\n raise unittest.SkipTest(\"TODO: fix bug: nan vs not-nan\")\n arg1, arg2 = harness.dyn_args_maker(self.rng())\n custom_assert = None\n if lax_name == \"igamma\":\n # igamma is not defined when the first argument is <=0\n def custom_assert(result_jax, result_tf):\n # lax.igamma returns NaN when arg1 == arg2 == 0; tf.math.igamma returns 0\n special_cases = (arg1 == 0.) 
& (arg2 == 0.)\n nr_special_cases = np.count_nonzero(special_cases)\n self.assertAllClose(np.full((nr_special_cases,), np.nan, dtype=dtype),\n result_jax[special_cases])\n self.assertAllClose(np.full((nr_special_cases,), 0., dtype=dtype),\n result_tf[special_cases])\n # non-special cases are equal\n self.assertAllClose(result_jax[~ special_cases],\n result_tf[~ special_cases])\n if lax_name == \"igammac\":\n # On GPU, tolerance also needs to be adjusted in compiled mode\n if dtype == np.float64 and jtu.device_under_test() == 'gpu':\n tol = 1e-14\n # igammac is not defined when the first argument is <=0\n def custom_assert(result_jax, result_tf): # noqa: F811\n # lax.igammac returns 1. when arg1 <= 0; tf.math.igammac returns NaN\n special_cases = (arg1 <= 0.) | (arg2 <= 0)\n nr_special_cases = np.count_nonzero(special_cases)\n self.assertAllClose(np.full((nr_special_cases,), 1., dtype=dtype),\n result_jax[special_cases])\n self.assertAllClose(np.full((nr_special_cases,), np.nan, dtype=dtype),\n result_tf[special_cases])\n # On CPU, tolerance only needs to be adjusted in eager & graph modes\n tol = None\n if dtype == np.float64:\n tol = 1e-14\n\n # non-special cases are equal\n self.assertAllClose(result_jax[~ special_cases],\n result_tf[~ special_cases], atol=tol, rtol=tol)\n self.ConvertAndCompare(harness.dyn_fun, arg1, arg2,\n custom_assert=custom_assert, atol=tol, rtol=tol)\n\n @primitive_harness.parameterized(primitive_harness.lax_binary_elementwise_logical)\n def test_binary_elementwise_logical(self, harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n\n @primitive_harness.parameterized(primitive_harness.lax_betainc)\n def test_betainc(self, harness: primitive_harness.Harness):\n dtype = harness.params[\"dtype\"]\n # TODO: https://www.tensorflow.org/api_docs/python/tf/math/betainc only\n # supports float32/64 tests.\n # TODO(bchetioui): investigate why the test actually fails in JAX.\n if dtype in [np.float16, dtypes.bfloat16]:\n raise unittest.SkipTest(\"(b)float16 not implemented in TF\")\n\n tol = None\n if dtype is np.float64:\n tol = 1e-14\n\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),\n atol=tol, rtol=tol)\n\n # TODO(necula): combine tests that are identical except for the harness\n # wait until we get more experience with using harnesses.\n @primitive_harness.parameterized(primitive_harness.lax_shift_left)\n def test_shift_left(self, harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_shift_right_logical)\n def test_shift_right_logical(self, harness):\n if jtu.device_under_test() == \"tpu\" and harness.params[\"dtype\"] in [np.int8, np.int16]:\n raise unittest.SkipTest(\"TODO: silent error for negative inputs\")\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_shift_right_arithmetic)\n def test_shift_right_arithmetic(self, harness):\n if jtu.device_under_test() == \"tpu\" and harness.params[\"dtype\"] in [np.uint8, np.uint16]:\n raise unittest.SkipTest(\"TODO: silent error for negative inputs\")\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_slice)\n def test_slice(self, harness):\n # JAX.slice rejects negative indices; check, and skip jax2tf\n if any(si < 0 or si >= sh or li < 0 or li > sh\n for sh, si, li in 
zip(harness.params[\"shape\"],\n harness.params[\"start_indices\"],\n harness.params[\"limit_indices\"])):\n with self.assertRaisesRegex(TypeError, \"\"):\n harness.dyn_fun(*harness.dyn_args_maker(self.rng()))\n else:\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_dynamic_slice)\n def test_dynamic_slice(self, harness):\n # JAX.dynamic_slice rejects slice sizes too big; check this, and skip jax2tf\n args = harness.dyn_args_maker(self.rng())\n if any(li - si < 0 or li - si >= sh\n for sh, si, li in zip(harness.params[\"shape\"],\n harness.params[\"start_indices\"],\n harness.params[\"limit_indices\"])):\n with self.assertRaisesRegex(TypeError, \"\"):\n harness.dyn_fun(*args)\n return\n\n self.ConvertAndCompare(harness.dyn_fun, *args)\n\n @primitive_harness.parameterized(primitive_harness.lax_dynamic_update_slice)\n def test_dynamic_update_slice(self, harness):\n # JAX.dynamic_update_slice rejects update slices too big; check, and skip jax2tf\n if any(ush > sh\n for sh, ush in zip(harness.params[\"shape\"],\n harness.params[\"update_shape\"])):\n with self.assertRaisesRegex(TypeError, \"\"):\n harness.dyn_fun(*harness.dyn_args_maker(self.rng()))\n else:\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_squeeze)\n def test_squeeze(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_conv_general_dilated)\n def test_conv_general_dilated(self, harness: primitive_harness.Harness):\n if jtu.device_under_test() == \"gpu\":\n raise unittest.SkipTest(\"TODO: test failures on GPU\")\n tol = None\n # TODO(bchetioui): significant discrepancies in some float16 cases.\n if harness.params[\"dtype\"] is np.float16:\n tol = 1.\n # TODO(bchetioui): slight occasional discrepancy in float32 cases.\n elif harness.params[\"dtype\"] is np.float32:\n tol = 1e-5\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),\n atol=tol, rtol=tol)\n\n @primitive_harness.parameterized(primitive_harness.lax_gather)\n def test_gather(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_scatter)\n def test_scatter(self, harness: primitive_harness.Harness):\n f_name = harness.params['f_lax'].__name__\n dtype = harness.params['dtype']\n\n if jtu.device_under_test() == 'tpu':\n if dtype is np.complex64 and f_name in ['scatter_min', 'scatter_max']:\n raise unittest.SkipTest(f\"TODO: complex {f_name} on TPU fails in JAX\")\n\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n def test_boolean_gather(self):\n values = np.array([[True, True], [False, True], [False, False]],\n dtype=np.bool_)\n indices = np.array([0, 1], dtype=np.int32)\n for axis in [0, 1]:\n f_jax = jax.jit(lambda v, i: jnp.take(v, i, axis=axis)) # pylint: disable=cell-var-from-loop\n self.ConvertAndCompare(f_jax, values, indices)\n\n def test_gather_rank_change(self):\n params = jnp.array([[1.0, 1.5, 2.0], [2.0, 2.5, 3.0], [3.0, 3.5, 4.0]])\n indices = jnp.array([[1, 1, 2], [0, 1, 0]])\n f_jax = jax.jit(lambda i: params[i])\n self.ConvertAndCompare(f_jax, indices)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"_{f_jax.__name__}\",\n f_jax=f_jax)\n for 
f_jax in REDUCE))\n def test_reduce_ops_with_numerical_input(self, f_jax):\n values = np.array([1, 2, 3], dtype=np.float32)\n self.ConvertAndCompare(f_jax, values)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"_{f_jax.__name__}\",\n f_jax=f_jax)\n for f_jax in (jnp.cumsum, jnp.cumprod)))\n def test_cumulated_ops(self, f_jax):\n values = np.array([1, 2, 3], dtype=np.float32)\n self.ConvertAndCompare(f_jax, values)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"_{op.__name__}\",\n op=op)\n for op in INDEX))\n def test_scatter_static(self, op):\n values = np.ones((5, 6), dtype=np.float32)\n update = np.float32(6.)\n f_jax = jax.jit(lambda v, u: op(v, jax.ops.index[::2, 3:], u))\n self.ConvertAndCompare(f_jax, values, update)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"_{f_jax.__name__}\",\n f_jax=f_jax)\n for f_jax in REDUCE))\n def test_reduce_ops_with_boolean_input(self, f_jax):\n values = np.array([True, False, True], dtype=np.bool_)\n self.ConvertAndCompare(f_jax, values)\n\n @primitive_harness.parameterized(primitive_harness.random_gamma)\n def test_random_gamma(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),\n rtol=1e-5)\n\n @primitive_harness.parameterized(primitive_harness.random_split)\n def test_random_split(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n def test_zeros_like(self):\n v = np.float32(2.)\n f_jax = jax.ad_util.zeros_like_jaxval\n self.ConvertAndCompare(f_jax, v)\n\n def test_stop_gradient(self):\n f = jax2tf.convert(lax.stop_gradient)\n self.assertEqual(f(tf.ones([])), 1.)\n\n # test_bfloat16_constant checks that https://github.com/google/jax/issues/3942 is\n # fixed\n def test_bfloat16_constant(self):\n def jax_fn_scalar(x):\n x = x.astype(jnp.bfloat16)\n x *= 2.\n return x\n\n def jax_fn_array(x):\n x = x.astype(jnp.bfloat16)\n x *= np.array([1.5, 2.5, 3.5], jnp.bfloat16)\n return x\n\n tf_fn_scalar = jax2tf.convert(jax_fn_scalar)\n self.assertAllClose(tf_fn_scalar(1.375).numpy(), jnp.bfloat16(2.750))\n\n tf_fn_array = jax2tf.convert(jax_fn_array)\n self.assertAllClose(tf_fn_array(np.array([3, 4, 5])),\n np.array([4.5, 10, 17.5], jnp.bfloat16))\n\nif __name__ == \"__main__\":\n absltest.main(testLoader=jtu.JaxTestLoader())\n"
] | [
[
"numpy.ones",
"tensorflow.ones",
"numpy.float32",
"numpy.count_nonzero",
"numpy.array",
"numpy.where",
"numpy.full"
]
] |
sorhus/tensorflow | [
"99de1826646c8d354259187fc9c2330b794c1ac4"
] | [
"tensorflow/python/eager/ops_test.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for operations in eager execution.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import execute\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.layers import core\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import sparse_ops\n\n\nclass OpsTest(test_util.TensorFlowTestCase):\n\n def testExecuteBasic(self):\n three = constant_op.constant(3)\n five = constant_op.constant(5)\n product = three * five\n self.assertAllEqual(15, product)\n\n def testMatMulGPU(self):\n if not context.context().num_gpus():\n self.skipTest('No GPUs found')\n three = constant_op.constant([[3.]]).gpu()\n five = constant_op.constant([[5.]]).gpu()\n product = math_ops.matmul(three, five)\n self.assertEqual([[15.0]], product.numpy())\n\n def testExecuteStringAttr(self):\n three = constant_op.constant(3.0)\n checked_three = array_ops.check_numerics(three,\n message='just checking')\n self.assertEqual([[3]], checked_three.numpy())\n\n def testExecuteFloatAttr(self):\n three = constant_op.constant(3.0)\n almost_three = constant_op.constant(2.8)\n almost_equal = math_ops.approximate_equal(\n three, almost_three, tolerance=0.3)\n self.assertTrue(almost_equal)\n\n def testExecuteIntAttr(self):\n three = constant_op.constant(3)\n four = constant_op.constant(4)\n total = math_ops.add_n([three, four])\n self.assertAllEqual(7, total)\n\n def testExecuteBoolAttr(self):\n three = constant_op.constant([[3]])\n five = constant_op.constant([[5]])\n product = math_ops.matmul(three, five, transpose_a=True)\n self.assertAllEqual([[15]], product)\n\n def testExecuteOneListOutput(self):\n split_dim = constant_op.constant(1)\n value = constant_op.constant([[0, 1, 2], [3, 4, 5]])\n x1, x2, x3 = array_ops.split(value, 3, axis=split_dim)\n self.assertAllEqual([[0], [3]], x1)\n self.assertAllEqual([[1], [4]], x2)\n self.assertAllEqual([[2], [5]], x3)\n\n def testGraphMode(self):\n graph = ops.Graph()\n with graph.as_default(), context.graph_mode():\n array_ops.placeholder(dtypes.int32)\n self.assertEqual(1, len(graph.get_operations()))\n\n # See comments on handling of int32 tensors on GPU in\n # EagerTensor.__init__.\n def 
testInt32CPUDefault(self):\n if not context.context().num_gpus():\n self.skipTest('No GPUs found')\n with context.device('/gpu:0'):\n r = constant_op.constant(1) + constant_op.constant(2)\n self.assertAllEqual(r, 3)\n\n def testExecuteListOutputLen1(self):\n split_dim = constant_op.constant(1)\n value = constant_op.constant([[0, 1, 2], [3, 4, 5]])\n result = array_ops.split(value, 1, axis=split_dim)\n self.assertTrue(isinstance(result, list))\n self.assertEqual(1, len(result))\n self.assertAllEqual([[0, 1, 2], [3, 4, 5]], result[0])\n\n def testExecuteListOutputLen0(self):\n empty = constant_op.constant([], dtype=dtypes.int32)\n result = array_ops.unstack(empty, 0)\n self.assertTrue(isinstance(result, list))\n self.assertEqual(0, len(result))\n\n def testExecuteMultipleNonListOutput(self):\n x = constant_op.constant([1, 2, 3, 4, 5, 6])\n y = constant_op.constant([1, 3, 5])\n result = array_ops.listdiff(x, y)\n out, idx = result\n self.assertTrue(out is result.out)\n self.assertTrue(idx is result.idx)\n self.assertAllEqual([2, 4, 6], out)\n self.assertAllEqual([1, 3, 5], idx)\n\n def testExecuteMultipleListOutput(self):\n split_dim = constant_op.constant(1, dtype=dtypes.int64)\n indices = constant_op.constant([[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]],\n dtype=dtypes.int64)\n values = constant_op.constant([2, 3, 5, 7, 11])\n shape = constant_op.constant([2, 7], dtype=dtypes.int64)\n result = sparse_ops.gen_sparse_ops.sparse_split(\n split_dim,\n indices,\n values,\n shape,\n num_split=2)\n output_indices, output_values, output_shape = result\n self.assertEqual(2, len(output_indices))\n self.assertEqual(2, len(output_values))\n self.assertEqual(2, len(output_shape))\n self.assertEqual(output_indices, result.output_indices)\n self.assertEqual(output_values, result.output_values)\n self.assertEqual(output_shape, result.output_shape)\n self.assertAllEqual([[0, 2], [1, 0], [1, 1]], output_indices[0])\n self.assertAllEqual([[0, 0], [0, 1]], output_indices[1])\n self.assertAllEqual([2, 7, 11], output_values[0])\n self.assertAllEqual([3, 5], output_values[1])\n self.assertAllEqual([2, 4], output_shape[0])\n self.assertAllEqual([2, 3], output_shape[1])\n\n # TODO(josh11b): Test an op that has multiple outputs, some but not\n # all of which are lists. 
Examples: barrier_take_many (currently\n # unsupported since it uses a type list) or sdca_optimizer (I don't\n # have an example of legal inputs & outputs).\n\n def testComposition(self):\n x = constant_op.constant(1, dtype=dtypes.int32)\n three_x = x + x + x\n self.assertEquals(dtypes.int32, three_x.dtype)\n self.assertAllEqual(3, three_x)\n\n def testOperatorOverrides(self):\n # TODO(henrytan): test with negative number.\n a = constant_op.constant([1])\n b = constant_op.constant([2])\n\n self.assertAllEqual((-a), [-1])\n self.assertAllEqual(abs(b), [2])\n\n self.assertAllEqual((a + b), [3])\n self.assertAllEqual((a - b), [-1])\n self.assertAllEqual((a * b), [2])\n self.assertAllEqual((a * a), [1])\n\n self.assertAllEqual((a**b), [1])\n self.assertAllEqual((a / b), [1 / 2])\n self.assertAllEqual((a / a), [1])\n self.assertAllEqual((a % b), [1])\n\n self.assertAllEqual((a < b), [True])\n self.assertAllEqual((a <= b), [True])\n self.assertAllEqual((a > b), [False])\n self.assertAllEqual((a >= b), [False])\n self.assertAllEqual((a == b), False)\n self.assertAllEqual((a != b), True)\n\n self.assertAllEqual(1, a[constant_op.constant(0)])\n\n def test_basic_slice(self):\n npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)\n t = constant_op.constant(npt)\n\n self.assertAllEqual(npt[:, :, :], t[:, :, :])\n self.assertAllEqual(npt[::, ::, ::], t[::, ::, ::])\n self.assertAllEqual(npt[::1, ::1, ::1], t[::1, ::1, ::1])\n self.assertAllEqual(npt[::1, ::5, ::2], t[::1, ::5, ::2])\n self.assertAllEqual(npt[::-1, :, :], t[::-1, :, :])\n self.assertAllEqual(npt[:, ::-1, :], t[:, ::-1, :])\n self.assertAllEqual(npt[:, :, ::-1], t[:, :, ::-1])\n self.assertAllEqual(npt[-2::-1, :, ::1], t[-2::-1, :, ::1])\n self.assertAllEqual(npt[-2::-1, :, ::2], t[-2::-1, :, ::2])\n\n def testDegenerateSlices(self):\n npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)\n t = constant_op.constant(npt)\n # degenerate by offering a forward interval with a negative stride\n self.assertAllEqual(npt[0:-1:-1, :, :], t[0:-1:-1, :, :])\n # degenerate with a reverse interval with a positive stride\n self.assertAllEqual(npt[-1:0, :, :], t[-1:0, :, :])\n # empty interval in every dimension\n self.assertAllEqual(npt[-1:0, 2:2, 2:3:-1], t[-1:0, 2:2, 2:3:-1])\n\n def testEllipsis(self):\n npt = np.array(\n [[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]])\n t = constant_op.constant(npt)\n\n self.assertAllEqual(npt[0:], t[0:])\n # implicit ellipsis\n self.assertAllEqual(npt[0:, ...], t[0:, ...])\n # ellipsis alone\n self.assertAllEqual(npt[...], t[...])\n # ellipsis at end\n self.assertAllEqual(npt[0:1, ...], t[0:1, ...])\n # ellipsis at begin\n self.assertAllEqual(npt[..., 0:1], t[..., 0:1])\n # ellipsis at middle\n self.assertAllEqual(npt[0:1, ..., 0:1], t[0:1, ..., 0:1])\n\n def testShrink(self):\n npt = np.array([[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],\n [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]])\n t = constant_op.constant(npt)\n self.assertAllEqual(npt[:, :, :, :, 3], t[:, :, :, :, 3])\n self.assertAllEqual(npt[..., 3], t[..., 3])\n self.assertAllEqual(npt[:, 0], t[:, 0])\n self.assertAllEqual(npt[:, :, 0], t[:, :, 0])\n\n def testOpWithInputsOnDifferentDevices(self):\n if not context.context().num_gpus():\n self.skipTest('No GPUs found')\n\n # The GPU kernel for the Reshape op requires that the\n # shape input be on CPU.\n value = constant_op.constant([1., 2.]).gpu()\n shape = constant_op.constant([2, 1])\n reshaped = array_ops.reshape(value, shape)\n self.assertAllEqual([[1], 
[2]], reshaped.cpu())\n\n def testInt64(self):\n # Fill requires the first input to be an int32 tensor.\n self.assertAllEqual(\n [1.0, 1.0],\n array_ops.fill(constant_op.constant([2], dtype=dtypes.int64),\n constant_op.constant(1)))\n\n def testOutputOnHostMemory(self):\n if not context.context().num_gpus():\n self.skipTest('No GPUs found')\n # The Shape op kernel on GPU places the output in host memory.\n value = constant_op.constant([1.]).gpu()\n shape = array_ops.shape(value)\n self.assertEqual([1], shape.numpy())\n\n def testSilentCopy(self):\n if not context.context().num_gpus():\n self.skipTest('No GPUs found')\n # Temporarily replace the context\n # pylint: disable=protected-access\n del context._context\n try:\n context._context = context.Context(\n device_policy=context.DEVICE_PLACEMENT_SILENT)\n cpu_tensor = constant_op.constant(1.0)\n gpu_tensor = cpu_tensor.gpu()\n self.assertAllEqual(cpu_tensor + gpu_tensor, 2.0)\n finally:\n del context._context\n context._context = context.Context()\n # pylint: enable=protected-access\n\n def testSoftPlacement(self):\n if not context.context().num_gpus():\n self.skipTest('No GPUs found')\n # Temporarily replace the context\n # pylint: disable=protected-access\n del context._context\n try:\n context._context = context.Context(\n device_policy=context.DEVICE_PLACEMENT_SILENT,\n config=config_pb2.ConfigProto(allow_soft_placement=True))\n cpu_tensor = constant_op.constant(1.0)\n result = cpu_tensor + cpu_tensor\n self.assertEqual(result.device,\n '/job:localhost/replica:0/task:0/device:GPU:0')\n finally:\n del context._context\n context._context = context.Context()\n # pylint: enable=protected-access\n\n def testRandomUniform(self):\n scalar_shape = constant_op.constant([], dtype=dtypes.int32)\n\n x = random_ops.random_uniform(scalar_shape)\n self.assertEquals(0, x.shape.ndims)\n self.assertEquals(dtypes.float32, x.dtype)\n\n x = random_ops.random_uniform(\n scalar_shape, minval=constant_op.constant(5.),\n maxval=constant_op.constant(6.))\n self.assertLess(x, 6)\n self.assertGreaterEqual(x, 5)\n\n def testArgsToMatchingEagerDefault(self):\n # Uses default\n ctx = context.context()\n t, r = execute.args_to_matching_eager([[3, 4]], ctx, dtypes.int32)\n self.assertEquals(t, dtypes.int32)\n self.assertEquals(r[0].dtype, dtypes.int32)\n t, r = execute.args_to_matching_eager([[3, 4]], ctx, dtypes.int64)\n self.assertEquals(t, dtypes.int64)\n self.assertEquals(r[0].dtype, dtypes.int64)\n # Doesn't use default\n t, r = execute.args_to_matching_eager(\n [['string', 'arg']], ctx, dtypes.int32)\n self.assertEquals(t, dtypes.string)\n self.assertEquals(r[0].dtype, dtypes.string)\n\n def testFlattenLayer(self):\n flatten_layer = core.Flatten()\n x = constant_op.constant([[[-10, -20], [-30, -40]], [[10, 20], [30, 40]]])\n y = flatten_layer(x)\n self.assertAllEqual([[-10, -20, -30, -40], [10, 20, 30, 40]], y)\n\n def testIdentity(self):\n self.assertAllEqual(2, array_ops.identity(2))\n\n def testIdentityOnVariable(self):\n if not context.context().num_gpus():\n self.skipTest('No GPUs found')\n with context.device('/gpu:0'):\n v = resource_variable_ops.ResourceVariable(True)\n self.assertAllEqual(True, array_ops.identity(v))\n\n def testIncompatibleSetShape(self):\n x = constant_op.constant(1)\n with self.assertRaises(ValueError):\n x.set_shape((1, 2))\n\n def testCompatibleSetShape(self):\n x = constant_op.constant([[1, 2]])\n x.set_shape(tensor_shape.TensorShape([None, 2]))\n self.assertEqual(x.get_shape(), (1, 2))\n\n def 
testCastScalarToPrimitiveTypes(self):\n x = constant_op.constant(1.3)\n self.assertIsInstance(int(x), int)\n self.assertEqual(int(x), 1)\n self.assertIsInstance(float(x), float)\n self.assertAllClose(float(x), 1.3)\n\n def testCastNonScalarToPrimitiveTypesFails(self):\n x = constant_op.constant([1.3, 2])\n with self.assertRaises(TypeError):\n int(x)\n with self.assertRaises(TypeError):\n float(x)\n\n def testFormatString(self):\n x = constant_op.constant(3.1415)\n self.assertEqual('3.14', '{:.2f}'.format(x))\n\n def testNoOpIsNone(self):\n self.assertTrue(control_flow_ops.no_op() is None)\n\n\nif __name__ == '__main__':\n test.main()\n"
] | [
[
"tensorflow.python.ops.array_ops.listdiff",
"tensorflow.python.eager.context.Context",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.array_ops.check_numerics",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.eager.test.main",
"tensorflow.python.layers.core.Flatten",
"tensorflow.python.ops.math_ops.add_n",
"tensorflow.python.ops.array_ops.unstack",
"tensorflow.python.ops.random_ops.random_uniform",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.eager.context.context",
"numpy.arange",
"tensorflow.python.eager.context.device",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.ops.math_ops.approximate_equal",
"tensorflow.python.eager.execute.args_to_matching_eager",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.ops.array_ops.split",
"tensorflow.python.ops.sparse_ops.gen_sparse_ops.sparse_split",
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"tensorflow.python.ops.array_ops.reshape",
"numpy.array",
"tensorflow.python.ops.control_flow_ops.no_op"
]
] |
CommanderCero/RL_Algorithms | [
"fd8172e0075247b682a1dca752306147fa2ed3ba"
] | [
"A2C/utils.py"
] | [
"import scipy.signal as signal\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport models\nimport gym\nimport wandb\n\ndef create_feedforward(sizes, activation=nn.ReLU): \n layers = []\n for i in range(len(sizes) - 1):\n layers.append(nn.Linear(sizes[i], sizes[i+1]))\n if i < len(sizes) - 2:\n layers.append(activation())\n return nn.Sequential(*layers)\n\ndef get_shape(shape):\n if shape is None:\n return ()\n return shape\n\ndef discounted_cumsum(rewards, reward_decay):\n \"\"\"Taken from https://stackoverflow.com/questions/47970683/vectorize-a-numpy-discount-calculation\"\"\"\n return signal.lfilter([1], [1, -reward_decay], x=rewards[::-1])[::-1]\n\nclass TrajectoryBuffer:\n def __init__(self, observation_shape, action_shape, size, reward_decay=0.99):\n self.max_size = size\n self.trajectory_start = 0\n self.pos = 0\n self.reward_decay = reward_decay\n \n self.observations = np.empty((size, *observation_shape), dtype=np.float32)\n self.actions = np.empty((size, *get_shape(action_shape)), dtype=np.float32)\n self.rewards = np.empty((size,), dtype=np.float32)\n self.returns = np.empty((size,), dtype=np.float32)\n self.dones = np.empty((size,), dtype=np.float32)\n \n def store(self, observation, action, reward, done):\n assert self.pos < self.max_size, \"Buffer Overflow\"\n \n self.observations[self.pos] = observation\n self.actions[self.pos] = action\n self.rewards[self.pos] = reward\n self.dones[self.pos] = done\n self.pos += 1\n \n def end_trajectory(self, value=0):\n # Compute return\n sl = slice(self.trajectory_start, self.pos)\n rewards = self.rewards[sl]\n rewards = np.append(rewards, value)\n self.returns[sl] = discounted_cumsum(rewards, self.reward_decay)[:-1]\n \n self.trajectory_start = self.pos\n \n def get_data(self):\n sl = slice(0, self.pos)\n data = dict(\n observations=self.observations[sl],\n actions=self.actions[sl],\n rewards=self.rewards[sl],\n returns=self.returns[sl],\n dones=self.dones[sl]\n )\n \n return {key : torch.from_numpy(value) for key, value in data.items()}\n \n def clear(self):\n self.pos = 0\n self.trajectory_start = 0\n \n \nclass VecTrajectoryBuffer:\n def __init__(self, observation_shape, action_shape, num_envs, size, reward_decay=0.99):\n self.max_size = size\n self.pos = 0\n self.reward_decay = reward_decay\n self.traj_starts = np.zeros((num_envs,), dtype=int)\n \n self.observations = np.empty((size, num_envs, *observation_shape), dtype=np.float32)\n self.actions = np.empty((size, num_envs, *get_shape(action_shape)), dtype=np.float32)\n self.rewards = np.empty((size, num_envs), dtype=np.float32)\n self.returns = np.empty((size, num_envs), dtype=np.float32)\n self.dones = np.empty((size, num_envs), dtype=np.float32)\n \n def store(self, observations, actions, rewards, dones):\n assert self.pos < self.max_size, \"Buffer Overflow\"\n \n self.observations[self.pos] = observations\n self.actions[self.pos] = actions\n self.rewards[self.pos] = rewards\n self.dones[self.pos] = dones\n self.pos += 1\n \n # Compute returns\n for env_index, done in enumerate(dones):\n if done:\n self._end_trajectory(env_index)\n \n def end_trajectory(self, values):\n for env_index, value in enumerate(values):\n self._end_trajectory(env_index, value)\n \n def _end_trajectory(self, env_index, value=0):\n # Compute return\n sl = slice(self.traj_starts[env_index], self.pos)\n rewards = self.rewards[sl, env_index]\n rewards = np.append(rewards, value)\n self.returns[sl, env_index] = discounted_cumsum(rewards, self.reward_decay)[:-1]\n \n # Update trajectory start\n 
self.traj_starts[env_index] = self.pos\n \n def get_data(self, device=torch.device('cpu')):\n sl = slice(0, self.pos)\n \n data = dict(\n observations=self._remove_env_axis(self.observations[sl]),\n actions=self._remove_env_axis(self.actions[sl]),\n rewards=self._remove_env_axis(self.rewards[sl]),\n returns=self._remove_env_axis(self.returns[sl]),\n dones=self._remove_env_axis(self.dones[sl])\n )\n \n return {key : torch.from_numpy(value).to(device) for key, value in data.items()}\n \n def clear(self):\n self.pos = 0\n self.traj_starts.fill(0)\n \n def _remove_env_axis(self, array):\n # array.shape = (size, num_envs, ???)\n shape = array.shape\n # Swap size with num_envs to ensure reshaping won't mix trajectories\n array = array.swapaxes(0, 1)\n # Flatten\n new_shape = (shape[0] * shape[1], *shape[2:])\n array = array.reshape(new_shape)\n return array\n \n \ndef play(model: models.Policy, env: gym.Env, repeats=10, device=torch.device('cpu')):\n for _ in range(repeats):\n state = env.reset()\n done = False\n while not done:\n inp = torch.FloatTensor([state]).to(device)\n action = model.get_actions(inp)[0]\n state, reward, done, _ = env.step(action)\n env.render()\n \n env.close()\n \ndef capture_video(model: models.Policy, env: gym.Env, fps=30, device=torch.device('cpu')):\n frames = []\n reward_sum = 0\n step_count = 0\n \n state = env.reset()\n done = False\n while not done:\n inp = torch.FloatTensor([state]).to(device)\n action = model.get_actions(inp)[0]\n state, reward, done, _ = env.step(action)\n frames.append(np.array(env.render(\"rgb_array\")))\n \n reward_sum += reward\n step_count += 1\n \n frames = np.array(frames) # (Time, Width, Height, Channels)\n frames = np.moveaxis(frames, 3, 1) # (Time, Channels, Width, Height)\n return wandb.Video(frames, caption=f\"RewardSum={reward_sum}; EpisodeLength={step_count}\", fps=fps)"
] | [
[
"torch.FloatTensor",
"torch.nn.Linear",
"numpy.empty",
"numpy.append",
"numpy.zeros",
"numpy.moveaxis",
"scipy.signal.lfilter",
"torch.from_numpy",
"torch.nn.Sequential",
"numpy.array",
"torch.device"
]
] |
LeanderLXZ/oracle-recognition | [
"c82976333d4a72218b06fffc94192238d95fcf9e"
] | [
"src/capsulesEM_V1/capsules/nets.py"
] | [
"\"\"\"An implementation of matrix capsules with EM routing.\n\"\"\"\n\nimport tensorflow as tf\n\nfrom core import _conv2d_wrapper, capsules_init, capsules_conv, capsules_fc\n\nslim = tf.contrib.slim\n\n# ------------------------------------------------------------------------------#\n# -------------------------------- capsules net --------------------------------#\n# ------------------------------------------------------------------------------#\n\ndef capsules_v0(inputs, num_classes, iterations, name='CapsuleEM-V0'):\n \"\"\"Replicate the network in `Matrix Capsules with EM Routing.`\n \"\"\"\n\n with tf.variable_scope(name) as scope:\n\n # inputs [N, H, W, C] -> conv2d, 5x5, strides 2, channels 32 -> nets [N, OH, OW, 32]\n nets = _conv2d_wrapper(\n inputs, shape=[5, 5, 1, 32], strides=[1, 2, 2, 1], padding='SAME', add_bias=True, activation_fn=tf.nn.relu, name='conv1'\n )\n # inputs [N, H, W, C] -> conv2d, 1x1, strides 1, channels 32x(4x4+1) -> (poses, activations)\n nets = capsules_init(\n nets, shape=[1, 1, 32, 32], strides=[1, 1, 1, 1], padding='VALID', pose_shape=[4, 4], name='capsule_init'\n )\n # inputs: (poses, activations) -> capsule-conv 3x3x32x32x4x4, strides 2 -> (poses, activations)\n nets = capsules_conv(\n nets, shape=[3, 3, 32, 32], strides=[1, 2, 2, 1], iterations=iterations, name='capsule_conv1'\n )\n # inputs: (poses, activations) -> capsule-conv 3x3x32x32x4x4, strides 1 -> (poses, activations)\n nets = capsules_conv(\n nets, shape=[3, 3, 32, 32], strides=[1, 1, 1, 1], iterations=iterations, name='capsule_conv2'\n )\n # inputs: (poses, activations) -> capsule-fc 1x1x32x10x4x4 shared view transform matrix within each channel -> (poses, activations)\n nets = capsules_fc(\n nets, num_classes, iterations=iterations, name='capsule_fc'\n )\n\n poses, activations = nets\n\n return poses, activations\n\n# ------------------------------------------------------------------------------#\n# ------------------------------------ loss ------------------------------------#\n# ------------------------------------------------------------------------------#\n\ndef spread_loss(labels, activations, margin, name):\n \"\"\"This adds spread loss to total loss.\n\n :param labels: [N, O], where O is number of output classes, one hot vector, tf.uint8.\n :param activations: [N, O], activations.\n :param margin: margin 0.2 - 0.9 fixed schedule during training.\n\n :return: spread loss\n \"\"\"\n\n activations_shape = activations.get_shape().as_list()\n\n with tf.variable_scope(name) as scope:\n\n mask_t = tf.equal(labels, 1)\n mask_i = tf.equal(labels, 0)\n\n activations_t = tf.reshape(\n tf.boolean_mask(activations, mask_t), [activations_shape[0], 1]\n )\n activations_i = tf.reshape(\n tf.boolean_mask(activations, mask_i), [activations_shape[0], activations_shape[1] - 1]\n )\n\n # margin = tf.Print(\n # margin, [margin], 'margin', summarize=20\n # )\n\n gap_mit = tf.reduce_sum(\n tf.square(\n tf.nn.relu(\n margin - (activations_t - activations_i)\n )\n )\n )\n\n # tf.add_to_collection(\n # tf.GraphKeys.LOSSES, gap_mit\n # )\n #\n # total_loss = tf.add_n(\n # tf.get_collection(\n # tf.GraphKeys.LOSSES\n # ), name='total_loss'\n # )\n\n tf.losses.add_loss(gap_mit)\n\n return gap_mit\n\n# ------------------------------------------------------------------------------#\n\n"
] | [
[
"tensorflow.equal",
"tensorflow.losses.add_loss",
"tensorflow.variable_scope",
"tensorflow.boolean_mask",
"tensorflow.nn.relu"
]
] |
broadinstitute/lincs-profiling-comparison | [
"075c3bc60eeb3934fc42c30bae6aeed8cda1cd6d"
] | [
"2.MOA-prediction/4.model_viz/scripts/nbconverted/0.blend_test_predictions.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# ### - Ensemble/Blend the 4 model predictions into a single prediction\n\n# In[1]:\n\n\nimport os\nimport datetime\nfrom time import time\nimport pathlib\nimport pandas as pd\nimport numpy as np\nfrom collections import defaultdict\nfrom collections import Counter\n\n\n# In[2]:\n\n\nfrom sklearn.metrics import precision_recall_curve,average_precision_score\nfrom sklearn.metrics import log_loss, roc_curve\nfrom sklearn.metrics import auc,roc_auc_score\n\n\n# In[3]:\n\n\nfrom numba import njit\nfrom scipy.optimize import minimize, fsolve\n\n\n# In[4]:\n\n\n# The two options here are \"\" and \"_subsample\"\nfile_indicator = \"\"\ndata_dir = pathlib.Path(\"../2.data_split/model_data\")\n\n\n# In[5]:\n\n\ncp_test = pathlib.Path(f\"{data_dir}/cp/test_lvl4_data{file_indicator}.csv.gz\")\nL1000_test = pathlib.Path(f\"{data_dir}/L1/test_lvl4_data.csv.gz\")\ncp_L1000_test = pathlib.Path(f\"{data_dir}/merged/test_lvl4_data.csv.gz\")\n\n\n# In[6]:\n\n\nmodel_preds_dir = '../L1000_CP_model_predictions/'\n\n\n# In[7]:\n\n\ndf_cp_test = pd.read_csv(cp_test, compression='gzip',low_memory = False)\ndf_L1000_test = pd.read_csv(L1000_test, compression='gzip',low_memory = False)\ndf_cp_L1000_test = pd.read_csv(cp_L1000_test, compression='gzip',low_memory = False)\n\n\n# In[8]:\n\n\ndf_cp_L1000_test.shape\n\n\n# In[9]:\n\n\n##resnet\ndf_cp_resnet_test = pd.read_csv(os.path.join(model_preds_dir, f'cp_test_preds_resnet{file_indicator}.csv'))\ndf_L1000_resnet_test = pd.read_csv(os.path.join(model_preds_dir, 'L1000_test_preds_resnet.csv'))\ndf_cp_L1000_resnet_test = pd.read_csv(os.path.join(model_preds_dir, 'cp_L1000_test_preds_resnet.csv'))\n\n\n# In[10]:\n\n\nprint(df_cp_L1000_resnet_test.shape)\ndf_cp_L1000_resnet_test.head()\n\n\n# In[11]:\n\n\n##1-d cnn\ndf_cp_cnn_test = pd.read_csv(os.path.join(model_preds_dir, f'cp_test_preds_1dcnn{file_indicator}.csv'))\ndf_L1000_cnn_test = pd.read_csv(os.path.join(model_preds_dir, 'L1000_test_preds_1dcnn.csv'))\ndf_cp_L1000_cnn_test = pd.read_csv(os.path.join(model_preds_dir, 'cp_L1000_test_preds_1dcnn.csv'))\n\n\n# In[12]:\n\n\nprint(df_cp_L1000_cnn_test.shape)\ndf_cp_L1000_cnn_test.head()\n\n\n# In[13]:\n\n\n##tabnet\ndf_cp_tabnet_test = pd.read_csv(os.path.join(model_preds_dir, f'cp_test_preds_tabnet{file_indicator}.csv'))\ndf_L1000_tabnet_test = pd.read_csv(os.path.join(model_preds_dir, 'L1000_test_preds_tabnet.csv'))\ndf_cp_L1000_tabnet_test = pd.read_csv(os.path.join(model_preds_dir, 'cp_L1000_test_preds_tabnet.csv'))\n\n\n# In[14]:\n\n\ndf_cp_L1000_tabnet_test.shape\n\n\n# In[15]:\n\n\n##stagedNN\ndf_cp_simplenn_test = pd.read_csv(os.path.join(model_preds_dir, f'cp_test_preds_simplenn{file_indicator}.csv'))\ndf_L1000_simplenn_test = pd.read_csv(os.path.join(model_preds_dir, 'L1000_test_preds_simplenn.csv'))\ndf_cp_L1000_simplenn_test = pd.read_csv(os.path.join(model_preds_dir, 'cp_L1000_test_preds_simplenn.csv'))\n\n\n# In[16]:\n\n\ndf_cp_L1000_simplenn_test.shape\n\n\n# In[17]:\n\n\ndf_cp_tst_targets = df_cp_test[df_cp_cnn_test.columns]\ndf_L1000_tst_targets = df_L1000_test[df_L1000_cnn_test.columns]\ndf_cp_L1000_tst_targets = df_cp_L1000_test[df_cp_L1000_cnn_test.columns]\n\n\n# In[18]:\n\n\ndf_cp_tst_targets.shape\n\n\n# In[19]:\n\n\ndf_L1000_tst_targets.shape\n\n\n# In[20]:\n\n\ndf_cp_L1000_tst_targets.shape\n\n\n# #### - Resnet, 1d-cnn, Tabnet, Simplenn --> 4 model predictions\n\n# In[21]:\n\n\n# CPMP's logloss from https://www.kaggle.com/c/lish-moa/discussion/183010\ndef log_loss_numpy(y_true, y_pred):\n 
y_true_ravel = np.asarray(y_true).ravel()\n y_pred = np.asarray(y_pred).ravel()\n y_pred = np.clip(y_pred, 1e-15, 1 - 1e-15)\n loss = np.where(y_true_ravel == 1, - np.log(y_pred), - np.log(1 - y_pred))\n return loss.mean()\n\ndef func_numpy_metric(weights, oof, y_true):\n oof_blend = np.tensordot(weights, oof, axes = ((0), (0)))\n return log_loss_numpy(y_true, oof_blend)\n\ndef grad_func(weights, oof, y_true):\n oof_clip = np.clip(oof, 1e-15, 1 - 1e-15)\n gradients = np.zeros(oof.shape[0])\n for i in range(oof.shape[0]):\n a, b, c = y_true, oof_clip[i], np.zeros((oof.shape[1], oof.shape[2]))\n for j in range(oof.shape[0]):\n if j != i:\n c += weights[j] * oof_clip[j]\n gradients[i] = -np.mean((-a*b+(b**2)*weights[i]+b*c)/((b**2)*(weights[i]**2)+2*b*c*weights[i]-b*weights[i]+(c**2)-c))\n return gradients\n\n@njit\ndef grad_func_jit(weights, oof, y_true):\n oof_clip = np.minimum(1 - 1e-15, np.maximum(oof, 1e-15))\n gradients = np.zeros(oof.shape[0])\n for i in range(oof.shape[0]):\n a, b, c = y_true, oof_clip[i], np.zeros((oof.shape[1], oof.shape[2]))\n for j in range(oof.shape[0]):\n if j != i:\n c += weights[j] * oof_clip[j]\n gradients[i] = -np.mean((-a*b+(b**2)*weights[i]+b*c)/((b**2)*(weights[i]**2)+2*b*c*weights[i]-b*weights[i]+(c**2)-c))\n return gradients\n\n\n# In[22]:\n\n\ncp_model_preds = [df_cp_cnn_test, df_cp_resnet_test, df_cp_tabnet_test, df_cp_simplenn_test]\nL1000_model_preds = [df_L1000_cnn_test, df_L1000_resnet_test, df_L1000_tabnet_test, df_L1000_simplenn_test]\ncp_L1000_model_preds = [df_cp_L1000_cnn_test, df_cp_L1000_resnet_test, df_cp_L1000_tabnet_test, df_cp_L1000_simplenn_test]\n\n\n# In[23]:\n\n\nmodels_name = ['1d-Cnn', 'Resnet', 'Tabnet', 'SimpleNN']\n\ndef get_optmized_blended_weights(model_oofs, df_targets, num_of_models = 4, models_name = models_name):\n \"\"\"\n This function assign weights to each of the models used in predicting MOAs based on the log-loss obtained \n when comparing each model prediction results with the actual MOA (Mechanism of actions) test labels.\n\n for more info:https://www.kaggle.com/gogo827jz/optimise-blending-weights-with-bonus-0/notebook\n \"\"\"\n model_oof_preds = np.zeros((num_of_models, df_targets.shape[0], df_targets.shape[1]))\n for idx in range(num_of_models):\n model_oof_preds[idx] = model_oofs[idx].values\n score_oof = log_loss_numpy(df_targets, model_oof_preds[idx])\n print(f'{idx} {models_name[idx]}, Test loss:\\t', score_oof)\n \n tol = 1e-10\n init_guess = [1 / model_oof_preds.shape[0]] * model_oof_preds.shape[0]\n bnds = [(0, 1) for _ in range(model_oof_preds.shape[0])]\n cons = {\n 'type': 'eq',\n 'fun': lambda x: np.sum(x) - 1,\n 'jac': lambda x: [1] * len(x)\n }\n print('Inital Blend OOF:', func_numpy_metric(init_guess, model_oof_preds, df_targets.values))\n \n start_time = time()\n \n res_scipy = minimize(fun = func_numpy_metric, x0 = init_guess, \n args=(model_oof_preds, df_targets.values), \n method = 'SLSQP', ##L-BFGS-B ##SLSQP\n jac = grad_func_jit, # grad_func \n bounds = bnds, constraints = cons, tol = tol)\n print(f'[{str(datetime.timedelta(seconds = time() - start_time))[2:7]}] Optimised Blend OOF:', res_scipy.fun)\n print('Optimised Weights:', res_scipy.x)\n return model_oof_preds, res_scipy.x\n\n\n# In[24]:\n\n\n_, L1000_model_weights = get_optmized_blended_weights(L1000_model_preds, df_L1000_tst_targets,)\n\n\n# In[25]:\n\n\n_, cp_model_weights = get_optmized_blended_weights(cp_model_preds, df_cp_tst_targets,)\n\n\n# In[26]:\n\n\n_, cp_L1000_model_weights = 
get_optmized_blended_weights(cp_L1000_model_preds, df_cp_L1000_tst_targets)\n\n\n# In[27]:\n\n\ndef model_eval_results(df_tst, df_tst_y, df_preds):\n \"\"\"\n This function prints out the model evaluation results from the train and test predictions.\n The evaluation metrics used in assessing the performance of the models are: ROC AUC score,\n log loss and Precision-Recall AUC score\n \"\"\"\n eval_metrics = ['log loss', 'ROC AUC score', 'PR-AUC/Average_precision_score',]\n moa_class_list = df_tst['moa'].unique()\n val_moas = [moa for moa_list in moa_class_list for moa in moa_list.split('|')]\n print('-' * 10, 'Test data prediction results', '-' * 10)\n print(f'{eval_metrics[0]}:', log_loss(np.ravel(df_tst_y), np.ravel(df_preds)))\n print(f'{eval_metrics[1]}:', roc_auc_score(df_tst_y[val_moas],df_preds[val_moas], average='macro'))\n print(f'{eval_metrics[2]}:', average_precision_score(df_tst_y[val_moas], df_preds[val_moas], average=\"micro\"))\n\n\n# In[28]:\n\n\n##[1.57502187e-01,1.15142271e-16,0.00000000e+00,8.42497813e-01] <-- modify the model weights\ndf_L1000_blend = pd.DataFrame(np.zeros(df_L1000_cnn_test.shape), columns = df_L1000_cnn_test.columns)\ndf_L1000_blend = df_L1000_cnn_test*0.45 + df_L1000_resnet_test*0.05 + df_L1000_tabnet_test*0.05 + df_L1000_simplenn_test*0.45\n\n\n# In[29]:\n\n\n0.45+(0.05*2)+0.45\n\n\n# In[30]:\n\n\nmodel_eval_results(df_L1000_test, df_L1000_tst_targets, df_L1000_blend)\n\n\n# In[31]:\n\n\n##[4.29598527e-01 3.27312317e-01 2.43089156e-01 5.42101086e-18] <-- modify the model weights\ndf_cp_blend = pd.DataFrame(np.zeros(df_cp_cnn_test.shape), columns = df_cp_cnn_test.columns)\ndf_cp_blend = df_cp_cnn_test*0.35 + df_cp_resnet_test*0.35 + df_cp_tabnet_test*0.25 + df_cp_simplenn_test*0.05\n\n\n# In[32]:\n\n\n0.35+0.35+0.25+0.05\n\n\n# In[33]:\n\n\nmodel_eval_results(df_cp_test, df_cp_tst_targets, df_cp_blend)\n\n\n# In[34]:\n\n\n##[0.28574384 0.09796798 0.06528908 0.5509991 ] <-- modify the model weights\ndf_cp_L1000_blend = pd.DataFrame(np.zeros(df_cp_L1000_cnn_test.shape), columns = df_cp_L1000_cnn_test.columns)\ndf_cp_L1000_blend = df_cp_L1000_cnn_test*0.30 + df_cp_L1000_resnet_test*0.20 + df_cp_L1000_tabnet_test*0.15 + df_cp_L1000_simplenn_test*0.35\n\n\n# In[35]:\n\n\n0.30+0.20+0.15+0.35\n\n\n# In[36]:\n\n\nmodel_eval_results(df_cp_L1000_test, df_cp_L1000_tst_targets, df_cp_L1000_blend)\n\n\n# In[37]:\n\n\ndef save_to_csv(df, path, file_name, compress=None):\n \"\"\"save dataframes to csv\"\"\"\n \n if not os.path.exists(path):\n os.mkdir(path)\n \n df.to_csv(os.path.join(path, file_name), index=False, compression=compress)\n\n\n# In[38]:\n\n\nsave_to_csv(df_cp_blend, model_preds_dir, f'cp_test_preds_blend{file_indicator}.csv')\nsave_to_csv(df_L1000_blend, model_preds_dir, 'L1000_test_preds_blend.csv')\nsave_to_csv(df_cp_L1000_blend, model_preds_dir, 'cp_L1000_test_preds_blend.csv')\n\n"
] | [
[
"numpy.sum",
"numpy.zeros",
"pandas.read_csv",
"scipy.optimize.minimize",
"numpy.asarray",
"numpy.tensordot",
"sklearn.metrics.roc_auc_score",
"numpy.ravel",
"numpy.clip",
"numpy.log",
"numpy.maximum",
"sklearn.metrics.average_precision_score",
"numpy.mean"
]
] |
dbis-uibk/MediaEval2021 | [
"14d754d9cea36415090aaa115db81f5ace465964"
] | [
"plans/fixed_ensemble_vggish_linear_4.py"
] | [
"\"\"\"Ensemble plan manually split by type moode/theme.\"\"\"\nimport json\n\nfrom dbispipeline.evaluators import FixedSplitEvaluator\nfrom dbispipeline.evaluators import ModelCallbackWrapper\nimport numpy as np\nfrom sklearn.pipeline import Pipeline\n\nfrom mediaeval2021 import common\nfrom mediaeval2021.dataloaders.melspectrograms import MelSpectPickleLoader\nfrom mediaeval2021.models.ensemble import Ensemble\nfrom mediaeval2021.models.wrapper import TorchWrapper\n\ndataloader = MelSpectPickleLoader('data/mediaeval2020/melspect_1366.pickle')\n\nlabel_splits = [\n np.arange(0, 14, 1),\n np.arange(14, 28, 1),\n np.arange(28, 42, 1),\n np.arange(42, 56, 1),\n]\n\npipeline = Pipeline([\n ('model',\n Ensemble(\n base_estimator=TorchWrapper(\n model_name='CNN',\n dataloader=dataloader,\n batch_size=64,\n ),\n label_splits=label_splits,\n epochs=100,\n )),\n])\n\nevaluator = ModelCallbackWrapper(\n FixedSplitEvaluator(**common.fixed_split_params()),\n lambda model: common.store_prediction(model, dataloader),\n)\n\nresult_handlers = [\n lambda results: print(json.dumps(results, indent=4)),\n]\n"
] | [
[
"numpy.arange"
]
] |
veronicatozzo/regain | [
"5eaa9685eb34afa77abaf80a4e5764444bc95dd7"
] | [
"regain/covariance/time_graphical_lasso_.py"
] | [
"# BSD 3-Clause License\n\n# Copyright (c) 2017, Federico T.\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Sparse inverse covariance selection over time via ADMM.\n\nMore information can be found in the paper linked at:\nhttps://arxiv.org/abs/1703.01958\n\"\"\"\nfrom __future__ import division\n\nimport warnings\n\nimport numpy as np\nfrom scipy import linalg\nfrom six.moves import map, range, zip\nfrom sklearn.covariance import empirical_covariance, log_likelihood\nfrom sklearn.utils.extmath import squared_norm\nfrom sklearn.utils.validation import check_X_y\n\nfrom regain.covariance.graphical_lasso_ import GraphicalLasso, logl\nfrom regain.norm import l1_od_norm\nfrom regain.prox import prox_logdet, soft_thresholding\nfrom regain.update_rules import update_rho\nfrom regain.utils import convergence, error_norm_time\nfrom regain.validation import check_norm_prox\n\n\ndef loss(S, K, n_samples=None):\n \"\"\"Loss function for time-varying graphical lasso.\"\"\"\n if n_samples is None:\n n_samples = np.ones(S.shape[0])\n return sum(\n -ni * logl(emp_cov, precision)\n for emp_cov, precision, ni in zip(S, K, n_samples))\n\n\ndef objective(n_samples, S, K, Z_0, Z_1, Z_2, alpha, beta, psi):\n \"\"\"Objective function for time-varying graphical lasso.\"\"\"\n obj = loss(S, K, n_samples=n_samples)\n\n if isinstance(alpha, np.ndarray):\n obj += sum(l1_od_norm(a * z) for a, z in zip(alpha, Z_0))\n else:\n obj += alpha * sum(map(l1_od_norm, Z_0))\n\n if isinstance(beta, np.ndarray):\n obj += sum(b[0][0] * m for b, m in zip(beta, map(psi, Z_2 - Z_1)))\n else:\n obj += beta * sum(map(psi, Z_2 - Z_1))\n\n return obj\n\n\ndef init_precision(emp_cov, mode='empirical'):\n if isinstance(mode, np.ndarray):\n return mode.copy()\n\n if mode == 'empirical':\n n_times, _, n_features = emp_cov.shape\n covariance_ = emp_cov.copy()\n covariance_ *= 0.95\n K = np.empty_like(emp_cov)\n for i, (c, e) in enumerate(zip(covariance_, emp_cov)):\n c.flat[::n_features + 1] = e.flat[::n_features + 1]\n K[i] = linalg.pinvh(c)\n elif mode == 'zeros':\n K = 
np.zeros_like(emp_cov)\n\n return K\n\n\ndef time_graphical_lasso(\n emp_cov, alpha=0.01, rho=1, beta=1, max_iter=100, n_samples=None,\n verbose=False, psi='laplacian', tol=1e-4, rtol=1e-4,\n return_history=False, return_n_iter=True, mode='admm',\n compute_objective=True, stop_at=None, stop_when=1e-4,\n update_rho_options=None, init='empirical'):\n \"\"\"Time-varying graphical lasso solver.\n\n Solves the following problem via ADMM:\n min sum_{i=1}^T -n_i log_likelihood(S_i, K_i) + alpha*||K_i||_{od,1}\n + beta sum_{i=2}^T Psi(K_i - K_{i-1})\n\n where S_i = (1/n_i) X_i^T \\times X_i is the empirical covariance of data\n matrix X (training observations by features).\n\n Parameters\n ----------\n emp_cov : ndarray, shape (n_features, n_features)\n Empirical covariance of data.\n alpha, beta : float, optional\n Regularisation parameter.\n rho : float, optional\n Augmented Lagrangian parameter.\n max_iter : int, optional\n Maximum number of iterations.\n n_samples : ndarray\n Number of samples available for each time point.\n tol : float, optional\n Absolute tolerance for convergence.\n rtol : float, optional\n Relative tolerance for convergence.\n return_history : bool, optional\n Return the history of computed values.\n return_n_iter : bool, optional\n Return the number of iteration before convergence.\n verbose : bool, default False\n Print info at each iteration.\n update_rho_options : dict, optional\n Arguments for the rho update.\n See regain.update_rules.update_rho function for more information.\n compute_objective : bool, default True\n Choose to compute the objective value.\n init : {'empirical', 'zero', ndarray}\n Choose how to initialize the precision matrix, with the inverse\n empirical covariance, zero matrix or precomputed.\n\n Returns\n -------\n K : numpy.array, 3-dimensional (T x d x d)\n Solution to the problem for each time t=1...T .\n history : list\n If return_history, then also a structure that contains the\n objective value, the primal and dual residual norms, and tolerances\n for the primal and dual residual norms at each iteration.\n\n \"\"\"\n psi, prox_psi, psi_node_penalty = check_norm_prox(psi)\n\n Z_0 = init_precision(emp_cov, mode=init)\n Z_1 = Z_0.copy()[:-1] # np.zeros_like(emp_cov)[:-1]\n Z_2 = Z_0.copy()[1:] # np.zeros_like(emp_cov)[1:]\n\n U_0 = np.zeros_like(Z_0)\n U_1 = np.zeros_like(Z_1)\n U_2 = np.zeros_like(Z_2)\n\n Z_0_old = np.zeros_like(Z_0)\n Z_1_old = np.zeros_like(Z_1)\n Z_2_old = np.zeros_like(Z_2)\n\n # divisor for consensus variables, accounting for two less matrices\n divisor = np.full(emp_cov.shape[0], 3, dtype=float)\n divisor[0] -= 1\n divisor[-1] -= 1\n\n if n_samples is None:\n n_samples = np.ones(emp_cov.shape[0])\n\n checks = [\n convergence(\n obj=objective(\n n_samples, emp_cov, Z_0, Z_0, Z_1, Z_2, alpha, beta, psi))\n ]\n for iteration_ in range(max_iter):\n # update K\n A = Z_0 - U_0\n A[:-1] += Z_1 - U_1\n A[1:] += Z_2 - U_2\n A /= divisor[:, None, None]\n # soft_thresholding_ = partial(soft_thresholding, lamda=alpha / rho)\n # K = np.array(map(soft_thresholding_, A))\n A += A.transpose(0, 2, 1)\n A /= 2.\n\n A *= -rho * divisor[:, None, None] / n_samples[:, None, None]\n A += emp_cov\n\n K = np.array(\n [\n prox_logdet(a, lamda=ni / (rho * div))\n for a, div, ni in zip(A, divisor, n_samples)\n ])\n\n # update Z_0\n A = K + U_0\n A += A.transpose(0, 2, 1)\n A /= 2.\n Z_0 = soft_thresholding(A, lamda=alpha / rho)\n\n # other Zs\n A_1 = K[:-1] + U_1\n A_2 = K[1:] + U_2\n if not psi_node_penalty:\n prox_e = prox_psi(A_2 - A_1, 
lamda=2. * beta / rho)\n Z_1 = .5 * (A_1 + A_2 - prox_e)\n Z_2 = .5 * (A_1 + A_2 + prox_e)\n else:\n Z_1, Z_2 = prox_psi(\n np.concatenate((A_1, A_2), axis=1), lamda=.5 * beta / rho,\n rho=rho, tol=tol, rtol=rtol, max_iter=max_iter)\n\n # update residuals\n U_0 += K - Z_0\n U_1 += K[:-1] - Z_1\n U_2 += K[1:] - Z_2\n\n # diagnostics, reporting, termination checks\n rnorm = np.sqrt(\n squared_norm(K - Z_0) + squared_norm(K[:-1] - Z_1) +\n squared_norm(K[1:] - Z_2))\n\n snorm = rho * np.sqrt(\n squared_norm(Z_0 - Z_0_old) + squared_norm(Z_1 - Z_1_old) +\n squared_norm(Z_2 - Z_2_old))\n\n obj = objective(\n n_samples, emp_cov, Z_0, K, Z_1, Z_2, alpha, beta, psi) \\\n if compute_objective else np.nan\n\n # if np.isinf(obj):\n # Z_0 = Z_0_old\n # break\n\n check = convergence(\n obj=obj,\n rnorm=rnorm,\n snorm=snorm,\n e_pri=np.sqrt(K.size + 2 * Z_1.size) * tol + rtol * max(\n np.sqrt(\n squared_norm(Z_0) + squared_norm(Z_1) + squared_norm(Z_2)),\n np.sqrt(\n squared_norm(K) + squared_norm(K[:-1]) +\n squared_norm(K[1:]))),\n e_dual=np.sqrt(K.size + 2 * Z_1.size) * tol + rtol * rho *\n np.sqrt(squared_norm(U_0) + squared_norm(U_1) + squared_norm(U_2)),\n # precision=Z_0.copy()\n )\n Z_0_old = Z_0.copy()\n Z_1_old = Z_1.copy()\n Z_2_old = Z_2.copy()\n\n if verbose:\n print(\n \"obj: %.4f, rnorm: %.4f, snorm: %.4f,\"\n \"eps_pri: %.4f, eps_dual: %.4f\" % check[:5])\n\n checks.append(check)\n if stop_at is not None:\n if abs(check.obj - stop_at) / abs(stop_at) < stop_when:\n break\n\n if check.rnorm <= check.e_pri and check.snorm <= check.e_dual:\n break\n\n rho_new = update_rho(\n rho, rnorm, snorm, iteration=iteration_,\n **(update_rho_options or {}))\n # scaled dual variables should be also rescaled\n U_0 *= rho / rho_new\n U_1 *= rho / rho_new\n U_2 *= rho / rho_new\n rho = rho_new\n\n # assert is_pos_def(Z_0)\n else:\n warnings.warn(\"Objective did not converge.\")\n\n covariance_ = np.array([linalg.pinvh(x) for x in Z_0])\n return_list = [Z_0, covariance_]\n if return_history:\n return_list.append(checks)\n if return_n_iter:\n return_list.append(iteration_ + 1)\n return return_list\n\n\nclass TimeGraphicalLasso(GraphicalLasso):\n \"\"\"Sparse inverse covariance estimation with an l1-penalized estimator.\n\n Parameters\n ----------\n alpha : positive float, default 0.01\n Regularization parameter for precision matrix. 
The higher alpha,\n the more regularization, the sparser the inverse covariance.\n\n beta : positive float, default 1\n Regularization parameter to constrain precision matrices in time.\n The higher beta, the more regularization,\n and consecutive precision matrices in time are more similar.\n\n psi : {'laplacian', 'l1', 'l2', 'linf', 'node'}, default 'laplacian'\n Type of norm to enforce for consecutive precision matrices in time.\n\n rho : positive float, default 1\n Augmented Lagrangian parameter.\n\n over_relax : positive float, deafult 1\n Over-relaxation parameter (typically between 1.0 and 1.8).\n\n tol : positive float, default 1e-4\n Absolute tolerance to declare convergence.\n\n rtol : positive float, default 1e-4\n Relative tolerance to declare convergence.\n\n max_iter : integer, default 100\n The maximum number of iterations.\n\n verbose : boolean, default False\n If verbose is True, the objective function, rnorm and snorm are\n printed at each iteration.\n\n assume_centered : boolean, default False\n If True, data are not centered before computation.\n Useful when working with data whose mean is almost, but not exactly\n zero.\n If False, data are centered before computation.\n\n time_on_axis : {'first', 'last'}, default 'first'\n If data have time as the last dimension, set this to 'last'.\n Useful to use scikit-learn functions as train_test_split.\n\n update_rho_options : dict, default None\n Options for the update of rho. See `update_rho` function for details.\n\n compute_objective : boolean, default True\n Choose if compute the objective function during iterations\n (only useful if `verbose=True`).\n\n init : {'empirical', 'zeros', ndarray}, default 'empirical'\n How to initialise the inverse covariance matrix. Default is take\n the empirical covariance and inverting it.\n\n Attributes\n ----------\n covariance_ : array-like, shape (n_times, n_features, n_features)\n Estimated covariance matrix\n\n precision_ : array-like, shape (n_times, n_features, n_features)\n Estimated precision matrix.\n\n n_iter_ : int\n Number of iterations run.\n\n \"\"\"\n\n def __init__(\n self, alpha=0.01, beta=1., mode='admm', rho=1., tol=1e-4,\n rtol=1e-4, psi='laplacian', max_iter=100, verbose=False,\n assume_centered=False, return_history=False,\n update_rho_options=None, compute_objective=True, stop_at=None,\n stop_when=1e-4, suppress_warn_list=False, init='empirical'):\n super(TimeGraphicalLasso, self).__init__(\n alpha=alpha, rho=rho, tol=tol, rtol=rtol, max_iter=max_iter,\n verbose=verbose, assume_centered=assume_centered, mode=mode,\n update_rho_options=update_rho_options,\n compute_objective=compute_objective, init=init)\n self.beta = beta\n self.psi = psi\n self.return_history = return_history\n self.stop_at = stop_at\n self.stop_when = stop_when\n self.suppress_warn_list = suppress_warn_list\n\n def get_observed_precision(self):\n \"\"\"Getter for the observed precision matrix.\n\n Returns\n -------\n precision_ : array-like,\n The precision matrix associated to the current covariance object.\n\n \"\"\"\n return self.get_precision()\n\n def _fit(self, emp_cov, n_samples):\n \"\"\"Fit the TimeGraphicalLasso model to X.\n\n Parameters\n ----------\n emp_cov : ndarray, shape (n_time, n_features, n_features)\n Empirical covariance of data.\n\n \"\"\"\n\n out = time_graphical_lasso(\n emp_cov, alpha=self.alpha, rho=self.rho, beta=self.beta,\n mode=self.mode, n_samples=n_samples, tol=self.tol, rtol=self.rtol,\n psi=self.psi, max_iter=self.max_iter, verbose=self.verbose,\n 
return_n_iter=True, return_history=self.return_history,\n update_rho_options=self.update_rho_options,\n compute_objective=self.compute_objective, stop_at=self.stop_at,\n stop_when=self.stop_when, init=self.init)\n if self.return_history:\n self.precision_, self.covariance_, self.history_, self.n_iter_ = \\\n out\n else:\n self.precision_, self.covariance_, self.n_iter_ = out\n return self\n\n def fit(self, X, y):\n \"\"\"Fit the TimeGraphicalLasso model to X.\n\n Parameters\n ----------\n X : ndarray, shape = (n_samples * n_times, n_dimensions)\n Data matrix.\n y : ndarray, shape = (n_times,)\n Indicate the temporal belonging of each sample.\n\n \"\"\"\n # Covariance does not make sense for a single feature\n X, y = check_X_y(\n X, y, accept_sparse=False, dtype=np.float64, order=\"C\",\n ensure_min_features=2, estimator=self)\n\n n_dimensions = X.shape[1]\n self.classes_, n_samples = np.unique(y, return_counts=True)\n n_times = self.classes_.size\n\n # n_samples = np.array([x.shape[0] for x in X])\n if self.assume_centered:\n self.location_ = np.zeros((n_times, n_dimensions))\n else:\n self.location_ = np.array(\n [X[y == cl].mean(0) for cl in self.classes_])\n\n emp_cov = np.array(\n [\n empirical_covariance(\n X[y == cl], assume_centered=self.assume_centered)\n for cl in self.classes_\n ])\n\n return self._fit(emp_cov, n_samples)\n\n def score(self, X, y):\n \"\"\"Computes the log-likelihood of a Gaussian data set with\n `self.covariance_` as an estimator of its covariance matrix.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_features)\n Test data of which we compute the likelihood, where n_samples is\n the number of samples and n_features is the number of features.\n X is assumed to be drawn from the same distribution than\n the data used in fit (including centering).\n\n y : array-like, shape = (n_samples,)\n Class of samples.\n\n Returns\n -------\n res : float\n The likelihood of the data set with `self.covariance_` as an\n estimator of its covariance matrix.\n\n \"\"\"\n # Covariance does not make sense for a single feature\n X, y = check_X_y(\n X, y, accept_sparse=False, dtype=np.float64, order=\"C\",\n ensure_min_features=2, estimator=self)\n\n # compute empirical covariance of the test set\n test_cov = np.array(\n [\n empirical_covariance(\n X[y == cl] - self.location_[i], assume_centered=True)\n for i, cl in enumerate(self.classes_)\n ])\n\n res = sum(\n X[y == cl].shape[0] * log_likelihood(S, K) for S, K, cl in zip(\n test_cov, self.get_observed_precision(), self.classes_))\n\n return res\n\n def error_norm(\n self, comp_cov, norm='frobenius', scaling=True, squared=True):\n \"\"\"Compute the Mean Squared Error between two covariance estimators.\n (In the sense of the Frobenius norm).\n\n Parameters\n ----------\n comp_cov : array-like, shape = [n_features, n_features]\n The covariance to compare with.\n\n norm : str\n The type of norm used to compute the error. 
Available error types:\n - 'frobenius' (default): sqrt(tr(A^t.A))\n - 'spectral': sqrt(max(eigenvalues(A^t.A))\n where A is the error ``(comp_cov - self.covariance_)``.\n\n scaling : bool\n If True (default), the squared error norm is divided by n_features.\n If False, the squared error norm is not rescaled.\n\n squared : bool\n Whether to compute the squared error norm or the error norm.\n If True (default), the squared error norm is returned.\n If False, the error norm is returned.\n\n Returns\n -------\n The Mean Squared Error (in the sense of the Frobenius norm) between\n `self` and `comp_cov` covariance estimators.\n\n \"\"\"\n return error_norm_time(\n self.covariance_, comp_cov, norm=norm, scaling=scaling,\n squared=squared)\n"
] | [
[
"numpy.zeros_like",
"numpy.ones",
"numpy.zeros",
"sklearn.utils.extmath.squared_norm",
"numpy.empty_like",
"sklearn.covariance.empirical_covariance",
"sklearn.covariance.log_likelihood",
"scipy.linalg.pinvh",
"numpy.sqrt",
"numpy.concatenate",
"numpy.full",
"sklearn.utils.validation.check_X_y",
"numpy.unique"
]
] |
peterataylor/evalml | [
"917f07845c4a319bb08c7aaa8df9e09623df11c8"
] | [
"evalml/tests/component_tests/test_prophet_regressor.py"
] | [
"import numpy as np\nimport pandas as pd\nimport pytest\nfrom pytest import importorskip\n\nfrom evalml.model_family import ModelFamily\nfrom evalml.pipelines.components import ProphetRegressor\nfrom evalml.problem_types import ProblemTypes\n\nprophet = importorskip(\"prophet\", reason=\"Skipping test because prophet not installed\")\n\n\ndef test_model_family():\n assert ProphetRegressor.model_family == ModelFamily.PROPHET\n\n\ndef test_cmdstanpy_backend():\n m = prophet.Prophet(stan_backend=\"CMDSTANPY\")\n assert m.stan_backend.get_type() == \"CMDSTANPY\"\n\n\ndef test_problem_types():\n assert set(ProphetRegressor.supported_problem_types) == {\n ProblemTypes.TIME_SERIES_REGRESSION\n }\n\n\ndef test_init_with_other_params():\n clf = ProphetRegressor(\n daily_seasonality=True,\n mcmc_samples=5,\n interval_width=0.8,\n uncertainty_samples=0,\n )\n assert clf.parameters == {\n \"changepoint_prior_scale\": 0.05,\n \"daily_seasonality\": True,\n \"date_index\": None,\n \"holidays_prior_scale\": 10,\n \"interval_width\": 0.8,\n \"mcmc_samples\": 5,\n \"seasonality_mode\": \"additive\",\n \"seasonality_prior_scale\": 10,\n \"uncertainty_samples\": 0,\n \"stan_backend\": \"CMDSTANPY\",\n }\n\n\ndef test_feature_importance(ts_data):\n X, y = ts_data\n clf = ProphetRegressor(uncertainty_samples=False, changepoint_prior_scale=2.0)\n clf.fit(X, y)\n clf.feature_importance == np.zeros(1)\n\n\ndef test_get_params(ts_data):\n clf = ProphetRegressor()\n assert clf.get_params() == {\n \"changepoint_prior_scale\": 0.05,\n \"date_index\": None,\n \"seasonality_prior_scale\": 10,\n \"holidays_prior_scale\": 10,\n \"seasonality_mode\": \"additive\",\n \"stan_backend\": \"CMDSTANPY\",\n }\n\n\ndef test_fit_predict_ts_with_X_index(ts_data):\n X, y = ts_data\n assert isinstance(X.index, pd.DatetimeIndex)\n\n p_clf = prophet.Prophet(uncertainty_samples=False, changepoint_prior_scale=2.0)\n prophet_df = ProphetRegressor.build_prophet_df(X=X, y=y, date_column=\"ds\")\n\n p_clf.fit(prophet_df)\n y_pred_p = p_clf.predict(prophet_df)[\"yhat\"]\n\n clf = ProphetRegressor(uncertainty_samples=False, changepoint_prior_scale=2.0)\n clf.fit(X, y)\n y_pred = clf.predict(X)\n np.array_equal(y_pred_p.values, y_pred.values)\n\n\ndef test_fit_predict_ts_with_y_index(ts_data):\n X, y = ts_data\n X = X.reset_index(drop=True)\n assert isinstance(y.index, pd.DatetimeIndex)\n\n p_clf = prophet.Prophet(uncertainty_samples=False, changepoint_prior_scale=2.0)\n prophet_df = ProphetRegressor.build_prophet_df(X=X, y=y, date_column=\"ds\")\n\n p_clf.fit(prophet_df)\n y_pred_p = p_clf.predict(prophet_df)[\"yhat\"]\n\n clf = ProphetRegressor(uncertainty_samples=False, changepoint_prior_scale=2.0)\n clf.fit(X, y)\n y_pred = clf.predict(X, y)\n\n np.array_equal(y_pred_p.values, y_pred.values)\n\n\ndef test_fit_predict_ts_no_X(ts_data):\n y = pd.Series(\n range(1, 32), name=\"dates\", index=pd.date_range(\"2020-10-01\", \"2020-10-31\")\n )\n\n p_clf = prophet.Prophet(uncertainty_samples=False, changepoint_prior_scale=2.0)\n prophet_df = ProphetRegressor.build_prophet_df(\n X=pd.DataFrame(), y=y, date_column=\"ds\"\n )\n p_clf.fit(prophet_df)\n y_pred_p = p_clf.predict(prophet_df)[\"yhat\"]\n\n clf = ProphetRegressor(uncertainty_samples=False, changepoint_prior_scale=2.0)\n clf.fit(X=None, y=y)\n y_pred = clf.predict(X=None, y=y)\n\n np.array_equal(y_pred_p.values, y_pred.values)\n\n\ndef test_fit_predict_date_col(ts_data):\n X = pd.DataFrame(\n {\n \"features\": range(100),\n \"these_dates\": pd.date_range(\"1/1/21\", periods=100),\n 
\"more_dates\": pd.date_range(\"7/4/1987\", periods=100),\n }\n )\n y = pd.Series(np.random.randint(1, 5, 100), name=\"y\")\n\n clf = ProphetRegressor(\n date_index=\"these_dates\", uncertainty_samples=False, changepoint_prior_scale=2.0\n )\n clf.fit(X, y)\n y_pred = clf.predict(X)\n\n p_clf = prophet.Prophet(uncertainty_samples=False, changepoint_prior_scale=2.0)\n prophet_df = ProphetRegressor.build_prophet_df(X=X, y=y, date_column=\"these_dates\")\n\n p_clf.fit(prophet_df)\n y_pred_p = p_clf.predict(prophet_df)[\"yhat\"]\n\n np.array_equal(y_pred_p.values, y_pred.values)\n\n\ndef test_fit_predict_no_date_col_or_index(ts_data):\n X, y = ts_data\n X = X.reset_index(drop=True)\n y = y.reset_index(drop=True)\n assert not isinstance(X.index, pd.DatetimeIndex)\n assert not isinstance(y.index, pd.DatetimeIndex)\n\n clf = ProphetRegressor()\n with pytest.raises(\n ValueError,\n match=\"Prophet estimator requires input data X to have a datetime column\",\n ):\n clf.fit(X, y)\n"
] | [
[
"pandas.date_range",
"numpy.zeros",
"pandas.DataFrame",
"numpy.array_equal",
"numpy.random.randint"
]
] |
choderalab/saltswap | [
"d30804beb158960a62f94182c694df6dd9130fb8"
] | [
"development/performance/langevin_error_example.py"
] | [
"import numpy as np\nfrom simtk import openmm, unit\nfrom simtk.openmm import app\nfrom openmmtools.testsystems import WaterBox\nfrom openmmtools.integrators import ExternalPerturbationLangevinIntegrator\nprint('OpenMM version: ', openmm.version.full_version)\n\n# Using one CPU thread\nimport os\nos.environ['OPENMM_CPU_THREADS'] = '1'\n\n# Long range method\nnonbonded_method = 'CutoffPeriodic'\n\n# Creating a waterbox\nwbox = WaterBox(box_edge=21.0*unit.angstrom , nonbondedMethod=getattr(app, nonbonded_method))\nwbox.system.addForce(openmm.MonteCarloBarostat(1*unit.atmospheres, 300*unit.kelvin))\n\n# Extracting the nonbonded force\nnon_bonded_force = wbox.system.getForce(2)\n\n# The integrator to perform the equilibrium dynamics\nintegrator = ExternalPerturbationLangevinIntegrator(temperature=300*unit.kelvin, collision_rate=50.0 / unit.picosecond, timestep=1.0 * unit.femtosecond)\n\n# Creating the context\nplatform = openmm.Platform.getPlatformByName('CPU')\ncontext = openmm.Context(wbox.system, integrator, platform)\ncontext.setPositions(wbox.positions)\n\n# Running some equilibrium dynamics\nintegrator.step(100)\n\n# The number of NCMC type iterations and NCMC steps per iteration.\nniterations = 20\nncmc_steps = 10\n\ninternal_work = np.zeros(niterations)\nexternal_work = np.zeros(niterations)\n\n# Whether to call updateParametersInContext. If True, then assertion below will fail.\nupdate_parameters = True\n\n# A model of NCMC without perturbation but using updateParametersInContext\nfor i in range(niterations):\n #integrator.reset_protocol_work()\n #integrator.setGlobalVariableByName('first_step',0)\n integrator.setGlobalVariableByName('protocol_work',0)\n for s in range(ncmc_steps):\n integrator.step(1)\n initial_external_energy = context.getState(getEnergy=True).getPotentialEnergy() / unit.kilojoule_per_mole\n ###---- Not perturbing the system but updating parameters anyway----###\n if update_parameters:\n non_bonded_force.updateParametersInContext(context)\n final_external_energy = context.getState(getEnergy=True).getPotentialEnergy() / unit.kilojoule_per_mole\n integrator.step(1)\n internal_work[i] = integrator.getGlobalVariableByName('protocol_work')\n external_work[i] = final_external_energy - initial_external_energy\nassert np.all(np.abs(internal_work - external_work) < 1E-5)\n"
] | [
[
"numpy.abs",
"numpy.zeros"
]
] |